
Commit 8707cfce authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents 1f818744 28c3fcf9
@@ -60,6 +60,17 @@ impl MuType {
v: v
}
}
pub fn get_referenced_ty(&self) -> Option<P<MuType>> {
use types::MuType_::*;
match self.v {
Ref(ref ty)
| IRef(ref ty)
| WeakRef(ref ty)
| UPtr(ref ty) => Some(ty.clone()),
_ => None
}
}
}
pub type StructTag = MuName;
......
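A minimal sketch of what the new get_referenced_ty accessor does, reusing the declare_type/MuType_ constructors that appear in the tests later in this commit (illustrative fragment, not part of the diff):

// get_referenced_ty unwraps one level of Ref/IRef/WeakRef/UPtr.
let int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
let iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(int64.clone()));
assert!(iref_int64.get_referenced_ty().is_some()); // yields the referenced int64 type
assert!(int64.get_referenced_ty().is_none());      // int<64> is not a reference type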
@@ -74,7 +74,7 @@ pub trait CodeGenerator {
fn emit_mov_mem8_imm8 (&mut self, dest: Mem, src: i8);
// lea
-fn emit_lea_r64(&mut self, dest: Reg, src: Reg);
+fn emit_lea_r64(&mut self, dest: Reg, src: Mem);
// and
fn emit_and_r64_imm32(&mut self, dest: Reg, src: i32);
......
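The corrected overload takes a memory operand, matching x86-64 LEA, which computes an effective address (base + index*scale + displacement) without performing a load. A hedged sketch of a call site, using names that appear later in this diff:

// mem_addr describes [base + index*scale + offset]; lea materializes that
// address into tmp_res, e.g. emitting: lea rax, [rbx + rcx*8 + 16]
self.backend.emit_lea_r64(&tmp_res, &mem_addr);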
@@ -805,19 +805,16 @@ impl <'a> InstructionSelection {
unimplemented!()
}
}
-Instruction_::GetIRef(op_index) => {
-let ops = inst.ops.read().unwrap();
-let ref op = ops[op_index];
-let res_tmp = self.get_result_value(node);
-let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
-if hdr_size == 0 {
-self.emit_move_node_to_value(&res_tmp, &op, f_content, f_context, vm);
-} else {
-self.emit_lea_base_immoffset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
-}
+// memory insts: calculate the address, then lea
+Instruction_::GetIRef(_)
+| Instruction_::GetFieldIRef{..}
+| Instruction_::GetVarPartIRef{..}
+| Instruction_::ShiftIRef{..} => {
+let mem_addr = self.emit_get_mem_from_inst(node, f_content, f_context, vm);
+let tmp_res = self.get_result_value(node);
+self.backend.emit_lea_r64(&tmp_res, &mem_addr);
+}
Instruction_::ThreadExit => {
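The four iref-producing instructions now share a single lowering path: fold the whole access chain into one MemoryLocation, then emit a single LEA. A hedged sketch of the folding this enables (offsets hypothetical):

// GETIREF %r                          => [%r + HDR_SIZE]
// GETFIELDIREF (GETIREF %r) field i   => [%r + HDR_SIZE + field_offset(i)]
// GETVARPARTIREF (GETIREF %r)         => [%r + HDR_SIZE + fix_part_size]
// SHIFTIREF (GETVARPARTIREF ...) %i   => [%r + ... + %i * elem_size]
// then a single lea materializes the folded address into the result temporary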
@@ -973,15 +970,15 @@ impl <'a> InstructionSelection {
})
}
-fn make_memory_op_base_offsetreg(&mut self, base: &P<Value>, offset: &P<Value>, ty: P<MuType>, vm: &VM) -> P<Value> {
+fn make_memory_op_base_index(&mut self, base: &P<Value>, index: &P<Value>, scale: u8, ty: P<MuType>, vm: &VM) -> P<Value> {
P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(),
v: Value_::Memory(MemoryLocation::Address{
base: base.clone(),
-offset: Some(offset.clone()),
-index: None,
-scale: None
+offset: None,
+index: Some(index.clone()),
+scale: Some(scale)
})
})
}
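For example (hypothetical operands, inside the instruction selector), the renamed constructor now yields a scaled-index operand rather than a register offset:

// builds [base + index*8] with no displacement, e.g. for int64 element access
let loc = self.make_memory_op_base_index(&base, &index, 8, ADDRESS_TYPE.clone(), vm);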
@@ -1256,7 +1253,7 @@ impl <'a> InstructionSelection {
fn emit_udiv (
&mut self,
-op1: &P<TreeNode>, op2: &P<TreeNode>,
+op1: &TreeNode, op2: &TreeNode,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM)
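This &P<TreeNode> → &TreeNode loosening (repeated across the helpers below) works because P<T> behaves as a smart pointer, so existing call sites holding a P<TreeNode> keep compiling via auto-deref. A standalone sketch of the pattern, with std::rc::Rc standing in for P:

use std::rc::Rc;

struct TreeNode; // stand-in for the real IR node

fn emit_udiv(op1: &TreeNode, op2: &TreeNode) { /* ... */ }

fn caller(a: Rc<TreeNode>, b: Rc<TreeNode>) {
    emit_udiv(&a, &b); // &Rc<TreeNode> auto-derefs to &TreeNode
}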
@@ -1295,7 +1292,7 @@ impl <'a> InstructionSelection {
fn emit_idiv (
&mut self,
-op1: &P<TreeNode>, op2: &P<TreeNode>,
+op1: &TreeNode, op2: &TreeNode,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM)
@@ -1875,7 +1872,7 @@ impl <'a> InstructionSelection {
self.backend.emit_pop_r64(&x86_64::RBP);
}
-fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
+fn match_cmp_res(&mut self, op: &TreeNode) -> bool {
match op.v {
TreeNode_::Instruction(ref inst) => {
match inst.v {
@@ -1887,7 +1884,7 @@ impl <'a> InstructionSelection {
}
}
-fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
+fn emit_cmp_res(&mut self, cond: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
match cond.v {
TreeNode_::Instruction(ref inst) => {
let ops = inst.ops.read().unwrap();
@@ -1982,7 +1979,7 @@ impl <'a> InstructionSelection {
}
}
-fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+fn emit_ireg(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm);
@@ -2021,7 +2018,7 @@ impl <'a> InstructionSelection {
}
}
-fn emit_fpreg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+fn emit_fpreg(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm);
@@ -2037,21 +2034,21 @@ impl <'a> InstructionSelection {
}
}
-fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
+fn match_iimm(&mut self, op: &TreeNode) -> bool {
match op.v {
TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
_ => false
}
}
-fn node_iimm_to_i32(&mut self, op: &P<TreeNode>) -> i32 {
+fn node_iimm_to_i32(&mut self, op: &TreeNode) -> i32 {
match op.v {
TreeNode_::Value(ref pv) => self.value_iimm_to_i32(pv),
_ => panic!("expected iimm")
}
}
-fn node_iimm_to_value(&mut self, op: &P<TreeNode>) -> P<Value> {
+fn node_iimm_to_value(&mut self, op: &TreeNode) -> P<Value> {
match op.v {
TreeNode_::Value(ref pv) => {
pv.clone()
@@ -2071,7 +2068,7 @@ impl <'a> InstructionSelection {
}
}
-fn emit_node_addr_to_value(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+fn emit_node_addr_to_value(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
@@ -2108,19 +2105,74 @@ impl <'a> InstructionSelection {
TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, f_content, f_context, vm)
}
}
-fn emit_get_mem_from_inst(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
-let header_size = mm::objectmodel::OBJECT_HEADER_SIZE as i32;
+fn emit_get_mem_from_inst(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+let mem = self.emit_get_mem_from_inst_inner(op, f_content, f_context, vm);
+P(Value{
+hdr: MuEntityHeader::unnamed(vm.next_id()),
+ty: ADDRESS_TYPE.clone(),
+v: Value_::Memory(mem)
+})
+}
+fn addr_const_offset_adjust(&mut self, mem: MemoryLocation, more_offset: u64, vm: &VM) -> MemoryLocation {
+match mem {
+MemoryLocation::Address { base, offset, index, scale } => {
+let new_offset = match offset {
+Some(pv) => {
+let old_offset = pv.extract_int_const();
+old_offset + more_offset
+},
+None => more_offset
+};
+MemoryLocation::Address {
+base: base,
+offset: Some(self.make_value_int_const(new_offset, vm)),
+index: index,
+scale: scale
+}
+},
+_ => panic!("expected an address memory location")
+}
+}
+fn addr_append_index_scale(&mut self, mem: MemoryLocation, index: P<Value>, scale: u8, vm: &VM) -> MemoryLocation {
+match mem {
+MemoryLocation::Address {base, offset, ..} => {
+MemoryLocation::Address {
+base: base,
+offset: offset,
+index: Some(index),
+scale: Some(scale)
+}
+},
+_ => panic!("expected an address memory location")
+}
+}
+fn emit_get_mem_from_inst_inner(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> MemoryLocation {
+let header_size = mm::objectmodel::OBJECT_HEADER_SIZE as u64;
match op.v {
TreeNode_::Instruction(ref inst) => {
let ref ops = inst.ops.read().unwrap();
match inst.v {
+// GETIREF -> [base + HDR_SIZE]
Instruction_::GetIRef(op_index) => {
-let ref op = ops[op_index];
+let ref ref_op = ops[op_index];
+let ret = MemoryLocation::Address {
+base: ref_op.clone_value(),
+offset: Some(self.make_value_int_const(header_size, vm)),
+index: None,
+scale: None
+};
-self.make_memory_op_base_offset(&op.clone_value(), header_size, ADDRESS_TYPE.clone(), vm)
+trace!("MEM from GETIREF: {}", ret);
+ret
}
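// Hedged walk-through of the two folding helpers above (sizes hypothetical):
//   GETIREF                        -> [base + 8]            (if HDR_SIZE = 8)
//   addr_const_offset_adjust(+16)  -> [base + 24]
//   addr_append_index_scale(%i, 8) -> [base + 24 + %i*8]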
Instruction_::GetFieldIRef{is_ptr, base, index} => {
let ref base = ops[base];
@@ -2135,29 +2187,149 @@ impl <'a> InstructionSelection {
}
};
-let ty_info = vm.get_backend_type_info(struct_ty.id());
-let layout = match ty_info.struct_layout.as_ref() {
-Some(layout) => layout,
-None => panic!("a struct type does not have a layout yet: {:?}", ty_info)
-};
-debug_assert!(layout.len() > index);
-let field_offset : i32 = layout[index] as i32;
+let field_offset : i32 = self.get_field_offset(&struct_ty, index, vm);
match base.v {
+// GETFIELDIREF(GETIREF) -> add FIELD_OFFSET to old offset
TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(op_index), ref ops, ..}) => {
-let ops_guard = ops.read().unwrap();
-let ref inner = ops_guard[op_index];
+let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
+let ret = self.addr_const_offset_adjust(mem, field_offset as u64, vm);
-self.make_memory_op_base_offset(&inner.clone_value(), header_size + field_offset, ADDRESS_TYPE.clone(), vm)
+trace!("MEM from GETFIELDIREF(GETIREF): {}", ret);
+ret
},
+// GETFIELDIREF(ireg) -> [base + FIELD_OFFSET]
+_ => {
+let tmp = self.emit_ireg(base, f_content, f_context, vm);
+let ret = MemoryLocation::Address {
+base: tmp,
+offset: Some(self.make_value_int_const(field_offset as u64, vm)),
+index: None,
+scale: None
+};
+trace!("MEM from GETFIELDIREF(ireg): {}", ret);
+ret
+}
+}
+}
+Instruction_::GetVarPartIRef{is_ptr, base} => {
+let ref base = ops[base];
+let struct_ty = match base.clone_value().ty.get_referenced_ty() {
+Some(ty) => ty,
+None => panic!("expecting an iref or uptr in GetVarPartIRef")
+};
+let fix_part_size = vm.get_backend_type_info(struct_ty.id()).size;
+match base.v {
+// GETVARPARTIREF(GETIREF) -> add FIX_PART_SIZE to old offset
+TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(_), ..}) => {
+let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
+let ret = self.addr_const_offset_adjust(mem, fix_part_size as u64, vm);
+trace!("MEM from GETVARPARTIREF(GETIREF): {}", ret);
+ret
+},
+// GETVARPARTIREF(ireg) -> [base + FIX_PART_SIZE]
_ => {
let tmp = self.emit_ireg(base, f_content, f_context, vm);
-self.make_memory_op_base_offset(&tmp, field_offset, ADDRESS_TYPE.clone(), vm)
+let ret = MemoryLocation::Address {
+base: tmp,
+offset: Some(self.make_value_int_const(fix_part_size as u64, vm)),
+index: None,
+scale: None
+};
+trace!("MEM from GETVARPARTIREF(ireg): {}", ret);
+ret
+}
}
}
+Instruction_::ShiftIRef{is_ptr, base, offset} => {
+let ref base = ops[base];
+let ref offset = ops[offset];
+let tmp_res = self.get_result_value(op);
+let ref base_ty = base.clone_value().ty;
+let ele_ty = match base_ty.get_referenced_ty() {
+Some(ty) => ty,
+None => panic!("expected op in ShiftIRef of type IRef, found type: {}", base_ty)
+};
+let ele_ty_size = vm.get_backend_type_info(ele_ty.id()).size;
+if self.match_iimm(offset) {
+let index = self.node_iimm_to_i32(offset);
+let shift_size = ele_ty_size as i32 * index;
+let mem = match base.v {
+// SHIFTIREF(GETVARPARTIREF(_), imm) -> add shift_size to old offset
+TreeNode_::Instruction(Instruction{v: Instruction_::GetVarPartIRef{..}, ..}) => {
+let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
+let ret = self.addr_const_offset_adjust(mem, shift_size as u64, vm);
+trace!("MEM from SHIFTIREF(GETVARPARTIREF(_), imm): {}", ret);
+ret
+},
+// SHIFTIREF(ireg, imm) -> [base + SHIFT_SIZE]
+_ => {
+let tmp = self.emit_ireg(base, f_content, f_context, vm);
+let ret = MemoryLocation::Address {
+base: tmp,
+offset: Some(self.make_value_int_const(shift_size as u64, vm)),
+index: None,
+scale: None
+};
+trace!("MEM from SHIFTIREF(ireg, imm): {}", ret);
+ret
+}
+};
+mem
+} else {
+let tmp_index = self.emit_ireg(offset, f_content, f_context, vm);
+let scale : u8 = match ele_ty_size {
+8 | 4 | 2 | 1 => ele_ty_size as u8,
+_ => unimplemented!()
+};
+let mem = match base.v {
+// SHIFTIREF(GETVARPARTIREF(_), ireg) -> add index and scale
+TreeNode_::Instruction(Instruction{v: Instruction_::GetVarPartIRef{..}, ..}) => {
+let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
+let ret = self.addr_append_index_scale(mem, tmp_index, scale, vm);
+trace!("MEM from SHIFTIREF(GETVARPARTIREF(_), ireg): {}", ret);
+ret
+},
+// SHIFTIREF(ireg, ireg) -> base + index * scale
+_ => {
+let tmp = self.emit_ireg(base, f_content, f_context, vm);
+let ret = MemoryLocation::Address {
+base: tmp,
+offset: None,
+index: Some(tmp_index),
+scale: Some(scale)
+};
+trace!("MEM from SHIFTIREF(ireg, ireg): {}", ret);
+ret
+}
+};
+mem
+}
+}
_ => unimplemented!()
}
},
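A concrete (hypothetical) reading of the ShiftIRef cases above, for an iref element pointer with an 8-byte element type:

// SHIFTIREF %p 3   (constant index) => folds to [%p + 24]
// SHIFTIREF %p %i  (runtime index)  => becomes [%p + %i*8]
// element sizes other than 1/2/4/8 fall through to the unimplemented!() arm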
@@ -2165,7 +2337,7 @@ impl <'a> InstructionSelection {
}
}
-fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
+fn match_funcref_const(&mut self, op: &TreeNode) -> bool {
match op.v {
TreeNode_::Value(ref pv) => {
let is_const = match pv.v {
@@ -2185,7 +2357,7 @@ impl <'a> InstructionSelection {
}
}
-fn node_funcref_const_to_id(&mut self, op: &P<TreeNode>) -> MuID {
+fn node_funcref_const_to_id(&mut self, op: &TreeNode) -> MuID {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
@@ -2198,7 +2370,7 @@ impl <'a> InstructionSelection {
}
#[allow(unused_variables)]
-fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
+fn match_mem(&mut self, op: &TreeNode) -> bool {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
@@ -2217,7 +2389,7 @@ impl <'a> InstructionSelection {
}
#[allow(unused_variables)]
-fn emit_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
+fn emit_mem(&mut self, op: &TreeNode, vm: &VM) -> P<Value> {
unimplemented!()
}
@@ -2243,7 +2415,7 @@ impl <'a> InstructionSelection {
}
}
-fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
+fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
@@ -2304,6 +2476,17 @@ impl <'a> InstructionSelection {
let tl = self.emit_get_threadlocal(None, f_content, f_context, vm);
self.emit_load_base_offset(exception_arg, &tl, *thread::EXCEPTION_OBJ_OFFSET as i32, vm);
}
fn get_field_offset(&mut self, ty: &P<MuType>, index: usize, vm: &VM) -> i32 {
let ty_info = vm.get_backend_type_info(ty.id());
let layout = match ty_info.struct_layout.as_ref() {
Some(layout) => layout,
None => panic!("a struct type does not have a layout yet: {:?}", ty_info)
};
debug_assert!(layout.len() > index);
layout[index] as i32
}
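// Hedged example: for a struct<int64 int64>, get_field_offset(&struct_ty, 1, vm)
// would return 8 on x86-64, assuming the backend lays the fields out contiguously
// (it reads the precomputed struct_layout from the backend type info).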
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = {
......
@@ -7,6 +7,7 @@ use std::sync::Arc;
use std::process::Command;
use std::process::Output;
use std::os::unix::process::ExitStatusExt;
pub mod aot;
pub mod c_api;
@@ -40,6 +41,10 @@ pub fn exec_nocheck (mut cmd: Command) -> Output {
println!("---err---");
println!("{}", String::from_utf8_lossy(&output.stderr));
if output.status.signal().is_some() {
println!("terminated by a signal: {}", output.status.signal().unwrap());
}
output
}
......
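On Unix, ExitStatus::code() returns None when the child was killed by a signal, so the added guard is what surfaces crashes (e.g. SIGSEGV) in test output. A self-contained sketch of the same pattern:

use std::process::Command;
use std::os::unix::process::ExitStatusExt; // for ExitStatus::signal()

fn main() {
    let status = Command::new("true").status().expect("failed to run");
    match (status.code(), status.signal()) {
        (Some(code), _) => println!("exited with code {}", code),
        (None, Some(sig)) => println!("terminated by signal: {}", sig),
        _ => println!("no exit code or signal recorded"),
    }
}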
@@ -562,5 +562,452 @@ pub fn hybrid_fix_part_insts() -> VM {
vm.define_func_version(func_ver);
vm
}
#[test]
fn test_hybrid_var_part() {
VM::start_logging_trace();
let vm = Arc::new(hybrid_var_part_insts());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
let func_id = vm.id_of("hybrid_var_part_insts");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
}
vm.make_primordial_thread(func_id, vec![]);
backend::emit_context(&vm);
let executable = aot::link_primordial(vec!["hybrid_var_part_insts".to_string()], "hybrid_var_part_insts_test");
let output = aot::execute_nocheck(executable);
assert!(output.status.code().is_some());
let ret_code = output.status.code().unwrap();
println!("return code: {}", ret_code);
assert!(ret_code == 20);
}
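The expected exit code of 20 is presumably the sum of the two stored values (10 + 10); the listing below is truncated before the blocks that load them back, so the assumed tail would look roughly like:

// (assumed, elided from this listing)
// %v0 = LOAD <@int64> %var0          ; 10
// %v4 = LOAD <@int64> %var4          ; 10
// %sum = ADD <@int64> %v0 %v4        ; 20, used as the exit value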
pub fn hybrid_var_part_insts() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(int64.as_entity(), Mu("int64"));
// .typedef @my_hybrid = hybrid<@int64 @int64 | @int64>
let my_hybrid = vm.declare_type(vm.next_id(), MuType_::hybrid("MyHybrid".to_string(), vec![int64.clone(), int64.clone()], int64.clone()));
vm.set_name(my_hybrid.as_entity(), Mu("my_hybrid"));
// .typedef @ref_hybrid = ref<@my_hybrid>
let ref_hybrid = vm.declare_type(vm.next_id(), MuType_::muref(my_hybrid.clone()));
vm.set_name(ref_hybrid.as_entity(), Mu("ref_hybrid"));
// .typedef @iref_hybrid = iref<@my_hybrid>
let iref_hybrid = vm.declare_type(vm.next_id(), MuType_::iref(my_hybrid.clone()));
vm.set_name(iref_hybrid.as_entity(), Mu("iref_hybrid"));
// .typedef @iref_int64 = iref<@int64>
let iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(int64.clone()));
vm.set_name(iref_int64.as_entity(), Mu("iref_int64"));
// .const @int64_0 <@int64> = 0
let int64_0 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(0));
vm.set_name(int64_0.as_entity(), Mu("int64_0"));
// .const @int64_1 <@int64> = 1
let int64_1 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(1));
vm.set_name(int64_1.as_entity(), Mu("int64_1"));
// .const @int64_2 <@int64> = 2
let int64_2 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(2));
vm.set_name(int64_2.as_entity(), Mu("int64_2"));
// .const @int64_3 <@int64> = 3
let int64_3 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(3));
vm.set_name(int64_3.as_entity(), Mu("int64_3"));
// .const @int64_4 <@int64> = 4
let int64_4 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(4));
vm.set_name(int64_4.as_entity(), Mu("int64_4"));
// .const @int64_10 <@int64> = 10
let int64_10 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(10));
vm.set_name(int64_10.as_entity(), Mu("int64_10"));
// .funcsig @noparam_noret_sig = () -> ()
let noparam_noret_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(noparam_noret_sig.as_entity(), Mu("noparam_noret_sig"));
// .funcdecl @hybrid_var_part_insts <@noparam_noret_sig>
let func = MuFunction::new(vm.next_id(), noparam_noret_sig.clone());
vm.set_name(func.as_entity(), Mu("hybrid_var_part_insts"));
let func_id = func.id();
vm.declare_func(func);
// .funcdef @hybrid_var_part_insts VERSION @hybrid_var_part_insts_v1 <@noparam_noret_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, noparam_noret_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("hybrid_var_part_insts_v1"));
// %entry():
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
// %a = NEWHYBRID <@my_hybrid @int64> @int64_10
let blk_entry_a = func_ver.new_ssa(vm.next_id(), ref_hybrid.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let int64_0_local = func_ver.new_constant(int64_0.clone());
let int64_1_local = func_ver.new_constant(int64_1.clone());
let int64_2_local = func_ver.new_constant(int64_2.clone());
let int64_3_local = func_ver.new_constant(int64_3.clone());
let int64_4_local = func_ver.new_constant(int64_4.clone());
let int64_10_local = func_ver.new_constant(int64_10.clone());
let blk_entry_inst_newhybrid = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_a.clone_value()]),
ops: RwLock::new(vec![int64_10_local.clone()]),
v: Instruction_::NewHybrid(my_hybrid.clone(), 0)
});
// %iref_a = GETIREF <@int64> %a
let blk_entry_iref_a = func_ver.new_ssa(vm.next_id(), iref_hybrid.clone());
vm.set_name(blk_entry_iref_a.as_entity(), Mu("blk_entry_iref_a"));
let blk_entry_inst_getiref = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_a.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %iref_var = GETVARPARTIREF <@my_hybrid> %iref_a
let blk_entry_iref_var = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_entry_iref_var.as_entity(), Mu("blk_entry_iref_var"));
let blk_entry_inst_getvarpart = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_var.clone_value()]),
ops: RwLock::new(vec![blk_entry_iref_a.clone()]),
v: Instruction_::GetVarPartIRef{
is_ptr: false,
base: 0
}
});
// %var0 = SHIFTIREF <@int64> %iref_var %int64_0
let blk_entry_var0 = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_entry_var0.as_entity(), Mu("blk_entry_var0"));
let blk_entry_inst_shiftiref_0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_var0.clone_value()]),
ops: RwLock::new(vec![blk_entry_iref_var.clone(), int64_0_local.clone()]),
v: Instruction_::ShiftIRef {
is_ptr: false,
base: 0,
offset: 1
}
});
// STORE <@int64> %var0 @int64_10
let blk_entry_inst_store_0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_var0.clone(), int64_10_local.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
});
// %var4 = SHIFTIREF <@int64> %iref_var %int64_4
let blk_entry_var4 = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_entry_var4.as_entity(), Mu("blk_entry_var4"));
let blk_entry_inst_shiftiref_4 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_var4.clone_value()]),
ops: RwLock::new(vec![blk_entry_iref_var.clone(), int64_4_local.clone()]),
v: Instruction_::ShiftIRef {
is_ptr: false,
base: 0,
offset: 1
}
});
// STORE <@int64> %var4 @int64_10
let blk_entry_inst_store_4 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_var4.clone(), int64_10_local.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
});
// BRANCH %check(%a)
let blk_check_id = vm.next_id();
let blk_entry_branch = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_a]),
v: Instruction_::Branch1(Destination{
target: blk_check_id,