Commit ca802615 authored by qinsoon

[wip] GetFieldIRef on hybrid

parent 8da4a884
@@ -292,6 +292,19 @@ impl MuType_ {
MuType_::Hybrid(tag)
}
pub fn hybrid_put(tag: HybridTag, mut fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) {
let mut map_guard = HYBRID_TAG_MAP.write().unwrap();
match map_guard.get_mut(&tag) {
Some(hybrid_ty_) => {
hybrid_ty_.fix_tys.clear();
hybrid_ty_.fix_tys.append(&mut fix_tys);
hybrid_ty_.var_ty = var_ty;
},
None => panic!("call hybrid_empty() to create an empty hybrid before hybrid_put()")
}
}
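// Illustrative sketch (not part of this diff): hybrid_put() back-fills an entry that a
// prior hybrid_empty() call registered in HYBRID_TAG_MAP, e.g. for a hybrid whose fixed
// part refers back to itself. Assuming hybrid_empty(tag) creates the placeholder entry,
// the intended call sequence would be roughly:
//
//     let tag: HybridTag = "SelfRefHybrid".to_string();
//     let fwd = vm.declare_type(vm.next_id(), MuType_::hybrid_empty(tag.clone()));
//     let ref_to_self = vm.declare_type(vm.next_id(), MuType_::muref(fwd.clone()));
//     MuType_::hybrid_put(tag, vec![ref_to_self], int64.clone());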
pub fn hybrid(tag: HybridTag, fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) -> MuType_ {
let hybrid_ty_ = HybridType_{fix_tys: fix_tys, var_ty: var_ty};
@@ -870,7 +870,7 @@ impl ASMCodeGen {
if cfg!(debug_assertions) {
match op.v {
Value_::Memory(_) => {},
_ => panic!("expecting register op")
_ => panic!("expecting memory op")
}
}
@@ -890,7 +890,7 @@ impl ASMCodeGen {
match offset.v {
Value_::SSAVar(id) => {
// temp as offset
let (str, id, loc) = self.prepare_reg(offset, 0);
let (str, id, loc) = self.prepare_reg(offset, loc_cursor);
result_str.push_str(&str);
ids.push(id);
@@ -816,7 +816,7 @@ impl <'a> InstructionSelection {
if hdr_size == 0 {
self.emit_move_node_to_value(&res_tmp, &op, f_content, f_context, vm);
} else {
self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
self.emit_lea_base_immoffset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
}
}
@@ -831,97 +831,108 @@ impl <'a> InstructionSelection {
}
Instruction_::New(ref ty) => {
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => panic!("cannot use NEW for hybrid, use NEWHYBRID instead"),
_ => {}
}
}
let ty_info = vm.get_backend_type_info(ty.id());
let ty_size = ty_info.size;
let size = ty_info.size;
let ty_align = ty_info.alignment;
let const_size = self.make_value_int_const(size as u64, vm);
if ty_size > mm::LARGE_OBJECT_THRESHOLD {
// emit large object allocation
unimplemented!()
} else {
// emit immix allocation fast path
// ASM: %tl = get_thread_local()
let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
// ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
// alignup cursor (cursor + align - 1 & !(align - 1))
// ASM: lea align-1(%cursor) -> %start
let align = ty_info.alignment as i32;
let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
// ASM: and %start, !(align-1) -> %start
self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1) as i32);
// bump cursor
// ASM: lea size(%start) -> %end
let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
// check with limit
// ASM: cmp %end, [%tl + allocator_offset + limit_offset]
let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
self.backend.emit_cmp_mem64_r64(&mem_limit, &tmp_end);
// branch to slow path if end > limit (end - limit > 0)
// ASM: jg alloc_slow
let slowpath = format!("{}_allocslow", node.id());
self.backend.emit_jg(slowpath.clone());
// update cursor
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
// put start as result
// ASM: mov %start -> %result
let tmp_res = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
// ASM jmp alloc_end
let allocend = format!("{}_allocend", node.id());
self.backend.emit_jmp(allocend.clone());
// finishing current block
self.backend.end_block(self.current_block.as_ref().unwrap().clone());
// alloc_slow:
// call alloc_slow(size, align) -> %ret
// new block (no livein)
self.current_block = Some(slowpath.clone());
self.backend.start_block(slowpath.clone());
self.backend.set_block_livein(slowpath.clone(), &vec![]);
// arg1: allocator address
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// arg2: size
let const_size = self.make_value_int_const(ty_size as u64, vm);
// arg3: align
let const_align = self.make_value_int_const(ty_align as u64, vm);
let rets = self.emit_runtime_entry(
&entrypoints::ALLOC_SLOW,
vec![tmp_allocator, const_size, const_align],
Some(vec![
tmp_res.clone()
]),
Some(node), f_content, f_context, vm
);
// end block (no liveout other than result)
self.backend.end_block(slowpath.clone());
self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
// block: alloc_end
self.backend.start_block(allocend.clone());
self.current_block = Some(allocend.clone());
self.emit_alloc_sequence(const_size, ty_align, node, f_content, f_context, vm);
}
Instruction_::NewHybrid(ref ty, var_len) => {
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => {},
_ => panic!("NEWHYBRID is only for allocating hybrid types, use NEW for others")
}
}
let ty_info = vm.get_backend_type_info(ty.id());
let ty_align = ty_info.alignment;
let fix_part_size = ty_info.size;
let var_ty_size = match ty.v {
MuType_::Hybrid(ref name) => {
let map_lock = HYBRID_TAG_MAP.read().unwrap();
let hybrid_ty_ = map_lock.get(name).unwrap();
let var_ty = hybrid_ty_.get_var_ty();
vm.get_backend_type_info(var_ty.id()).size
},
_ => panic!("only expect HYBRID type here")
};
// actual size = fix_part_size + var_ty_size * len
let actual_size = {
let ops = inst.ops.read().unwrap();
let ref var_len = ops[var_len];
if self.match_iimm(var_len) {
let var_len = self.node_iimm_to_i32(var_len);
let actual_size = fix_part_size + var_ty_size * (var_len as usize);
self.make_value_int_const(actual_size as u64, vm)
} else {
let tmp_actual_size = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
let is_power_of_two = |x: usize| {
use std::i8;
let mut power_of_two = 1;
let mut i: i8 = 0;
while power_of_two < x && i < i8::MAX {
power_of_two *= 2;
i += 1;
}
if power_of_two == x {
Some(i)
} else {
None
}
};
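// e.g. var_ty_size = 8 yields Some(3), so the shift path below computes len << 3 == len * 8;
// a non-power-of-two size such as 24 yields None and falls back to the RAX multiply path.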
match is_power_of_two(var_ty_size) {
Some(shift) => {
// a shift-left will get the total size of var part
self.backend.emit_shl_r64_imm8(&tmp_var_len, shift);
// add with fix-part size
self.backend.emit_add_r64_imm32(&tmp_var_len, fix_part_size as i32);
// mov result to tmp_actual_size
self.backend.emit_mov_r64_r64(&tmp_actual_size, &tmp_var_len);
}
None => {
// we need to do a multiply
// mov var_ty_size -> rax
self.backend.emit_mov_r64_imm32(&x86_64::RAX, var_ty_size as i32);
// mul tmp_var_len, rax -> rdx:rax
self.backend.emit_mul_r64(&tmp_var_len);
// add with fix-part size
self.backend.emit_add_r64_imm32(&x86_64::RAX, fix_part_size as i32);
// mov result to tmp_actual_size
self.backend.emit_mov_r64_r64(&tmp_actual_size, &x86_64::RAX);
}
}
tmp_actual_size
}
};
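// e.g. for the test type hybrid<@int64 @int64 | @int64> later in this commit, with a
// constant length of 10: fix_part_size = 16, var_ty_size = 8, so
// actual_size = 16 + 8 * 10 = 96 bytes (assuming no extra padding for 8-byte fields).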
self.emit_alloc_sequence(actual_size, ty_align, node, f_content, f_context, vm);
}
Instruction_::Throw(op_index) => {
@@ -961,6 +972,19 @@ impl <'a> InstructionSelection {
})
})
}
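// Presumably the register-offset counterpart of make_memory_op_base_offset: the value built
// below describes a [base + offset_register] operand, with no index or scale component.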
fn make_memory_op_base_offsetreg(&mut self, base: &P<Value>, offset: &P<Value>, ty: P<MuType>, vm: &VM) -> P<Value> {
P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(),
v: Value_::Memory(MemoryLocation::Address{
base: base.clone(),
offset: Some(offset.clone()),
index: None,
scale: None
})
})
}
fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
P(Value{
@@ -968,7 +992,178 @@ impl <'a> InstructionSelection {
ty: UINT64_TYPE.clone(),
v: Value_::Constant(Constant::Int(val))
})
}
}
fn emit_alloc_sequence (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
if size.is_int_const() {
// size known at compile time, we can choose to emit alloc_small or large now
if size.extract_int_const() > mm::LARGE_OBJECT_THRESHOLD as u64 {
self.emit_alloc_sequence_large(size, align, node, f_content, f_context, vm);
} else {
self.emit_alloc_sequence_small(size, align, node, f_content, f_context, vm);
}
} else {
// size is unknown at compile time
// we need to emit both alloc small and alloc large,
// and it is decided at runtime
// emit: cmp size, THRESHOLD
// emit: jg ALLOC_LARGE
// emit: >> small object alloc
// emit: jmp ALLOC_LARGE_END
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
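// For example, a NEW of a small fixed-size struct never reaches this branch (its size is a
// compile-time constant), while a NEWHYBRID whose length is only known at run time emits the
// cmp/jg dispatch below so the small/large decision is made at allocation time.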
let blk_alloc_large = format!("{}_alloc_large", node.id());
let blk_alloc_large_end = format!("{}_alloc_large_end", node.id());
self.backend.emit_cmp_imm32_r64(mm::LARGE_OBJECT_THRESHOLD as i32, &size);
self.backend.emit_jg(blk_alloc_large.clone());
// alloc small here
let tmp_res = self.emit_alloc_sequence_small(size.clone(), align, node, f_content, f_context, vm);
self.backend.emit_jmp(blk_alloc_large_end.clone());
// finishing current block
let cur_block = self.current_block.as_ref().unwrap().clone();
self.backend.end_block(cur_block.clone());
self.backend.set_block_liveout(cur_block.clone(), &vec![tmp_res.clone()]);
// alloc_large:
self.current_block = Some(blk_alloc_large.clone());
self.backend.start_block(blk_alloc_large.clone());
self.backend.set_block_livein(blk_alloc_large.clone(), &vec![size.clone()]);
let tmp_res = self.emit_alloc_sequence_large(size, align, node, f_content, f_context, vm);
self.backend.end_block(blk_alloc_large.clone());
self.backend.set_block_liveout(blk_alloc_large.clone(), &vec![tmp_res]);
// alloc_large_end:
self.backend.start_block(blk_alloc_large_end.clone());
self.current_block = Some(blk_alloc_large_end.clone());
}
}
fn emit_alloc_sequence_large (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let tmp_res = self.get_result_value(node);
// ASM: %tl = get_thread_local()
let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
// ASM: lea [%tl + allocator_offset] -> %tmp_allocator
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_immoffset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// ASM: %tmp_res = call muentry_alloc_large(%allocator, size, align)
let const_align = self.make_value_int_const(align as u64, vm);
let rets = self.emit_runtime_entry(
&entrypoints::ALLOC_LARGE,
vec![tmp_allocator, size.clone(), const_align],
Some(vec![tmp_res.clone()]),
Some(node), f_content, f_context, vm
);
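// ALLOC_LARGE relocates to muentry_alloc_large (declared later in this commit), so large
// objects always defer to the runtime, which receives (allocator, size, align).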
tmp_res
}
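// Bump-pointer fast path for small objects: align the cursor up, bump it by the requested
// size, compare against the allocator limit, and fall back to muentry_alloc_slow only when
// the bump would cross that limit.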
fn emit_alloc_sequence_small (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// emit immix allocation fast path
// ASM: %tl = get_thread_local()
let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
// ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
// alignup cursor (cursor + align - 1 & !(align - 1))
// ASM: lea align-1(%cursor) -> %start
let align = align as i32;
let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_immoffset(&tmp_start, &tmp_cursor, align - 1, vm);
// ASM: and %start, !(align-1) -> %start
self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1) as i32);
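// e.g. cursor = 0x1003, align = 8: start = (0x1003 + 7) & !7 = 0x1008.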
// bump cursor
// ASM: add %size, %start -> %end
// or lea size(%start) -> %end
let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
if size.is_int_const() {
let offset = size.extract_int_const() as i32;
self.emit_lea_base_immoffset(&tmp_end, &tmp_start, offset, vm);
} else {
self.backend.emit_mov_r64_r64(&tmp_end, &tmp_start);
self.backend.emit_add_r64_r64(&tmp_end, &size);
}
// check with limit
// ASM: cmp %end, [%tl + allocator_offset + limit_offset]
let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
self.backend.emit_cmp_mem64_r64(&mem_limit, &tmp_end);
// branch to slow path if end > limit (end - limit > 0)
// ASM: jg alloc_slow
let slowpath = format!("{}_allocslow", node.id());
self.backend.emit_jg(slowpath.clone());
// update cursor
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
// put start as result
// ASM: mov %start -> %result
let tmp_res = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
// ASM jmp alloc_end
let allocend = format!("{}_alloc_small_end", node.id());
self.backend.emit_jmp(allocend.clone());
// finishing current block
let cur_block = self.current_block.as_ref().unwrap().clone();
self.backend.end_block(cur_block.clone());
self.backend.set_block_liveout(cur_block.clone(), &vec![tmp_res.clone()]);
// alloc_slow:
// call alloc_slow(size, align) -> %ret
// new block (no livein)
self.current_block = Some(slowpath.clone());
self.backend.start_block(slowpath.clone());
self.backend.set_block_livein(slowpath.clone(), &vec![size.clone()]);
// arg1: allocator address
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_immoffset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// arg2: size
// arg3: align
let const_align = self.make_value_int_const(align as u64, vm);
let rets = self.emit_runtime_entry(
&entrypoints::ALLOC_SLOW,
vec![tmp_allocator, size.clone(), const_align],
Some(vec![
tmp_res.clone()
]),
Some(node), f_content, f_context, vm
);
// end block (no liveout other than result)
self.backend.end_block(slowpath.clone());
self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
// block: alloc_end
self.backend.start_block(allocend.clone());
self.current_block = Some(allocend.clone());
tmp_res
}
fn emit_truncate_result (&mut self, from_ty: &P<MuType>, to_ty: &P<MuType>, op: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
// currently only use 64bits register
@@ -1053,7 +1248,7 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_mem64_r64(&mem, src);
}
fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
fn emit_lea_base_immoffset(&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, ADDRESS_TYPE.clone(), vm);
self.backend.emit_lea_r64(dest, &mem);
@@ -133,12 +133,14 @@ pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize)
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = unsafe {mutator.as_mut().unwrap()}.try_alloc_from_local(size, align);
unsafe {ret.to_object_reference()}
}
#[no_mangle]
pub extern fn alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
unsafe {ret.to_object_reference()}
}
\ No newline at end of file
@@ -46,11 +46,22 @@ lazy_static! {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![ADDRESS_TYPE.clone()],
arg_tys: vec![UINT64_TYPE.clone(), UINT64_TYPE.clone()]
arg_tys: vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_alloc_slow")),
jit: RwLock::new(None),
};
// impl/decl: gc/lib.rs
pub static ref ALLOC_LARGE : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![ADDRESS_TYPE.clone()],
arg_tys: vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_alloc_large")),
jit: RwLock::new(None)
};
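// The (ADDRESS, UINT64, UINT64) arg_tys line up with muentry_alloc_large's
// (mutator, size, align) parameters in the GC crate above.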
// impl/decl: exception.rs
pub static ref THROW_EXCEPTION : RuntimeEntrypoint = RuntimeEntrypoint {
@@ -284,5 +284,283 @@ pub fn struct_insts() -> VM {
vm.define_func_version(func_ver);
vm
}
#[test]
fn test_hybrid_fix_part() {
VM::start_logging_trace();
let vm = Arc::new(hybrid_fix_part_insts());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
let func_id = vm.id_of("hybrid_fix_part_insts");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
}
vm.make_primordial_thread(func_id, vec![]);
backend::emit_context(&vm);
let executable = aot::link_primordial(vec!["hybrid_fix_part_insts".to_string()], "hybrid_fix_part_insts_test");
let output = aot::execute_nocheck(executable);
assert!(output.status.code().is_some());
let ret_code = output.status.code().unwrap();
println!("return code: {}", ret_code);
assert!(ret_code == 1);
}
pub fn hybrid_fix_part_insts() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(int64.as_entity(), Mu("int64"));
// .typedef @my_hybrid = hybrid<@int64 @int64 | @int64>
let my_hybrid = vm.declare_type(vm.next_id(), MuType_::hybrid("MyHybrid".to_string(), vec![int64.clone(), int64.clone()], int64.clone()));
vm.set_name(my_hybrid.as_entity(), Mu("my_hybrid"));
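// With two 8-byte fixed fields, the fixed part is presumably 16 bytes and the variable part
// len * 8 bytes, which is exactly the arithmetic the NEWHYBRID lowering above performs.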
// .typedef @ref_hybrid = ref<@my_hybrid>
let ref_hybrid = vm.declare_type(vm.next_id(), MuType_::muref(my_hybrid.clone()));
vm.set_name(ref_hybrid.as_entity(), Mu("ref_hybrid"));
// .typedef @iref_hybrid = iref<@my_hybrid>
let iref_hybrid = vm.declare_type(vm.next_id(), MuType_::iref(my_hybrid.clone()));
vm.set_name(iref_hybrid.as_entity(), Mu("iref_hybrid"));
// .typedef @iref_int64 = iref<@int64>
let iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(int64.clone()));
vm.set_name(iref_int64.as_entity(), Mu("iref_int64"));
// .const @int64_0 <@int64> = 0
let int64_0 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(0));
vm.set_name(int64_0.as_entity(), Mu("int64_0"));
// .const @int64_1 <@int64> = 1
let int64_1 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(1));
vm.set_name(int64_1.as_entity(), Mu("int64_1"));
// .const @int64_10 <@int64> = 10
let int64_10 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(10));
vm.set_name(int64_10.as_entity(), Mu("int64_10"));
// .funcsig @noparam_noret_sig = () -> ()
let noparam_noret_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(noparam_noret_sig.as_entity(), Mu("noparam_noret_sig"));
// .funcdecl @hybrid_fix_part_insts <@noparam_noret_sig>
let func = MuFunction::new(vm.next_id(), noparam_noret_sig.clone());
vm.set_name(func.as_entity(), Mu("hybrid_fix_part_insts"));
let func_id = func.id();
vm.declare_func(func);
// .funcdef @hybrid_fix_part_insts VERSION @hybrid_fix_part_insts_v1 <@noparam_noret_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, noparam_noret_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("hybrid_fix_part_insts_v1"));
// %entry():
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
// %a = NEWHYBRID <@my_hybrid @int64> @int64_10
let blk_entry_a = func_ver.new_ssa(vm.next_id(), ref_hybrid.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let int64_10_local = func_ver.new_constant(int64_10.clone());
let blk_entry_inst0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_a.clone_value()]),
ops: RwLock::new(vec![int64_10_local]),
v: Instruction_::NewHybrid(my_hybrid.clone(), 0)
});
// %iref_a = GETIREF <@my_hybrid> %a
let blk_entry_iref_a = func_ver.new_ssa(vm.next_id(), iref_hybrid.clone());
vm.set_name(blk_entry_iref_a.as_entity(), Mu("blk_entry_iref_a"));
let blk_entry_inst1 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_a.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %iref_x = GETFIELDIREF <@my_hybrid 0> %iref_a
let blk_entry_iref_x = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_entry_iref_x.as_entity(), Mu("blk_entry_iref_x"));
let int64_0_local = func_ver.new_constant(int64_0.clone());
let blk_entry_inst2 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_x.clone_value()]),
ops: RwLock::new(vec![blk_entry_iref_a.clone()]),
v: Instruction_::GetFieldIRef {
is_ptr: false,
base: 0, // 0th node in ops
index: 0 // 0th element in the struct
}
});
// STORE <@int64> %iref_x @int64_1
let int64_1_local = func_ver.new_constant(int64_1.clone());
let blk_entry_inst3 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_iref_x.clone(), int64_1_local.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
});
// BRANCH %check(%a)
let blk_check_id = vm.next_id();
let blk_entry_branch = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_a]),
v: Instruction_::Branch1(Destination{
target: blk_check_id,
args: vec![DestArg::Normal(0)]
})
});
blk_entry.content = Some(BlockContent{
args: vec![],
exn_arg: None,
body: vec![blk_entry_inst0, blk_entry_inst1, blk_entry_inst2, blk_entry_inst3, blk_entry_branch],
keepalives: None
});
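// entry: NEWHYBRID a 10-element @my_hybrid, GETIREF it, GETFIELDIREF field 0 of the fixed
// part, STORE @int64_1 into that field, then BRANCH to %check passing %a.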
// %check(%a):
let blk_check_a = func_ver.new_ssa(vm.next_id(), ref_hybrid.clone());
vm.set_name(blk_check_a.as_entity(), Mu("blk_check_a"));
let mut blk_check = Block::new(blk_check_id);
vm.set_name(blk_check.as_entity(), Mu("check"));
// %blk_check_iref_a = GETIREF <@my_hybrid> %a
let blk_check_iref_a = func_ver.new_ssa(vm.next_id(), iref_hybrid.clone());
vm.set_name(blk_check_iref_a.as_entity(), Mu("blk_check_iref_a"));
let blk_check_inst0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_iref_a.clone_value()]),
ops: RwLock::new(vec![blk_check_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %blk_check_iref_x = GETFIELDIREF <@my_hybrid 0> %blk_check_iref_a
let blk_check_iref_x = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_check_iref_x.as_entity(), Mu("blk_check_iref_x"));
let blk_check_inst1 = func_ver.new_inst(Instruction{