GitLab will be upgraded to version 12.10.14-ce.0 on 28 Sept 2020, from 2.00pm to 2.30pm (AEDT). During the upgrade, the GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to us at N110 (b), CSIT Building.

Commit 8707cfce authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents 1f818744 28c3fcf9
...@@ -60,6 +60,17 @@ impl MuType { ...@@ -60,6 +60,17 @@ impl MuType {
v: v v: v
} }
} }
/// Returns the type a reference-like type points to.
///
/// For `Ref<T>`, `IRef<T>`, `WeakRef<T>` and `UPtr<T>` this yields a clone of
/// the inner `T`; any other type variant yields `None`.
pub fn get_referenced_ty(&self) -> Option<P<MuType>> {
    use types::MuType_::*;
    match self.v {
        Ref(ref inner) => Some(inner.clone()),
        IRef(ref inner) => Some(inner.clone()),
        WeakRef(ref inner) => Some(inner.clone()),
        UPtr(ref inner) => Some(inner.clone()),
        _ => None
    }
}
} }
pub type StructTag = MuName; pub type StructTag = MuName;
......
...@@ -74,7 +74,7 @@ pub trait CodeGenerator { ...@@ -74,7 +74,7 @@ pub trait CodeGenerator {
fn emit_mov_mem8_imm8 (&mut self, dest: Mem, src: i8); fn emit_mov_mem8_imm8 (&mut self, dest: Mem, src: i8);
// lea // lea
fn emit_lea_r64(&mut self, dest: Reg, src: Reg); fn emit_lea_r64(&mut self, dest: Reg, src: Mem);
// and // and
fn emit_and_r64_imm32(&mut self, dest: Reg, src: i32); fn emit_and_r64_imm32(&mut self, dest: Reg, src: i32);
......
...@@ -805,19 +805,16 @@ impl <'a> InstructionSelection { ...@@ -805,19 +805,16 @@ impl <'a> InstructionSelection {
unimplemented!() unimplemented!()
} }
} }
Instruction_::GetIRef(op_index) => { // memory insts: calculate the address, then lea
let ops = inst.ops.read().unwrap(); Instruction_::GetIRef(_)
| Instruction_::GetFieldIRef{..}
let ref op = ops[op_index]; | Instruction_::GetVarPartIRef{..}
let res_tmp = self.get_result_value(node); | Instruction_::ShiftIRef{..} => {
let mem_addr = self.emit_get_mem_from_inst(node, f_content, f_context, vm);
let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE; let tmp_res = self.get_result_value(node);
if hdr_size == 0 {
self.emit_move_node_to_value(&res_tmp, &op, f_content, f_context, vm); self.backend.emit_lea_r64(&tmp_res, &mem_addr);
} else {
self.emit_lea_base_immoffset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
}
} }
Instruction_::ThreadExit => { Instruction_::ThreadExit => {
...@@ -973,15 +970,15 @@ impl <'a> InstructionSelection { ...@@ -973,15 +970,15 @@ impl <'a> InstructionSelection {
}) })
} }
fn make_memory_op_base_offsetreg(&mut self, base: &P<Value>, offset: &P<Value>, ty: P<MuType>, vm: &VM) -> P<Value> { fn make_memory_op_base_index(&mut self, base: &P<Value>, index: &P<Value>, scale: u8, ty: P<MuType>, vm: &VM) -> P<Value> {
P(Value{ P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()), hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(), ty: ty.clone(),
v: Value_::Memory(MemoryLocation::Address{ v: Value_::Memory(MemoryLocation::Address{
base: base.clone(), base: base.clone(),
offset: Some(offset.clone()), offset: None,
index: None, index: Some(index.clone()),
scale: None scale: Some(scale)
}) })
}) })
} }
...@@ -1256,7 +1253,7 @@ impl <'a> InstructionSelection { ...@@ -1256,7 +1253,7 @@ impl <'a> InstructionSelection {
fn emit_udiv ( fn emit_udiv (
&mut self, &mut self,
op1: &P<TreeNode>, op2: &P<TreeNode>, op1: &TreeNode, op2: &TreeNode,
f_content: &FunctionContent, f_content: &FunctionContent,
f_context: &mut FunctionContext, f_context: &mut FunctionContext,
vm: &VM) vm: &VM)
...@@ -1295,7 +1292,7 @@ impl <'a> InstructionSelection { ...@@ -1295,7 +1292,7 @@ impl <'a> InstructionSelection {
fn emit_idiv ( fn emit_idiv (
&mut self, &mut self,
op1: &P<TreeNode>, op2: &P<TreeNode>, op1: &TreeNode, op2: &TreeNode,
f_content: &FunctionContent, f_content: &FunctionContent,
f_context: &mut FunctionContext, f_context: &mut FunctionContext,
vm: &VM) vm: &VM)
...@@ -1875,7 +1872,7 @@ impl <'a> InstructionSelection { ...@@ -1875,7 +1872,7 @@ impl <'a> InstructionSelection {
self.backend.emit_pop_r64(&x86_64::RBP); self.backend.emit_pop_r64(&x86_64::RBP);
} }
fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool { fn match_cmp_res(&mut self, op: &TreeNode) -> bool {
match op.v { match op.v {
TreeNode_::Instruction(ref inst) => { TreeNode_::Instruction(ref inst) => {
match inst.v { match inst.v {
...@@ -1887,7 +1884,7 @@ impl <'a> InstructionSelection { ...@@ -1887,7 +1884,7 @@ impl <'a> InstructionSelection {
} }
} }
fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp { fn emit_cmp_res(&mut self, cond: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
match cond.v { match cond.v {
TreeNode_::Instruction(ref inst) => { TreeNode_::Instruction(ref inst) => {
let ops = inst.ops.read().unwrap(); let ops = inst.ops.read().unwrap();
...@@ -1982,7 +1979,7 @@ impl <'a> InstructionSelection { ...@@ -1982,7 +1979,7 @@ impl <'a> InstructionSelection {
} }
} }
fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> { fn emit_ireg(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v { match op.v {
TreeNode_::Instruction(_) => { TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm); self.instruction_select(op, f_content, f_context, vm);
...@@ -2021,7 +2018,7 @@ impl <'a> InstructionSelection { ...@@ -2021,7 +2018,7 @@ impl <'a> InstructionSelection {
} }
} }
fn emit_fpreg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> { fn emit_fpreg(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v { match op.v {
TreeNode_::Instruction(_) => { TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm); self.instruction_select(op, f_content, f_context, vm);
...@@ -2037,21 +2034,21 @@ impl <'a> InstructionSelection { ...@@ -2037,21 +2034,21 @@ impl <'a> InstructionSelection {
} }
} }
fn match_iimm(&mut self, op: &P<TreeNode>) -> bool { fn match_iimm(&mut self, op: &TreeNode) -> bool {
match op.v { match op.v {
TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true, TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
_ => false _ => false
} }
} }
fn node_iimm_to_i32(&mut self, op: &P<TreeNode>) -> i32 { fn node_iimm_to_i32(&mut self, op: &TreeNode) -> i32 {
match op.v { match op.v {
TreeNode_::Value(ref pv) => self.value_iimm_to_i32(pv), TreeNode_::Value(ref pv) => self.value_iimm_to_i32(pv),
_ => panic!("expected iimm") _ => panic!("expected iimm")
} }
} }
fn node_iimm_to_value(&mut self, op: &P<TreeNode>) -> P<Value> { fn node_iimm_to_value(&mut self, op: &TreeNode) -> P<Value> {
match op.v { match op.v {
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
pv.clone() pv.clone()
...@@ -2071,7 +2068,7 @@ impl <'a> InstructionSelection { ...@@ -2071,7 +2068,7 @@ impl <'a> InstructionSelection {
} }
} }
fn emit_node_addr_to_value(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> { fn emit_node_addr_to_value(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v { match op.v {
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
match pv.v { match pv.v {
...@@ -2108,19 +2105,74 @@ impl <'a> InstructionSelection { ...@@ -2108,19 +2105,74 @@ impl <'a> InstructionSelection {
TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, f_content, f_context, vm) TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, f_content, f_context, vm)
} }
} }
fn emit_get_mem_from_inst(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> { fn emit_get_mem_from_inst(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let header_size = mm::objectmodel::OBJECT_HEADER_SIZE as i32; let mem = self.emit_get_mem_from_inst_inner(op, f_content, f_context, vm);
P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ADDRESS_TYPE.clone(),
v: Value_::Memory(mem)
})
}
/// Adds `more_offset` to the constant displacement of an address-form
/// memory location, leaving base/index/scale untouched.
///
/// Panics if `mem` is not a `MemoryLocation::Address`.
fn addr_const_offset_adjust(&mut self, mem: MemoryLocation, more_offset: u64, vm: &VM) -> MemoryLocation {
    if let MemoryLocation::Address { base, offset, index, scale } = mem {
        // Fold the extra displacement into whatever constant offset is
        // already present (or start from `more_offset` if there is none).
        let combined = offset.map_or(more_offset, |pv| pv.extract_int_const() + more_offset);
        MemoryLocation::Address {
            base: base,
            offset: Some(self.make_value_int_const(combined, vm)),
            index: index,
            scale: scale
        }
    } else {
        panic!("expected an address memory location")
    }
}
/// Attaches an `index * scale` component to an address-form memory
/// location, keeping its base and constant offset.
///
/// Panics if `mem` is not a `MemoryLocation::Address`.
/// NOTE(review): any pre-existing index/scale on `mem` is silently replaced
/// — callers appear to only pass base+offset addresses; confirm.
fn addr_append_index_scale(&mut self, mem: MemoryLocation, index: P<Value>, scale: u8, vm: &VM) -> MemoryLocation {
    if let MemoryLocation::Address { base, offset, .. } = mem {
        MemoryLocation::Address {
            base: base,
            offset: offset,
            index: Some(index),
            scale: Some(scale)
        }
    } else {
        panic!("expected an address memory location")
    }
}
fn emit_get_mem_from_inst_inner(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> MemoryLocation {
let header_size = mm::objectmodel::OBJECT_HEADER_SIZE as u64;
match op.v { match op.v {
TreeNode_::Instruction(ref inst) => { TreeNode_::Instruction(ref inst) => {
let ref ops = inst.ops.read().unwrap(); let ref ops = inst.ops.read().unwrap();
match inst.v { match inst.v {
// GETIREF -> [base + HDR_SIZE]
Instruction_::GetIRef(op_index) => { Instruction_::GetIRef(op_index) => {
let ref op = ops[op_index]; let ref ref_op = ops[op_index];
let ret = MemoryLocation::Address {
base: ref_op.clone_value(),
offset: Some(self.make_value_int_const(header_size, vm)),
index: None,
scale: None
};
self.make_memory_op_base_offset(&op.clone_value(), header_size, ADDRESS_TYPE.clone(), vm) trace!("MEM from GETIREF: {}", ret);
ret
} }
Instruction_::GetFieldIRef{is_ptr, base, index} => { Instruction_::GetFieldIRef{is_ptr, base, index} => {
let ref base = ops[base]; let ref base = ops[base];
...@@ -2135,29 +2187,149 @@ impl <'a> InstructionSelection { ...@@ -2135,29 +2187,149 @@ impl <'a> InstructionSelection {
} }
}; };
let ty_info = vm.get_backend_type_info(struct_ty.id()); let field_offset : i32 = self.get_field_offset(&struct_ty, index, vm);
let layout = match ty_info.struct_layout.as_ref() {
Some(layout) => layout,
None => panic!("a struct type does not have a layout yet: {:?}", ty_info)
};
debug_assert!(layout.len() > index);
let field_offset : i32 = layout[index] as i32;
match base.v { match base.v {
// GETFIELDIREF(GETIREF) -> add FIELD_OFFSET to old offset
TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(op_index), ref ops, ..}) => { TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(op_index), ref ops, ..}) => {
let ops_guard = ops.read().unwrap(); let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ref inner = ops_guard[op_index]; let ret = self.addr_const_offset_adjust(mem, field_offset as u64, vm);
self.make_memory_op_base_offset(&inner.clone_value(), header_size + field_offset, ADDRESS_TYPE.clone(), vm) trace!("MEM from GETFIELDIREF(GETIREF): {}", ret);
ret
}, },
// GETFIELDIREF(ireg) -> [base + FIELD_OFFSET]
_ => {
let tmp = self.emit_ireg(base, f_content, f_context, vm);
let ret = MemoryLocation::Address {
base: tmp,
offset: Some(self.make_value_int_const(field_offset as u64, vm)),
index: None,
scale: None
};
trace!("MEM from GETFIELDIREF(ireg): {}", ret);
ret
}
}
}
Instruction_::GetVarPartIRef{is_ptr, base} => {
let ref base = ops[base];
let struct_ty = match base.clone_value().ty.get_referenced_ty() {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
let fix_part_size = vm.get_backend_type_info(struct_ty.id()).size;
match base.v {
// GETVARPARTIREF(GETIREF) -> add FIX_PART_SIZE to old offset
TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(_), ..}) => {
let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ret = self.addr_const_offset_adjust(mem, fix_part_size as u64, vm);
trace!("MEM from GETIVARPARTIREF(GETIREF): {}", ret);
ret
},
// GETVARPARTIREF(ireg) -> [base + VAR_PART_SIZE]
_ => { _ => {
let tmp = self.emit_ireg(base, f_content, f_context, vm); let tmp = self.emit_ireg(base, f_content, f_context, vm);
self.make_memory_op_base_offset(&tmp, field_offset, ADDRESS_TYPE.clone(), vm) let ret = MemoryLocation::Address {
base: tmp,
offset: Some(self.make_value_int_const(fix_part_size as u64, vm)),
index: None,
scale: None
};
trace!("MEM from GETVARPARTIREF(ireg): {}", ret);
ret
} }
} }
} }
Instruction_::ShiftIRef{is_ptr, base, offset} => {
let ref base = ops[base];
let ref offset = ops[offset];
let tmp_res = self.get_result_value(op);
let ref base_ty = base.clone_value().ty;
let ele_ty = match base_ty.get_referenced_ty() {
Some(ty) => ty,
None => panic!("expected op in ShiftIRef of type IRef, found type: {}", base_ty)
};
let ele_ty_size = vm.get_backend_type_info(ele_ty.id()).size;
if self.match_iimm(offset) {
let index = self.node_iimm_to_i32(offset);
let shift_size = ele_ty_size as i32 * index;
let mem = match base.v {
// SHIFTIREF(GETVARPARTIREF(_), imm) -> add shift_size to old offset
TreeNode_::Instruction(Instruction{v: Instruction_::GetVarPartIRef{..}, ..}) => {
let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ret = self.addr_const_offset_adjust(mem, shift_size as u64, vm);
trace!("MEM from SHIFTIREF(GETVARPARTIREF(_), imm): {}", ret);
ret
},
// SHIFTIREF(ireg, imm) -> [base + SHIFT_SIZE]
_ => {
let tmp = self.emit_ireg(base, f_content, f_context, vm);
let ret = MemoryLocation::Address {
base: tmp,
offset: Some(self.make_value_int_const(shift_size as u64, vm)),
index: None,
scale: None
};
trace!("MEM from SHIFTIREF(ireg, imm): {}", ret);
ret
}
};
mem
} else {
let tmp_index = self.emit_ireg(offset, f_content, f_context, vm);
let scale : u8 = match ele_ty_size {
8 | 4 | 2 | 1 => ele_ty_size as u8,
_ => unimplemented!()
};
let mem = match base.v {
// SHIFTIREF(GETVARPARTIREF(_), ireg) -> add index and scale
TreeNode_::Instruction(Instruction{v: Instruction_::GetVarPartIRef{..}, ..}) => {
let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ret = self.addr_append_index_scale(mem, tmp_index, scale, vm);
trace!("MEM from SHIFTIREF(GETVARPARTIREF(_), ireg): {}", ret);
ret
},
// SHIFTIREF(ireg, ireg) -> base + index * scale
_ => {
let tmp = self.emit_ireg(base, f_content, f_context, vm);
let ret = MemoryLocation::Address {
base: tmp,
offset: None,
index: Some(tmp_index),
scale: Some(scale)
};
trace!("MEM from SHIFTIREF(ireg, ireg): {}", ret);
ret
}
};
mem
}
}
_ => unimplemented!() _ => unimplemented!()
} }
}, },
...@@ -2165,7 +2337,7 @@ impl <'a> InstructionSelection { ...@@ -2165,7 +2337,7 @@ impl <'a> InstructionSelection {
} }
} }
fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool { fn match_funcref_const(&mut self, op: &TreeNode) -> bool {
match op.v { match op.v {
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
let is_const = match pv.v { let is_const = match pv.v {
...@@ -2185,7 +2357,7 @@ impl <'a> InstructionSelection { ...@@ -2185,7 +2357,7 @@ impl <'a> InstructionSelection {
} }
} }
fn node_funcref_const_to_id(&mut self, op: &P<TreeNode>) -> MuID { fn node_funcref_const_to_id(&mut self, op: &TreeNode) -> MuID {
match op.v { match op.v {
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
match pv.v { match pv.v {
...@@ -2198,7 +2370,7 @@ impl <'a> InstructionSelection { ...@@ -2198,7 +2370,7 @@ impl <'a> InstructionSelection {
} }
#[allow(unused_variables)] #[allow(unused_variables)]
fn match_mem(&mut self, op: &P<TreeNode>) -> bool { fn match_mem(&mut self, op: &TreeNode) -> bool {
match op.v { match op.v {
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
match pv.v { match pv.v {
...@@ -2217,7 +2389,7 @@ impl <'a> InstructionSelection { ...@@ -2217,7 +2389,7 @@ impl <'a> InstructionSelection {
} }
#[allow(unused_variables)] #[allow(unused_variables)]
fn emit_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> { fn emit_mem(&mut self, op: &TreeNode, vm: &VM) -> P<Value> {
unimplemented!() unimplemented!()
} }
...@@ -2243,7 +2415,7 @@ impl <'a> InstructionSelection { ...@@ -2243,7 +2415,7 @@ impl <'a> InstructionSelection {
} }
} }
fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) { fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty; let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) { if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
...@@ -2304,6 +2476,17 @@ impl <'a> InstructionSelection { ...@@ -2304,6 +2476,17 @@ impl <'a> InstructionSelection {
let tl = self.emit_get_threadlocal(None, f_content, f_context, vm); let tl = self.emit_get_threadlocal(None, f_content, f_context, vm);
self.emit_load_base_offset(exception_arg, &tl, *thread::EXCEPTION_OBJ_OFFSET as i32, vm); self.emit_load_base_offset(exception_arg, &tl, *thread::EXCEPTION_OBJ_OFFSET as i32, vm);
} }
/// Looks up the byte offset of field `index` in the backend layout of
/// struct type `ty`.
///
/// Panics if the backend has not computed a struct layout for `ty` yet.
fn get_field_offset(&mut self, ty: &P<MuType>, index: usize, vm: &VM) -> i32 {
    let ty_info = vm.get_backend_type_info(ty.id());
    match ty_info.struct_layout.as_ref() {
        Some(layout) => {
            debug_assert!(layout.len() > index);
            layout[index] as i32
        },
        None => panic!("a struct type does not have a layout yet: {:?}", ty_info)
    }
}
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String { fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = { let ret = {
......
...@@ -7,6 +7,7 @@ use std::sync::Arc; ...@@ -7,6 +7,7 @@ use std::sync::Arc;
use std::process::Command; use std::process::Command;
use std::process::Output; use std::process::Output;
use std::os::unix::process::ExitStatusExt;
pub mod aot; pub mod aot;
pub mod c_api; pub mod c_api;
...@@ -40,6 +41,10 @@ pub fn exec_nocheck (mut cmd: Command) -> Output { ...@@ -40,6 +41,10 @@ pub fn exec_nocheck (mut cmd: Command) -> Output {
println!("---err---"); println!("---err---");
println!("{}", String::from_utf8_lossy(&output.stderr)); println!("{}", String::from_utf8_lossy(&output.stderr));
if output.status.signal().is_some() {
println!("terminated by a signal: {}", output.status.signal().unwrap());
}
output output
} }
......
...@@ -562,5 +562,452 @@ pub fn hybrid_fix_part_insts() -> VM { ...@@ -562,5 +562,452 @@ pub fn hybrid_fix_part_insts() -> VM {
vm.define_func_version(func_ver); vm.define_func_version(func_ver);
vm
}
#[test]
fn test_hybrid_var_part() {
    VM::start_logging_trace();

    // Build a VM holding the test function, then compile it.
    let vm = Arc::new(hybrid_var_part_insts());
    let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
    let func_id = vm.id_of("hybrid_var_part_insts");
    {
        let funcs_guard = vm.funcs().read().unwrap();
        let func = funcs_guard.get(&func_id).unwrap().read().unwrap();
        let vers_guard = vm.func_vers().read().unwrap();
        let mut ver = vers_guard.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
        compiler.compile(&mut ver);
    }

    // Emit the compiled context, link it into an executable, and run it.
    vm.make_primordial_thread(func_id, vec![]);
    backend::emit_context(&vm);
    let executable = aot::link_primordial(vec!["hybrid_var_part_insts".to_string()], "hybrid_var_part_insts_test");
    let output = aot::execute_nocheck(executable);

    // The generated program reports its result via the process exit code;
    // 20 is the expected value for this test.
    assert!(output.status.code().is_some());
    let ret_code = output.status.code().unwrap();
    println!("return code: {}", ret_code);
    assert!(ret_code == 20);
}
pub fn hybrid_var_part_insts() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(int64.as_entity(), Mu("int64"));
// .typedef @my_hybrid = hybrid<@int64 @int64 | @int64>
let my_hybrid = vm.declare_type(vm.next_id(), MuType_::hybrid("MyHybrid".to_string(), vec![int64.clone(), int64.clone()], int64.clone()));
vm.set_name(my_hybrid.as_entity(), Mu("my_hybrid"));
// .typedef @ref_hybrid = ref<@my_hybrid>
let ref_hybrid = vm.declare_type(vm.next_id(), MuType_::muref(my_hybrid.clone()));
vm.set_name(ref_hybrid.as_entity(), Mu("ref_hybrid"));
// .typedef @iref_hybrid = iref<@my_hybrid>
let iref_hybrid = vm.declare_type(vm.next_id(), MuType_::iref(my_hybrid.clone()));
vm.set_name(iref_hybrid.as_entity(), Mu("iref_hybrid"));
// .typedef @iref_int64 = iref<@int64>
let iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(int64.clone()));
vm.set_name(iref_int64.as_entity(), Mu("iref_int64"));
// .const @int64_0 <@int64> = 0
let int64_0 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(0));
vm.set_name(int64_0.as_entity(), Mu("int64_0"));
// .const @int64_1 <@int64> = 1
let int64_1 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(1));
vm.set_name(int64_1.as_entity(), Mu("int64_1"));
// .const @int64_2 <@int64> = 2
let int64_2 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(2));
vm.set_name(int64_2.as_entity(), Mu("int64_2"));
// .const @int64_3 <@int64> = 3
let int64_3 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(3));
vm.set_name(int64_3.as_entity(), Mu("int64_3"));
// .const @int64_4 <@int64> = 4
let int64_4 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(4));
vm.set_name(int64_4.as_entity(), Mu("int64_4"));
// .const @int64_10 <@int64> = 10
let int64_10 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(10));
vm.set_name(int64_10.as_entity(), Mu("int64_10"));
// .funcsig @noparam_noret_sig = () -> ()
let noparam_noret_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(noparam_noret_sig.as_entity(), Mu("noparam_noret_sig"));