
Commit 8f2ff707 authored by John Zhang

Merge branch 'master' into jit-test

parents 4bfc9b22 d9758e2c
#!/bin/sh
RUSTFLAGS=-Zincremental=target/incr-cache RUST_BACKTRACE=1 RUST_TEST_THREADS=1 cargo test "$@"
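(At the time of this commit, -Zincremental=<dir> was rustc's unstable nightly flag for early incremental-compilation support; pointing it at target/incr-cache lets repeated test builds reuse cached compilation artifacts.)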
@@ -1375,36 +1375,79 @@ impl CodeGenerator for ASMCodeGen {
false
)
}
fn emit_movsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: movsd {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 5 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 5 + 1 + reg1.len() + 1);
let asm = format!("movsd {},{}", reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2]
},
hashmap!{
id1 => vec![loc1]
},
false
)
}
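// --- illustrative sketch (not part of this commit) ----------------------
// the literal offsets fed to prepare_reg ("5 + 1", "5 + 1 + reg1.len() + 1")
// are byte columns inside the line being emitted: mnemonic length, plus one
// for the separating space, then past the first operand and its trailing
// comma; the recorded locations let register allocation patch real register
// names into the string later.
fn operand_columns(mnemonic: &str, src: &str) -> (usize, usize) {
let src_col = mnemonic.len() + 1; // e.g. 5 + 1 for "movsd"
let dest_col = src_col + src.len() + 1; // skip src and the ','
(src_col, dest_col)
}
// operand_columns("movsd", "%xmm0") == (6, 12) for "movsd %xmm0,%xmm1"
// -------------------------------------------------------------------------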
// load
fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: movsd {} -> {}", src, dest);
let (mem, uses) = self.prepare_mem(src, 5 + 1);
let (reg, id2, loc2) = self.prepare_reg(dest, 5 + 1 + mem.len() + 1);
let asm = format!("movsd {},{}", mem, reg);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2]
},
uses,
true
)
}
// store
fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: movsd {} -> {}", src, dest);
let (reg, id1, loc1) = self.prepare_reg(src, 5 + 1);
let (mem, mut uses) = self.prepare_mem(dest, 5 + 1 + reg.len() + 1);
// registers used to compose the memory operand are counted as 'uses'
// merge the src register into the use map returned by prepare_mem
if uses.contains_key(&id1) {
uses.get_mut(&id1).unwrap().push(loc1);
} else {
uses.insert(id1, vec![loc1]);
}
let asm = format!("movsd {},{}", reg, mem);
self.add_asm_inst(
asm,
hashmap!{},
uses,
true
)
}
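// --- illustrative note (not part of this commit) --------------------------
// the contains_key / get_mut / insert sequence above can be written in one
// line with the standard HashMap entry API, with identical behavior:
//
//     uses.entry(id1).or_insert_with(Vec::new).push(loc1);
//
// ---------------------------------------------------------------------------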
fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: lea {} -> {}", src, dest);
let (mem, uses) = self.prepare_mem(src, 4 + 1);
let (reg, id2, loc2) = self.prepare_reg(dest, 4 + 1 + mem.len() + 1);
let asm = format!("leaq {},{}", mem, reg);
self.add_asm_inst(
asm,
hashmap!{
@@ -1412,16 +1455,16 @@ impl CodeGenerator for ASMCodeGen {
},
uses,
true
)
}
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: and {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("andq ${},{}", src, reg1);
self.add_asm_inst(
asm,
hashmap!{
@@ -1433,14 +1476,35 @@ impl CodeGenerator for ASMCodeGen {
false
)
}
fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: and {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("andq {},{}", reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
},
false
)
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("addq {},{}", reg1, reg2);
self.add_asm_inst(
asm,
@@ -1453,7 +1517,7 @@ impl CodeGenerator for ASMCodeGen {
},
false
)
}
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
@@ -1478,6 +1542,32 @@ impl CodeGenerator for ASMCodeGen {
false
)
}
fn emit_addsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: addsd {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 5 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 5 + 1 + reg1.len() + 1);
let asm = format!("addsd {},{}", reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
},
false
)
}
fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: addsd {}, {} -> {}", dest, src, dest);
unimplemented!()
}
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
...
@@ -26,26 +26,33 @@ pub trait CodeGenerator {
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_mov_r64_imm32 (&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r64_mem64 (&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem64_r64 (&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_movsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_and_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_addsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_mul_r64 (&mut self, src: &P<Value>);
fn emit_mul_mem64(&mut self, src: &P<Value>);
fn emit_jmp(&mut self, dest: MuName);
...
@@ -171,7 +171,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -182,7 +182,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -196,7 +196,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_mem(&ops[op2]);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -215,7 +215,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -226,7 +226,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -240,7 +240,7 @@ impl <'a> InstructionSelection {
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2]);
let res_tmp = self.get_result_value(node);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
@@ -284,7 +284,7 @@ impl <'a> InstructionSelection {
// put imm in a temporary
// here we use result reg as temporary
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
self.backend.emit_mul_r64(&res_tmp);
@@ -297,9 +297,41 @@ impl <'a> InstructionSelection {
}
// mov rax -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &rax);
},
// floating point
op::BinOp::FAdd => {
if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
trace!("emit add-fpreg-fpreg");
let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
let res_tmp = self.get_result_value(node);
// movsd op1, res
self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
// addsd op2, res
self.backend.emit_addsd_f64_f64(&res_tmp, &reg_op2);
} else if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit add-fpreg-mem");
let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2]);
let res_tmp = self.get_result_value(node);
// movsd op1, res
self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
// addsd op2, res
self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2);
} else if self.match_mem(&ops[op1]) && self.match_fpreg(&ops[op2]) {
trace!("emit add-mem-fpreg");
unimplemented!();
} else {
unimplemented!()
}
}
_ => unimplemented!()
}
@@ -321,7 +353,7 @@ impl <'a> InstructionSelection {
}
let resolved_loc = self.node_mem_to_value(loc_op, vm);
let res_temp = self.get_result_value(node);
if self.match_ireg(node) {
// emit mov(GPR)
@@ -371,7 +403,7 @@ impl <'a> InstructionSelection {
let ops = inst.ops.read().unwrap();
let ref op = ops[op_index];
let res_tmp = self.get_result_value(node);
let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
if hdr_size == 0 {
@@ -440,7 +472,7 @@ impl <'a> InstructionSelection {
// put start as result
// ASM: mov %start -> %result
let tmp_res = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
// ASM jmp alloc_end
@@ -533,8 +565,14 @@ impl <'a> InstructionSelection {
fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, dest.ty.clone(), vm);
if dest.is_int_reg() {
self.backend.emit_mov_r64_mem64(dest, &mem);
} else if dest.is_fp_reg() {
self.backend.emit_movsd_f64_mem64(dest, &mem);
} else {
unimplemented!();
}
}
fn emit_store_base_offset (&mut self, base: &P<Value>, offset: i32, src: &P<Value>, vm: &VM) {
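// (body collapsed in this diff view; the following is a hypothetical sketch
// assuming the store path simply mirrors emit_load_base_offset above --
// it is not the commit's actual code)
let mem = self.make_memory_op_base_offset(base, offset, src.ty.clone(), vm);
if src.is_int_reg() {
self.backend.emit_mov_mem64_r64(&mem, src);
} else if src.is_fp_reg() {
self.backend.emit_movsd_mem64_f64(&mem, src);
} else {
unimplemented!();
}
}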
@@ -929,7 +967,7 @@ impl <'a> InstructionSelection {
// unload arguments
let mut gpr_arg_count = 0;
let mut fpr_arg_count = 0;
// initial stack arg is at RBP+16
// arg <- RBP + 16
// return addr
@@ -949,7 +987,17 @@ impl <'a> InstructionSelection {
stack_arg_offset += arg_size as i32;
}
} else if arg.is_fp_reg() {
if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
self.backend.emit_movsd_f64_f64(&arg, &x86_64::ARGUMENT_FPRs[fpr_arg_count]);
fpr_arg_count += 1;
} else {
// unload from stack
self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
// move stack_arg_offset by the size of 'arg'
let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
stack_arg_offset += arg_size as i32;
}
} else {
// args that are not fp or int (possibly struct/array/etc)
unimplemented!();
@@ -971,7 +1019,7 @@ impl <'a> InstructionSelection {
};
let mut gpr_ret_count = 0;
let mut fpr_ret_count = 0;
for i in ret_val_indices {
let ref ret_val = ops[*i];
if self.match_ireg(ret_val) {
@@ -984,6 +1032,11 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
gpr_ret_count += 1;
} else if self.match_fpreg(ret_val) {
let reg_ret_val = self.emit_fpreg(ret_val, f_content, f_context, vm);
self.backend.emit_movsd_f64_f64(&x86_64::RETURN_FPRs[fpr_ret_count], &reg_ret_val);
fpr_ret_count += 1;
} else {
unimplemented!();
}
@@ -1061,7 +1114,7 @@ impl <'a> InstructionSelection {
let ref value = inst.value.as_ref().unwrap()[0];
if types::is_scalar(&value.ty) {
if value.is_int_reg() {
true
} else {
false
@@ -1076,30 +1129,63 @@ impl <'a> InstructionSelection {
}
}
}
fn match_fpreg(&mut self, op: &TreeNode) -> bool {
match op.v {
TreeNode_::Instruction(ref inst) => {
if inst.value.is_some() {
if inst.value.as_ref().unwrap().len() > 1 {
return false;
}
let ref value = inst.value.as_ref().unwrap()[0];
if value.is_fp_reg() {
true
} else {
false
}
} else {
false
}
}
TreeNode_::Value(ref pv) => {
pv.is_fp_reg()
}
}
}
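// --- illustrative sketch (not part of this commit) ----------------------
// the nested ifs in match_fpreg collapse to one boolean expression per arm;
// an equivalent, more condensed form (also tolerating an empty result list):
//
//     fn match_fpreg(&mut self, op: &TreeNode) -> bool {
//         match op.v {
//             TreeNode_::Instruction(ref inst) => match inst.value {
//                 Some(ref vals) => vals.len() == 1 && vals[0].is_fp_reg(),
//                 None => false
//             },
//             TreeNode_::Value(ref pv) => pv.is_fp_reg()
//         }
//     }
// -------------------------------------------------------------------------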
fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm);
self.get_result_value(op)
},
TreeNode_::Value(ref pv) => {
match pv.v {
Value_::SSAVar(_) => pv.clone(),
_ => panic!("expected ireg")
}
}
}
}
fn emit_fpreg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Instruction(_) => {
self.instruction_select(op, f_content, f_context, vm);
self.get_result_value(op)
},
TreeNode_::Value(ref pv) => {
match pv.v {
Value_::SSAVar(_) => pv.clone(),
_ => panic!("expected fpreg")
}
}
}
}
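// note: only SSAVar operands pass through emit_fpreg; constant doubles hit
// the panic arm, consistent with x86-64 SSE having no floating-point
// immediate form -- an FP constant has to be materialized through memory first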
fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
@@ -1224,7 +1310,7 @@ impl <'a> InstructionSelection {
unimplemented!()
}
fn get_result_value(&mut self, node: &TreeNode) -> P<Value> {
match node.v {
TreeNode_::Instruction(ref inst) => {
if inst.value.is_some() {
...
@@ -143,7 +143,9 @@ lazy_static!{
XMM1.clone()
];
pub static ref ARGUMENT_FPRs : [P<Value>; 8] = [
XMM0.clone(),
XMM1.clone(),
XMM2.clone(),
XMM3.clone(),
XMM4.clone(),
...
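(ARGUMENT_FPRs grows from six to eight entries to match the System V AMD64 calling convention, which passes the first eight floating-point arguments in XMM0 through XMM7; XMM5 through XMM7 are collapsed in this view.)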
@@ -2,6 +2,8 @@ extern crate mu;
#[macro_use]
extern crate log;
extern crate simple_logger;
#[macro_use]
extern crate maplit;
mod test_ir;
mod test_compiler;
...
@@ -5,4 +5,5 @@ mod test_global;
mod test_compiler;
mod test_alloc;
mod test_exception;
mod test_thread;
mod test_floatingpoint;
\ No newline at end of file
extern crate mu;
extern crate log;
extern crate simple_logger;
extern crate libloading;
use self::mu::ast::types::*;
use self::mu::ast::ir::*;
use self::mu::ast::inst::*;
use self::mu::ast::op::*;
use self::mu::vm::*;
use self::mu::compiler::*;
use std::sync::RwLock;
use std::sync::Arc;
use aot;
#[test]
fn test_fp_add() {
simple_logger::init_with_level(log::LogLevel::Trace).ok();
let vm = Arc::new(fp_add());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
let func_id = vm.id_of("fp_add");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
}
backend::emit_context(&vm);
let dylib = aot::link_dylib(vec![Mu("fp_add")], "libfp_add.dylib");
let lib = libloading::Library::new(dylib.as_os_str()).unwrap();
unsafe {
let fp_add : libloading::Symbol<unsafe extern fn(f64, f64) -> f64> = lib.get(b"fp_add").unwrap();
let fp_add_1_1 = fp_add(1f64, 1f64);
println!("fp_add(1, 1) = {}", fp_add_1_1);
assert!(fp_add_1_1 == 2f64);
}
}
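(The exact float comparison in the assert is safe here: 1.0 + 1.0 is exactly representable in IEEE 754 binary64, so fp_add(1f64, 1f64) must return precisely 2f64.)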
fn fp_add() -> VM {
let vm = VM::new();
// .typedef @double = double
let type_def_double = vm.declare_type(vm.next_id(), MuType_::double());
vm.set_name(type_def_double.as_entity(), Mu("double"));
// .funcsig @fp_add_sig = (@double @double) -> (@double)
let fp_add_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_double.clone()], vec![type_def_double.clone(), type_def_double.clone()]);
vm.set_name(fp_add_sig.as_entity(), Mu("fp_add_sig"));
// .funcdecl @fp_add <@fp_add_sig>
let func_id = vm.next_id();
let func = MuFunction::new(func_id, fp_add_sig.clone());
vm.set_name(func.as_entity(), Mu("fp_add"));
vm.declare_func(func);
// .funcdef @fp_add VERSION @fp_add_v1 <@fp_add_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, fp_add_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("fp_add_v1"));
// %entry(<@double> %a, <@double> %b):
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
let blk_entry_a = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let blk_entry_b = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
vm.set_name(blk_entry_b.as_entity(), Mu("blk_entry_b"));
// %r = FADD %a %b
let blk_entry_r = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
vm.set_name(blk_entry_r.as_entity(), Mu("blk_entry_r"));
let blk_entry_add = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_r.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone(), blk_entry_b.clone()]),
v: Instruction_::BinOp(BinOp::FAdd, 0, 1)
});
// RET %r
let blk_entry_term = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_r.clone()]),
v: Instruction_::Return(vec![0])
});
blk_entry.content = Some(BlockContent{
args: vec![blk_entry_a.clone_value(), blk_entry_b.clone_value()],
exn_arg: None,
body: vec![blk_entry_add, blk_entry_term],
keepalives: None
});
func_ver.define(FunctionContent{
entry: blk_entry.id(),
blocks: hashmap!{
blk_entry.id() => blk_entry
}
});
vm.define_func_version(func_ver);
vm
}
\ No newline at end of file