Commit d7f104ea authored by qinsoon

make the allocation test run as the primordial function; seems to be working

cannot get it to work by loading it dynamically: Rust mangles the Mu runtime
functions in the test executable (even when they are marked #[no_mangle]), so the
dynamic library cannot call the Mu runtime functions (symbols not found)
parent 43eb1d7a
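
For context, a hedged sketch of how a runtime function is normally exported with an unmangled, C-ABI symbol; the function name below is illustrative, not the actual runtime entry point:

```rust
// Illustrative only: exporting a function with an unmangled, C-ABI symbol so that
// dlopen'ed code can resolve it by name. `muentry_get_thread_local` is a
// hypothetical stand-in, not necessarily the real runtime API.
#[no_mangle]
pub extern "C" fn muentry_get_thread_local() -> *mut u8 {
    std::ptr::null_mut() // placeholder body
}
```

Even so, a symbol defined in the test executable (rather than in a shared library) is only visible to a dlopen'ed library if the executable exports its dynamic symbols (for example via -rdynamic on Linux), which is consistent with the failure described above.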
......@@ -459,6 +459,13 @@ pub struct Value {
}
impl Value {
pub fn is_mem(&self) -> bool {
match self.v {
Value_::Memory(_) => true,
_ => false
}
}
pub fn is_int_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
......
......@@ -425,7 +425,7 @@ impl ASMCodeGen {
let mut locs : Vec<ASMLocation> = vec![];
let mut result_str : String = "".to_string();
let mut loc_cursor : usize = 0;
let mut loc_cursor : usize = loc;
match op.v {
// offset(base,index,scale)
......@@ -755,7 +755,7 @@ impl CodeGenerator for ASMCodeGen {
);
}
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32) {
trace!("emit: cmp {} {}", op1, op2);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
......@@ -794,7 +794,7 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: mov {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......@@ -854,7 +854,7 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32) {
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: mov {} -> {}", src, dest);
let (mem, id, loc) = self.prepare_mem(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......@@ -925,7 +925,7 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: and {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......@@ -965,7 +965,7 @@ impl CodeGenerator for ASMCodeGen {
unimplemented!()
}
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......@@ -1005,7 +1005,7 @@ impl CodeGenerator for ASMCodeGen {
unimplemented!()
}
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......
......@@ -15,27 +15,27 @@ pub trait CodeGenerator {
fn end_block(&mut self, block_name: MuName);
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32);
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_mul_r64(&mut self, src: &P<Value>);
fn emit_mul_mem64(&mut self, src: &P<Value>);
......
......@@ -426,7 +426,12 @@ impl <'a> InstructionSelection {
let ref op = ops[op_index];
let res_tmp = self.emit_get_result(node);
self.emit_lea_base_offset(&res_tmp, &op.clone_value(), 0, vm);
let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
if hdr_size == 0 {
self.emit_general_move(&op, &res_tmp, f_content, f_context, vm);
} else {
self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
}
}
Instruction_::ThreadExit => {
......@@ -434,7 +439,7 @@ impl <'a> InstructionSelection {
// get thread local and add offset to get sp_loc
let tl = self.emit_get_threadlocal(f_content, f_context, vm);
self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as u32);
self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as i32);
self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], None, f_content, f_context, vm);
}
......@@ -456,35 +461,35 @@ impl <'a> InstructionSelection {
// ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
let tmp_cursor = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as u32, vm);
self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
// alignup cursor (cursor + align - 1 & !(align - 1))
// ASM: lea align-1(%cursor) -> %start
let align = ty_info.alignment;
let align = ty_info.alignment as i32;
let tmp_start = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_start, &tmp_tl, (align - 1) as u32, vm);
self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
// ASM: and %start, !(align-1) -> %start
self.backend.emit_and_r64_imm32(&tmp_start, !(align-1) as u32);
self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
// bump cursor
// ASM: lea size(%start) -> %end
let tmp_end = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as u32, vm);
self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
// check with limit
// ASM: cmp %end, [%tl + allocator_offset + limit_offset]
let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as u32, runtime::ADDRESS_TYPE.clone(), vm);
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, runtime::ADDRESS_TYPE.clone(), vm);
self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
// branch to slow path if end > limit
// ASM: jg alloc_slow
// ASM: jl alloc_slow
let slowpath = format!("{}_allocslow", node.id());
self.backend.emit_jg(slowpath.clone());
self.backend.emit_jl(slowpath.clone());
// update cursor
// ASM: mov %end -> [%tl + allocator_offset + limit_offset]
self.emit_store_base_offset(&tmp_tl, limit_offset as u32, &tmp_end, vm);
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
// put start as result
// ASM: mov %start -> %result
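
For reference, the fast path emitted in this hunk follows the usual bump-pointer pattern; a minimal sketch of the same arithmetic in plain Rust (not the emitted code itself), assuming `align` is a power of two so that `!(align - 1)` is a valid mask:

```rust
// Minimal sketch of the fast-path bump allocation above.
fn bump_alloc(cursor: usize, limit: usize, size: usize, align: usize) -> Option<(usize, usize)> {
    let start = (cursor + align - 1) & !(align - 1); // align the cursor up
    let end = start + size;                          // bump by the object size
    if end > limit {
        None                 // over the limit: fall through to the slow-path call
    } else {
        Some((start, end))   // start is the result; end becomes the new cursor
    }
}
```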
......@@ -504,19 +509,26 @@ impl <'a> InstructionSelection {
self.current_block = Some(slowpath.clone());
self.backend.start_block(slowpath.clone());
self.backend.set_block_livein(slowpath.clone(), &vec![]);
// arg1: allocator address
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// arg2: size
let const_size = self.make_value_int_const(ty_size as u64, vm);
// arg3: align
let const_align= self.make_value_int_const(ty_align as u64, vm);
let rets = self.emit_runtime_entry(
&entrypoints::ALLOC_SLOW,
vec![const_size, const_align],
vec![tmp_allocator, const_size, const_align],
Some(vec![
tmp_res.clone()
]),
f_content, f_context, vm
);
// end block (no liveout)
// end block (no liveout other than result)
self.backend.end_block(slowpath.clone());
self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
......@@ -540,7 +552,7 @@ impl <'a> InstructionSelection {
f_context.make_temporary(vm.next_id(), ty).clone_value()
}
fn make_memory_op_base_offset (&mut self, base: &P<Value>, offset: u32, ty: P<MuType>, vm: &VM) -> P<Value> {
fn make_memory_op_base_offset (&mut self, base: &P<Value>, offset: i32, ty: P<MuType>, vm: &VM) -> P<Value> {
P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(),
......@@ -561,19 +573,19 @@ impl <'a> InstructionSelection {
})
}
fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: u32, vm: &VM) {
fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, dest.ty.clone(), vm);
self.backend.emit_mov_r64_mem64(dest, &mem);
}
fn emit_store_base_offset (&mut self, base: &P<Value>, offset: u32, src: &P<Value>, vm: &VM) {
fn emit_store_base_offset (&mut self, base: &P<Value>, offset: i32, src: &P<Value>, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, src.ty.clone(), vm);
self.backend.emit_mov_mem64_r64(&mem, src);
}
fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: u32, vm: &VM) {
fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, runtime::ADDRESS_TYPE.clone(), vm);
self.backend.emit_lea_r64(dest, &mem);
......@@ -628,7 +640,7 @@ impl <'a> InstructionSelection {
}
} else if arg.is_int_const() {
if x86_64::is_valid_x86_imm(arg) {
let int_const = arg.extract_int_const() as u32;
let int_const = arg.extract_int_const() as i32;
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], int_const);
......@@ -641,6 +653,14 @@ impl <'a> InstructionSelection {
// put the constant to memory
unimplemented!()
}
} else if arg.is_mem() {
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
self.backend.emit_mov_r64_mem64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
gpr_arg_count += 1;
} else {
// use stack to pass argument
unimplemented!()
}
} else {
// floating point
unimplemented!()
......@@ -916,12 +936,12 @@ impl <'a> InstructionSelection {
}
}
fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> u32 {
fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> i32 {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
Value_::Constant(Constant::Int(val)) => {
val as u32
val as i32
},
_ => panic!("expected iimm")
}
......@@ -964,7 +984,25 @@ impl <'a> InstructionSelection {
Value_::Constant(_) => unimplemented!()
}
}
TreeNode_::Instruction(_) => unimplemented!()
TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, vm)
}
}
fn emit_get_mem_from_inst(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Instruction(ref inst) => {
let ref ops = inst.ops.read().unwrap();
match inst.v {
Instruction_::GetIRef(op_index) => {
let ref op = ops[op_index];
self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, runtime::ADDRESS_TYPE.clone(), vm)
}
_ => unimplemented!()
}
},
_ => panic!("expecting a instruction that yields a memory address")
}
}
......
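
A hypothetical sketch of the address arithmetic that GetIRef lowers to in the hunk above: the internal reference points just past the object header (currently zero-sized, hence the `hdr_size == 0` shortcut):

```rust
// Illustrative only: iref = object reference + object header size.
fn iref_from_objref(objref: usize, object_header_size: usize) -> usize {
    objref + object_header_size // with OBJECT_HEADER_SIZE == 0 this is the identity
}
```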
......@@ -260,8 +260,66 @@ pub fn is_machine_reg(reg: MuID) -> bool {
}
}
fn build_live_set(cf: &CompiledFunction, func: &MuFunctionVersion) {
#[allow(unused_variables)]
fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
let n_insts = cf.mc.number_of_insts();
let mut livein : Vec<Vec<MuID>> = vec![vec![]; n_insts];
let mut liveout : Vec<Vec<MuID>> = vec![vec![]; n_insts];
let mut is_changed = true;
while is_changed {
// reset
is_changed = false;
for n in 0..n_insts {
let in_set_old = livein[n].to_vec(); // copy to new vec
let out_set_old = liveout[n].to_vec();
// in[n] <- use[n] + (out[n] - def[n])
// (1) in[n] = use[n]
let mut in_set_new = vec![];
in_set_new.extend_from_slice(&cf.mc.get_inst_reg_uses(n));
// (2) diff = out[n] - def[n]
let mut diff = liveout[n].to_vec();
for def in cf.mc.get_inst_reg_defines(n) {
vec_utils::remove_value(&mut diff, *def);
}
// (3) in[n] = in[n] + diff
vec_utils::append_unique(&mut in_set_new, &mut diff);
// update livein[n]
livein[n].clear();
livein[n].extend_from_slice(&in_set_new);
// out[n] <- union(in[s] for every successor s of n)
let mut union = vec![];
for s in cf.mc.get_succs(n) {
vec_utils::append_clone_unique(&mut union, &livein[*s]);
}
// update liveout[n]
liveout[n].clear();
liveout[n].extend_from_slice(&union);
let n_changed = !vec_utils::is_identical_ignore_order(&livein[n], &in_set_old)
|| !vec_utils::is_identical_ignore_order(&liveout[n], &out_set_old);
is_changed = is_changed || n_changed;
}
}
for block in cf.mc.get_all_blocks().to_vec() {
if cf.mc.get_ir_block_livein(&block).is_none() {
let start_inst = cf.mc.get_block_range(&block).unwrap().start;
cf.mc.set_ir_block_livein(&block, livein[start_inst].to_vec());
}
if cf.mc.get_ir_block_liveout(&block).is_none() {
let end_inst = cf.mc.get_block_range(&block).unwrap().end;
cf.mc.set_ir_block_liveout(&block, liveout[end_inst].to_vec());
}
}
}
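
As a sanity check of the equations in build_live_set, here is a self-contained sketch of the same fixpoint on a hypothetical two-instruction body (instruction shapes and register numbers are illustrative only):

```rust
// Worked example of in[n] = use[n] ∪ (out[n] − def[n]) and out[n] = ∪ in[s]:
//   inst 0: mov 1 -> r1   (defs = {r1}, uses = {},   succs = {1})
//   inst 1: ret r1        (defs = {},   uses = {r1}, succs = {})
fn main() {
    let uses:  [Vec<usize>; 2] = [vec![], vec![1]];
    let defs:  [Vec<usize>; 2] = [vec![1], vec![]];
    let succs: [Vec<usize>; 2] = [vec![1], vec![]];
    let mut livein:  [Vec<usize>; 2] = [vec![], vec![]];
    let mut liveout: [Vec<usize>; 2] = [vec![], vec![]];
    for _ in 0..2 {
        // iterate in reverse so the fixpoint is reached quickly
        for n in (0..2).rev() {
            liveout[n] = succs[n].iter().flat_map(|&s| livein[s].clone()).collect();
            let mut in_new: Vec<usize> = uses[n].clone();
            in_new.extend(liveout[n].iter().filter(|&r| !defs[n].contains(r)));
            in_new.sort();
            in_new.dedup();
            livein[n] = in_new;
        }
    }
    assert_eq!(livein[1], vec![1]);  // r1 is live into the ret
    assert_eq!(liveout[0], vec![1]); // and live out of the mov that defines it
    assert!(livein[0].is_empty());   // nothing is live into the first instruction
}
```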
// from Tailoring Graph-coloring Register Allocation For Runtime Compilation, Figure 4
......
......@@ -5,6 +5,8 @@ use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
pub const OBJECT_HEADER_SIZE : usize = 0;
pub fn init() {
MARK_STATE.store(1, atomic::Ordering::SeqCst);
}
......
......@@ -2,16 +2,17 @@
#include <stdlib.h>
#include <stdio.h>
#include <dlfcn.h>
#include <pthread.h>
__thread void* mu_tls;
void set_thread_local(void* thread) {
printf("setting mu_tls to %p\n", thread);
printf("Thread%p: setting mu_tls to %p\n", pthread_self(), thread);
mu_tls = thread;
}
void* get_thread_local() {
printf("getting mu_tls as %p\n", mu_tls);
printf("Thread%p: getting mu_tls as %p\n", pthread_self(), mu_tls);
return mu_tls;
}
......
......@@ -12,6 +12,25 @@
a == b
}
pub fn is_identical_ignore_order<T: Ord + Clone> (vec: &Vec<T>, vec2: &Vec<T>) -> bool {
if vec.len() != vec2.len() {
return false;
}
let mut vec = vec.to_vec();
let mut vec2 = vec2.to_vec();
vec.sort();
vec2.sort();
for i in 0..vec.len() {
if vec[i] != vec2[i] {
return false;
}
}
return true;
}
pub fn as_str<T: fmt::Display>(vec: &Vec<T>) -> String {
let mut ret = String::new();
for i in 0..vec.len() {
......@@ -48,6 +67,12 @@
add_unique(vec, val);
}
}
pub fn append_clone_unique<T: PartialEq + Clone> (vec: &mut Vec<T>, vec2: &Vec<T>) {
for ele in vec2 {
add_unique(vec, ele.clone());
}
}
pub fn find_value<T: PartialEq> (vec: &Vec<T>, val: T) -> Option<usize> {
for i in 0..vec.len() {
......
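
A brief usage sketch of the two vec_utils helpers added above (hypothetical call sites with the module's helpers in scope, assuming add_unique appends a value only when it is not already present):

```rust
fn vec_utils_examples() {
    // is_identical_ignore_order compares as multisets: order is ignored, multiplicity is not.
    assert!(is_identical_ignore_order(&vec![3, 1, 2], &vec![1, 2, 3]));
    assert!(!is_identical_ignore_order(&vec![1, 2, 2], &vec![1, 2, 3]));

    // append_clone_unique clones over each element of the second vector, skipping duplicates.
    let mut acc = vec![1, 2];
    append_clone_unique(&mut acc, &vec![2, 3]);
    assert_eq!(acc, vec![1, 2, 3]);
}
```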
......@@ -37,6 +37,8 @@ mod aot {
use std::process::Command;
use std::process::Output;
const CC : &'static str = "clang";
fn exec (mut cmd: Command) -> Output {
println!("executing: {:?}", cmd);
let output = cmd.output().expect("failed to execute");
......@@ -50,7 +52,7 @@ mod aot {
}
fn link_executable_internal (files: Vec<PathBuf>, out: PathBuf) -> PathBuf {
let mut gcc = Command::new("gcc");
let mut gcc = Command::new(CC);
for file in files {
println!("link with {:?}", file.as_path());
......@@ -70,7 +72,7 @@ mod aot {
let mut object_files : Vec<PathBuf> = vec![];
for file in files {
let mut gcc = Command::new("gcc");
let mut gcc = Command::new(CC);
gcc.arg("-c");
gcc.arg("-fpic");
......@@ -86,8 +88,11 @@ mod aot {
exec(gcc);
}
let mut gcc = Command::new("gcc");
let mut gcc = Command::new(CC);
gcc.arg("-shared");
gcc.arg("-Wl");
gcc.arg("-undefined");
gcc.arg("dynamic_lookup");
for obj in object_files {
gcc.arg(obj.as_os_str());
}
......
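
One note on the flags added in the hunk above: when going through the compiler driver, the undefined-symbol option is usually passed either straight through or comma-joined onto -Wl; a bare "-Wl" argument forwards nothing to the linker. A hedged sketch (clang on macOS assumed):

```rust
use std::process::Command;

// Sketch only: how -undefined dynamic_lookup is commonly spelled via the driver.
fn dylib_link_command() -> Command {
    let mut cc = Command::new("clang");
    cc.arg("-shared");
    cc.arg("-Wl,-undefined,dynamic_lookup"); // equivalently: .arg("-undefined").arg("dynamic_lookup")
    cc
}
```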
......@@ -36,30 +36,21 @@ fn test_instruction_new() {
compiler.compile(&mut func_ver);
}
vm.make_primordial_thread(func_id, vec![]);
backend::emit_context(&vm);
let dylib = aot::link_dylib(vec![Mu("fac")], "liballoc_new.dylib");
let lib = libloading::Library::new(dylib.as_os_str()).unwrap();
unsafe {
let alloc_new : libloading::Symbol<unsafe extern fn() -> u64> = lib.get(b"alloc_new").unwrap();
// before invoking it, we need to disguise current thread as mu thread (having an allocator)
fake_mutator_for_cur_thread(&vm);
let ret = alloc_new();
println!("return value from alloc_new is {}", ret);
assert!(ret == 1);
}
let executable = aot::link_primordial(vec!["alloc_new".to_string()], "alloc_new_test");
aot::execute(executable);
}
#[allow(dead_code)]
fn fake_mutator_for_cur_thread(vm: &VM) {
// init gc
const IMMIX_SPACE_SIZE : ByteSize = 500 << 20;
const LO_SPACE_SIZE : ByteSize = 500 << 20;
mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = mm::new_mutator();
let mutator = mm::new_mutator();
let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::fake_thread(vm.next_id(), mutator)));
......@@ -87,7 +78,7 @@ pub fn alloc_new() -> VM {
vm.set_name(const_def_int64_1.as_entity(), "int64_1".to_string());
// .funcsig @alloc_new_sig = () -> (@int64)
let func_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![type_def_int64.clone()]);
let func_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_int64.clone()], vec![]);
vm.set_name(func_sig.as_entity(), "alloc_new_sig".to_string());
// .funcdecl @alloc_new <@alloc_new_sig>
......@@ -121,11 +112,11 @@ pub fn alloc_new() -> VM {
v: Instruction_::GetIRef(0)
});
// STORE <@int_64> @a @int_64_1
// STORE <@int_64> @a_iref @int_64_1
let blk_0_const_int64_1 = func_ver.new_constant(vm.next_id(), const_def_int64_1.clone());
let blk_0_inst2 = func_ver.new_inst(vm.next_id(), Instruction{
value: None,
ops: RwLock::new(vec![blk_0_a.clone(), blk_0_const_int64_1.clone()]),
ops: RwLock::new(vec![blk_0_a_iref.clone(), blk_0_const_int64_1.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
......@@ -134,23 +125,23 @@ pub fn alloc_new() -> VM {
}
});
// %x = LOAD <@int_64> @a_iref
let blk_0_x = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_0_x.as_entity(), "blk_0_x".to_string());
let blk_0_inst3 = func_ver.new_inst(vm.next_id(), Instruction{
value: Some(vec![blk_0_x.clone_value()]),
ops: RwLock::new(vec![blk_0_a_iref.clone()]),
v: Instruction_::Load{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0
}
});
let blk_0_term = func_ver.new_inst(vm.next_id(), Instruction{
// // %x = LOAD <@int_64> @a_iref
// let blk_0_x = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
// vm.set_name(blk_0_x.as_entity(), "blk_0_x".to_string());
// let blk_0_inst3 = func_ver.new_inst(vm.next_id(), Instruction{
// value: Some(vec![blk_0_x.clone_value()]),
// ops: RwLock::new(vec![blk_0_a_iref.clone()]),
// v: Instruction_::Load{
// is_ptr: false,
// order: MemoryOrder::Relaxed,
// mem_loc: 0
// }
// });
let blk_0_term = func_ver.new_inst(vm.next_id(), Instruction {
value: None,
ops: RwLock::new(vec![blk_0_x.clone()]),
v: Instruction_::Return(vec![0])
ops: RwLock::new(vec![]),
v: Instruction_::ThreadExit
});
let blk_0_content = BlockContent {
......