Commit d7f104ea authored by qinsoon

make allocation test primordial, seems working

cannot get it to work by loading the test dynamically. Rust mangles the mu runtime
functions in the test executable (even when they are marked #[no_mangle]), so the
dynamic library cannot call the mu runtime functions (symbols not found)
parent 43eb1d7a
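For reference, exporting an unmangled, C-ABI symbol from a Rust binary normally takes both attributes shown below, and on Linux the executable must additionally be linked with -rdynamic (GNU ld) for a dlopen'd library to resolve symbols defined in it. This is only a sketch of that pattern; the function name is hypothetical, not part of this patch:

// Sketch of the export pattern a dylib-based test would rely on.
// `#[no_mangle]` keeps the symbol name as written; `extern "C"` fixes the
// calling convention. Even then, symbols defined in an executable must
// reach the dynamic symbol table (e.g. via `-rdynamic`) before a
// dlopen'd library can bind to them.
#[no_mangle]
pub extern "C" fn muentry_get_thread_local() -> *mut std::os::raw::c_void {
    std::ptr::null_mut() // placeholder body; name is illustrative only
}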
@@ -459,6 +459,13 @@ pub struct Value {
 }
 impl Value {
+    pub fn is_mem(&self) -> bool {
+        match self.v {
+            Value_::Memory(_) => true,
+            _ => false
+        }
+    }
+
     pub fn is_int_reg(&self) -> bool {
         match self.v {
             Value_::SSAVar(_) => {
......
@@ -425,7 +425,7 @@ impl ASMCodeGen {
         let mut locs : Vec<ASMLocation> = vec![];
         let mut result_str : String = "".to_string();
-        let mut loc_cursor : usize = 0;
+        let mut loc_cursor : usize = loc;
         match op.v {
             // offset(base,index,scale)
@@ -755,7 +755,7 @@ impl CodeGenerator for ASMCodeGen {
         );
     }
-    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
+    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32) {
         trace!("emit: cmp {} {}", op1, op2);
         let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
@@ -794,7 +794,7 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
-    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
+    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
         trace!("emit: mov {} -> {}", src, dest);
         let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
@@ -854,7 +854,7 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
-    fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32) {
+    fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32) {
         trace!("emit: mov {} -> {}", src, dest);
         let (mem, id, loc) = self.prepare_mem(dest, 4 + 1 + 1 + src.to_string().len() + 1);
@@ -925,7 +925,7 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
-    fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
+    fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
         trace!("emit: and {}, {} -> {}", src, dest, dest);
         let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
@@ -965,7 +965,7 @@ impl CodeGenerator for ASMCodeGen {
         unimplemented!()
     }
-    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
+    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
         trace!("emit: add {}, {} -> {}", dest, src, dest);
         let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
@@ -1005,7 +1005,7 @@ impl CodeGenerator for ASMCodeGen {
         unimplemented!()
     }
-    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
+    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
         trace!("emit: sub {}, {} -> {}", dest, src, dest);
         let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
......
@@ -15,27 +15,27 @@ pub trait CodeGenerator {
     fn end_block(&mut self, block_name: MuName);

     fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
-    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32);
+    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
     fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);

-    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
+    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32);
     fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
     fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>); // store
-    fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32);
+    fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);

     fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);

-    fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: u32);
+    fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
     fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);

     fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
-    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32);
+    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);

     fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
-    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32);
+    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32);

     fn emit_mul_r64(&mut self, src: &P<Value>);
     fn emit_mul_mem64(&mut self, src: &P<Value>);
......
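Worth noting on the u32-to-i32 switch above: x86-64 sign-extends a 32-bit immediate to 64 bits in cmp/mov/and/add/sub with a 64-bit operand, so a signed i32 models what is actually encodable. A standalone illustration (not part of the patch):

fn main() {
    let imm: i32 = -1;
    // What the CPU does with an imm32 in a 64-bit instruction: sign-extend.
    assert_eq!(imm as i64 as u64, 0xFFFF_FFFF_FFFF_FFFF);
    // A u32 immediate with the same bit pattern would zero-extend instead,
    // silently changing the value the instruction operates on.
    assert_eq!(imm as u32 as u64, 0x0000_0000_FFFF_FFFF);
}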
@@ -260,8 +260,66 @@ pub fn is_machine_reg(reg: MuID) -> bool {
     }
 }

-fn build_live_set(cf: &CompiledFunction, func: &MuFunctionVersion) {
+#[allow(unused_variables)]
+fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
+    let n_insts = cf.mc.number_of_insts();
+
+    let mut livein  : Vec<Vec<MuID>> = vec![vec![]; n_insts];
+    let mut liveout : Vec<Vec<MuID>> = vec![vec![]; n_insts];
+
+    let mut is_changed = true;
+
+    while is_changed {
+        // reset
+        is_changed = false;
+
+        for n in 0..n_insts {
+            let in_set_old  = livein[n].to_vec(); // copy to new vec
+            let out_set_old = liveout[n].to_vec();
+
+            // in[n] <- use[n] + (out[n] - def[n])
+            // (1) in[n] = use[n]
+            let mut in_set_new = vec![];
+            in_set_new.extend_from_slice(&cf.mc.get_inst_reg_uses(n));
+
+            // (2) diff = out[n] - def[n]
+            let mut diff = liveout[n].to_vec();
+            for def in cf.mc.get_inst_reg_defines(n) {
+                vec_utils::remove_value(&mut diff, *def);
+            }
+
+            // (3) in[n] = in[n] + diff
+            vec_utils::append_unique(&mut in_set_new, &mut diff);
+
+            // update livein[n]
+            livein[n].clear();
+            livein[n].extend_from_slice(&in_set_new);
+
+            // out[n] <- union(in[s] for every successor s of n)
+            let mut union = vec![];
+            for s in cf.mc.get_succs(n) {
+                vec_utils::append_clone_unique(&mut union, &livein[*s]);
+            }
+
+            // update liveout[n]
+            liveout[n].clear();
+            liveout[n].extend_from_slice(&union);
+
+            let n_changed = !vec_utils::is_identical_ignore_order(&livein[n], &in_set_old)
+                || !vec_utils::is_identical_ignore_order(&liveout[n], &out_set_old);
+            is_changed = is_changed || n_changed;
+        }
+    }
+
+    for block in cf.mc.get_all_blocks().to_vec() {
+        if cf.mc.get_ir_block_livein(&block).is_none() {
+            let start_inst = cf.mc.get_block_range(&block).unwrap().start;
+            cf.mc.set_ir_block_livein(&block, livein[start_inst].to_vec());
+        }
+
+        if cf.mc.get_ir_block_liveout(&block).is_none() {
+            let end_inst = cf.mc.get_block_range(&block).unwrap().end;
+            cf.mc.set_ir_block_liveout(&block, liveout[end_inst].to_vec());
+        }
+    }
+}

 // from Tailoring Graph-coloring Register Allocation For Runtime Compilation, Figure 4
......
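The build_live_set added above is the standard iterative liveness fixpoint: in[n] = use[n] + (out[n] - def[n]) and out[n] = union(in[s] for every successor s of n), repeated until nothing changes. A self-contained toy version on a made-up three-instruction program, for illustration only (types and program are not Zebu's):

use std::collections::BTreeSet;

// Toy instruction: registers used/defined, plus successor indices.
struct Inst { uses: Vec<u32>, defs: Vec<u32>, succs: Vec<usize> }

fn live_sets(insts: &[Inst]) -> (Vec<BTreeSet<u32>>, Vec<BTreeSet<u32>>) {
    let n = insts.len();
    let mut livein = vec![BTreeSet::new(); n];
    let mut liveout = vec![BTreeSet::new(); n];
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..n {
            // out[i] = union of in[s] for every successor s of i
            let out: BTreeSet<u32> =
                insts[i].succs.iter().flat_map(|&s| livein[s].clone()).collect();
            // in[i] = use[i] + (out[i] - def[i])
            let mut inn: BTreeSet<u32> = insts[i].uses.iter().copied().collect();
            inn.extend(out.iter().filter(|&r| !insts[i].defs.contains(r)).copied());
            if inn != livein[i] || out != liveout[i] {
                changed = true;
                livein[i] = inn;
                liveout[i] = out;
            }
        }
    }
    (livein, liveout)
}

fn main() {
    // 0: r1 = ...   1: r2 = r1 + 1   2: ret r2
    let prog = vec![
        Inst { uses: vec![],  defs: vec![1], succs: vec![1] },
        Inst { uses: vec![1], defs: vec![2], succs: vec![2] },
        Inst { uses: vec![2], defs: vec![],  succs: vec![]  },
    ];
    let (livein, liveout) = live_sets(&prog);
    assert_eq!(liveout[0], BTreeSet::from([1])); // r1 is live across 0 -> 1
    assert_eq!(livein[2],  BTreeSet::from([2])); // r2 is live into the return
    println!("livein: {:?}\nliveout: {:?}", livein, liveout);
}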
@@ -5,6 +5,8 @@ use utils::{Address, ObjectReference};
 use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
 use utils::bit_utils;

 pub const OBJECT_HEADER_SIZE : usize = 0;

+pub fn init() {
+    MARK_STATE.store(1, atomic::Ordering::SeqCst);
+}
......
@@ -2,16 +2,17 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <dlfcn.h>
+#include <pthread.h>

 __thread void* mu_tls;

 void set_thread_local(void* thread) {
-    printf("setting mu_tls to %p\n", thread);
+    printf("Thread%p: setting mu_tls to %p\n", pthread_self(), thread);
     mu_tls = thread;
 }

 void* get_thread_local() {
-    printf("getting mu_tls as %p\n", mu_tls);
+    printf("Thread%p: getting mu_tls as %p\n", pthread_self(), mu_tls);
     return mu_tls;
 }
......
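These C helpers keep the current thread's runtime pointer in __thread storage so both generated code and the runtime can reach it. On the Rust side they would be bound with a plain extern block; a sketch, assuming the C file is compiled and linked into the same binary:

use std::os::raw::c_void;

// Signatures mirror the C definitions above.
extern "C" {
    fn set_thread_local(thread: *mut c_void);
    fn get_thread_local() -> *mut c_void;
}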
@@ -12,6 +12,25 @@
     a == b
 }

+pub fn is_identical_ignore_order<T: Ord + Clone> (vec: &Vec<T>, vec2: &Vec<T>) -> bool {
+    if vec.len() != vec2.len() {
+        return false;
+    }
+
+    let mut vec = vec.to_vec();
+    let mut vec2 = vec2.to_vec();
+    vec.sort();
+    vec2.sort();
+
+    for i in 0..vec.len() {
+        if vec[i] != vec2[i] {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 pub fn as_str<T: fmt::Display>(vec: &Vec<T>) -> String {
     let mut ret = String::new();
     for i in 0..vec.len() {
@@ -48,6 +67,12 @@
         add_unique(vec, val);
     }
 }

+pub fn append_clone_unique<T: PartialEq + Clone> (vec: &mut Vec<T>, vec2: &Vec<T>) {
+    for ele in vec2 {
+        add_unique(vec, ele.clone());
+    }
+}
+
 pub fn find_value<T: PartialEq> (vec: &Vec<T>, val: T) -> Option<usize> {
     for i in 0..vec.len() {
......
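The two helpers added above treat Vec as a small unordered set, which is how the liveness pass consumes them. A quick usage sketch, assuming vec_utils is in scope:

// Order-insensitive comparison sorts clones and compares element-wise.
assert!(vec_utils::is_identical_ignore_order(&vec![3, 1, 2], &vec![1, 2, 3]));
assert!(!vec_utils::is_identical_ignore_order(&vec![1, 1, 2], &vec![1, 2, 2]));

// append_clone_unique is a set-style union into the first vector.
let mut regs = vec![1, 2];
vec_utils::append_clone_unique(&mut regs, &vec![2, 3]);
assert!(vec_utils::is_identical_ignore_order(&regs, &vec![1, 2, 3]));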
@@ -37,6 +37,8 @@ mod aot {
     use std::process::Command;
     use std::process::Output;

+    const CC : &'static str = "clang";
+
     fn exec (mut cmd: Command) -> Output {
         println!("executing: {:?}", cmd);
         let output = cmd.output().expect("failed to execute");
@@ -50,7 +52,7 @@ mod aot {
     }

     fn link_executable_internal (files: Vec<PathBuf>, out: PathBuf) -> PathBuf {
-        let mut gcc = Command::new("gcc");
+        let mut gcc = Command::new(CC);

         for file in files {
             println!("link with {:?}", file.as_path());
@@ -70,7 +72,7 @@ mod aot {
         let mut object_files : Vec<PathBuf> = vec![];

         for file in files {
-            let mut gcc = Command::new("gcc");
+            let mut gcc = Command::new(CC);
             gcc.arg("-c");
             gcc.arg("-fpic");
@@ -86,8 +88,11 @@ mod aot {
             exec(gcc);
         }

-        let mut gcc = Command::new("gcc");
+        let mut gcc = Command::new(CC);
         gcc.arg("-shared");
+        gcc.arg("-Wl");
+        gcc.arg("-undefined");
+        gcc.arg("dynamic_lookup");

         for obj in object_files {
             gcc.arg(obj.as_os_str());
         }
......
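On macOS, `-undefined dynamic_lookup` tells the linker to leave the dylib's unresolved symbols to be bound at load time against whatever process loads it, which is what a library calling back into runtime symbols needs. The bare `-Wl` argument above looks stray; when routed through the compiler driver, linker options are conventionally folded into a single token. A sketch of that spelling (paths and names illustrative):

use std::process::Command;

// Same link step with the combined -Wl form.
fn link_dylib_sketch(objects: &[&str], out: &str) {
    let mut cc = Command::new("clang");
    cc.arg("-shared");
    // Forward both tokens to the linker as one driver argument:
    cc.arg("-Wl,-undefined,dynamic_lookup");
    for obj in objects {
        cc.arg(obj);
    }
    cc.arg("-o").arg(out);
    let status = cc.status().expect("failed to run linker");
    assert!(status.success());
}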
@@ -36,30 +36,21 @@ fn test_instruction_new() {
         compiler.compile(&mut func_ver);
     }

+    vm.make_primordial_thread(func_id, vec![]);
     backend::emit_context(&vm);

-    let dylib = aot::link_dylib(vec![Mu("fac")], "liballoc_new.dylib");
-    let lib = libloading::Library::new(dylib.as_os_str()).unwrap();
-    unsafe {
-        let alloc_new : libloading::Symbol<unsafe extern fn() -> u64> = lib.get(b"alloc_new").unwrap();
-
-        // before invoking it, we need to disguise current thread as mu thread (having an allocator)
-        fake_mutator_for_cur_thread(&vm);
-
-        let ret = alloc_new();
-        println!("return value from alloc_new is {}", ret);
-
-        assert!(ret == 1);
-    }
+    let executable = aot::link_primordial(vec!["alloc_new".to_string()], "alloc_new_test");
+    aot::execute(executable);
 }

+#[allow(dead_code)]
 fn fake_mutator_for_cur_thread(vm: &VM) {
     // init gc
     const IMMIX_SPACE_SIZE : ByteSize = 500 << 20;
     const LO_SPACE_SIZE : ByteSize = 500 << 20;
     mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);

-    let mut mutator = mm::new_mutator();
+    let mutator = mm::new_mutator();

     let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::fake_thread(vm.next_id(), mutator)));
@@ -87,7 +78,7 @@ pub fn alloc_new() -> VM {
     vm.set_name(const_def_int64_1.as_entity(), "int64_1".to_string());

     // .funcsig @alloc_new_sig = () -> (@int64)
-    let func_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![type_def_int64.clone()]);
+    let func_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_int64.clone()], vec![]);
     vm.set_name(func_sig.as_entity(), "alloc_new_sig".to_string());

     // .funcdecl @alloc_new <@alloc_new_sig>
@@ -121,11 +112,11 @@ pub fn alloc_new() -> VM {
         v: Instruction_::GetIRef(0)
     });

-    // STORE <@int_64> @a @int_64_1
+    // STORE <@int_64> @a_iref @int_64_1
     let blk_0_const_int64_1 = func_ver.new_constant(vm.next_id(), const_def_int64_1.clone());
     let blk_0_inst2 = func_ver.new_inst(vm.next_id(), Instruction{
         value: None,
-        ops: RwLock::new(vec![blk_0_a.clone(), blk_0_const_int64_1.clone()]),
+        ops: RwLock::new(vec![blk_0_a_iref.clone(), blk_0_const_int64_1.clone()]),
         v: Instruction_::Store{
             is_ptr: false,
             order: MemoryOrder::Relaxed,
@@ -134,23 +125,23 @@ pub fn alloc_new() -> VM {
         }
     });

-    // %x = LOAD <@int_64> @a_iref
-    let blk_0_x = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
-    vm.set_name(blk_0_x.as_entity(), "blk_0_x".to_string());
-    let blk_0_inst3 = func_ver.new_inst(vm.next_id(), Instruction{
-        value: Some(vec![blk_0_x.clone_value()]),
-        ops: RwLock::new(vec![blk_0_a_iref.clone()]),
-        v: Instruction_::Load{
-            is_ptr: false,
-            order: MemoryOrder::Relaxed,
-            mem_loc: 0
-        }
-    });
+    // // %x = LOAD <@int_64> @a_iref
+    // let blk_0_x = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
+    // vm.set_name(blk_0_x.as_entity(), "blk_0_x".to_string());
+    // let blk_0_inst3 = func_ver.new_inst(vm.next_id(), Instruction{
+    //     value: Some(vec![blk_0_x.clone_value()]),
+    //     ops: RwLock::new(vec![blk_0_a_iref.clone()]),
+    //     v: Instruction_::Load{
+    //         is_ptr: false,
+    //         order: MemoryOrder::Relaxed,
+    //         mem_loc: 0
+    //     }
+    // });

-    let blk_0_term = func_ver.new_inst(vm.next_id(), Instruction{
+    let blk_0_term = func_ver.new_inst(vm.next_id(), Instruction {
         value: None,
-        ops: RwLock::new(vec![blk_0_x.clone()]),
-        v: Instruction_::Return(vec![0])
+        ops: RwLock::new(vec![]),
+        v: Instruction_::ThreadExit
     });

     let blk_0_content = BlockContent {
......