Commit 23651d9e authored by qinsoon

[wip] working on global, moving to asm backend

parent 48b8b266
......@@ -402,7 +402,10 @@ impl fmt::Display for TreeNode {
write!(f, "+({} {})", pv.ty, c)
},
Value_::Global(ref g) => {
write!(f, "+({} @{})", g.ty, g.tag)
write!(f, "+({} to GLOBAL {} @{})", pv.ty, g.ty, g.tag)
},
Value_::Memory(ref mem) => {
write!(f, "+({})", mem)
}
}
},
......@@ -485,7 +488,10 @@ impl fmt::Display for Value {
write!(f, "+({} {})", self.ty, c)
},
Value_::Global(ref g) => {
write!(f, "+({} @{})", g.ty, g.tag)
write!(f, "+({} to GLOBAL {} @{})", self.ty, g.ty, g.tag)
},
Value_::Memory(ref mem) => {
write!(f, "+({})", mem)
}
}
}
......@@ -495,7 +501,8 @@ impl fmt::Display for Value {
pub enum Value_ {
SSAVar(MuID),
Constant(Constant),
Global(P<GlobalCell>)
Global(P<GlobalCell>),
Memory(MemoryLocation)
}
#[derive(Debug, Clone)]
......@@ -558,6 +565,37 @@ impl fmt::Display for Constant {
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum MemoryLocation {
Address{
base: P<Value>,
offset: Option<P<Value>>,
index: Option<P<Value>>,
scale: Option<u8>
},
Symbolic{
base: Option<P<Value>>,
label: MuTag
}
}
impl fmt::Display for MemoryLocation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&MemoryLocation::Address{ref base, ref offset, ref index, scale} => {
    // offset/index/scale are optional - print only the parts that are present
    try!(write!(f, "{}", base));
    if let Some(ref off) = *offset { try!(write!(f, " + {}", off)); }
    if let Some(ref idx) = *index { try!(write!(f, " + {} * {}", idx, scale.unwrap_or(1))); }
    Ok(())
}
&MemoryLocation::Symbolic{ref base, ref label} => {
if base.is_some() {
write!(f, "{}({})", base.as_ref().unwrap(), label)
} else {
write!(f, "{}", label)
}
}
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct GlobalCell {
pub tag: MuTag,
......
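For orientation, a hedged sketch of the x86-64 (AT&T) operands these two variants are intended to describe once the backend emits them; the registers and label below are illustrative only:

// Address  { base: %rbp, offset: Some(-8), index: Some(%rcx), scale: Some(8) }
//   would correspond to the operand  -8(%rbp,%rcx,8)
// Symbolic { base: Some(%rip), label: "my_global" }
//   would correspond to the operand  my_global(%rip)   (RIP-relative global access)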
......@@ -285,6 +285,15 @@ pub fn is_native_safe(ty: &MuType) -> bool {
}
}
pub fn get_referent_ty(ty: &MuType) -> Option<P<MuType>> {
match ty {
&MuType_::Ref(ref referent)
| &MuType_::IRef(ref referent)
| &MuType_::WeakRef(ref referent) => Some(referent.clone()),
_ => None
}
}
macro_rules! is_type (
($e:expr, $p:pat) => (
match $e {
......
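A quick illustration of get_referent_ty on the reference types it peels (type spellings are informal):

// get_referent_ty(ref<int64>)     => Some(int64)
// get_referent_ty(iref<int64>)    => Some(int64)
// get_referent_ty(weakref<int64>) => Some(int64)
// get_referent_ty(int64)          => None        // not a reference type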
......@@ -361,7 +361,7 @@ impl ASMCodeGen {
}
}
fn prepare_op(&self, op: &P<Value>, loc: usize) -> (String, MuID, ASMLocation) {
fn prepare_reg(&self, op: &P<Value>, loc: usize) -> (String, MuID, ASMLocation) {
let str = self.asm_reg_op(op);
let len = str.len();
(str, op.extract_ssa_id().unwrap(), ASMLocation::new(loc, len))
......@@ -564,8 +564,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
let (reg1, id1, loc1) = self.prepare_op(op1, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(op2, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(op2, 4 + 1 + reg1.len() + 1);
let asm = format!("cmpq {},{}", reg1, reg2);
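The integer arguments to prepare_reg appear to record the byte offset at which each operand begins in the emitted string (kept in the returned ASMLocation so register names can be patched after allocation); illustrating with hypothetical registers:

// "cmpq %rax,%rbx"
//   reg1 starts at 4 + 1                      ("cmpq" is 4 bytes, then one space)
//   reg2 starts at 4 + 1 + reg1.len() + 1     (reg1, then the comma)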
......@@ -581,7 +581,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
trace!("emit: cmp {} {}", op1, op2);
let (reg1, id1, loc1) = self.prepare_op(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
let asm = format!("cmpq ${},{}", op2, reg1);
......@@ -602,7 +602,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: mov {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("movq ${},{}", src, reg1);
......@@ -623,8 +623,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("movq {},{}", reg1, reg2);
......@@ -637,11 +637,16 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
unimplemented!()
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("addq {},{}", reg1, reg2);
......@@ -662,7 +667,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("addq ${},{}", src, reg1);
......@@ -678,8 +683,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("subq {},{}", reg1, reg2);
......@@ -700,7 +705,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("subq ${},{}", src, reg1);
......@@ -716,7 +721,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mul_r64(&mut self, src: &P<Value>) {
trace!("emit: mul rax, {} -> (rdx, rax)", src);
let (reg, id, loc) = self.prepare_op(src, 3 + 1);
let (reg, id, loc) = self.prepare_reg(src, 3 + 1);
let rax = self.prepare_machine_reg(&x86_64::RAX);
let rdx = self.prepare_machine_reg(&x86_64::RDX);
......@@ -843,7 +848,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_push_r64(&mut self, src: &P<Value>) {
trace!("emit: push {}", src);
let (reg, id, loc) = self.prepare_op(src, 5 + 1);
let (reg, id, loc) = self.prepare_reg(src, 5 + 1);
let rsp = self.prepare_machine_reg(&x86_64::RSP);
let asm = format!("pushq {}", reg);
......@@ -860,7 +865,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_pop_r64(&mut self, dest: &P<Value>) {
trace!("emit: pop {}", dest);
let (reg, id, loc) = self.prepare_op(dest, 4 + 1);
let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
let rsp = self.prepare_machine_reg(&x86_64::RSP);
let asm = format!("popq {}", reg);
......
......@@ -22,6 +22,7 @@ pub trait CodeGenerator {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
......
......@@ -37,7 +37,7 @@ impl <'a> InstructionSelection {
// 3. we need to backup/restore all the callee-saved registers
// if any of these assumptions break, we will need to re-emit the code
#[allow(unused_variables)]
fn instruction_select(&mut self, node: &'a P<TreeNode>, cur_func: &MuFunctionVersion) {
fn instruction_select(&mut self, node: &'a P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VMContext) {
trace!("instsel on node {}", node);
match node.v {
......@@ -56,14 +56,14 @@ impl <'a> InstructionSelection {
let ops = inst.ops.borrow();
self.process_dest(&ops, fallthrough_dest, cur_func);
self.process_dest(&ops, branch_dest, cur_func);
self.process_dest(&ops, fallthrough_dest, cur_func, vm);
self.process_dest(&ops, branch_dest, cur_func, vm);
let ref cond = ops[cond];
if self.match_cmp_res(cond) {
trace!("emit cmp_eq-branch2");
match self.emit_cmp_res(cond, cur_func) {
match self.emit_cmp_res(cond, cur_func, vm) {
op::CmpOp::EQ => self.backend.emit_je(branch_dest),
op::CmpOp::NE => self.backend.emit_jne(branch_dest),
op::CmpOp::UGE => self.backend.emit_jae(branch_dest),
......@@ -79,7 +79,7 @@ impl <'a> InstructionSelection {
} else if self.match_ireg(cond) {
trace!("emit ireg-branch2");
let cond_reg = self.emit_ireg(cond, cur_func);
let cond_reg = self.emit_ireg(cond, cur_func, vm);
// emit: cmp cond_reg 1
self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
......@@ -93,7 +93,7 @@ impl <'a> InstructionSelection {
Instruction_::Branch1(ref dest) => {
let ops = inst.ops.borrow();
self.process_dest(&ops, dest, cur_func);
self.process_dest(&ops, dest, cur_func, vm);
trace!("emit branch1");
// jmp
......@@ -128,7 +128,7 @@ impl <'a> InstructionSelection {
trace!("arg {}", arg);
if self.match_ireg(arg) {
let arg = self.emit_ireg(arg, cur_func);
let arg = self.emit_ireg(arg, cur_func, vm);
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
......@@ -158,7 +158,7 @@ impl <'a> InstructionSelection {
self.backend.emit_call_near_rel32(target);
} else if self.match_ireg(func) {
let target = self.emit_ireg(func, cur_func);
let target = self.emit_ireg(func, cur_func, vm);
self.backend.emit_call_near_r64(&target);
} else if self.match_mem(func) {
......@@ -189,7 +189,7 @@ impl <'a> InstructionSelection {
},
Instruction_::Return(_) => {
self.emit_common_epilogue(inst, cur_func);
self.emit_common_epilogue(inst, cur_func, vm);
self.backend.emit_ret();
},
......@@ -202,8 +202,8 @@ impl <'a> InstructionSelection {
if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
trace!("emit add-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op2 = self.emit_ireg(&ops[op2], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
let res_tmp = self.emit_get_result(node);
// mov op1, res
......@@ -213,7 +213,7 @@ impl <'a> InstructionSelection {
} else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
trace!("emit add-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let reg_op2 = self.emit_get_iimm(&ops[op2]);
let res_tmp = self.emit_get_result(node);
......@@ -227,7 +227,7 @@ impl <'a> InstructionSelection {
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit add-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let reg_op2 = self.emit_mem(&ops[op2]);
let res_tmp = self.emit_get_result(node);
......@@ -246,8 +246,8 @@ impl <'a> InstructionSelection {
if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
trace!("emit sub-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op2 = self.emit_ireg(&ops[op2], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
let res_tmp = self.emit_get_result(node);
// mov op1, res
......@@ -257,7 +257,7 @@ impl <'a> InstructionSelection {
} else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
trace!("emit sub-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let imm_op2 = self.emit_get_iimm(&ops[op2]);
let res_tmp = self.emit_get_result(node);
......@@ -271,7 +271,7 @@ impl <'a> InstructionSelection {
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit sub-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], cur_func);
let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
let mem_op2 = self.emit_mem(&ops[op2]);
let res_tmp = self.emit_get_result(node);
......@@ -291,7 +291,7 @@ impl <'a> InstructionSelection {
let rax = x86_64::RAX.clone();
let op1 = &ops[op1];
if self.match_ireg(op1) {
let reg_op1 = self.emit_ireg(op1, cur_func);
let reg_op1 = self.emit_ireg(op1, cur_func, vm);
self.backend.emit_mov_r64_r64(&rax, &reg_op1);
} else if self.match_iimm(op1) {
......@@ -309,7 +309,7 @@ impl <'a> InstructionSelection {
// mul op2 -> rax
let op2 = &ops[op2];
if self.match_ireg(op2) {
let reg_op2 = self.emit_ireg(op2, cur_func);
let reg_op2 = self.emit_ireg(op2, cur_func, vm);
self.backend.emit_mul_r64(&reg_op2);
} else if self.match_iimm(op2) {
......@@ -337,6 +337,22 @@ impl <'a> InstructionSelection {
_ => unimplemented!()
}
}
Instruction_::Load{is_ptr, order, mem_loc} => {
let ops = inst.ops.borrow();
let ref loc_op = ops[mem_loc];
let resolved_loc = self.emit_get_mem(loc_op, vm);
let res_temp = self.emit_get_result(node);
if self.match_ireg(node) {
// emit mov(GPR)
self.backend.emit_mov_mem64_r64(&resolved_loc, &res_temp);
} else {
// emit mov(FPR)
unimplemented!()
}
}
_ => unimplemented!()
} // main switch
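Once emit_mov_mem64_r64 is actually implemented, the GPR path above is presumably meant to produce AT&T moves along these lines (register and label are illustrative):

// Load from an SSA iref:      movq (%rdi),%rax            // Memory(Address { base, .. })
// Load from a global (AOT):   movq my_global(%rip),%rax   // Memory(Symbolic { base: RIP, label })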
......@@ -349,7 +365,7 @@ impl <'a> InstructionSelection {
}
#[allow(unused_variables)]
fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, cur_func: &MuFunctionVersion) {
fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, cur_func: &MuFunctionVersion, vm: &VMContext) {
for i in 0..dest.args.len() {
let ref dest_arg = dest.args[i];
match dest_arg {
......@@ -374,7 +390,7 @@ impl <'a> InstructionSelection {
let ref target_args = cur_func.content.as_ref().unwrap().get_block(dest.target).content.as_ref().unwrap().args;
let ref target_arg = target_args[i];
self.emit_general_move(&arg, target_arg, cur_func);
self.emit_general_move(&arg, target_arg, cur_func, vm);
},
&DestArg::Freshbound(_) => unimplemented!()
}
......@@ -426,7 +442,7 @@ impl <'a> InstructionSelection {
self.backend.end_block(block_name);
}
fn emit_common_epilogue(&mut self, ret_inst: &Instruction, cur_func: &MuFunctionVersion) {
fn emit_common_epilogue(&mut self, ret_inst: &Instruction, cur_func: &MuFunctionVersion, vm: &VMContext) {
// the epilogue is not a block (it's a few instructions inserted before return)
// FIXME: this may change in the future
......@@ -442,7 +458,7 @@ impl <'a> InstructionSelection {
for i in ret_val_indices {
let ref ret_val = ops[*i];
if self.match_ireg(ret_val) {
let reg_ret_val = self.emit_ireg(ret_val, cur_func);
let reg_ret_val = self.emit_ireg(ret_val, cur_func, vm);
self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
gpr_ret_count += 1;
......@@ -480,7 +496,7 @@ impl <'a> InstructionSelection {
}
}
fn emit_cmp_res(&mut self, cond: &P<TreeNode>, cur_func: &MuFunctionVersion) -> op::CmpOp {
fn emit_cmp_res(&mut self, cond: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VMContext) -> op::CmpOp {
match cond.v {
TreeNode_::Instruction(ref inst) => {
let ops = inst.ops.borrow();
......@@ -492,12 +508,12 @@ impl <'a> InstructionSelection {
if op::is_int_cmp(op) {
if self.match_ireg(op1) && self.match_ireg(op2) {
let reg_op1 = self.emit_ireg(op1, cur_func);
let reg_op2 = self.emit_ireg(op2, cur_func);
let reg_op1 = self.emit_ireg(op1, cur_func, vm);
let reg_op2 = self.emit_ireg(op2, cur_func, vm);
self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
} else if self.match_ireg(op1) && self.match_iimm(op2) {
let reg_op1 = self.emit_ireg(op1, cur_func);
let reg_op1 = self.emit_ireg(op1, cur_func, vm);
let iimm_op2 = self.emit_get_iimm(op2);
self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
......@@ -544,20 +560,21 @@ impl <'a> InstructionSelection {
}
}
fn emit_ireg(&mut self, op: &P<TreeNode>, cur_func: &MuFunctionVersion) -> P<Value> {
fn emit_ireg(&mut self, op: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VMContext) -> P<Value> {
match op.v {
TreeNode_::Instruction(_) => {
self.instruction_select(op, cur_func);
self.instruction_select(op, cur_func, vm);
self.emit_get_result(op)
},
TreeNode_::Value(ref pv) => {
match pv.v {
Value_::Constant(_) => panic!("expected ireg"),
Value_::Constant(_)
| Value_::Global(_)
| Value_::Memory(_) => panic!("expected ireg"),
Value_::SSAVar(_) => {
pv.clone()
},
Value_::Global(_) => unimplemented!()
}
}
}
......@@ -589,6 +606,44 @@ impl <'a> InstructionSelection {
}
}
fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VMContext) -> P<Value> {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
Value_::SSAVar(_) => P(Value{
tag: "",
ty: types::get_referent_ty(& pv.ty).unwrap(),
v: Value_::Memory(MemoryLocation::Address{
base: pv.clone(),
offset: None,
index: None,
scale: None
})
}),
Value_::Global(ref glob) => {
if vm.is_running() {
// get address from vm
unimplemented!()
} else {
// symbolic
P(Value{
tag: "",
ty: types::get_referent_ty(&pv.ty).unwrap(),
v: Value_::Memory(MemoryLocation::Symbolic{
base: Some(x86_64::RIP.clone()),
label: glob.tag
})
})
}
},
Value_::Memory(_) => pv.clone(),
Value_::Constant(_) => unimplemented!()
}
}
TreeNode_::Instruction(_) => unimplemented!()
}
}
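// Roughly, emit_get_mem produces (field values illustrative):
//   SSA iref %p to int64      -> Value { ty: int64, v: Memory(Address  { base: %p, offset: None, index: None, scale: None }) }
//   global @a, VM not running -> Value { ty: int64, v: Memory(Symbolic { base: Some(RIP), label: "a" }) }
//   global @a, VM running     -> concrete address lookup, still unimplemented!() above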
fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
match op.v {
TreeNode_::Value(ref pv) => {
......@@ -647,12 +702,12 @@ impl <'a> InstructionSelection {
}
}
fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, cur_func: &MuFunctionVersion) {
fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, cur_func: &MuFunctionVersion, vm: &VMContext) {
let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
if self.match_ireg(src) {
let src_reg = self.emit_ireg(src, cur_func);
let src_reg = self.emit_ireg(src, cur_func, vm);
self.backend.emit_mov_r64_r64(dest, &src_reg);
} else if self.match_iimm(src) {
let src_imm = self.emit_get_iimm(src);
......@@ -702,7 +757,7 @@ impl CompilerPass for InstructionSelection {
self.backend.set_block_liveout(block.label, &live_out);
for inst in block_content.body.iter() {
self.instruction_select(inst, func);
self.instruction_select(inst, func, vm_context);
}
self.backend.end_block(block.label);
......
......@@ -58,6 +58,8 @@ lazy_static! {
pub static ref R14 : P<Value> = GPR!("r14", 14);
pub static ref R15 : P<Value> = GPR!("r15", 15);
pub static ref RIP : P<Value> = GPR!("rip", 32);
pub static ref RETURN_GPRs : [P<Value>; 2] = [
RAX.clone(),
RDX.clone(),
......@@ -191,7 +193,8 @@ lazy_static! {
XMM12.clone(),
XMM13.clone(),
XMM14.clone(),
XMM15.clone()
XMM15.clone(),
RIP.clone()
];
// put callee saved regs first
......
......@@ -59,7 +59,7 @@ impl Compiler {
}
pub struct CompilerPolicy {
passes: Vec<Box<CompilerPass>>
pub passes: Vec<Box<CompilerPass>>
}
impl CompilerPolicy {
......@@ -80,4 +80,4 @@ impl CompilerPolicy {
pub fn new(passes: Vec<Box<CompilerPass>>) -> CompilerPolicy {
CompilerPolicy{passes: passes}
}
}
}
\ No newline at end of file
......@@ -7,8 +7,11 @@ use vm::machine_code::CompiledFunction;
use std::sync::RwLock;
use std::cell::RefCell;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
pub struct VMContext {
pub is_running: AtomicBool,
constants: RwLock<HashMap<MuTag, P<Value>>>,
types: RwLock<HashMap<MuTag, P<MuType>>>,
......@@ -23,7 +26,9 @@ pub struct VMContext {
impl <'a> VMContext {
pub fn new() -> VMContext {
VMContext {
let ret = VMContext {
is_running: ATOMIC_BOOL_INIT,
constants: RwLock::new(HashMap::new()),
types: RwLock::new(HashMap::new()),
......@@ -33,7 +38,19 @@ impl <'a> VMContext {
func_vers: RwLock::new(HashMap::new()),
funcs: RwLock::new(HashMap::new()),
compiled_funcs: RwLock::new(HashMap::new())
}
};
ret.is_running.store(false, Ordering::SeqCst);
ret
}
pub fn run_vm(&self) {
self.is_running.store(true, Ordering::SeqCst);
}
pub fn is_running(&self) -> bool {
self.is_running.load(Ordering::Relaxed)
}
pub fn declare_const(&self, const_name: MuTag, ty: P<MuType>, val: Constant) -> P<Value> {
......
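A small usage sketch of the new flag (call sites are hypothetical); emit_get_mem above consults is_running() to pick symbolic versus concrete global addresses:

// let vm = VMContext::new();
// assert!(!vm.is_running());   // ahead-of-time: globals become label(%rip) operands
// vm.run_vm();
// assert!(vm.is_running());    // running: globals would resolve to concrete addresses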
......@@ -354,7 +354,7 @@ pub fn global_access() -> VMContext {
let mut blk_0 = Block::new("blk_0");
// %x = LOAD <@int_64> @a
let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_iref_int64.clone()).clone_value();
let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_int64.clone()).clone_value();
let blk_0_a = func_ver.new_global(global_a.clone());
let blk_0_inst0 = func_ver.new_inst(Instruction{
value: Some(vec![blk_0_x]),
......