Commit 23651d9e authored by qinsoon

[wip] working on global, moving to asm backend

parent 48b8b266
......@@ -402,7 +402,10 @@ impl fmt::Display for TreeNode {
write!(f, "+({} {})", pv.ty, c)
},
Value_::Global(ref g) => {
write!(f, "+({} @{})", g.ty, g.tag)
write!(f, "+({} to GLOBAL {} @{})", pv.ty, g.ty, g.tag)
},
Value_::Memory(ref mem) => {
write!(f, "+({})", mem)
}
}
},
......@@ -485,7 +488,10 @@ impl fmt::Display for Value {
write!(f, "+({} {})", self.ty, c)
},
Value_::Global(ref g) => {
write!(f, "+({} @{})", g.ty, g.tag)
write!(f, "+({} to GLOBAL {} @{})", self.ty, g.ty, g.tag)
},
Value_::Memory(ref mem) => {
write!(f, "+({})", mem)
}
}
}
......@@ -495,7 +501,8 @@ impl fmt::Display for Value {
pub enum Value_ {
SSAVar(MuID),
Constant(Constant),
Global(P<GlobalCell>)
Global(P<GlobalCell>),
Memory(MemoryLocation)
}
#[derive(Debug, Clone)]
......@@ -558,6 +565,37 @@ impl fmt::Display for Constant {
}
}
/// A machine-level memory operand for the asm backend.
#[derive(Debug, Clone, PartialEq)]
pub enum MemoryLocation {
// numeric addressing mode; the Display impl renders it as
// "base + offset + index * scale" (x86-style base/index/scale/disp)
Address{
// base register/value of the address
base: P<Value>,
// optional displacement added to the base
offset: Option<P<Value>>,
// optional index register/value
index: Option<P<Value>>,
// optional multiplier applied to the index
scale: Option<u8>
},
// symbolic address: a label, optionally relative to a base
// (rendered as "base(label)" by the Display impl)
Symbolic{
base: Option<P<Value>>,
label: MuTag
}
}
impl fmt::Display for MemoryLocation {
    /// Renders the memory operand in a human-readable form:
    /// "base + offset + index * scale" for `Address` (omitting any parts
    /// that are `None`), and "base(label)" or "label" for `Symbolic`.
    ///
    /// Bug fix: the previous version called `.unwrap()` on `offset`,
    /// `index` and `scale`, panicking for any address that did not have
    /// all three optional components set.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &MemoryLocation::Address{ref base, ref offset, ref index, scale} => {
                write!(f, "{}", base)?;
                if let &Some(ref offset) = offset {
                    write!(f, " + {}", offset)?;
                }
                if let &Some(ref index) = index {
                    write!(f, " + {}", index)?;
                    // a scale only makes sense together with an index
                    if let Some(scale) = scale {
                        write!(f, " * {}", scale)?;
                    }
                }
                Ok(())
            }
            &MemoryLocation::Symbolic{ref base, ref label} => {
                match base {
                    &Some(ref base) => write!(f, "{}({})", base, label),
                    &None => write!(f, "{}", label)
                }
            }
        }
    }
}
#[derive(Debug, Clone, PartialEq)]
pub struct GlobalCell {
pub tag: MuTag,
......
......@@ -285,6 +285,15 @@ pub fn is_native_safe(ty: &MuType) -> bool {
}
}
/// Returns the referent type of a reference type (`Ref`, `IRef` or
/// `WeakRef`), or `None` for any non-reference type.
pub fn get_referent_ty(ty: &MuType) -> Option<P<MuType>> {
    match *ty {
        MuType_::Ref(ref inner)
        | MuType_::IRef(ref inner)
        | MuType_::WeakRef(ref inner) => {
            Some(inner.clone())
        }
        _ => None
    }
}
macro_rules! is_type (
($e:expr, $p:pat) => (
match $e {
......
......@@ -361,7 +361,7 @@ impl ASMCodeGen {
}
}
fn prepare_op(&self, op: &P<Value>, loc: usize) -> (String, MuID, ASMLocation) {
fn prepare_reg(&self, op: &P<Value>, loc: usize) -> (String, MuID, ASMLocation) {
let str = self.asm_reg_op(op);
let len = str.len();
(str, op.extract_ssa_id().unwrap(), ASMLocation::new(loc, len))
......@@ -564,8 +564,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
let (reg1, id1, loc1) = self.prepare_op(op1, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(op2, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(op2, 4 + 1 + reg1.len() + 1);
let asm = format!("cmpq {},{}", reg1, reg2);
......@@ -581,7 +581,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
trace!("emit: cmp {} {}", op1, op2);
let (reg1, id1, loc1) = self.prepare_op(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
let asm = format!("cmpq ${},{}", op2, reg1);
......@@ -602,7 +602,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: mov {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("movq ${},{}", src, reg1);
......@@ -623,8 +623,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("movq {},{}", reg1, reg2);
......@@ -637,11 +637,16 @@ impl CodeGenerator for ASMCodeGen {
)
}
/// Emits a 64-bit move from register `src` into memory operand `dest`
/// (movq r64 -> mem64). Currently an unimplemented stub: it logs the
/// request via `trace!` and panics if actually invoked.
fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
unimplemented!()
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("addq {},{}", reg1, reg2);
......@@ -662,7 +667,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1);
let asm = format!("addq {},${}", src, reg1);
......@@ -678,8 +683,8 @@ impl CodeGenerator for ASMCodeGen {
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_op(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("subq {},{}", reg1, reg2);
......@@ -700,7 +705,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_op(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("subq ${},{}", src, reg1);
......@@ -716,7 +721,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mul_r64(&mut self, src: &P<Value>) {
trace!("emit: mul rax, {} -> (rdx, rax)", src);
let (reg, id, loc) = self.prepare_op(src, 3 + 1);
let (reg, id, loc) = self.prepare_reg(src, 3 + 1);
let rax = self.prepare_machine_reg(&x86_64::RAX);
let rdx = self.prepare_machine_reg(&x86_64::RDX);
......@@ -843,7 +848,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_push_r64(&mut self, src: &P<Value>) {
trace!("emit: push {}", src);
let (reg, id, loc) = self.prepare_op(src, 5 + 1);
let (reg, id, loc) = self.prepare_reg(src, 5 + 1);
let rsp = self.prepare_machine_reg(&x86_64::RSP);
let asm = format!("pushq {}", reg);
......@@ -860,7 +865,7 @@ impl CodeGenerator for ASMCodeGen {
fn emit_pop_r64(&mut self, dest: &P<Value>) {
trace!("emit: pop {}", dest);
let (reg, id, loc) = self.prepare_op(dest, 4 + 1);
let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
let rsp = self.prepare_machine_reg(&x86_64::RSP);
let asm = format!("popq {}", reg);
......
......@@ -22,6 +22,7 @@ pub trait CodeGenerator {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
......
......@@ -58,6 +58,8 @@ lazy_static! {
pub static ref R14 : P<Value> = GPR!("r14", 14);
pub static ref R15 : P<Value> = GPR!("r15", 15);
pub static ref RIP : P<Value> = GPR!("rip", 32);
pub static ref RETURN_GPRs : [P<Value>; 2] = [
RAX.clone(),
RDX.clone(),
......@@ -191,7 +193,8 @@ lazy_static! {
XMM12.clone(),
XMM13.clone(),
XMM14.clone(),
XMM15.clone()
XMM15.clone(),
RIP.clone()
];
// put callee saved regs first
......
......@@ -59,7 +59,7 @@ impl Compiler {
}
pub struct CompilerPolicy {
passes: Vec<Box<CompilerPass>>
pub passes: Vec<Box<CompilerPass>>
}
impl CompilerPolicy {
......@@ -80,4 +80,4 @@ impl CompilerPolicy {
/// Constructs a `CompilerPolicy` that runs the given passes in order.
pub fn new(passes: Vec<Box<CompilerPass>>) -> CompilerPolicy {
    CompilerPolicy { passes }
}
}
}
\ No newline at end of file
......@@ -7,8 +7,11 @@ use vm::machine_code::CompiledFunction;
use std::sync::RwLock;
use std::cell::RefCell;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
pub struct VMContext {
pub is_running: AtomicBool,
constants: RwLock<HashMap<MuTag, P<Value>>>,
types: RwLock<HashMap<MuTag, P<MuType>>>,
......@@ -23,7 +26,9 @@ pub struct VMContext {
impl <'a> VMContext {
pub fn new() -> VMContext {
VMContext {
let ret = VMContext {
is_running: ATOMIC_BOOL_INIT,
constants: RwLock::new(HashMap::new()),
types: RwLock::new(HashMap::new()),
......@@ -33,7 +38,19 @@ impl <'a> VMContext {
func_vers: RwLock::new(HashMap::new()),
funcs: RwLock::new(HashMap::new()),
compiled_funcs: RwLock::new(HashMap::new())
}
};
ret.is_running.store(false, Ordering::SeqCst);
ret
}
/// Marks the VM as running by setting the `is_running` flag.
pub fn run_vm(&self) {
self.is_running.store(true, Ordering::SeqCst);
}
/// Returns whether the VM has been started (`run_vm` called).
/// NOTE(review): this load uses `Ordering::Relaxed` while the stores in
/// `new`/`run_vm` use `SeqCst` — likely fine for a standalone boolean
/// flag, but confirm no other state is assumed published by this check.
pub fn is_running(&self) -> bool {
self.is_running.load(Ordering::Relaxed)
}
pub fn declare_const(&self, const_name: MuTag, ty: P<MuType>, val: Constant) -> P<Value> {
......
......@@ -354,7 +354,7 @@ pub fn global_access() -> VMContext {
let mut blk_0 = Block::new("blk_0");
// %x = LOAD <@int_64> @a
let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_iref_int64.clone()).clone_value();
let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_int64.clone()).clone_value();
let blk_0_a = func_ver.new_global(global_a.clone());
let blk_0_inst0 = func_ver.new_inst(Instruction{
value: Some(vec![blk_0_x]),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment