Commit 5f1f7500 authored by qinsoon

[wip] instsel for load/store, work on regalloc

parent 23651d9e
@@ -587,7 +587,7 @@ impl fmt::Display for MemoryLocation {
}
&MemoryLocation::Symbolic{ref base, ref label} => {
if base.is_some() {
- write!(f, "{}({})", base.as_ref().unwrap(), label)
+ write!(f, "{}({})", label, base.as_ref().unwrap())
} else {
write!(f, "{}", label)
}
......
@@ -371,6 +371,119 @@ impl ASMCodeGen {
op.extract_ssa_id().unwrap()
}
#[allow(unused_assignments)]
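// Renders a memory operand into AT&T-syntax text. Besides the operand string it
// returns the MuIDs of any temporaries the operand references and their byte
// positions (ASMLocation) within the string, presumably so the register
// allocator can patch real register names in later. Note that loc_cursor starts
// at 0 rather than at `loc`, so the positions recorded here are relative to the
// start of the operand string; the `loc` argument is not used yet.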
fn prepare_mem(&self, op: &P<Value>, loc: usize) -> (String, Vec<MuID>, Vec<ASMLocation>) {
let mut ids : Vec<MuID> = vec![];
let mut locs : Vec<ASMLocation> = vec![];
let mut result_str : String = "".to_string();
let mut loc_cursor : usize = 0;
match op.v {
// offset(base,index,scale)
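// i.e. AT&T-syntax addressing such as 8(%rax,%rbx,4), meaning base + index*scale + offset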
Value_::Memory(MemoryLocation::Address{ref base, ref offset, ref index, scale}) => {
// deal with offset
if offset.is_some() {
let offset = offset.as_ref().unwrap();
match offset.v {
Value_::SSAVar(id) => {
// temp as offset
let (str, id, loc) = self.prepare_reg(offset, 0);
result_str.push_str(&str);
ids.push(id);
locs.push(loc);
loc_cursor += str.len();
},
Value_::Constant(Constant::Int(val)) => {
let str = val.to_string();
result_str.push_str(&str);
loc_cursor += str.len();
},
_ => panic!("unexpected offset type: {:?}", offset)
}
}
result_str.push('(');
loc_cursor += 1;
// deal with base, base is ssa
let (str, id, loc) = self.prepare_reg(base, loc_cursor);
result_str.push_str(&str);
ids.push(id);
locs.push(loc);
loc_cursor += str.len();
// deal with index (ssa or constant)
if index.is_some() {
result_str.push(',');
loc_cursor += 1; // plus 1 for ,
let index = index.as_ref().unwrap();
match index.v {
Value_::SSAVar(id) => {
// temp as index
let (str, id, loc) = self.prepare_reg(index, loc_cursor);
result_str.push_str(&str);
ids.push(id);
locs.push(loc);
loc_cursor += str.len();
},
Value_::Constant(Constant::Int(val)) => {
let str = val.to_string();
result_str.push_str(&str);
loc_cursor += str.len();
},
_ => panic!("unexpected index type: {:?}", index)
}
// scale
if scale.is_some() {
result_str.push(',');
loc_cursor += 1;
let scale = scale.unwrap();
let str = scale.to_string();
result_str.push_str(&str);
loc_cursor += str.len();
}
}
result_str.push(')');
loc_cursor += 1;
},
Value_::Memory(MemoryLocation::Symbolic{ref base, ref label}) => {
result_str.push_str(label);
loc_cursor += label.len();
if base.is_some() {
result_str.push('(');
loc_cursor += 1;
let (str, id, loc) = self.prepare_reg(base.as_ref().unwrap(), loc_cursor);
result_str.push_str(&str);
ids.push(id);
locs.push(loc);
loc_cursor += str.len();
result_str.push(')');
loc_cursor += 1;
}
},
_ => panic!("expect mem location as value")
}
(result_str, ids, locs)
}
fn asm_reg_op(&self, op: &P<Value>) -> String {
let id = op.extract_ssa_id().unwrap();
if id < RESERVED_NODE_IDS_FOR_MACHINE {
@@ -617,7 +730,56 @@ impl CodeGenerator for ASMCodeGen {
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
- unimplemented!()
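// operand layout in the emitted text: "movq " occupies the first 4 + 1 bytes,
// the memory operand starts right after it, and the destination register
// follows a comma, hence the offsets passed to prepare_mem/prepare_reg below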
let (mem, id1, loc1) = self.prepare_mem(src, 4 + 1);
let (reg, id2, loc2) = self.prepare_reg(dest, 4 + 1 + mem.len() + 1);
let asm = format!("movq {},{}", mem, reg);
self.add_asm_inst(
asm,
vec![id2],
vec![loc2],
id1,
loc1
)
}
fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
let (reg, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (mem, mut id2, mut loc2) = self.prepare_mem(dest, 4 + 1 + reg.len() + 1);
// the source register, like any registers in the memory operand, is counted as a 'use'
id2.push(id1);
loc2.push(loc1);
let asm = format!("movq {},{}", reg, mem);
self.add_asm_inst(
asm,
vec![], // not defining anything (write to memory)
vec![],
id2,
loc2
)
}
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: mov {} -> {}", src, dest);
let (mem, id, loc) = self.prepare_mem(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("movq ${},{}", src, mem);
self.add_asm_inst(
asm,
vec![],
vec![],
id,
loc
)
}
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
@@ -637,11 +799,6 @@ impl CodeGenerator for ASMCodeGen {
)
}
- fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>) {
- trace!("emit: mov {} -> {}", src, dest);
- unimplemented!()
- }
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
......
@@ -22,7 +22,8 @@ pub trait CodeGenerator {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
- fn emit_mov_mem64_r64(&mut self, src: &P<Value>, dest: &P<Value>);
+ fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
+ fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
......
@@ -4,6 +4,7 @@ use ast::inst::Instruction;
use ast::inst::Destination;
use ast::inst::DestArg;
use ast::inst::Instruction_;
use ast::inst::MemoryOrder;
use ast::op;
use ast::types;
use ast::types::MuType_;
@@ -338,16 +339,62 @@ impl <'a> InstructionSelection {
}
}
// load on x64 generates mov inst (no matter what order is specified)
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
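// (that mapping's MOV-based variant: Relaxed/Consume/Acquire and even SeqCst
// loads are all a plain mov on x86-64, with the fencing paid on SeqCst stores)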
Instruction_::Load{is_ptr, order, mem_loc} => {
let ops = inst.ops.borrow();
let ref loc_op = ops[mem_loc];
// check order
match order {
MemoryOrder::Relaxed
| MemoryOrder::Consume
| MemoryOrder::Acquire
| MemoryOrder::SeqCst => {},
_ => panic!("didnt expect order {:?} with store inst", order)
}
let resolved_loc = self.emit_get_mem(loc_op, vm);
let res_temp = self.emit_get_result(node);
if self.match_ireg(node) {
// emit mov(GPR)
- self.backend.emit_mov_mem64_r64(&resolved_loc, &res_temp);
+ self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
} else {
// emit mov(FPR)
unimplemented!()
}
}
Instruction_::Store{is_ptr, order, mem_loc, value} => {
let ops = inst.ops.borrow();
let ref loc_op = ops[mem_loc];
let ref val_op = ops[value];
let generate_plain_mov : bool = {
match order {
MemoryOrder::Relaxed | MemoryOrder::Release => true,
MemoryOrder::SeqCst => false,
_ => panic!("didnt expect order {:?} with store inst", order)
}
};
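// the SeqCst case still needs work: on x86-64 a SeqCst store maps to
// mov followed by mfence (or a locked xchg), not a plain mov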
let resolved_loc = self.emit_get_mem(loc_op, vm);
if self.match_ireg(val_op) {
let val = self.emit_ireg(val_op, cur_func, vm);
if generate_plain_mov {
self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
} else {
unimplemented!()
}
} else if self.match_iimm(val_op) {
let val = self.emit_get_iimm(val_op);
if generate_plain_mov {
self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
} else {
unimplemented!()
}
} else {
// emit mov(FPR)
unimplemented!()
......
@@ -308,7 +308,7 @@ pub fn build_chaitin_briggs (cf: &CompiledFunction, func: &MuFunctionVersion) ->
let dst = cf.mc.get_inst_reg_defines(i);
// src may be an immediate number
- // but dest is definitly a register
+ // but dest is a register or a memory location
debug_assert!(dst.len() == 1);
if src.len() == 1 {
......
@@ -361,7 +361,7 @@ pub fn global_access() -> VMContext {
ops: RefCell::new(vec![blk_0_a.clone()]),
v: Instruction_::Load{
is_ptr: false,
- order: MemoryOrder::SeqCst,
+ order: MemoryOrder::Relaxed,
mem_loc: 0
}
});
@@ -373,7 +373,7 @@ pub fn global_access() -> VMContext {
ops: RefCell::new(vec![blk_0_a.clone(), blk_0_const_int64_1.clone()]),
v: Instruction_::Store{
is_ptr: false,
- order: MemoryOrder::SeqCst,
+ order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
......