Commit 2d836b52 authored by qinsoon

[wip] keep working

parent 8038fb6d
@@ -9,7 +9,7 @@ use std::cell::RefCell;
#[derive(Debug, Clone)]
pub struct Instruction {
- pub value : Option<Vec<P<TreeNode>>>,
+ pub value : Option<Vec<P<Value>>>,
pub ops : RefCell<Vec<P<TreeNode>>>,
pub v: Instruction_
}
......
@@ -35,10 +35,13 @@ impl MuFunction {
self.content = Some(content)
}
- pub fn new_ssa(&mut self, id: MuID, tag: MuTag, ty: P<MuType>) -> P<TreeNode> {
+ pub fn new_ssa(&mut self, tag: MuTag, ty: P<MuType>) -> P<TreeNode> {
+ let id = TreeNode::get_id();
self.context.values.insert(id, ValueEntry{id: id, tag: tag, ty: ty.clone(), use_count: Cell::new(0), expr: None});
P(TreeNode {
id: id,
op: pick_op_code_for_ssa(&ty),
v: TreeNode_::Value(P(Value{
tag: tag,
@@ -50,6 +53,7 @@ impl MuFunction {
pub fn new_constant(&mut self, v: P<Value>) -> P<TreeNode> {
P(TreeNode{
id: TreeNode::get_id(),
op: pick_op_code_for_const(&v.ty),
v: TreeNode_::Value(v)
})
@@ -190,13 +194,27 @@ pub struct BlockContent {
#[derive(Debug, Clone)]
/// always use with P<TreeNode>
pub struct TreeNode {
pub id: MuID,
pub op: OpCode,
pub v: TreeNode_,
}
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
static CUR_ID : AtomicUsize = ATOMIC_USIZE_INIT;
impl TreeNode {
pub fn get_id() -> MuID {
// fetch_add returns the previous value and increments atomically;
// a separate load followed by store here would race between threads
CUR_ID.fetch_add(1, Ordering::SeqCst)
}
pub fn new_inst(v: Instruction) -> P<TreeNode> {
- P(TreeNode{op: pick_op_code_for_inst(&v), v: TreeNode_::Instruction(v)})
+ P(TreeNode{
+ id: TreeNode::get_id(),
+ op: pick_op_code_for_inst(&v),
+ v: TreeNode_::Instruction(v),
+ })
}
pub fn extract_ssa_id(&self) -> Option<MuID> {
@@ -210,6 +228,20 @@ impl TreeNode {
_ => None
}
}
pub fn clone_value(&self) -> Option<P<Value>> {
match self.v {
TreeNode_::Value(ref val) => Some(val.clone()),
_ => None
}
}
pub fn into_value(self) -> Option<P<Value>> {
match self.v {
TreeNode_::Value(val) => Some(val),
_ => None
}
}
}
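A sketch (not part of this commit) of how the two new accessors divide the work; `P<T>` is the AST's shared-pointer wrapper, so `clone_value` is the variant callable through a shared `P<TreeNode>`:

fn value_of(node: &P<TreeNode>) -> Option<P<Value>> {
    // borrows the node; only the inner P<Value> handle is cloned
    node.clone_value()
}

fn take_value(node: TreeNode) -> Option<P<Value>> {
    // consumes an owned TreeNode and moves the payload out, no clone
    node.into_value()
}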
/// use +() to display a node
@@ -272,7 +304,27 @@ impl Value {
}
_ => false
}
}
}
pub fn extract_ssa_id(&self) -> Option<MuID> {
match self.v {
Value_::SSAVar(id) => Some(id),
_ => None
}
}
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.v {
Value_::SSAVar(id) => {
write!(f, "+({} %{}#{})", self.ty, self.tag, id)
},
Value_::Constant(ref c) => {
write!(f, "+({} {})", self.ty, c)
}
}
}
}
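For reference, the `+( )` display form added here renders roughly as follows (tag and id invented; the exact type text depends on MuType's own Display impl):

// SSA variable of type int<64>, tag %x, id 3:
//   +(int<64> %x#3)
// constant of the same type:
//   +(int<64> 42)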
#[derive(Debug, Clone)]
......
@@ -186,6 +186,22 @@ pub enum AtomicRMWOp {
UMIN
}
pub fn is_int_cmp(op: CmpOp) -> bool {
match op {
CmpOp::EQ
| CmpOp::NE
| CmpOp::SGE
| CmpOp::SGT
| CmpOp::SLE
| CmpOp::SLT
| CmpOp::UGE
| CmpOp::UGT
| CmpOp::ULE
| CmpOp::ULT => true,
_ => false
}
}
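A minimal usage sketch: the ten integer arms return true and every other comparison falls through to false:

assert!(is_int_cmp(CmpOp::EQ));  // integer equality
assert!(is_int_cmp(CmpOp::ULT)); // unsigned less-than
// a floating-point comparison arm would hit the catch-all and yield false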
pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
match inst.v {
Instruction_::BinOp(op, _, _) => OpCode::Binary(op),
......
pub mod inst_sel;
mod temp;
pub use compiler::backend::temp::Temporary;
#[cfg(target_arch = "x86_64")]
mod x86_64;
......
use ast::types::*;
use ast::ir::*;
pub struct Temporary {
pub hll_id: MuID
}
\ No newline at end of file
@@ -5,6 +5,7 @@ use compiler::backend::x86_64::CodeGenerator;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
use ast::inst::*;
pub struct ASMCodeGen {
foo: usize
@@ -18,22 +19,86 @@ impl ASMCodeGen {
impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
}
- fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: &P<Value>) {
+ fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
trace!("emit: cmp {} {}", op1, op2);
}
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
}
- fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: &P<Value>) {
+ fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_jmp(&mut self, dest: &Destination) {
trace!("emit: jmp {}", dest.target);
}
fn emit_je(&mut self, dest: &Destination) {
trace!("emit: je {}", dest.target);
}
fn emit_jne(&mut self, dest: &Destination) {
trace!("emit: jne {}", dest.target);
}
fn emit_ja(&mut self, dest: &Destination) {
trace!("emit: ja {}", dest.target);
}
fn emit_jae(&mut self, dest: &Destination) {
trace!("emit: jae {}", dest.target);
}
fn emit_jb(&mut self, dest: &Destination) {
trace!("emit: jb {}", dest.target);
}
fn emit_jbe(&mut self, dest: &Destination) {
trace!("emit: jbe {}", dest.target);
}
fn emit_call(&mut self, func: &P<Value>) {
trace!("emit: call {}", func);
}
fn emit_ret(&mut self) {
trace!("emit: ret");
}
}
\ No newline at end of file
use ast::ptr::P;
use ast::ir::*;
use ast::inst::*;
pub trait CodeGenerator {
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
- fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: &P<Value>);
+ fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32);
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
- fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: &P<Value>);
+ fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32);
fn emit_jmp(&mut self, dest: &Destination);
fn emit_je(&mut self, dest: &Destination);
fn emit_jne(&mut self, dest: &Destination);
fn emit_ja(&mut self, dest: &Destination);
fn emit_jae(&mut self, dest: &Destination);
fn emit_jb(&mut self, dest: &Destination);
fn emit_jbe(&mut self, dest: &Destination);
fn emit_call(&mut self, func: &P<Value>);
fn emit_ret(&mut self);
}
\ No newline at end of file
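A sketch of how a pass might hold and drive the backend through this trait (assuming some constructor such as ASMCodeGen::new(), which this diff does not show):

// dynamic dispatch keeps instruction selection backend-agnostic
let mut backend: Box<CodeGenerator> = Box::new(ASMCodeGen::new());
backend.emit_ret(); // resolves to ASMCodeGen::emit_ret at runtime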
use ast::ir::*;
use ast::ptr::*;
use ast::inst::Instruction;
use ast::inst::Destination;
use ast::inst::DestArg;
use ast::inst::Instruction_::*;
use ast::inst::Instruction_;
use ast::op;
use ast::op::OpCode;
use ast::types::*;
use ast::types;
use vm::context::VMContext;
use compiler::CompilerPass;
use compiler::backend::x86_64::*;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
pub struct InstructionSelection {
name: &'static str,
backend: Box<CodeGenerator>
}
- #[derive(Clone)]
- pub enum MatchResult {
- REG(P<Value>),
- MEM{base: P<Value>, index: P<Value>, scale: P<Value>, disp: P<Value>},
- IMM(P<Value>),
- FP_REG(P<Value>),
- FP_IMM(P<Value>),
- }
- macro_rules! results_as {
- ($results: expr, $expect: pat) => {
- {
- let find_pattern = |x: Vec<MatchResult>| {
- for i in x.iter() {
- match i {
- &$expect => return Some(i.clone()),
- _ => continue
- }
- }
- None
- };
- find_pattern($results)
- };
- }
- }
- macro_rules! match_result {
- ($result1: expr, $expect1: pat, $result2: expr, $expect2: pat, $block: block) => {
- {
- let r1 = results_as!($result1, $expect1);
- let r2 = results_as!($result2, $expect2);
- if r1.is_some() && r2.is_some() $block
- }
- };
- ($result1: expr, $expect1: pat, $block) => {
- {
- let r1 = results_as!($result1, $expect1);
- if r1.is_some() $block
- }
- };
backend: Box<CodeGenerator>
}
impl <'a> InstructionSelection {
@@ -75,12 +34,18 @@ impl <'a> InstructionSelection {
// 3. we need to backup/restore all the callee-saved registers
// if any of these assumption breaks, we will need to re-emit the code
#[allow(unused_variables)]
- fn instruction_select(&mut self, inst: &'a P<TreeNode>) -> Option<Vec<MatchResult>> {
- trace!("instsel on node {}", inst);
- match inst.v {
+ fn instruction_select(&mut self, node: &'a P<TreeNode>) {
+ trace!("instsel on node {}", node);
+ // let mut state = inst.state.borrow_mut();
+ // *state = Some(BURSState::new(MATCH_RES_LEN));
+ match node.v {
TreeNode_::Instruction(ref inst) => {
match inst.v {
- Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
+ Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
// move this to trace generation
// assert here
let (fallthrough_dest, branch_dest, branch_if_true) = {
if true_prob > 0.5f32 {
(true_dest, false_dest, false)
@@ -89,122 +54,62 @@
}
};
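// i.e. fall through to the likelier successor and emit the
// conditional jump toward the colder edge; branch_if_true records
// which polarity that jump must test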
- let mut ops = inst.ops.borrow_mut();
+ let ops = inst.ops.borrow();
- self.process_dest(&mut ops, fallthrough_dest);
- self.process_dest(&mut ops, branch_dest);
+ self.process_dest(&ops, fallthrough_dest);
+ self.process_dest(&ops, branch_dest);
let ref cond = ops[cond];
- match cond.op {
- OpCode::Comparison(op) => {
- trace!("Tile comp-branch2");
- match cond.v {
- TreeNode_::Instruction(ref inst) => {
- match inst.v {
- CmpOp(op, op1, op2) => {
- // cmp op1 op2
- // jcc branch_dest
- // #fallthrough_dest:
- // ..
- let op1 = self.instruction_select(&ops[op1]).unwrap();
- let op2 = self.instruction_select(&ops[op2]).unwrap();
- match_result!(op1, MatchResult::REG(_), op2, MatchResult::REG(_), {
- });
- // // x86 cmp only allows second op as immediate
- // let (op1, op2, branch_if_true) = {
- // if op1.is_int_const() && op2.is_int_reg() {
- // (op2, op1, !branch_if_true)
- // } else {
- // (op1, op2, branch_if_true)
- // }
- // };
- //
- // if op1.is_int_reg() && op2.is_int_reg() {
- // self.backend.emit_cmp_r64_r64(op1, op2);
- // } else if op1.is_int_reg() && op2.is_int_const() {
- // // x86 only supports immediates smaller than 32bits
- // let ty : &MuType_ = &op2.ty;
- // match ty {
- // &MuType_::Int(len) if len <= 32 => {
- // self.backend.emit_cmp_r64_imm32(op1, op2);
- // },
- // &MuType_::Int(len) if len > 32 => {
- // self.backend.emit_cmp_r64_mem64(op1, op2);
- // },
- // _ => panic!("{} is supposed to be int type", ty)
- // }
- // } else if op1.is_int_const() && op2.is_int_reg() {
- // panic!("expected op2 as imm and op1 as reg found op1: {:?}, op2: {:?}", op1, op2);
- // } else if op1.is_int_const() && op2.is_int_const() {
- //
- // }
- None
- },
- _ => panic!("expected a comparison op")
- }
- },
- _ => panic!("expected a comparison inst")
- }
- },
+ if self.match_cmp_res(cond) {
+ trace!("emit cmp_eq-branch2");
+ match self.emit_cmp_res(cond) {
+ op::CmpOp::EQ => self.backend.emit_je(branch_dest),
+ op::CmpOp::NE => self.backend.emit_jne(branch_dest),
+ op::CmpOp::SGE => self.backend.emit_jae(branch_dest),
+ op::CmpOp::SGT => self.backend.emit_ja(branch_dest),
+ op::CmpOp::SLE => self.backend.emit_jbe(branch_dest),
+ op::CmpOp::SLT => self.backend.emit_jb(branch_dest),
+ _ => unimplemented!()
+ }
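// NOTE: ja/jae/jb/jbe test x86's unsigned condition flags; the signed
// compares SGE/SGT/SLE/SLT conventionally lower to jg/jge/jl/jle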
+ } else if self.match_ireg(cond) {
+ trace!("emit ireg-branch2");
- OpCode::RegI64 | OpCode::IntImmI64 => {
- trace!("Tile value-branch2");
- // test/cmp pv 0
- // jcc branch_dest
- // #fallthrough_dest:
- // ...
- None
- },
+ let cond_reg = self.emit_ireg(cond);
- _ => {
- trace!("nested: compute cond");
- // instsel for cond first
- self.instruction_select(cond);
- // test/cmp res 0
- // jcc branch_dest
- // #fallthrough_dest:
- // ...
- trace!("Tile value-branch2 after computing cond");
- None
- }
+ // emit: cmp cond_reg 1
+ self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
+ // emit: je #branch_dest
+ self.backend.emit_je(branch_dest);
+ } else {
+ unimplemented!();
+ }
},
- Branch1(ref dest) => {
- let mut ops = inst.ops.borrow_mut();
+ Instruction_::Branch1(ref dest) => {
+ let ops = inst.ops.borrow();
- self.process_dest(&mut ops, dest);
+ self.process_dest(&ops, dest);
trace!("Tile branch1");
trace!("emit branch1");
// jmp
- None
+ self.backend.emit_jmp(dest);
},
- ExprCall{ref data, is_abort} => {
- trace!("Tile exprcall");
+ Instruction_::ExprCall{ref data, is_abort} => {
+ trace!("deal with pre-call convention");
- let ops = inst.ops.borrow_mut();
+ let ops = inst.ops.borrow();
for arg_index in data.args.iter() {
let ref arg = ops[*arg_index];
trace!("arg {}", arg);
match arg.op {
OpCode::RegI64 | OpCode::IntImmI64 => {
trace!("Tile move-gpr-arg");
trace!("emit move-gpr-arg");
// move to register
},
OpCode::RegFP | OpCode::FPImm => {
trace!("Tile move-fpr-arg");
trace!("emit move-fpr-arg");
// move to fp register
},
_ => {
@@ -213,30 +118,31 @@
self.instruction_select(arg);
// mov based on type
trace!("Tile move-arg after computing arg");
trace!("emit move-arg after computing arg");
}
}
}
// emit call
let ref func = ops[data.func];
// check direct call or indirect
- // return ret vals
- None
+ // deal with ret vals
+ unimplemented!()
},
- Return(ref vals) => {
- let ops = inst.ops.borrow_mut();
+ Instruction_::Return(ref vals) => {
+ let ops = inst.ops.borrow();
for val_index in vals.iter() {
let ref val = ops[*val_index];
trace!("return val: {}", val);
match val.op {
OpCode::RegI64 | OpCode::IntImmI64 => {
trace!("Tile move-gpr-ret");
trace!("emit move-gpr-ret");
// move to return register
}
OpCode::RegFP | OpCode::FPImm => {
trace!("Tile move-fpr-ret");
trace!("emit move-fpr-ret");
// move to return fp register
}
_ => {
@@ -245,37 +151,108 @@
self.instruction_select(val);
// move based on type
trace!("Tile move-ret-val after computing arg");
trace!("emit move-ret-val after computing arg");
}
}
}
- None
+ self.backend.emit_ret();
},
- BinOp(op, op1, op2) => {
+ Instruction_::BinOp(op, op1, op2) => {
let ops = inst.ops.borrow();
match op {
op::BinOp::Add => {
trace!("Tile add");
// mov op1, res
// add op2 res
None
+ if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit add-ireg-ireg");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_ireg(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // add op2 res
+ self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
+ trace!("emit add-ireg-imm");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_get_iimm(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // add op2, res
+ self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
+ } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit add-imm-ireg");
+ unimplemented!();
+ } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit add-ireg-mem");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_mem(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // add op2 res
+ self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
+ } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit add-mem-ireg");
+ unimplemented!();
+ } else {
+ unimplemented!()
+ }
},
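// NOTE: the mov-then-add shape above reflects x86's two-address
// arithmetic: add overwrites its destination, so res_tmp is first
// seeded with op1. For %res = ADD %a %b the emitted sequence is
// roughly:
//   mov res_tmp, a   ; seed destination with op1
//   add res_tmp, b   ; res_tmp += op2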
op::BinOp::Sub => {
trace!("Tile sub");
// mov op1, res
// sub op1, res
None
+ if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit sub-ireg-ireg");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_ireg(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // sub op2 res
+ self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
+ trace!("emit sub-ireg-imm");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_get_iimm(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // sub op2, res
+ self.backend.emit_sub_r64_imm32(&res_tmp, reg_op2);
+ } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit sub-imm-ireg");
+ unimplemented!();
+ } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit sub-ireg-mem");
+ let reg_op1 = self.emit_ireg(&ops[op1]);
+ let reg_op2 = self.emit_mem(&ops[op2]);
+ let res_tmp = self.emit_get_result(node);
+ // mov op1, res
+ self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
+ // sub op2 res
+ self.backend.emit_sub_r64_mem64(&res_tmp, &reg_op2);
+ } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {