
Commit 8038fb6d authored by qinsoon

[wip] revising instruction selection, multiple backends(asm, binary)

parent 9d2cc163
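This commit routes instruction selection through a `CodeGenerator` trait so that more than one backend can sit behind it: the assembly emitter added here, and a binary emitter later, per the commit title. Below is a minimal, self-contained sketch of that shape, assuming a hypothetical `BinaryCodeGen` that is not part of this commit (the real trait methods take `P<Value>` operands, and modern Rust spells the trait-object type `Box<dyn CodeGenerator>`):

// Minimal sketch of the multiple-backend idea: selection code holds a boxed
// trait object, and each backend decides how an instruction is materialized.
trait CodeGenerator {
    fn emit_nop(&mut self);
}

// Emits human-readable assembly, analogous to ASMCodeGen in this commit.
struct TextCodeGen;
impl CodeGenerator for TextCodeGen {
    fn emit_nop(&mut self) {
        println!("nop");
    }
}

// Hypothetical binary emitter (mentioned in the commit title but not yet in
// the diff): appends raw opcode bytes to a buffer.
struct BinaryCodeGen {
    buf: Vec<u8>,
}
impl CodeGenerator for BinaryCodeGen {
    fn emit_nop(&mut self) {
        self.buf.push(0x90); // x86 NOP opcode
    }
}

fn main() {
    // Mirrors `backend: Box<CodeGenerator>` in InstructionSelection below.
    let mut backend: Box<dyn CodeGenerator> = Box::new(TextCodeGen);
    backend.emit_nop();
}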
@@ -247,6 +247,34 @@ pub struct Value {
    pub v: Value_
}

impl Value {
    pub fn is_int_reg(&self) -> bool {
        match self.v {
            Value_::SSAVar(_) => {
                if is_scalar(&self.ty) && !is_fp(&self.ty) {
                    true
                } else {
                    false
                }
            }
            _ => false
        }
    }

    pub fn is_int_const(&self) -> bool {
        match self.v {
            Value_::Constant(_) => {
                let ty : &MuType_ = &self.ty;
                match ty {
                    &MuType_::Int(_) => true,
                    _ => false
                }
            }
            _ => false
        }
    }
}
#[derive(Debug, Clone)]
pub enum Value_ {
    SSAVar(MuID),
    // ...
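The two new predicates classify operands during tiling: an SSA variable whose type is scalar and not floating-point can live in a general-purpose register, and an integer-typed constant can serve as an immediate. A minimal, self-contained sketch of that logic with stand-in types (the real `Value` and `MuType_` live in `ast::ir` and `ast::types`):

// Simplified stand-ins for Value / MuType_ to illustrate the predicate
// logic; this is illustration only, not the project's types.
#[allow(dead_code)]
enum Ty { Int(usize), Double }
#[allow(dead_code)]
enum ValKind { SSAVar(usize), Constant(i64) }

struct Val { ty: Ty, v: ValKind }

impl Val {
    // an SSA variable of scalar, non-floating-point type can live in a GPR
    fn is_int_reg(&self) -> bool {
        matches!(self.v, ValKind::SSAVar(_)) && matches!(self.ty, Ty::Int(_))
    }
    // an integer-typed constant can become an immediate operand
    fn is_int_const(&self) -> bool {
        matches!(self.v, ValKind::Constant(_)) && matches!(self.ty, Ty::Int(_))
    }
}

fn main() {
    let reg = Val { ty: Ty::Int(64), v: ValKind::SSAVar(0) };
    let imm = Val { ty: Ty::Int(32), v: ValKind::Constant(42) };
    assert!(reg.is_int_reg() && !reg.is_int_const());
    assert!(imm.is_int_const() && !imm.is_int_reg());
}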
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::inst_sel::*;

#[cfg(target_arch = "arm")]
pub use compiler::backend::arm::inst_sel::*;

pub mod inst_sel;

#[cfg(target_arch = "x86_64")]
mod x86_64;

#[cfg(target_arch = "arm")]
mod arm;
// x86_64/asm_backend.rs
#![allow(unused_variables)]

use compiler::backend::x86_64::CodeGenerator;

use ast::ptr::P;
use ast::ir::*;
use ast::types::*;

pub struct ASMCodeGen {
    foo: usize
}

impl ASMCodeGen {
    pub fn new() -> ASMCodeGen {
        ASMCodeGen {foo: 0}
    }
}

// stub implementations: the assembly backend does not emit anything yet
impl CodeGenerator for ASMCodeGen {
    fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>) {
    }

    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: &P<Value>) {
    }

    fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>) {
    }

    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: &P<Value>) {
    }

    fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
    }
}

// x86_64/codegen.rs
use ast::ptr::P;
use ast::ir::*;

pub trait CodeGenerator {
    fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: &P<Value>);
    fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
}
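The trait methods encode x86-64 operand kinds in their suffixes: `r64` is a 64-bit register, `imm32` a 32-bit immediate, `mem64` a 64-bit memory operand, so each method corresponds to one legal form of the instruction. A hedged sketch of a test double that records what selection asked it to emit; string operands stand in for `P<Value>`, and none of this is project code:

// A recording stand-in for the CodeGenerator idea: it logs the instructions
// it was asked to emit instead of producing assembly.
struct RecordingCodeGen {
    log: Vec<String>,
}

impl RecordingCodeGen {
    fn emit_mov_r64_imm32(&mut self, dest: &str, src: &str) {
        self.log.push(format!("mov {}, {}", dest, src));
    }
    fn emit_cmp_r64_imm32(&mut self, op1: &str, op2: &str) {
        self.log.push(format!("cmp {}, {}", op1, op2));
    }
}

fn main() {
    let mut cg = RecordingCodeGen { log: vec![] };
    // e.g. selecting `x = 5; if x == 5 ...` might drive the backend like this:
    cg.emit_mov_r64_imm32("rax", "5");
    cg.emit_cmp_r64_imm32("rax", "5");
    assert_eq!(cg.log, vec!["mov rax, 5", "cmp rax, 5"]);
}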
@@ -5,224 +5,339 @@ use ast::inst::DestArg;
use ast::inst::Instruction_::*;
use ast::op;
use ast::op::OpCode;
use ast::types::*;
use vm::context::VMContext;
use compiler::CompilerPass;
use compiler::backend::x86_64::*;
pub struct InstructionSelection {
    name: &'static str,
    backend: Box<CodeGenerator>
}

#[derive(Clone)]
pub enum MatchResult {
    REG(P<Value>),
    MEM{base: P<Value>, index: P<Value>, scale: P<Value>, disp: P<Value>},
    IMM(P<Value>),
    FP_REG(P<Value>),
    FP_IMM(P<Value>),
}

macro_rules! results_as {
    ($results: expr, $expect: pat) => {
        {
            let find_pattern = |x: Vec<MatchResult>| {
                for i in x.iter() {
                    match i {
                        &$expect => return Some(i.clone()),
                        _ => continue
                    }
                }
                None
            };

            find_pattern($results)
        }
    };
}

macro_rules! match_result {
    ($result1: expr, $expect1: pat, $result2: expr, $expect2: pat, $block: block) => {
        {
            let r1 = results_as!($result1, $expect1);
            let r2 = results_as!($result2, $expect2);

            if r1.is_some() && r2.is_some() $block
        }
    };
    ($result1: expr, $expect1: pat, $block: block) => {
        {
            let r1 = results_as!($result1, $expect1);

            if r1.is_some() $block
        }
    };
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new())
        }
    }

    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions breaks, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, inst: &'a P<TreeNode>) -> Option<Vec<MatchResult>> {
        trace!("instsel on node {}", inst);

        match inst.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // pick the more likely target as the fallthrough
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };

                        let mut ops = inst.ops.borrow_mut();

                        self.process_dest(&mut ops, fallthrough_dest);
                        self.process_dest(&mut ops, branch_dest);

                        let ref cond = ops[cond];

                        match cond.op {
                            OpCode::Comparison(op) => {
                                trace!("Tile comp-branch2");
                                match cond.v {
                                    TreeNode_::Instruction(ref inst) => {
                                        match inst.v {
                                            CmpOp(op, op1, op2) => {
                                                // cmp op1 op2
                                                // jcc branch_dest
                                                // #fallthrough_dest:
                                                // ..
                                                let op1 = self.instruction_select(&ops[op1]).unwrap();
                                                let op2 = self.instruction_select(&ops[op2]).unwrap();

                                                match_result!(op1, MatchResult::REG(_), op2, MatchResult::REG(_), {
                                                });

//                                                // x86 cmp only allows second op as immediate
//                                                let (op1, op2, branch_if_true) = {
//                                                    if op1.is_int_const() && op2.is_int_reg() {
//                                                        (op2, op1, !branch_if_true)
//                                                    } else {
//                                                        (op1, op2, branch_if_true)
//                                                    }
//                                                };
//
//                                                if op1.is_int_reg() && op2.is_int_reg() {
//                                                    self.backend.emit_cmp_r64_r64(op1, op2);
//                                                } else if op1.is_int_reg() && op2.is_int_const() {
//                                                    // x86 only supports immediates smaller than 32bits
//                                                    let ty : &MuType_ = &op2.ty;
//                                                    match ty {
//                                                        &MuType_::Int(len) if len <= 32 => {
//                                                            self.backend.emit_cmp_r64_imm32(op1, op2);
//                                                        },
//                                                        &MuType_::Int(len) if len > 32 => {
//                                                            self.backend.emit_cmp_r64_mem64(op1, op2);
//                                                        },
//                                                        _ => panic!("{} is supposed to be int type", ty)
//                                                    }
//                                                } else if op1.is_int_const() && op2.is_int_reg() {
//                                                    panic!("expected op2 as imm and op1 as reg, found op1: {:?}, op2: {:?}", op1, op2);
//                                                } else if op1.is_int_const() && op2.is_int_const() {
//
//                                                }

                                                None
                                            },
                                            _ => panic!("expected a comparison op")
                                        }
                                    },
                                    _ => panic!("expected a comparison inst")
                                }
                            },
                            OpCode::RegI64 | OpCode::IntImmI64 => {
                                trace!("Tile value-branch2");
                                // test/cmp pv 0
                                // jcc branch_dest
                                // #fallthrough_dest:
                                // ...
                                None
                            },
                            _ => {
                                trace!("nested: compute cond");
                                // instsel for cond first
                                self.instruction_select(cond);

                                // test/cmp res 0
                                // jcc branch_dest
                                // #fallthrough_dest:
                                // ...
                                trace!("Tile value-branch2 after computing cond");
                                None
                            }
                        }
                    },
                    Branch1(ref dest) => {
                        let mut ops = inst.ops.borrow_mut();

                        self.process_dest(&mut ops, dest);

                        trace!("Tile branch1");
                        // jmp
                        None
                    },
                    ExprCall{ref data, is_abort} => {
                        trace!("Tile exprcall");

                        let ops = inst.ops.borrow_mut();

                        for arg_index in data.args.iter() {
                            let ref arg = ops[*arg_index];
                            trace!("arg {}", arg);

                            match arg.op {
                                OpCode::RegI64 | OpCode::IntImmI64 => {
                                    trace!("Tile move-gpr-arg");
                                    // move to register
                                },
                                OpCode::RegFP | OpCode::FPImm => {
                                    trace!("Tile move-fpr-arg");
                                    // move to fp register
                                },
                                _ => {
                                    trace!("nested: compute arg");
                                    // instsel for arg first
                                    self.instruction_select(arg);
                                    // mov based on type
                                    trace!("Tile move-arg after computing arg");
                                }
                            }
                        }

                        // emit call
                        // return ret vals
                        None
                    },
                    Return(ref vals) => {
                        let ops = inst.ops.borrow_mut();

                        for val_index in vals.iter() {
                            let ref val = ops[*val_index];
                            trace!("return val: {}", val);

                            match val.op {
                                OpCode::RegI64 | OpCode::IntImmI64 => {
                                    trace!("Tile move-gpr-ret");
                                    // move to return register
                                }
                                OpCode::RegFP | OpCode::FPImm => {
                                    trace!("Tile move-fpr-ret");
                                    // move to return fp register
                                }
                                _ => {
                                    trace!("nested: compute return val");
                                    // instsel for return val first
                                    self.instruction_select(val);
                                    // move based on type
                                    trace!("Tile move-ret-val after computing arg");
                                }
                            }
                        }

                        None
                    },
                    BinOp(op, op1, op2) => {
                        match op {
                            op::BinOp::Add => {
                                trace!("Tile add");
                                // mov op1, res
                                // add op2 res
                                None
                            },
                            op::BinOp::Sub => {
                                trace!("Tile sub");
                                // mov op1, res
                                // sub op1, res
                                None
                            },
                            op::BinOp::Mul => {
                                trace!("Tile mul");
                                // mov op1 rax
                                // mul op2 rax
                                // mov rax res
                                None
                            },
                            _ => unimplemented!()
                        }
                    },
                    _ => unimplemented!()
                } // main switch
            },
            TreeNode_::Value(ref p) => {
                None
            }
        }
    }

    #[allow(unused_variables)]
    fn process_dest(&mut self, ops: &mut Vec<P<TreeNode>>, dest: &Destination) {
        for dest_arg in dest.args.iter() {
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref mut arg = ops[op_index];
                    match arg.op {
                        OpCode::RegI64
                        | OpCode::RegFP
                        | OpCode::IntImmI64
                        | OpCode::FPImm => {
                            // do nothing
                        },
                        _ => {
                            trace!("nested: compute arg for branch");
                            // nested: compute arg
                            self.instruction_select(arg);
                        }
                    }
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
}

impl CompilerPass for InstructionSelection {
    fn name(&self) -> &'static str {
        self.name
    }

    #[allow(unused_variables)]
    fn start_function(&mut self, vm_context: &VMContext, func: &mut MuFunction) {
        debug!("{}", self.name());
    }

    #[allow(unused_variables)]
    fn visit_function(&mut self, vm_context: &VMContext, func: &mut MuFunction) {
        for block_label in func.block_trace.as_ref().unwrap() {
            let block = func.content.as_mut().unwrap().get_block_mut(block_label);
            let block_content = block.content.as_mut().unwrap();

            for inst in block_content.body.iter_mut() {
                self.instruction_select(inst);
            }
        }
    }
}
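The commented-out block in the `CmpOp` arm above encodes two x86 constraints: `cmp` takes an immediate only as its second operand, and only immediates up to 32 bits fit the `r64, imm32` form. A self-contained sketch of the operand swap it performs; the flip of `branch_if_true` mirrors the commented-out code, and the operand kinds are simplified to an enum here:

// Sketch of the operand-swap rule from the commented-out CmpOp block:
// x86 `cmp` cannot take the immediate as its first operand, so (imm, reg)
// is rewritten to (reg, imm), adjusting the branch sense as the commented
// code does.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Opnd {
    Reg,
    Imm,
}

fn normalize_cmp(op1: Opnd, op2: Opnd, branch_if_true: bool) -> (Opnd, Opnd, bool) {
    if op1 == Opnd::Imm && op2 == Opnd::Reg {
        // illegal form: swap operands and flip the branch sense
        (op2, op1, !branch_if_true)
    } else {
        // already a legal form (reg/reg or reg/imm): leave it alone
        (op1, op2, branch_if_true)
    }
}

fn main() {
    assert_eq!(normalize_cmp(Opnd::Imm, Opnd::Reg, true),
               (Opnd::Reg, Opnd::Imm, false));
    assert_eq!(normalize_cmp(Opnd::Reg, Opnd::Imm, true),
               (Opnd::Reg, Opnd::Imm, true));
}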
// x86_64/mod.rs
pub mod inst_sel;

mod codegen;
pub use compiler::backend::x86_64::codegen::CodeGenerator;

mod asm_backend;
pub use compiler::backend::x86_64::asm_backend::ASMCodeGen;

use ast::ptr::P;
use ast::ir::*;
use ast::types::*;

pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
    let ty : &MuType_ = &op.ty;
    match ty {
        &MuType_::Int(len) if len <= 32 => true,
        _ => false
    }
}
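`is_valid_x86_imm` packages the same 32-bit immediate limit as a reusable check. A quick illustration of the rule it encodes, with a simplified stand-in for `MuType_`:

// The rule behind is_valid_x86_imm: x86-64 instructions generally accept at
// most a 32-bit immediate, so wider constants must be materialized in a
// register or loaded from memory instead. Types simplified for illustration.
enum MuTy { Int(usize), Double }

fn fits_imm32(ty: &MuTy) -> bool {
    matches!(ty, MuTy::Int(len) if *len <= 32)
}

fn main() {
    assert!(fits_imm32(&MuTy::Int(32)));   // ok: cmp r64, imm32
    assert!(!fits_imm32(&MuTy::Int(64)));  // too wide: needs a register or memory operand
    assert!(!fits_imm32(&MuTy::Double));   // FP constants are not integer immediates
}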