Commit 3f035045 authored by qinsoon's avatar qinsoon

implement binop_with_status

parent 0e158574
......@@ -92,6 +92,8 @@ pub enum Instruction_ {
// expressions
BinOp(BinOp, OpIndex, OpIndex),
BinOpWithStatus(BinOp, BinOpStatus, OpIndex, OpIndex),
CmpOp(CmpOp, OpIndex, OpIndex),
ConvOp{
operation: ConvOp,
......@@ -282,6 +284,9 @@ impl Instruction_ {
fn debug_str(&self, ops: &Vec<P<TreeNode>>) -> String {
match self {
&Instruction_::BinOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
&Instruction_::BinOpWithStatus(op, status, op1, op2) => {
format!("{:?} {:?} {} {}", op, status, ops[op1], ops[op2])
}
&Instruction_::CmpOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
&Instruction_::ConvOp{operation, ref from_ty, ref to_ty, operand} => {
format!("{:?} {} {} {}", operation, from_ty, to_ty, ops[operand])
......@@ -402,6 +407,50 @@ impl Instruction_ {
}
}
/// Which condition flags a binop-with-status instruction is asked to produce
/// in addition to its arithmetic value. Any combination of flags may be set.
/// NOTE(review): field order matters for RustcEncodable/RustcDecodable — do
/// not reorder.
#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
pub struct BinOpStatus {
    /// negative (N) flag requested
    pub flag_n: bool,
    /// zero (Z) flag requested
    pub flag_z: bool,
    /// carry (C) flag requested
    pub flag_c: bool,
    /// overflow (V) flag requested
    pub flag_v: bool
}
impl BinOpStatus {
pub fn n() -> BinOpStatus {
BinOpStatus {flag_n: true, flag_z: false, flag_c: false, flag_v: false}
}
pub fn z() -> BinOpStatus {
BinOpStatus {flag_n: false, flag_z: true, flag_c: false, flag_v: false}
}
pub fn c() -> BinOpStatus {
BinOpStatus {flag_n: false, flag_z: false, flag_c: true, flag_v: false}
}
pub fn v() -> BinOpStatus {
BinOpStatus {flag_n: false, flag_z: false, flag_c: false, flag_v: true}
}
}
impl fmt::Debug for BinOpStatus {
    /// Prints the requested flags as a concatenation of "#N", "#Z", "#C",
    /// "#V" (empty output when no flag is set).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Propagate formatter errors with `?` instead of panicking via
        // unwrap(): a Debug impl must return fmt::Result, not abort.
        if self.flag_n {
            write!(f, "#N")?;
        }

        if self.flag_z {
            write!(f, "#Z")?;
        }

        if self.flag_c {
            write!(f, "#C")?;
        }

        if self.flag_v {
            write!(f, "#V")?;
        }

        Ok(())
    }
}
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum MemoryOrder {
NotAtomic,
......
......@@ -3,7 +3,8 @@ use inst::Instruction_::*;
pub fn is_terminal_inst(inst: &Instruction_) -> bool {
match inst {
&BinOp(_, _, _)
| &BinOpWithStatus(_, _, _, _)
| &CmpOp(_, _, _)
| &ConvOp{..}
| &ExprCall{..}
......@@ -56,6 +57,7 @@ pub fn is_non_terminal_inst(inst: &Instruction_) -> bool {
pub fn has_side_effect(inst: &Instruction_) -> bool {
match inst {
&BinOp(_, _, _) => false,
&BinOpWithStatus(_, _, _, _) => false,
&CmpOp(_, _, _) => false,
&ConvOp{..} => false,
&ExprCall{..} => true,
......
......@@ -36,6 +36,7 @@ pub enum OpCode {
// expression
Binary(BinOp),
BinaryWithStatus(BinOp),
Comparison(CmpOp),
Conversion(ConvOp),
AtomicRMW(AtomicRMWOp),
......@@ -253,6 +254,7 @@ pub fn is_int_cmp(op: CmpOp) -> bool {
pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
match inst.v {
Instruction_::BinOp(op, _, _) => OpCode::Binary(op),
Instruction_::BinOpWithStatus(op, _, _, _) => OpCode::BinaryWithStatus(op),
Instruction_::CmpOp(op, _, _) => OpCode::Comparison(op),
Instruction_::ConvOp{operation, ..} => OpCode::Conversion(operation),
Instruction_::AtomicRMW{op, ..} => OpCode::AtomicRMW(op),
......
......@@ -1891,6 +1891,75 @@ impl CodeGenerator for ASMCodeGen {
)
}
// set byte
// Emits `sets reg8`: sets the destination byte register from the sign (N)
// flag of the preceding instruction.
fn emit_sets_r8(&mut self, dest: Reg) {
    trace!("emit: sets {}", dest);

    // operand starts after the mnemonic and a space ("sets " = 4 + 1 chars)
    let (reg_name, reg_id, reg_loc) = self.prepare_reg(dest, 4 + 1);

    self.add_asm_inst(
        format!("sets {}", reg_name),
        // dest is defined by this instruction; nothing is used
        linked_hashmap!{reg_id => vec![reg_loc]},
        linked_hashmap!{},
        false
    )
}
// Emits `setz reg8`: sets the destination byte register from the zero (Z)
// flag of the preceding instruction.
fn emit_setz_r8(&mut self, dest: Reg) {
    trace!("emit: setz {}", dest);

    // operand starts after the mnemonic and a space ("setz " = 4 + 1 chars)
    let (reg_name, reg_id, reg_loc) = self.prepare_reg(dest, 4 + 1);

    self.add_asm_inst(
        format!("setz {}", reg_name),
        // dest is defined by this instruction; nothing is used
        linked_hashmap!{reg_id => vec![reg_loc]},
        linked_hashmap!{},
        false
    )
}
// Emits `seto reg8`: sets the destination byte register from the overflow
// (V) flag of the preceding instruction.
fn emit_seto_r8(&mut self, dest: Reg) {
    trace!("emit: seto {}", dest);

    // operand starts after the mnemonic and a space ("seto " = 4 + 1 chars)
    let (reg_name, reg_id, reg_loc) = self.prepare_reg(dest, 4 + 1);

    self.add_asm_inst(
        format!("seto {}", reg_name),
        // dest is defined by this instruction; nothing is used
        linked_hashmap!{reg_id => vec![reg_loc]},
        linked_hashmap!{},
        false
    )
}
// Emits `setb reg8`: sets the destination byte register from the carry (C)
// flag of the preceding instruction.
fn emit_setb_r8(&mut self, dest: Reg) {
    trace!("emit: setb {}", dest);

    // operand starts after the mnemonic and a space ("setb " = 4 + 1 chars)
    let (reg_name, reg_id, reg_loc) = self.prepare_reg(dest, 4 + 1);

    self.add_asm_inst(
        format!("setb {}", reg_name),
        // dest is defined by this instruction; nothing is used
        linked_hashmap!{reg_id => vec![reg_loc]},
        linked_hashmap!{},
        false
    )
}
// cmov src -> dest
// binop op1, op2 (op2 is destination)
......
......@@ -48,6 +48,12 @@ pub trait CodeGenerator {
fn emit_movs_r_r (&mut self, dest: Reg, src: Reg);
fn emit_movz_r_r (&mut self, dest: Reg, src: Reg);
// set byte
fn emit_sets_r8 (&mut self, dest: Reg);
fn emit_setz_r8 (&mut self, dest: Reg);
fn emit_seto_r8 (&mut self, dest: Reg);
fn emit_setb_r8 (&mut self, dest: Reg);
// gpr conditional move
fn emit_cmova_r_r (&mut self, dest: Reg, src: Reg);
......
......@@ -2,7 +2,7 @@ use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
use ast::op::OpCode;
use ast::op::*;
use ast::types;
use ast::types::*;
use vm::VM;
......@@ -584,573 +584,55 @@ impl <'a> InstructionSelection {
Instruction_::BinOp(op, op1, op2) => {
trace!("instsel on BINOP");
let ops = inst.ops.read().unwrap();
let res_tmp = self.get_result_value(node);
match op {
op::BinOp::Add => {
if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
trace!("emit add-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2, res
self.backend.emit_add_r_imm(&res_tmp, reg_op2);
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit add-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_mem(&ops[op2], vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2 res
self.backend.emit_add_r_mem(&res_tmp, &reg_op2);
} else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
trace!("emit add-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2 res
self.backend.emit_add_r_r(&res_tmp, &reg_op2);
} else {
unimplemented!()
}
},
op::BinOp::Sub => {
if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
trace!("emit sub-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2, res
self.backend.emit_sub_r_imm(&res_tmp, imm_op2);
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit sub-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2], vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2 res
self.backend.emit_sub_r_mem(&res_tmp, &mem_op2);
} else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
trace!("emit sub-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2 res
self.backend.emit_sub_r_r(&res_tmp, &reg_op2);
} else {
unimplemented!()
}
},
op::BinOp::And => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit and-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
// mov op1 -> res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res
self.backend.emit_and_r_imm(&res_tmp, imm_op2);
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit and-ireg-mem");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let mem_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res
self.backend.emit_and_r_mem(&res_tmp, &mem_op2);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit and-ireg-ireg");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res
self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
} else {
unimplemented!()
}
},
op::BinOp::Or => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit or-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
// mov op1 -> res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r_imm(&res_tmp, imm_op2);
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit or-ireg-mem");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let mem_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r_mem(&res_tmp, &mem_op2);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit or-ireg-ireg");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
} else {
unimplemented!()
}
},
op::BinOp::Xor => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit xor-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
// mov op1 -> res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// xor op2, res -> res
self.backend.emit_xor_r_imm(&res_tmp, imm_op2);
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit xor-ireg-mem");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let mem_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// xor op2, res -> res
self.backend.emit_xor_r_mem(&res_tmp, &mem_op2);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit xor-ireg-ireg");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// xor op2, res -> res
self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
} else {
unimplemented!()
}
}
op::BinOp::Mul => {
// mov op1 -> rax
let op1 = &ops[op1];
let mreg_op1 = match op1.clone_value().ty.get_int_length() {
Some(64) => x86_64::RAX.clone(),
Some(32) => x86_64::EAX.clone(),
Some(16) => x86_64::AX.clone(),
Some(8) => x86_64::AL.clone(),
_ => unimplemented!()
};
if self.match_iimm(op1) {
let imm_op1 = self.node_iimm_to_i32(op1);
self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
} else if self.match_mem(op1) {
let mem_op1 = self.emit_mem(op1, vm);
self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
} else if self.match_ireg(op1) {
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
self.backend.emit_mov_r_r(&mreg_op1, &reg_op1);
} else {
unimplemented!();
}
// mul op2
let op2 = &ops[op2];
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2);
// put imm in a temporary
// here we use result reg as temporary
self.backend.emit_mov_r_imm(&res_tmp, imm_op2);
self.backend.emit_mul_r(&res_tmp);
} else if self.match_mem(op2) {
let mem_op2 = self.emit_mem(op2, vm);
self.backend.emit_mul_mem(&mem_op2);
} else if self.match_ireg(op2) {
let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
self.backend.emit_mul_r(&reg_op2);
} else {
unimplemented!();
}
// mov rax -> result
match res_tmp.ty.get_int_length() {
Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
_ => unimplemented!()
}
},
op::BinOp::Udiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rax -> result
match res_tmp.ty.get_int_length() {
Some(64) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
}
Some(32) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
}
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
}
Some(8) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
}
_ => unimplemented!()
}
},
op::BinOp::Sdiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rax -> result
match res_tmp.ty.get_int_length() {
Some(64) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
}
Some(32) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
}
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
}
Some(8) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
}
_ => unimplemented!()
}
},
op::BinOp::Urem => {
let op1 = &ops[op1];
let op2 = &ops[op2];
self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result
match res_tmp.ty.get_int_length() {
Some(64) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
}
Some(32) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
}
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
}
Some(8) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
}
_ => unimplemented!()
}
},
op::BinOp::Srem => {
let op1 = &ops[op1];
let op2 = &ops[op2];
self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result
match res_tmp.ty.get_int_length() {
Some(64) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
}
Some(32) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
}
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
}
Some(8) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
}
_ => unimplemented!()
}
},
op::BinOp::Shl => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_mem(op1) {
unimplemented!()
} else if self.match_ireg(op1) {
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// shl op1, op2 -> op1
self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
// shl op1, cl -> op1
self.backend.emit_shl_r_cl(&tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
}
},
op::BinOp::Lshr => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_mem(op1) {
unimplemented!()
} else if self.match_ireg(op1) {
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// shr op1, op2 -> op1
self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
// shr op1, cl -> op1
self.backend.emit_shr_r_cl(&tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
}
},
op::BinOp::Ashr => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_mem(op1) {
unimplemented!()
} else if self.match_ireg(op1) {
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// sar op1, op2 -> op1
self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
// sar op1, cl -> op1
self.backend.emit_sar_r_cl(&tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
}
},
// floating point
op::BinOp::FAdd => {
if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
trace!("emit add-fpreg-mem");
let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2], vm);
// mov op1, res
self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
// sub op2 res
self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2);
} else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
trace!("emit add-fpreg-fpreg");
let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
// movsd op1, res
self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
// add op2 res
self.backend.emit_addsd_f64_f64(&res_tmp, &reg_op2);
} else {
panic!("unexpected fadd: {}", node)
}
}