diff --git a/src/ast/src/inst.rs b/src/ast/src/inst.rs
index a39b2177255f99cb9cebac318446b85783afbf26..97d266f1cd0c0daa6f8474b9ae43b473e3455560 100644
--- a/src/ast/src/inst.rs
+++ b/src/ast/src/inst.rs
@@ -92,6 +92,8 @@ pub enum Instruction_ {
 
     // expressions
     BinOp(BinOp, OpIndex, OpIndex),
+    BinOpWithStatus(BinOp, BinOpStatus, OpIndex, OpIndex),
+
     CmpOp(CmpOp, OpIndex, OpIndex),
     ConvOp{
         operation: ConvOp,
@@ -282,6 +284,9 @@ impl Instruction_ {
     fn debug_str(&self, ops: &Vec<P<TreeNode>>) -> String {
         match self {
             &Instruction_::BinOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
+            &Instruction_::BinOpWithStatus(op, status, op1, op2) => {
+                format!("{:?} {:?} {} {}", op, status, ops[op1], ops[op2])
+            }
             &Instruction_::CmpOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
             &Instruction_::ConvOp{operation, ref from_ty, ref to_ty, operand} => {
                 format!("{:?} {} {} {}", operation, from_ty, to_ty, ops[operand])
@@ -402,6 +407,50 @@ impl Instruction_ {
     }
 }
 
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
+pub struct BinOpStatus {
+    pub flag_n: bool,
+    pub flag_z: bool,
+    pub flag_c: bool,
+    pub flag_v: bool
+}
+
+impl BinOpStatus {
+    pub fn n() -> BinOpStatus {
+        BinOpStatus {flag_n: true, flag_z: false, flag_c: false, flag_v: false}
+    }
+
+    pub fn z() -> BinOpStatus {
+        BinOpStatus {flag_n: false, flag_z: true, flag_c: false, flag_v: false}
+    }
+
+    pub fn c() -> BinOpStatus {
+        BinOpStatus {flag_n: false, flag_z: false, flag_c: true, flag_v: false}
+    }
+
+    pub fn v() -> BinOpStatus {
+        BinOpStatus {flag_n: false, flag_z: false, flag_c: false, flag_v: true}
+    }
+}
+
+impl fmt::Debug for BinOpStatus {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        if self.flag_n {
+            write!(f, "#N").unwrap();
+        }
+        if self.flag_z {
+            write!(f, "#Z").unwrap();
+        }
+        if self.flag_c {
+            write!(f, "#C").unwrap();
+        }
+        if self.flag_v {
+            write!(f, "#V").unwrap();
+        }
+        Ok(())
+    }
+}
+
 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
 pub enum MemoryOrder {
     NotAtomic,
diff --git a/src/ast/src/ir_semantics.rs b/src/ast/src/ir_semantics.rs
index d8dc921573a9c3e1f7421b5d02bd3cff84d3176c..a0967b6e252ada14cfd2f6835af6a7851018c140 100644
--- a/src/ast/src/ir_semantics.rs
+++ b/src/ast/src/ir_semantics.rs
@@ -3,7 +3,8 @@ use inst::Instruction_::*;
 
 pub fn is_terminal_inst(inst: &Instruction_) -> bool {
     match inst {
-        &BinOp(_, _, _)
+        &BinOp(_, _, _)
+        | &BinOpWithStatus(_, _, _, _)
         | &CmpOp(_, _, _)
         | &ConvOp{..}
         | &ExprCall{..}
@@ -56,6 +57,7 @@ pub fn is_non_terminal_inst(inst: &Instruction_) -> bool {
 pub fn has_side_effect(inst: &Instruction_) -> bool {
     match inst {
         &BinOp(_, _, _) => false,
+        &BinOpWithStatus(_, _, _, _) => false,
         &CmpOp(_, _, _) => false,
         &ConvOp{..} => false,
         &ExprCall{..} => true,
diff --git a/src/ast/src/op.rs b/src/ast/src/op.rs
index f7aeeb8a49b160efe5b4de90189ec7a585534dd0..bfce4d6cd15413134c460dfd1784d2723c465d0d 100644
--- a/src/ast/src/op.rs
+++ b/src/ast/src/op.rs
@@ -36,6 +36,7 @@ pub enum OpCode {
 
     // expression
     Binary(BinOp),
+    BinaryWithStatus(BinOp),
     Comparison(CmpOp),
     Conversion(ConvOp),
     AtomicRMW(AtomicRMWOp),
@@ -253,6 +254,7 @@ pub fn is_int_cmp(op: CmpOp) -> bool {
 pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
     match inst.v {
         Instruction_::BinOp(op, _, _) => OpCode::Binary(op),
+        Instruction_::BinOpWithStatus(op, _, _, _) => OpCode::BinaryWithStatus(op),
         Instruction_::CmpOp(op, _, _) => OpCode::Comparison(op),
         Instruction_::ConvOp{operation, ..} => OpCode::Conversion(operation),
         Instruction_::AtomicRMW{op, ..} =>
             OpCode::AtomicRMW(op),
diff --git a/src/compiler/backend/arch/x86_64/asm_backend.rs b/src/compiler/backend/arch/x86_64/asm_backend.rs
index b2aa59bcfdd12eda934277cb857c753d77edd4bf..e67bd9411893b52f29eb1340fd0dd164997b7f2d 100644
--- a/src/compiler/backend/arch/x86_64/asm_backend.rs
+++ b/src/compiler/backend/arch/x86_64/asm_backend.rs
@@ -1891,6 +1891,75 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
 
+    // set byte
+    fn emit_sets_r8(&mut self, dest: Reg) {
+        trace!("emit: sets {}", dest);
+
+        let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+        let asm = format!("sets {}", reg);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id => vec![loc]
+            },
+            linked_hashmap!{},
+            false
+        )
+    }
+
+    fn emit_setz_r8(&mut self, dest: Reg) {
+        trace!("emit: setz {}", dest);
+
+        let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+        let asm = format!("setz {}", reg);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id => vec![loc]
+            },
+            linked_hashmap!{},
+            false
+        )
+    }
+
+    fn emit_seto_r8(&mut self, dest: Reg) {
+        trace!("emit: seto {}", dest);
+
+        let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+        let asm = format!("seto {}", reg);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id => vec![loc]
+            },
+            linked_hashmap!{},
+            false
+        )
+    }
+
+    fn emit_setb_r8(&mut self, dest: Reg) {
+        trace!("emit: setb {}", dest);
+
+        let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+        let asm = format!("setb {}", reg);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id => vec![loc]
+            },
+            linked_hashmap!{},
+            false
+        )
+    }
+
     // cmov src -> dest
 
     // binop op1, op2 (op2 is destination)
diff --git a/src/compiler/backend/arch/x86_64/codegen.rs b/src/compiler/backend/arch/x86_64/codegen.rs
index 8e92548b63a2941027f74cc16e36c5eb676ba614..6bd49afe58b344db34ae3792ebc4e75c2c9c18d4 100644
--- a/src/compiler/backend/arch/x86_64/codegen.rs
+++ b/src/compiler/backend/arch/x86_64/codegen.rs
@@ -48,6 +48,12 @@ pub trait CodeGenerator {
     fn emit_movs_r_r (&mut self, dest: Reg, src: Reg);
     fn emit_movz_r_r (&mut self, dest: Reg, src: Reg);
 
+    // set byte
+    fn emit_sets_r8 (&mut self, dest: Reg);
+    fn emit_setz_r8 (&mut self, dest: Reg);
+    fn emit_seto_r8 (&mut self, dest: Reg);
+    fn emit_setb_r8 (&mut self, dest: Reg);
+
     // gpr conditional move
     fn emit_cmova_r_r (&mut self, dest: Reg, src: Reg);
 
diff --git a/src/compiler/backend/arch/x86_64/inst_sel.rs b/src/compiler/backend/arch/x86_64/inst_sel.rs
index 132143a6db7405a7f8e6a0650e44e60322063b9f..5606865f5180d012e18a30dd881057c82876b9a9 100644
--- a/src/compiler/backend/arch/x86_64/inst_sel.rs
+++ b/src/compiler/backend/arch/x86_64/inst_sel.rs
@@ -2,7 +2,7 @@ use ast::ir::*;
 use ast::ptr::*;
 use ast::inst::*;
 use ast::op;
-use ast::op::OpCode;
+use ast::op::*;
 use ast::types;
 use ast::types::*;
 use vm::VM;
@@ -584,573 +584,55 @@ impl <'a> InstructionSelection {
                     Instruction_::BinOp(op, op1, op2) => {
                         trace!("instsel on BINOP");
 
-                        let ops = inst.ops.read().unwrap();
-
-                        let res_tmp = self.get_result_value(node);
-
-                        match op {
-                            op::BinOp::Add => {
-                                if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
-                                    trace!("emit add-ireg-imm");
-
-                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
-                                    let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
-
-                                    // mov op1, res
-                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
-                                    // add op2, res
-                                    self.backend.emit_add_r_imm(&res_tmp, reg_op2);
-                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
-                                    trace!("emit add-ireg-mem");
-
-                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
-                                    let reg_op2
= self.emit_mem(&ops[op2], vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, ®_op1); - // add op2 res - self.backend.emit_add_r_mem(&res_tmp, ®_op2); - } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) { - trace!("emit add-ireg-ireg"); - - let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, ®_op1); - // add op2 res - self.backend.emit_add_r_r(&res_tmp, ®_op2); - } else { - unimplemented!() - } - }, - op::BinOp::Sub => { - if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) { - trace!("emit sub-ireg-imm"); - - let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); - let imm_op2 = self.node_iimm_to_i32(&ops[op2]); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, ®_op1); - // add op2, res - self.backend.emit_sub_r_imm(&res_tmp, imm_op2); - } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) { - trace!("emit sub-ireg-mem"); - - let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); - let mem_op2 = self.emit_mem(&ops[op2], vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, ®_op1); - // sub op2 res - self.backend.emit_sub_r_mem(&res_tmp, &mem_op2); - } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) { - trace!("emit sub-ireg-ireg"); - - let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, ®_op1); - // add op2 res - self.backend.emit_sub_r_r(&res_tmp, ®_op2); - } else { - unimplemented!() - } - }, - op::BinOp::And => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_ireg(op1) && self.match_iimm(op2) { - trace!("emit and-ireg-iimm"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let imm_op2 = self.node_iimm_to_i32(op2); - - // mov op1 -> res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // and op2, res -> res - self.backend.emit_and_r_imm(&res_tmp, imm_op2); - } else if self.match_ireg(op1) && self.match_mem(op2) { - trace!("emit and-ireg-mem"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let mem_op2 = self.emit_mem(op2, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // and op2, res -> res - self.backend.emit_and_r_mem(&res_tmp, &mem_op2); - } else if self.match_ireg(op1) && self.match_ireg(op2) { - trace!("emit and-ireg-ireg"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // and op2, res -> res - self.backend.emit_and_r_r(&res_tmp, &tmp_op2); - } else { - unimplemented!() - } - }, - op::BinOp::Or => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_ireg(op1) && self.match_iimm(op2) { - trace!("emit or-ireg-iimm"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let imm_op2 = self.node_iimm_to_i32(op2); - - // mov op1 -> res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // Or op2, res -> res - self.backend.emit_or_r_imm(&res_tmp, imm_op2); - } else if self.match_ireg(op1) && self.match_mem(op2) { - trace!("emit or-ireg-mem"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let mem_op2 = self.emit_mem(op2, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // Or op2, res -> res - 
self.backend.emit_or_r_mem(&res_tmp, &mem_op2); - } else if self.match_ireg(op1) && self.match_ireg(op2) { - trace!("emit or-ireg-ireg"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // Or op2, res -> res - self.backend.emit_or_r_r(&res_tmp, &tmp_op2); - } else { - unimplemented!() - } - }, - op::BinOp::Xor => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_ireg(op1) && self.match_iimm(op2) { - trace!("emit xor-ireg-iimm"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let imm_op2 = self.node_iimm_to_i32(op2); - - // mov op1 -> res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // xor op2, res -> res - self.backend.emit_xor_r_imm(&res_tmp, imm_op2); - } else if self.match_ireg(op1) && self.match_mem(op2) { - trace!("emit xor-ireg-mem"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let mem_op2 = self.emit_mem(op2, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // xor op2, res -> res - self.backend.emit_xor_r_mem(&res_tmp, &mem_op2); - } else if self.match_ireg(op1) && self.match_ireg(op2) { - trace!("emit xor-ireg-ireg"); - - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op1, res - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - // xor op2, res -> res - self.backend.emit_xor_r_r(&res_tmp, &tmp_op2); - } else { - unimplemented!() - } - } - op::BinOp::Mul => { - // mov op1 -> rax - let op1 = &ops[op1]; - - let mreg_op1 = match op1.clone_value().ty.get_int_length() { - Some(64) => x86_64::RAX.clone(), - Some(32) => x86_64::EAX.clone(), - Some(16) => x86_64::AX.clone(), - Some(8) => x86_64::AL.clone(), - _ => unimplemented!() - }; - - if self.match_iimm(op1) { - let imm_op1 = self.node_iimm_to_i32(op1); - - self.backend.emit_mov_r_imm(&mreg_op1, imm_op1); - } else if self.match_mem(op1) { - let mem_op1 = self.emit_mem(op1, vm); - - self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1); - } else if self.match_ireg(op1) { - let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm); - - self.backend.emit_mov_r_r(&mreg_op1, ®_op1); - } else { - unimplemented!(); - } - - // mul op2 - let op2 = &ops[op2]; - if self.match_iimm(op2) { - let imm_op2 = self.node_iimm_to_i32(op2); - - // put imm in a temporary - // here we use result reg as temporary - self.backend.emit_mov_r_imm(&res_tmp, imm_op2); - - self.backend.emit_mul_r(&res_tmp); - } else if self.match_mem(op2) { - let mem_op2 = self.emit_mem(op2, vm); - - self.backend.emit_mul_mem(&mem_op2); - } else if self.match_ireg(op2) { - let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - self.backend.emit_mul_r(®_op2); - } else { - unimplemented!(); - } - - // mov rax -> result - match res_tmp.ty.get_int_length() { - Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX), - Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX), - Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX), - Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL), - _ => unimplemented!() - } - - }, - op::BinOp::Udiv => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - self.emit_udiv(op1, op2, f_content, f_context, vm); - - // mov rax -> result - match res_tmp.ty.get_int_length() { - Some(64) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); - } - Some(32) => { - self.backend.emit_mov_r_r(&res_tmp, 
&x86_64::EAX); - } - Some(16) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX); - } - Some(8) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); - } - _ => unimplemented!() - } - }, - op::BinOp::Sdiv => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - self.emit_idiv(op1, op2, f_content, f_context, vm); - - // mov rax -> result - match res_tmp.ty.get_int_length() { - Some(64) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); - } - Some(32) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX); - } - Some(16) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX); - } - Some(8) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); - } - _ => unimplemented!() - } - }, - op::BinOp::Urem => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - self.emit_udiv(op1, op2, f_content, f_context, vm); - - // mov rdx -> result - match res_tmp.ty.get_int_length() { - Some(64) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); - } - Some(32) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); - } - Some(16) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX); - } - Some(8) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); - } - _ => unimplemented!() - } - }, - op::BinOp::Srem => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - self.emit_idiv(op1, op2, f_content, f_context, vm); - - // mov rdx -> result - match res_tmp.ty.get_int_length() { - Some(64) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); - } - Some(32) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); - } - Some(16) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX); - } - Some(8) => { - self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); - } - _ => unimplemented!() - } - }, - - op::BinOp::Shl => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_mem(op1) { - unimplemented!() - } else if self.match_ireg(op1) { - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - - if self.match_iimm(op2) { - let imm_op2 = self.node_iimm_to_i32(op2) as i8; - - // shl op1, op2 -> op1 - self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else if self.match_ireg(op2) { - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op2 -> cl - self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); - - // shl op1, cl -> op1 - self.backend.emit_shl_r_cl(&tmp_op1); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else { - panic!("unexpected op2 (not ireg not iimm): {}", op2); - } - } else { - panic!("unexpected op1 (not ireg not mem): {}", op1); - } - }, - op::BinOp::Lshr => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_mem(op1) { - unimplemented!() - } else if self.match_ireg(op1) { - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - - if self.match_iimm(op2) { - let imm_op2 = self.node_iimm_to_i32(op2) as i8; - - // shr op1, op2 -> op1 - self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else if self.match_ireg(op2) { - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op2 -> cl - self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); - - // shr op1, cl -> op1 - self.backend.emit_shr_r_cl(&tmp_op1); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else { - panic!("unexpected op2 (not ireg not iimm): {}", op2); - } - } else { - panic!("unexpected op1 (not ireg not mem): {}", op1); - } - }, - 
op::BinOp::Ashr => { - let op1 = &ops[op1]; - let op2 = &ops[op2]; - - if self.match_mem(op1) { - unimplemented!() - } else if self.match_ireg(op1) { - let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); - - if self.match_iimm(op2) { - let imm_op2 = self.node_iimm_to_i32(op2) as i8; - - // sar op1, op2 -> op1 - self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else if self.match_ireg(op2) { - let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); - - // mov op2 -> cl - self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); - - // sar op1, cl -> op1 - self.backend.emit_sar_r_cl(&tmp_op1); - - // mov op1 -> result - self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); - } else { - panic!("unexpected op2 (not ireg not iimm): {}", op2); - } - } else { - panic!("unexpected op1 (not ireg not mem): {}", op1); - } - }, - - - // floating point - op::BinOp::FAdd => { - if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { - trace!("emit add-fpreg-mem"); - - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let mem_op2 = self.emit_mem(&ops[op2], vm); - - // mov op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // sub op2 res - self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2); - } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { - trace!("emit add-fpreg-fpreg"); - - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); - - // movsd op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // add op2 res - self.backend.emit_addsd_f64_f64(&res_tmp, ®_op2); - } else { - panic!("unexpected fadd: {}", node) - } - } - - op::BinOp::FSub => { - if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { - trace!("emit sub-fpreg-mem"); - - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let mem_op2 = self.emit_mem(&ops[op2], vm); - - // mov op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // sub op2 res - self.backend.emit_subsd_f64_mem64(&res_tmp, &mem_op2); - } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { - trace!("emit sub-fpreg-fpreg"); - - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); - - // movsd op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // sub op2 res - self.backend.emit_subsd_f64_f64(&res_tmp, ®_op2); - } else { - panic!("unexpected fsub: {}", node) - } - } - - op::BinOp::FMul => { - if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { - trace!("emit mul-fpreg-mem"); + self.emit_binop(node, inst, op, op1, op2, f_content, f_context, vm); + }, - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let mem_op2 = self.emit_mem(&ops[op2], vm); + Instruction_::BinOpWithStatus(op, status, op1, op2) => { + trace!("instsel on BINOP_STATUS"); - // mov op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // mul op2 res - self.backend.emit_mulsd_f64_mem64(&res_tmp, &mem_op2); - } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { - trace!("emit mul-fpreg-fpreg"); + self.emit_binop(node, inst, op, op1, op2, f_content, f_context, vm); - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); + let values = inst.value.as_ref().unwrap(); + let mut status_value_index = 1; - // movsd 
op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // mul op2 res - self.backend.emit_mulsd_f64_f64(&res_tmp, ®_op2); - } else { - panic!("unexpected fmul: {}", node) - } - } + // negative flag + if status.flag_n { + let tmp_status = values[status_value_index].clone(); + status_value_index += 1; - op::BinOp::FDiv => { - if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { - trace!("emit div-fpreg-mem"); + self.backend.emit_sets_r8(&tmp_status); + } - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let mem_op2 = self.emit_mem(&ops[op2], vm); + // zero flag + if status.flag_z { + let tmp_status = values[status_value_index].clone(); + status_value_index += 1; - // mov op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // div op2 res - self.backend.emit_divsd_f64_mem64(&res_tmp, &mem_op2); - } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { - trace!("emit div-fpreg-fpreg"); + self.backend.emit_setz_r8(&tmp_status); + } - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); + // unsigned overflow + if status.flag_c { + let tmp_status = values[status_value_index].clone(); + status_value_index += 1; - // movsd op1, res - self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); - // div op2 res - self.backend.emit_divsd_f64_f64(&res_tmp, ®_op2); - } else { - panic!("unexpected fdiv: {}", node) + match op { + BinOp::Add | BinOp::Sub | BinOp::Mul => { + self.backend.emit_setb_r8(&tmp_status); } + _ => panic!("Only Add/Sub/Mul has #C flag") } + } - op::BinOp::FRem => { - if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { - trace!("emit frem-fpreg-fpreg"); - - let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); - let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); - - let reg_tmp = self.get_result_value(node); + // signed overflow + if status.flag_v { + let tmp_status = values[status_value_index].clone(); - self.emit_runtime_entry(&entrypoints::FREM, vec![reg_op1.clone(), reg_op2.clone()], Some(vec![reg_tmp.clone()]), Some(node), f_content, f_context, vm); - } else { - panic!("unexpected fdiv: {}", node) + match op { + BinOp::Add | BinOp::Sub | BinOp::Mul => { + self.backend.emit_seto_r8(&tmp_status); } + _ => panic!("Only Add/Sub/Mul has #V flag") } } } @@ -1719,6 +1201,578 @@ impl <'a> InstructionSelection { }) } + fn emit_binop (&mut self, node: &TreeNode, inst: &Instruction, op: BinOp, op1: OpIndex, op2: OpIndex, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) { + let ops = inst.ops.read().unwrap(); + + let res_tmp = self.get_result_value(node); + + match op { + op::BinOp::Add => { + if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) { + trace!("emit add-ireg-imm"); + + let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.node_iimm_to_i32(&ops[op2]); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // add op2, res + self.backend.emit_add_r_imm(&res_tmp, reg_op2); + } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit add-ireg-mem"); + + let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // add op2 res + self.backend.emit_add_r_mem(&res_tmp, ®_op2); + } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) { + trace!("emit add-ireg-ireg"); + + let 
reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // add op2 res + self.backend.emit_add_r_r(&res_tmp, ®_op2); + } else { + unimplemented!() + } + }, + op::BinOp::Sub => { + if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) { + trace!("emit sub-ireg-imm"); + + let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let imm_op2 = self.node_iimm_to_i32(&ops[op2]); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // add op2, res + self.backend.emit_sub_r_imm(&res_tmp, imm_op2); + } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit sub-ireg-mem"); + + let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let mem_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // sub op2 res + self.backend.emit_sub_r_mem(&res_tmp, &mem_op2); + } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) { + trace!("emit sub-ireg-ireg"); + + let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, ®_op1); + // add op2 res + self.backend.emit_sub_r_r(&res_tmp, ®_op2); + } else { + unimplemented!() + } + }, + op::BinOp::And => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_ireg(op1) && self.match_iimm(op2) { + trace!("emit and-ireg-iimm"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let imm_op2 = self.node_iimm_to_i32(op2); + + // mov op1 -> res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // and op2, res -> res + self.backend.emit_and_r_imm(&res_tmp, imm_op2); + } else if self.match_ireg(op1) && self.match_mem(op2) { + trace!("emit and-ireg-mem"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let mem_op2 = self.emit_mem(op2, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // and op2, res -> res + self.backend.emit_and_r_mem(&res_tmp, &mem_op2); + } else if self.match_ireg(op1) && self.match_ireg(op2) { + trace!("emit and-ireg-ireg"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // and op2, res -> res + self.backend.emit_and_r_r(&res_tmp, &tmp_op2); + } else { + unimplemented!() + } + }, + op::BinOp::Or => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_ireg(op1) && self.match_iimm(op2) { + trace!("emit or-ireg-iimm"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let imm_op2 = self.node_iimm_to_i32(op2); + + // mov op1 -> res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // Or op2, res -> res + self.backend.emit_or_r_imm(&res_tmp, imm_op2); + } else if self.match_ireg(op1) && self.match_mem(op2) { + trace!("emit or-ireg-mem"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let mem_op2 = self.emit_mem(op2, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // Or op2, res -> res + self.backend.emit_or_r_mem(&res_tmp, &mem_op2); + } else if self.match_ireg(op1) && self.match_ireg(op2) { + trace!("emit or-ireg-ireg"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov 
op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // Or op2, res -> res + self.backend.emit_or_r_r(&res_tmp, &tmp_op2); + } else { + unimplemented!() + } + }, + op::BinOp::Xor => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_ireg(op1) && self.match_iimm(op2) { + trace!("emit xor-ireg-iimm"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let imm_op2 = self.node_iimm_to_i32(op2); + + // mov op1 -> res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // xor op2, res -> res + self.backend.emit_xor_r_imm(&res_tmp, imm_op2); + } else if self.match_ireg(op1) && self.match_mem(op2) { + trace!("emit xor-ireg-mem"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let mem_op2 = self.emit_mem(op2, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // xor op2, res -> res + self.backend.emit_xor_r_mem(&res_tmp, &mem_op2); + } else if self.match_ireg(op1) && self.match_ireg(op2) { + trace!("emit xor-ireg-ireg"); + + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov op1, res + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + // xor op2, res -> res + self.backend.emit_xor_r_r(&res_tmp, &tmp_op2); + } else { + unimplemented!() + } + } + op::BinOp::Mul => { + // mov op1 -> rax + let op1 = &ops[op1]; + + let mreg_op1 = match op1.clone_value().ty.get_int_length() { + Some(64) => x86_64::RAX.clone(), + Some(32) => x86_64::EAX.clone(), + Some(16) => x86_64::AX.clone(), + Some(8) => x86_64::AL.clone(), + _ => unimplemented!() + }; + + if self.match_iimm(op1) { + let imm_op1 = self.node_iimm_to_i32(op1); + + self.backend.emit_mov_r_imm(&mreg_op1, imm_op1); + } else if self.match_mem(op1) { + let mem_op1 = self.emit_mem(op1, vm); + + self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1); + } else if self.match_ireg(op1) { + let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm); + + self.backend.emit_mov_r_r(&mreg_op1, ®_op1); + } else { + unimplemented!(); + } + + // mul op2 + let op2 = &ops[op2]; + if self.match_iimm(op2) { + let imm_op2 = self.node_iimm_to_i32(op2); + + // put imm in a temporary + // here we use result reg as temporary + self.backend.emit_mov_r_imm(&res_tmp, imm_op2); + + self.backend.emit_mul_r(&res_tmp); + } else if self.match_mem(op2) { + let mem_op2 = self.emit_mem(op2, vm); + + self.backend.emit_mul_mem(&mem_op2); + } else if self.match_ireg(op2) { + let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + self.backend.emit_mul_r(®_op2); + } else { + unimplemented!(); + } + + // mov rax -> result + match res_tmp.ty.get_int_length() { + Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX), + Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX), + Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX), + Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL), + _ => unimplemented!() + } + + }, + op::BinOp::Udiv => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + self.emit_udiv(op1, op2, f_content, f_context, vm); + + // mov rax -> result + match res_tmp.ty.get_int_length() { + Some(64) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); + } + Some(32) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX); + } + Some(16) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX); + } + Some(8) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); + } + _ => unimplemented!() + } + }, + op::BinOp::Sdiv => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + 
self.emit_idiv(op1, op2, f_content, f_context, vm); + + // mov rax -> result + match res_tmp.ty.get_int_length() { + Some(64) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); + } + Some(32) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX); + } + Some(16) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX); + } + Some(8) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); + } + _ => unimplemented!() + } + }, + op::BinOp::Urem => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + self.emit_udiv(op1, op2, f_content, f_context, vm); + + // mov rdx -> result + match res_tmp.ty.get_int_length() { + Some(64) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); + } + Some(32) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); + } + Some(16) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX); + } + Some(8) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); + } + _ => unimplemented!() + } + }, + op::BinOp::Srem => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + self.emit_idiv(op1, op2, f_content, f_context, vm); + + // mov rdx -> result + match res_tmp.ty.get_int_length() { + Some(64) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); + } + Some(32) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); + } + Some(16) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX); + } + Some(8) => { + self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); + } + _ => unimplemented!() + } + }, + + op::BinOp::Shl => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_mem(op1) { + unimplemented!() + } else if self.match_ireg(op1) { + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + + if self.match_iimm(op2) { + let imm_op2 = self.node_iimm_to_i32(op2) as i8; + + // shl op1, op2 -> op1 + self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else if self.match_ireg(op2) { + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov op2 -> cl + self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); + + // shl op1, cl -> op1 + self.backend.emit_shl_r_cl(&tmp_op1); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else { + panic!("unexpected op2 (not ireg not iimm): {}", op2); + } + } else { + panic!("unexpected op1 (not ireg not mem): {}", op1); + } + }, + op::BinOp::Lshr => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_mem(op1) { + unimplemented!() + } else if self.match_ireg(op1) { + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + + if self.match_iimm(op2) { + let imm_op2 = self.node_iimm_to_i32(op2) as i8; + + // shr op1, op2 -> op1 + self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else if self.match_ireg(op2) { + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov op2 -> cl + self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); + + // shr op1, cl -> op1 + self.backend.emit_shr_r_cl(&tmp_op1); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else { + panic!("unexpected op2 (not ireg not iimm): {}", op2); + } + } else { + panic!("unexpected op1 (not ireg not mem): {}", op1); + } + }, + op::BinOp::Ashr => { + let op1 = &ops[op1]; + let op2 = &ops[op2]; + + if self.match_mem(op1) { + unimplemented!() + } else if self.match_ireg(op1) { + let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm); + + if self.match_iimm(op2) { + let imm_op2 = 
self.node_iimm_to_i32(op2) as i8; + + // sar op1, op2 -> op1 + self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else if self.match_ireg(op2) { + let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm); + + // mov op2 -> cl + self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2); + + // sar op1, cl -> op1 + self.backend.emit_sar_r_cl(&tmp_op1); + + // mov op1 -> result + self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); + } else { + panic!("unexpected op2 (not ireg not iimm): {}", op2); + } + } else { + panic!("unexpected op1 (not ireg not mem): {}", op1); + } + }, + + + // floating point + op::BinOp::FAdd => { + if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit add-fpreg-mem"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let mem_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // sub op2 res + self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2); + } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { + trace!("emit add-fpreg-fpreg"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); + + // movsd op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // add op2 res + self.backend.emit_addsd_f64_f64(&res_tmp, ®_op2); + } else { + panic!("unexpected fadd: {}", node) + } + } + + op::BinOp::FSub => { + if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit sub-fpreg-mem"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let mem_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // sub op2 res + self.backend.emit_subsd_f64_mem64(&res_tmp, &mem_op2); + } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { + trace!("emit sub-fpreg-fpreg"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); + + // movsd op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // sub op2 res + self.backend.emit_subsd_f64_f64(&res_tmp, ®_op2); + } else { + panic!("unexpected fsub: {}", node) + } + } + + op::BinOp::FMul => { + if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit mul-fpreg-mem"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let mem_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // mul op2 res + self.backend.emit_mulsd_f64_mem64(&res_tmp, &mem_op2); + } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) { + trace!("emit mul-fpreg-fpreg"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm); + + // movsd op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // mul op2 res + self.backend.emit_mulsd_f64_f64(&res_tmp, ®_op2); + } else { + panic!("unexpected fmul: {}", node) + } + } + + op::BinOp::FDiv => { + if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) { + trace!("emit div-fpreg-mem"); + + let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm); + let mem_op2 = self.emit_mem(&ops[op2], vm); + + // mov op1, res + self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1); + // div op2 res + self.backend.emit_divsd_f64_mem64(&res_tmp, &mem_op2); 
+                } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+                    trace!("emit div-fpreg-fpreg");
+
+                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+                    // movsd op1, res
+                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
+                    // div op2 res
+                    self.backend.emit_divsd_f64_f64(&res_tmp, &reg_op2);
+                } else {
+                    panic!("unexpected fdiv: {}", node)
+                }
+            }
+
+            op::BinOp::FRem => {
+                if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+                    trace!("emit frem-fpreg-fpreg");
+
+                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+                    let reg_tmp = self.get_result_value(node);
+
+                    self.emit_runtime_entry(&entrypoints::FREM, vec![reg_op1.clone(), reg_op2.clone()], Some(vec![reg_tmp.clone()]), Some(node), f_content, f_context, vm);
+                } else {
+                    panic!("unexpected frem: {}", node)
+                }
+            }
+        }
+    }
+
     fn emit_alloc_sequence (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
         if size.is_int_const() {
             // size known at compile time, we can choose to emit alloc_small or large now
@@ -3329,12 +3383,12 @@ impl <'a> InstructionSelection {
         match node.v {
             TreeNode_::Instruction(ref inst) => {
                 if inst.value.is_some() {
+                    let ref value = inst.value.as_ref().unwrap()[0];
+
                     if inst.value.as_ref().unwrap().len() > 1 {
-                        panic!("expected ONE result from the node {}", node);
+                        warn!("retrieving value from a node with more than one value: {}, use the first value: {}", node, value);
                     }
 
-                    let ref value = inst.value.as_ref().unwrap()[0];
-
                     value.clone()
                 } else {
                     panic!("expected result from the node {}", node);
diff --git a/src/compiler/backend/arch/x86_64/mod.rs b/src/compiler/backend/arch/x86_64/mod.rs
index 96ac2eb2df9dd0595a4e6142815bc8285391843c..146340f4fe581a101c5f68f1f7836933cbd3432b 100644
--- a/src/compiler/backend/arch/x86_64/mod.rs
+++ b/src/compiler/backend/arch/x86_64/mod.rs
@@ -480,6 +480,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
     match inst.v {
         // simple
         BinOp(_, _, _) => 1,
+        BinOpWithStatus(_, _, _, _) => 2,
         CmpOp(_, _, _) => 1,
         ConvOp{..} => 0,
 
diff --git a/src/vm/api/api_impl/muirbuilder.rs b/src/vm/api/api_impl/muirbuilder.rs
index 305fcb2aa3d25686d9e22a2db88a45dd3e46c860..5e49a53efd9b211dc163891c7ef3839c088853a2 100644
--- a/src/vm/api/api_impl/muirbuilder.rs
+++ b/src/vm/api/api_impl/muirbuilder.rs
@@ -308,7 +308,11 @@ impl MuIRBuilder {
     }
 
     pub fn new_binop_with_status(&mut self, id: MuID, result_id: MuID, status_result_ids: Vec<MuID>, optr: CMuBinOptr, status_flags: CMuBinOpStatus, ty: MuID, opnd1: MuID, opnd2: MuID, exc_clause: Option<MuID>) {
-        panic!("Not implemented")
+        self.add_inst(id, NodeInst::NodeBinOp {
+            id: id, result_id: result_id, status_result_ids: status_result_ids,
+            optr: optr, flags: status_flags, ty: ty, opnd1: opnd1, opnd2: opnd2,
+            exc_clause: exc_clause
+        })
     }
 
     pub fn new_cmp(&mut self, id: MuID, result_id: MuID, optr: CMuCmpOptr, ty: MuID, opnd1: MuID, opnd2: MuID) {
diff --git a/tests/ir_macros.rs b/tests/ir_macros.rs
index 20053e39e1c5cc376eca833597650c56222de22e..aa472ac86830afb6c4882f1aaef4d404306c2ad1 100644
--- a/tests/ir_macros.rs
+++ b/tests/ir_macros.rs
@@ -247,6 +247,16 @@ macro_rules! inst {
         });
     };
 
+    // BINOP with status
+    (($vm: expr, $fv: ident) $name: ident: $value: ident, $($flag: ident), * = BINOP_STATUS ($op: expr) ($flags: expr) $op1: ident $op2: ident) => {
+        let $name = $fv.new_inst(Instruction{
+            hdr: MuEntityHeader::unnamed($vm.next_id()),
+            value: Some(vec![$value.clone_value(), $($flag.clone_value()), *]),
+            ops: RwLock::new(vec![$op1.clone(), $op2.clone()]),
+            v: Instruction_::BinOpWithStatus($op, $flags, 0, 1)
+        });
+    };
+
     // CMPOP
     (($vm: expr, $fv: ident) $name: ident: $value: ident = CMPOP ($op: expr) $op1: ident $op2: ident) => {
         let $name = $fv.new_inst(Instruction{
diff --git a/tests/test_compiler/test_binop.rs b/tests/test_compiler/test_binop.rs
index fe58e0bceb2961b165df4b7b3e0597f69f6fce65..8f53f834536db40157dc8d8505cd0fab5f4b2c72 100644
--- a/tests/test_compiler/test_binop.rs
+++ b/tests/test_compiler/test_binop.rs
@@ -353,3 +353,314 @@ fn lshr() -> VM {
 
     vm
 }
+
+#[test]
+fn test_add_int64_n() {
+    let lib = testutil::compile_fnc("add_int64_n", &add_int64_n);
+
+    unsafe {
+        let add_int64_n : libloading::Symbol<unsafe extern fn(i64, i64) -> u8> = lib.get(b"add_int64_n").unwrap();
+
+        let flag = add_int64_n(1, 1);
+        println!("add_int64_n(1, 1), #N = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_n(1, -2);
+        println!("add_int64_n(1, -2), #N = {}", flag);
+        assert!(flag == 1);
+
+        let flag = add_int64_n(1, -1);
+        println!("add_int64_n(1, -1), #N = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_n(-1, -1);
+        println!("add_int64_n(-1, -1), #N = {}", flag);
+        assert!(flag == 1);
+    }
+}
+
+fn add_int64_n() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) int1 = mu_int(1));
+
+    funcsig! ((vm) sig = (int64, int64) -> (int1));
+    funcdecl! ((vm) <sig> add_int64_n);
+    funcdef! ((vm) <sig> add_int64_n VERSION add_int64_n_v1);
+
+    block! ((vm, add_int64_n_v1) blk_entry);
+    ssa! ((vm, add_int64_n_v1) <int64> a);
+    ssa! ((vm, add_int64_n_v1) <int64> b);
+
+    // (sum, flag_n) = Add #N %a %b
+    ssa! ((vm, add_int64_n_v1) <int64> sum);
+    ssa! ((vm, add_int64_n_v1) <int1> flag_n);
+    inst! ((vm, add_int64_n_v1) blk_entry_add:
+        sum, flag_n = BINOP_STATUS (BinOp::Add) (BinOpStatus::n()) a b
+    );
+
+    inst! ((vm, add_int64_n_v1) blk_entry_ret:
+        RET (flag_n)
+    );
+
+    define_block! ((vm, add_int64_n_v1) blk_entry(a, b) {
+        blk_entry_add, blk_entry_ret
+    });
+
+    define_func_ver!((vm) add_int64_n_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
+
+#[test]
+fn test_add_int64_z() {
+    let lib = testutil::compile_fnc("add_int64_z", &add_int64_z);
+
+    unsafe {
+        let add_int64_z : libloading::Symbol<unsafe extern fn(i64, i64) -> u8> = lib.get(b"add_int64_z").unwrap();
+
+        let flag = add_int64_z(1, 1);
+        println!("add_int64_z(1, 1), #Z = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_z(1, -2);
+        println!("add_int64_z(1, -2), #Z = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_z(1, -1);
+        println!("add_int64_z(1, -1), #Z = {}", flag);
+        assert!(flag == 1);
+    }
+}
+
+fn add_int64_z() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) int1 = mu_int(1));
+
+    funcsig! ((vm) sig = (int64, int64) -> (int1));
+    funcdecl! ((vm) <sig> add_int64_z);
+    funcdef! ((vm) <sig> add_int64_z VERSION add_int64_z_v1);
+
+    block! ((vm, add_int64_z_v1) blk_entry);
+    ssa! ((vm, add_int64_z_v1) <int64> a);
+    ssa! ((vm, add_int64_z_v1) <int64> b);
+
+    // (sum, flag_z) = Add #Z %a %b
+    ssa! ((vm, add_int64_z_v1) <int64> sum);
+    ssa! ((vm, add_int64_z_v1) <int1> flag_z);
+    inst! ((vm, add_int64_z_v1) blk_entry_add:
+        sum, flag_z = BINOP_STATUS (BinOp::Add) (BinOpStatus::z()) a b
+    );
+
+    inst! ((vm, add_int64_z_v1) blk_entry_ret:
+        RET (flag_z)
+    );
+
+    define_block! ((vm, add_int64_z_v1) blk_entry(a, b) {
+        blk_entry_add, blk_entry_ret
+    });
+
+    define_func_ver!((vm) add_int64_z_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
+
+#[test]
+fn test_add_int64_c() {
+    use std::u64;
+
+    let lib = testutil::compile_fnc("add_int64_c", &add_int64_c);
+
+    unsafe {
+        let add_int64_c : libloading::Symbol<unsafe extern fn(u64, u64) -> u8> = lib.get(b"add_int64_c").unwrap();
+
+        let flag = add_int64_c(u64::MAX, 1);
+        println!("add_int64_c(u64::MAX, 1), #C = {}", flag);
+        assert!(flag == 1);
+
+        let flag = add_int64_c(u64::MAX, 0);
+        println!("add_int64_c(u64::MAX, 0), #C = {}", flag);
+        assert!(flag == 0);
+    }
+}
+
+fn add_int64_c() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) int1 = mu_int(1));
+
+    funcsig! ((vm) sig = (int64, int64) -> (int1));
+    funcdecl! ((vm) <sig> add_int64_c);
+    funcdef! ((vm) <sig> add_int64_c VERSION add_int64_c_v1);
+
+    block! ((vm, add_int64_c_v1) blk_entry);
+    ssa! ((vm, add_int64_c_v1) <int64> a);
+    ssa! ((vm, add_int64_c_v1) <int64> b);
+
+    // (sum, flag_c) = Add #C %a %b
+    ssa! ((vm, add_int64_c_v1) <int64> sum);
+    ssa! ((vm, add_int64_c_v1) <int1> flag_c);
+    inst! ((vm, add_int64_c_v1) blk_entry_add:
+        sum, flag_c = BINOP_STATUS (BinOp::Add) (BinOpStatus::c()) a b
+    );
+
+    inst! ((vm, add_int64_c_v1) blk_entry_ret:
+        RET (flag_c)
+    );
+
+    define_block! ((vm, add_int64_c_v1) blk_entry(a, b) {
+        blk_entry_add, blk_entry_ret
+    });
+
+    define_func_ver!((vm) add_int64_c_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
+
+#[test]
+fn test_add_int64_v() {
+    use std::i64;
+
+    let lib = testutil::compile_fnc("add_int64_v", &add_int64_v);
+
+    unsafe {
+        let add_int64_v : libloading::Symbol<unsafe extern fn(i64, i64) -> u8> = lib.get(b"add_int64_v").unwrap();
+
+        let flag = add_int64_v(i64::MAX, 1);
+        println!("add_int64_v(i64::MAX, 1), #V = {}", flag);
+        assert!(flag == 1);
+
+        let flag = add_int64_v(i64::MAX, 0);
+        println!("add_int64_v(i64::MAX, 0), #V = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_v(i64::MIN, 0);
+        println!("add_int64_v(i64::MIN, 0), #V = {}", flag);
+        assert!(flag == 0);
+
+        let flag = add_int64_v(i64::MIN, -1);
+        println!("add_int64_v(i64::MIN, -1), #V = {}", flag);
+        assert!(flag == 1);
+    }
+}
+
+fn add_int64_v() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) int1 = mu_int(1));
+
+    funcsig! ((vm) sig = (int64, int64) -> (int1));
+    funcdecl! ((vm) <sig> add_int64_v);
+    funcdef! ((vm) <sig> add_int64_v VERSION add_int64_v_v1);
+
+    block! ((vm, add_int64_v_v1) blk_entry);
+    ssa! ((vm, add_int64_v_v1) <int64> a);
+    ssa! ((vm, add_int64_v_v1) <int64> b);
+
+    // (sum, flag_v) = Add #V %a %b
+    ssa! ((vm, add_int64_v_v1) <int64> sum);
+    ssa! ((vm, add_int64_v_v1) <int1> flag_v);
+    inst! ((vm, add_int64_v_v1) blk_entry_add:
+        sum, flag_v = BINOP_STATUS (BinOp::Add) (BinOpStatus::v()) a b
+    );
+
+    inst! ((vm, add_int64_v_v1) blk_entry_ret:
+        RET (flag_v)
+    );
+
+    define_block! ((vm, add_int64_v_v1) blk_entry(a, b) {
+        blk_entry_add, blk_entry_ret
+    });
+
+    define_func_ver!((vm) add_int64_v_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
+
+#[test]
+fn test_add_int64_nzc() {
+    use std::u64;
+
+    let lib = testutil::compile_fnc("add_int64_nzc", &add_int64_nzc);
+
+    unsafe {
+        let add_int64_nzc : libloading::Symbol<unsafe extern fn(u64, u64) -> u8> = lib.get(b"add_int64_nzc").unwrap();
+
+        let flag = add_int64_nzc(u64::MAX, 1);
+        println!("add_int64_nzc(u64::MAX, 1), #NZC = {:b}", flag);
+        assert!(flag == 0b110);
+
+        let flag = add_int64_nzc(u64::MAX, 0);
+        println!("add_int64_nzc(u64::MAX, 0), #NZC = {:b}", flag);
+        assert!(flag == 0b001);
+    }
+}
+
+fn add_int64_nzc() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) int8 = mu_int(8));
+    typedef! ((vm) int1 = mu_int(1));
+
+    constdef! ((vm) <int8> int8_1 = Constant::Int(1));
+    constdef! ((vm) <int8> int8_2 = Constant::Int(2));
+    constdef! ((vm) <int8> int8_3 = Constant::Int(3));
+
+    funcsig! ((vm) sig = (int64, int64) -> (int1));
+    funcdecl! ((vm) <sig> add_int64_nzc);
+    funcdef! ((vm) <sig> add_int64_nzc VERSION add_int64_nzc_v1);
+
+    block! ((vm, add_int64_nzc_v1) blk_entry);
+    ssa! ((vm, add_int64_nzc_v1) <int64> a);
+    ssa! ((vm, add_int64_nzc_v1) <int64> b);
+
+    // (sum, flag_n, flag_z, flag_c) = Add #N #Z #C %a %b
+    ssa! ((vm, add_int64_nzc_v1) <int64> sum);
+    ssa! ((vm, add_int64_nzc_v1) <int1> flag_n);
+    ssa! ((vm, add_int64_nzc_v1) <int1> flag_z);
+    ssa! ((vm, add_int64_nzc_v1) <int1> flag_c);
+
+    inst! ((vm, add_int64_nzc_v1) blk_entry_add:
+        sum, flag_n, flag_z, flag_c = BINOP_STATUS (BinOp::Add) (BinOpStatus{flag_n: true, flag_z: true, flag_c: true, flag_v: false}) a b
+    );
+
+    ssa! ((vm, add_int64_nzc_v1) <int1> shift_z);
+    consta! ((vm, add_int64_nzc_v1) int8_1_local = int8_1);
+    inst! ((vm, add_int64_nzc_v1) blk_entry_shift_z:
+        shift_z = BINOP (BinOp::Shl) flag_z int8_1_local
+    );
+
+    ssa! ((vm, add_int64_nzc_v1) <int1> ret);
+    inst! ((vm, add_int64_nzc_v1) blk_entry_add_ret1:
+        ret = BINOP (BinOp::Add) flag_n shift_z
+    );
+
+    ssa! ((vm, add_int64_nzc_v1) <int1> shift_c);
+    consta! ((vm, add_int64_nzc_v1) int8_2_local = int8_2);
+    inst! ((vm, add_int64_nzc_v1) blk_entry_shift_c:
+        shift_c = BINOP (BinOp::Shl) flag_c int8_2_local
+    );
+
+    ssa! ((vm, add_int64_nzc_v1) <int1> ret2);
+    inst! ((vm, add_int64_nzc_v1) blk_entry_add_ret2:
+        ret2 = BINOP (BinOp::Add) ret shift_c
+    );
+
+    inst! ((vm, add_int64_nzc_v1) blk_entry_ret:
+        RET (ret2)
+    );
+
+    define_block! ((vm, add_int64_nzc_v1) blk_entry(a, b) {
+        blk_entry_add, blk_entry_shift_z, blk_entry_add_ret1, blk_entry_shift_c, blk_entry_add_ret2, blk_entry_ret
+    });
+
+    define_func_ver!((vm) add_int64_nzc_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
\ No newline at end of file
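
Aside (not part of the patch): the #N/#Z/#C/#V status flags correspond to x86's SF/ZF/CF/OF after the arithmetic instruction, which is why the instruction selector lowers them to sets/setz/setb/seto on a byte register. As a quick cross-check of the values asserted in the tests above, here is a minimal standalone Rust sketch of the flag semantics for a 64-bit Add; the function name add_with_status is ours and this is a reference model only, not VM code:

use std::u64;

// Reference model: compute (sum, #N, #Z, #C, #V) for a 64-bit Add.
fn add_with_status(a: u64, b: u64) -> (u64, bool, bool, bool, bool) {
    let (sum, carry) = a.overflowing_add(b);                  // #C: unsigned overflow (x86 CF)
    let (_, overflow) = (a as i64).overflowing_add(b as i64); // #V: signed overflow (x86 OF)
    let negative = (sum as i64) < 0;                          // #N: sign bit of result (x86 SF)
    let zero = sum == 0;                                      // #Z: result is zero (x86 ZF)
    (sum, negative, zero, carry, overflow)
}

fn main() {
    // mirrors test_add_int64_c: u64::MAX + 1 carries, u64::MAX + 0 does not
    assert!(add_with_status(u64::MAX, 1).3);
    assert!(!add_with_status(u64::MAX, 0).3);
    // mirrors test_add_int64_z: 1 + (-1) wraps to zero
    assert!(add_with_status(1, (-1i64) as u64).2);
    println!("flag model agrees with the tests");
}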