diff --git a/src/ast/src/inst.rs b/src/ast/src/inst.rs
index a39b2177255f99cb9cebac318446b85783afbf26..97d266f1cd0c0daa6f8474b9ae43b473e3455560 100644
--- a/src/ast/src/inst.rs
+++ b/src/ast/src/inst.rs
@@ -92,6 +92,8 @@ pub enum Instruction_ {
    // expressions
    BinOp(BinOp, OpIndex, OpIndex),
+    BinOpWithStatus(BinOp, BinOpStatus, OpIndex, OpIndex),
+
    CmpOp(CmpOp, OpIndex, OpIndex),
    ConvOp{
        operation: ConvOp,
@@ -282,6 +284,9 @@ impl Instruction_ {
    fn debug_str(&self, ops: &Vec<P<TreeNode>>) -> String {
match self {
&Instruction_::BinOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
+ &Instruction_::BinOpWithStatus(op, status, op1, op2) => {
+ format!("{:?} {:?} {} {}", op, status, ops[op1], ops[op2])
+ }
&Instruction_::CmpOp(op, op1, op2) => format!("{:?} {} {}", op, ops[op1], ops[op2]),
&Instruction_::ConvOp{operation, ref from_ty, ref to_ty, operand} => {
format!("{:?} {} {} {}", operation, from_ty, to_ty, ops[operand])
@@ -402,6 +407,50 @@ impl Instruction_ {
}
}
+#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
+pub struct BinOpStatus {
+ pub flag_n: bool,
+ pub flag_z: bool,
+ pub flag_c: bool,
+ pub flag_v: bool
+}
+
+impl BinOpStatus {
+ pub fn n() -> BinOpStatus {
+ BinOpStatus {flag_n: true, flag_z: false, flag_c: false, flag_v: false}
+ }
+
+ pub fn z() -> BinOpStatus {
+ BinOpStatus {flag_n: false, flag_z: true, flag_c: false, flag_v: false}
+ }
+
+ pub fn c() -> BinOpStatus {
+ BinOpStatus {flag_n: false, flag_z: false, flag_c: true, flag_v: false}
+ }
+
+ pub fn v() -> BinOpStatus {
+ BinOpStatus {flag_n: false, flag_z: false, flag_c: false, flag_v: true}
+ }
+}
+
+impl fmt::Debug for BinOpStatus {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if self.flag_n {
+ write!(f, "#N").unwrap();
+ }
+ if self.flag_z {
+ write!(f, "#Z").unwrap();
+ }
+ if self.flag_c {
+ write!(f, "#C").unwrap();
+ }
+ if self.flag_v {
+ write!(f, "#V").unwrap();
+ }
+ Ok(())
+ }
+}
+
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum MemoryOrder {
NotAtomic,
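
Note: BinOpStatus carries one boolean per condition flag a binary operation can expose — negative (N), zero (Z), carry/unsigned overflow (C), and signed overflow (V) — and its Debug impl prints a "#X" marker for each set flag, so a status requesting Z and V renders as "#Z#V". A standalone sketch of the same behavior (plain Rust, independent of this crate; note the patch itself calls .unwrap() on each write!, where propagating with ? would be the more idiomatic choice):

    use std::fmt;

    #[derive(Copy, Clone)]
    struct Status { n: bool, z: bool, c: bool, v: bool }

    impl fmt::Debug for Status {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            if self.n { write!(f, "#N")?; }
            if self.z { write!(f, "#Z")?; }
            if self.c { write!(f, "#C")?; }
            if self.v { write!(f, "#V")?; }
            Ok(())
        }
    }

    fn main() {
        let s = Status { n: false, z: true, c: false, v: true };
        assert_eq!(format!("{:?}", s), "#Z#V");
    }
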
diff --git a/src/ast/src/ir_semantics.rs b/src/ast/src/ir_semantics.rs
index d8dc921573a9c3e1f7421b5d02bd3cff84d3176c..a0967b6e252ada14cfd2f6835af6a7851018c140 100644
--- a/src/ast/src/ir_semantics.rs
+++ b/src/ast/src/ir_semantics.rs
@@ -3,7 +3,8 @@ use inst::Instruction_::*;
pub fn is_terminal_inst(inst: &Instruction_) -> bool {
match inst {
- &BinOp(_, _, _)
+ &BinOp(_, _, _)
+ | &BinOpWithStatus(_, _, _, _)
| &CmpOp(_, _, _)
| &ConvOp{..}
| &ExprCall{..}
@@ -56,6 +57,7 @@ pub fn is_non_terminal_inst(inst: &Instruction_) -> bool {
pub fn has_side_effect(inst: &Instruction_) -> bool {
match inst {
&BinOp(_, _, _) => false,
+ &BinOpWithStatus(_, _, _, _) => false,
&CmpOp(_, _, _) => false,
&ConvOp{..} => false,
&ExprCall{..} => true,
diff --git a/src/ast/src/op.rs b/src/ast/src/op.rs
index f7aeeb8a49b160efe5b4de90189ec7a585534dd0..bfce4d6cd15413134c460dfd1784d2723c465d0d 100644
--- a/src/ast/src/op.rs
+++ b/src/ast/src/op.rs
@@ -36,6 +36,7 @@ pub enum OpCode {
// expression
Binary(BinOp),
+ BinaryWithStatus(BinOp),
Comparison(CmpOp),
Conversion(ConvOp),
AtomicRMW(AtomicRMWOp),
@@ -253,6 +254,7 @@ pub fn is_int_cmp(op: CmpOp) -> bool {
pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
match inst.v {
Instruction_::BinOp(op, _, _) => OpCode::Binary(op),
+ Instruction_::BinOpWithStatus(op, _, _, _) => OpCode::BinaryWithStatus(op),
Instruction_::CmpOp(op, _, _) => OpCode::Comparison(op),
Instruction_::ConvOp{operation, ..} => OpCode::Conversion(operation),
Instruction_::AtomicRMW{op, ..} => OpCode::AtomicRMW(op),
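
Note: the plumbing above classifies BinOpWithStatus exactly like BinOp — non-terminal and free of side effects, which is sound because the requested flags become ordinary SSA result values rather than hidden machine state — while giving it a distinct OpCode so later passes can tell the two apart. A hypothetical consumer (the variant names mirror this patch, but the function itself is illustrative only):

    fn is_arith(op: OpCode) -> bool {
        match op {
            OpCode::Binary(_) | OpCode::BinaryWithStatus(_) => true,
            _ => false,
        }
    }
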
diff --git a/src/compiler/backend/arch/x86_64/asm_backend.rs b/src/compiler/backend/arch/x86_64/asm_backend.rs
index b2aa59bcfdd12eda934277cb857c753d77edd4bf..e67bd9411893b52f29eb1340fd0dd164997b7f2d 100644
--- a/src/compiler/backend/arch/x86_64/asm_backend.rs
+++ b/src/compiler/backend/arch/x86_64/asm_backend.rs
@@ -1891,6 +1891,75 @@ impl CodeGenerator for ASMCodeGen {
)
}
+ // set byte
+ fn emit_sets_r8(&mut self, dest: Reg) {
+ trace!("emit: sets {}", dest);
+
+ let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+ let asm = format!("sets {}", reg);
+
+ self.add_asm_inst(
+ asm,
+ linked_hashmap!{
+ id => vec![loc]
+ },
+ linked_hashmap!{},
+ false
+ )
+ }
+
+ fn emit_setz_r8(&mut self, dest: Reg) {
+ trace!("emit: setz {}", dest);
+
+ let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+ let asm = format!("setz {}", reg);
+
+ self.add_asm_inst(
+ asm,
+ linked_hashmap!{
+ id => vec![loc]
+ },
+ linked_hashmap!{},
+ false
+ )
+ }
+
+ fn emit_seto_r8(&mut self, dest: Reg) {
+ trace!("emit: seto {}", dest);
+
+ let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+ let asm = format!("seto {}", reg);
+
+ self.add_asm_inst(
+ asm,
+ linked_hashmap!{
+ id => vec![loc]
+ },
+ linked_hashmap!{},
+ false
+ )
+ }
+
+ fn emit_setb_r8(&mut self, dest: Reg) {
+ trace!("emit: setb {}", dest);
+
+ let (reg, id, loc) = self.prepare_reg(dest, 4 + 1);
+
+ let asm = format!("setb {}", reg);
+
+ self.add_asm_inst(
+ asm,
+ linked_hashmap!{
+ id => vec![loc]
+ },
+ linked_hashmap!{},
+ false
+ )
+ }
+
// cmov src -> dest
// binop op1, op2 (op2 is destination)
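
Note: the four emitters above wrap the x86-64 setcc family, which copies a single RFLAGS bit into an 8-bit register: sets reads SF, setz reads ZF, setb reads CF, and seto reads OF. The `4 + 1` passed to prepare_reg is presumably the column at which the operand begins in the generated string — each of the four mnemonics is 4 characters plus one space — so the register's use location can be recorded for later patching. A plain-Rust sketch (no backend types) of what each flag captures after an integer add:

    fn add_flags(a: i64, b: i64) -> (i64, bool, bool, bool, bool) {
        let (res, v) = a.overflowing_add(b);               // OF -> seto (#V)
        let (_, c) = (a as u64).overflowing_add(b as u64); // CF -> setb (#C)
        let n = res < 0;                                   // SF -> sets (#N)
        let z = res == 0;                                  // ZF -> setz (#Z)
        (res, n, z, c, v)
    }
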
diff --git a/src/compiler/backend/arch/x86_64/codegen.rs b/src/compiler/backend/arch/x86_64/codegen.rs
index 8e92548b63a2941027f74cc16e36c5eb676ba614..6bd49afe58b344db34ae3792ebc4e75c2c9c18d4 100644
--- a/src/compiler/backend/arch/x86_64/codegen.rs
+++ b/src/compiler/backend/arch/x86_64/codegen.rs
@@ -48,6 +48,12 @@ pub trait CodeGenerator {
fn emit_movs_r_r (&mut self, dest: Reg, src: Reg);
fn emit_movz_r_r (&mut self, dest: Reg, src: Reg);
+ // set byte
+ fn emit_sets_r8 (&mut self, dest: Reg);
+ fn emit_setz_r8 (&mut self, dest: Reg);
+ fn emit_seto_r8 (&mut self, dest: Reg);
+ fn emit_setb_r8 (&mut self, dest: Reg);
+
// gpr conditional move
fn emit_cmova_r_r (&mut self, dest: Reg, src: Reg);
diff --git a/src/compiler/backend/arch/x86_64/inst_sel.rs b/src/compiler/backend/arch/x86_64/inst_sel.rs
index 132143a6db7405a7f8e6a0650e44e60322063b9f..5606865f5180d012e18a30dd881057c82876b9a9 100644
--- a/src/compiler/backend/arch/x86_64/inst_sel.rs
+++ b/src/compiler/backend/arch/x86_64/inst_sel.rs
@@ -2,7 +2,7 @@ use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
-use ast::op::OpCode;
+use ast::op::*;
use ast::types;
use ast::types::*;
use vm::VM;
@@ -584,573 +584,55 @@ impl <'a> InstructionSelection {
Instruction_::BinOp(op, op1, op2) => {
trace!("instsel on BINOP");
- let ops = inst.ops.read().unwrap();
-
- let res_tmp = self.get_result_value(node);
-
- match op {
- op::BinOp::Add => {
- if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
- trace!("emit add-ireg-imm");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // add op2, res
- self.backend.emit_add_r_imm(&res_tmp, reg_op2);
- } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit add-ireg-mem");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_mem(&ops[op2], vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // add op2 res
- self.backend.emit_add_r_mem(&res_tmp, ®_op2);
- } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
- trace!("emit add-ireg-ireg");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // add op2 res
- self.backend.emit_add_r_r(&res_tmp, ®_op2);
- } else {
- unimplemented!()
- }
- },
- op::BinOp::Sub => {
- if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
- trace!("emit sub-ireg-imm");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // add op2, res
- self.backend.emit_sub_r_imm(&res_tmp, imm_op2);
- } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit sub-ireg-mem");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let mem_op2 = self.emit_mem(&ops[op2], vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // sub op2 res
- self.backend.emit_sub_r_mem(&res_tmp, &mem_op2);
- } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
- trace!("emit sub-ireg-ireg");
-
- let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, ®_op1);
- // add op2 res
- self.backend.emit_sub_r_r(&res_tmp, ®_op2);
- } else {
- unimplemented!()
- }
- },
- op::BinOp::And => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_ireg(op1) && self.match_iimm(op2) {
- trace!("emit and-ireg-iimm");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let imm_op2 = self.node_iimm_to_i32(op2);
-
- // mov op1 -> res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // and op2, res -> res
- self.backend.emit_and_r_imm(&res_tmp, imm_op2);
- } else if self.match_ireg(op1) && self.match_mem(op2) {
- trace!("emit and-ireg-mem");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let mem_op2 = self.emit_mem(op2, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // and op2, res -> res
- self.backend.emit_and_r_mem(&res_tmp, &mem_op2);
- } else if self.match_ireg(op1) && self.match_ireg(op2) {
- trace!("emit and-ireg-ireg");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // and op2, res -> res
- self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
- } else {
- unimplemented!()
- }
- },
- op::BinOp::Or => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_ireg(op1) && self.match_iimm(op2) {
- trace!("emit or-ireg-iimm");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let imm_op2 = self.node_iimm_to_i32(op2);
-
- // mov op1 -> res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // Or op2, res -> res
- self.backend.emit_or_r_imm(&res_tmp, imm_op2);
- } else if self.match_ireg(op1) && self.match_mem(op2) {
- trace!("emit or-ireg-mem");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let mem_op2 = self.emit_mem(op2, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // Or op2, res -> res
- self.backend.emit_or_r_mem(&res_tmp, &mem_op2);
- } else if self.match_ireg(op1) && self.match_ireg(op2) {
- trace!("emit or-ireg-ireg");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // Or op2, res -> res
- self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
- } else {
- unimplemented!()
- }
- },
- op::BinOp::Xor => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_ireg(op1) && self.match_iimm(op2) {
- trace!("emit xor-ireg-iimm");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let imm_op2 = self.node_iimm_to_i32(op2);
-
- // mov op1 -> res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // xor op2, res -> res
- self.backend.emit_xor_r_imm(&res_tmp, imm_op2);
- } else if self.match_ireg(op1) && self.match_mem(op2) {
- trace!("emit xor-ireg-mem");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let mem_op2 = self.emit_mem(op2, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // xor op2, res -> res
- self.backend.emit_xor_r_mem(&res_tmp, &mem_op2);
- } else if self.match_ireg(op1) && self.match_ireg(op2) {
- trace!("emit xor-ireg-ireg");
-
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op1, res
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- // xor op2, res -> res
- self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
- } else {
- unimplemented!()
- }
- }
- op::BinOp::Mul => {
- // mov op1 -> rax
- let op1 = &ops[op1];
-
- let mreg_op1 = match op1.clone_value().ty.get_int_length() {
- Some(64) => x86_64::RAX.clone(),
- Some(32) => x86_64::EAX.clone(),
- Some(16) => x86_64::AX.clone(),
- Some(8) => x86_64::AL.clone(),
- _ => unimplemented!()
- };
-
- if self.match_iimm(op1) {
- let imm_op1 = self.node_iimm_to_i32(op1);
-
- self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
- } else if self.match_mem(op1) {
- let mem_op1 = self.emit_mem(op1, vm);
-
- self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
- } else if self.match_ireg(op1) {
- let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-
- self.backend.emit_mov_r_r(&mreg_op1, ®_op1);
- } else {
- unimplemented!();
- }
-
- // mul op2
- let op2 = &ops[op2];
- if self.match_iimm(op2) {
- let imm_op2 = self.node_iimm_to_i32(op2);
-
- // put imm in a temporary
- // here we use result reg as temporary
- self.backend.emit_mov_r_imm(&res_tmp, imm_op2);
-
- self.backend.emit_mul_r(&res_tmp);
- } else if self.match_mem(op2) {
- let mem_op2 = self.emit_mem(op2, vm);
-
- self.backend.emit_mul_mem(&mem_op2);
- } else if self.match_ireg(op2) {
- let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- self.backend.emit_mul_r(®_op2);
- } else {
- unimplemented!();
- }
-
- // mov rax -> result
- match res_tmp.ty.get_int_length() {
- Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
- Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
- Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
- Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
- _ => unimplemented!()
- }
-
- },
- op::BinOp::Udiv => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- self.emit_udiv(op1, op2, f_content, f_context, vm);
-
- // mov rax -> result
- match res_tmp.ty.get_int_length() {
- Some(64) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
- }
- Some(32) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
- }
- Some(16) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
- }
- Some(8) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
- }
- _ => unimplemented!()
- }
- },
- op::BinOp::Sdiv => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- self.emit_idiv(op1, op2, f_content, f_context, vm);
-
- // mov rax -> result
- match res_tmp.ty.get_int_length() {
- Some(64) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
- }
- Some(32) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
- }
- Some(16) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
- }
- Some(8) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
- }
- _ => unimplemented!()
- }
- },
- op::BinOp::Urem => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- self.emit_udiv(op1, op2, f_content, f_context, vm);
-
- // mov rdx -> result
- match res_tmp.ty.get_int_length() {
- Some(64) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
- }
- Some(32) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
- }
- Some(16) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
- }
- Some(8) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
- }
- _ => unimplemented!()
- }
- },
- op::BinOp::Srem => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- self.emit_idiv(op1, op2, f_content, f_context, vm);
-
- // mov rdx -> result
- match res_tmp.ty.get_int_length() {
- Some(64) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
- }
- Some(32) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
- }
- Some(16) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
- }
- Some(8) => {
- self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
- }
- _ => unimplemented!()
- }
- },
-
- op::BinOp::Shl => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_mem(op1) {
- unimplemented!()
- } else if self.match_ireg(op1) {
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-
- if self.match_iimm(op2) {
- let imm_op2 = self.node_iimm_to_i32(op2) as i8;
-
- // shl op1, op2 -> op1
- self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else if self.match_ireg(op2) {
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op2 -> cl
- self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
-
- // shl op1, cl -> op1
- self.backend.emit_shl_r_cl(&tmp_op1);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else {
- panic!("unexpected op2 (not ireg not iimm): {}", op2);
- }
- } else {
- panic!("unexpected op1 (not ireg not mem): {}", op1);
- }
- },
- op::BinOp::Lshr => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_mem(op1) {
- unimplemented!()
- } else if self.match_ireg(op1) {
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-
- if self.match_iimm(op2) {
- let imm_op2 = self.node_iimm_to_i32(op2) as i8;
-
- // shr op1, op2 -> op1
- self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else if self.match_ireg(op2) {
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op2 -> cl
- self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
-
- // shr op1, cl -> op1
- self.backend.emit_shr_r_cl(&tmp_op1);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else {
- panic!("unexpected op2 (not ireg not iimm): {}", op2);
- }
- } else {
- panic!("unexpected op1 (not ireg not mem): {}", op1);
- }
- },
- op::BinOp::Ashr => {
- let op1 = &ops[op1];
- let op2 = &ops[op2];
-
- if self.match_mem(op1) {
- unimplemented!()
- } else if self.match_ireg(op1) {
- let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-
- if self.match_iimm(op2) {
- let imm_op2 = self.node_iimm_to_i32(op2) as i8;
-
- // sar op1, op2 -> op1
- self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else if self.match_ireg(op2) {
- let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
-
- // mov op2 -> cl
- self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
-
- // sar op1, cl -> op1
- self.backend.emit_sar_r_cl(&tmp_op1);
-
- // mov op1 -> result
- self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
- } else {
- panic!("unexpected op2 (not ireg not iimm): {}", op2);
- }
- } else {
- panic!("unexpected op1 (not ireg not mem): {}", op1);
- }
- },
-
-
- // floating point
- op::BinOp::FAdd => {
- if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit add-fpreg-mem");
-
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let mem_op2 = self.emit_mem(&ops[op2], vm);
-
- // mov op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // sub op2 res
- self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2);
- } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
- trace!("emit add-fpreg-fpreg");
-
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
-
- // movsd op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // add op2 res
- self.backend.emit_addsd_f64_f64(&res_tmp, ®_op2);
- } else {
- panic!("unexpected fadd: {}", node)
- }
- }
-
- op::BinOp::FSub => {
- if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit sub-fpreg-mem");
-
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let mem_op2 = self.emit_mem(&ops[op2], vm);
-
- // mov op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // sub op2 res
- self.backend.emit_subsd_f64_mem64(&res_tmp, &mem_op2);
- } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
- trace!("emit sub-fpreg-fpreg");
-
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
-
- // movsd op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // sub op2 res
- self.backend.emit_subsd_f64_f64(&res_tmp, ®_op2);
- } else {
- panic!("unexpected fsub: {}", node)
- }
- }
-
- op::BinOp::FMul => {
- if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit mul-fpreg-mem");
+ self.emit_binop(node, inst, op, op1, op2, f_content, f_context, vm);
+ },
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let mem_op2 = self.emit_mem(&ops[op2], vm);
+ Instruction_::BinOpWithStatus(op, status, op1, op2) => {
+ trace!("instsel on BINOP_STATUS");
- // mov op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // mul op2 res
- self.backend.emit_mulsd_f64_mem64(&res_tmp, &mem_op2);
- } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
- trace!("emit mul-fpreg-fpreg");
+ self.emit_binop(node, inst, op, op1, op2, f_content, f_context, vm);
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+ let values = inst.value.as_ref().unwrap();
+ // values[0] is the arithmetic result; requested flag results follow in N, Z, C, V order
+ let mut status_value_index = 1;
- // movsd op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // mul op2 res
- self.backend.emit_mulsd_f64_f64(&res_tmp, ®_op2);
- } else {
- panic!("unexpected fmul: {}", node)
- }
- }
+ // negative flag
+ if status.flag_n {
+ let tmp_status = values[status_value_index].clone();
+ status_value_index += 1;
- op::BinOp::FDiv => {
- if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
- trace!("emit div-fpreg-mem");
+ self.backend.emit_sets_r8(&tmp_status);
+ }
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let mem_op2 = self.emit_mem(&ops[op2], vm);
+ // zero flag
+ if status.flag_z {
+ let tmp_status = values[status_value_index].clone();
+ status_value_index += 1;
- // mov op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // div op2 res
- self.backend.emit_divsd_f64_mem64(&res_tmp, &mem_op2);
- } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
- trace!("emit div-fpreg-fpreg");
+ self.backend.emit_setz_r8(&tmp_status);
+ }
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+ // unsigned overflow
+ if status.flag_c {
+ let tmp_status = values[status_value_index].clone();
+ status_value_index += 1;
- // movsd op1, res
- self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
- // div op2 res
- self.backend.emit_divsd_f64_f64(&res_tmp, ®_op2);
- } else {
- panic!("unexpected fdiv: {}", node)
+ match op {
+ BinOp::Add | BinOp::Sub | BinOp::Mul => {
+ self.backend.emit_setb_r8(&tmp_status);
}
+ _ => panic!("Only Add/Sub/Mul has #C flag")
}
+ }
- op::BinOp::FRem => {
- if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
- trace!("emit frem-fpreg-fpreg");
-
- let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
- let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
-
- let reg_tmp = self.get_result_value(node);
+ // signed overflow
+ if status.flag_v {
+ let tmp_status = values[status_value_index].clone();
- self.emit_runtime_entry(&entrypoints::FREM, vec![reg_op1.clone(), reg_op2.clone()], Some(vec![reg_tmp.clone()]), Some(node), f_content, f_context, vm);
- } else {
- panic!("unexpected fdiv: {}", node)
+ match op {
+ BinOp::Add | BinOp::Sub | BinOp::Mul => {
+ self.backend.emit_seto_r8(&tmp_status);
}
+ _ => panic!("Only Add/Sub/Mul has #V flag")
}
}
}
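
Note: the handler above relies on two contracts. First, inst.value[0] holds the arithmetic result and the requested flag values follow in fixed N, Z, C, V order, which is why status_value_index starts at 1 and advances once per set flag. Second, the setcc instructions must observe the RFLAGS written by the arithmetic op emitted inside emit_binop; this survives the register moves that emit_binop appends (e.g. mov rax -> result after mul), because x86-64 mov does not modify RFLAGS. A small sketch of the slot assignment, mirroring the index walk above:

    fn status_slots(n: bool, z: bool, c: bool, v: bool) -> Vec<(&'static str, usize)> {
        let mut slots = Vec::new();
        let mut idx = 1; // slot 0 is the arithmetic result
        for (name, on) in [("N", n), ("Z", z), ("C", c), ("V", v)] {
            if on {
                slots.push((name, idx));
                idx += 1;
            }
        }
        slots
    }

    fn main() {
        // Add with #Z and #V requested: Z lands in values[1], V in values[2]
        assert_eq!(status_slots(false, true, false, true), vec![("Z", 1), ("V", 2)]);
    }
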
@@ -1719,6 +1201,578 @@ impl <'a> InstructionSelection {
})
}
+ fn emit_binop (&mut self, node: &TreeNode, inst: &Instruction, op: BinOp, op1: OpIndex, op2: OpIndex, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
+ let ops = inst.ops.read().unwrap();
+
+ let res_tmp = self.get_result_value(node);
+
+ match op {
+ op::BinOp::Add => {
+ if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
+ trace!("emit add-ireg-imm");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // add op2, res
+ self.backend.emit_add_r_imm(&res_tmp, reg_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit add-ireg-mem");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // add op2 res
+ self.backend.emit_add_r_mem(&res_tmp, ®_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit add-ireg-ireg");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // add op2 res
+ self.backend.emit_add_r_r(&res_tmp, ®_op2);
+ } else {
+ unimplemented!()
+ }
+ },
+ op::BinOp::Sub => {
+ if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
+ trace!("emit sub-ireg-imm");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // sub op2, res
+ self.backend.emit_sub_r_imm(&res_tmp, imm_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit sub-ireg-mem");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // sub op2 res
+ self.backend.emit_sub_r_mem(&res_tmp, &mem_op2);
+ } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
+ trace!("emit sub-ireg-ireg");
+
+ let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, ®_op1);
+ // sub op2, res
+ self.backend.emit_sub_r_r(&res_tmp, ®_op2);
+ } else {
+ unimplemented!()
+ }
+ },
+ op::BinOp::And => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_ireg(op1) && self.match_iimm(op2) {
+ trace!("emit and-ireg-iimm");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let imm_op2 = self.node_iimm_to_i32(op2);
+
+ // mov op1 -> res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // and op2, res -> res
+ self.backend.emit_and_r_imm(&res_tmp, imm_op2);
+ } else if self.match_ireg(op1) && self.match_mem(op2) {
+ trace!("emit and-ireg-mem");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(op2, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // and op2, res -> res
+ self.backend.emit_and_r_mem(&res_tmp, &mem_op2);
+ } else if self.match_ireg(op1) && self.match_ireg(op2) {
+ trace!("emit and-ireg-ireg");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // and op2, res -> res
+ self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
+ } else {
+ unimplemented!()
+ }
+ },
+ op::BinOp::Or => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_ireg(op1) && self.match_iimm(op2) {
+ trace!("emit or-ireg-iimm");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let imm_op2 = self.node_iimm_to_i32(op2);
+
+ // mov op1 -> res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // Or op2, res -> res
+ self.backend.emit_or_r_imm(&res_tmp, imm_op2);
+ } else if self.match_ireg(op1) && self.match_mem(op2) {
+ trace!("emit or-ireg-mem");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(op2, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // Or op2, res -> res
+ self.backend.emit_or_r_mem(&res_tmp, &mem_op2);
+ } else if self.match_ireg(op1) && self.match_ireg(op2) {
+ trace!("emit or-ireg-ireg");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // Or op2, res -> res
+ self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
+ } else {
+ unimplemented!()
+ }
+ },
+ op::BinOp::Xor => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_ireg(op1) && self.match_iimm(op2) {
+ trace!("emit xor-ireg-iimm");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let imm_op2 = self.node_iimm_to_i32(op2);
+
+ // mov op1 -> res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // xor op2, res -> res
+ self.backend.emit_xor_r_imm(&res_tmp, imm_op2);
+ } else if self.match_ireg(op1) && self.match_mem(op2) {
+ trace!("emit xor-ireg-mem");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(op2, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // xor op2, res -> res
+ self.backend.emit_xor_r_mem(&res_tmp, &mem_op2);
+ } else if self.match_ireg(op1) && self.match_ireg(op2) {
+ trace!("emit xor-ireg-ireg");
+
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op1, res
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ // xor op2, res -> res
+ self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
+ } else {
+ unimplemented!()
+ }
+ }
+ op::BinOp::Mul => {
+ // mov op1 -> rax
+ let op1 = &ops[op1];
+
+ let mreg_op1 = match op1.clone_value().ty.get_int_length() {
+ Some(64) => x86_64::RAX.clone(),
+ Some(32) => x86_64::EAX.clone(),
+ Some(16) => x86_64::AX.clone(),
+ Some(8) => x86_64::AL.clone(),
+ _ => unimplemented!()
+ };
+
+ if self.match_iimm(op1) {
+ let imm_op1 = self.node_iimm_to_i32(op1);
+
+ self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
+ } else if self.match_mem(op1) {
+ let mem_op1 = self.emit_mem(op1, vm);
+
+ self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
+ } else if self.match_ireg(op1) {
+ let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+
+ self.backend.emit_mov_r_r(&mreg_op1, ®_op1);
+ } else {
+ unimplemented!();
+ }
+
+ // mul op2
+ let op2 = &ops[op2];
+ if self.match_iimm(op2) {
+ let imm_op2 = self.node_iimm_to_i32(op2);
+
+ // put imm in a temporary
+ // here we use result reg as temporary
+ self.backend.emit_mov_r_imm(&res_tmp, imm_op2);
+
+ self.backend.emit_mul_r(&res_tmp);
+ } else if self.match_mem(op2) {
+ let mem_op2 = self.emit_mem(op2, vm);
+
+ self.backend.emit_mul_mem(&mem_op2);
+ } else if self.match_ireg(op2) {
+ let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ self.backend.emit_mul_r(®_op2);
+ } else {
+ unimplemented!();
+ }
+
+ // mov rax -> result
+ match res_tmp.ty.get_int_length() {
+ Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
+ Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
+ Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
+ Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
+ _ => unimplemented!()
+ }
+
+ },
+ op::BinOp::Udiv => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ self.emit_udiv(op1, op2, f_content, f_context, vm);
+
+ // mov rax -> result
+ match res_tmp.ty.get_int_length() {
+ Some(64) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+ }
+ Some(32) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
+ }
+ Some(16) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
+ }
+ Some(8) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
+ }
+ _ => unimplemented!()
+ }
+ },
+ op::BinOp::Sdiv => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ self.emit_idiv(op1, op2, f_content, f_context, vm);
+
+ // mov rax -> result
+ match res_tmp.ty.get_int_length() {
+ Some(64) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+ }
+ Some(32) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
+ }
+ Some(16) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
+ }
+ Some(8) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
+ }
+ _ => unimplemented!()
+ }
+ },
+ op::BinOp::Urem => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ self.emit_udiv(op1, op2, f_content, f_context, vm);
+
+ // mov rdx -> result
+ match res_tmp.ty.get_int_length() {
+ Some(64) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+ }
+ Some(32) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
+ }
+ Some(16) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
+ }
+ Some(8) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
+ }
+ _ => unimplemented!()
+ }
+ },
+ op::BinOp::Srem => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ self.emit_idiv(op1, op2, f_content, f_context, vm);
+
+ // mov rdx -> result
+ match res_tmp.ty.get_int_length() {
+ Some(64) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+ }
+ Some(32) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
+ }
+ Some(16) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
+ }
+ Some(8) => {
+ self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
+ }
+ _ => unimplemented!()
+ }
+ },
+
+ op::BinOp::Shl => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_mem(op1) {
+ unimplemented!()
+ } else if self.match_ireg(op1) {
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+
+ if self.match_iimm(op2) {
+ let imm_op2 = self.node_iimm_to_i32(op2) as i8;
+
+ // shl op1, op2 -> op1
+ self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else if self.match_ireg(op2) {
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op2 -> cl
+ self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
+
+ // shl op1, cl -> op1
+ self.backend.emit_shl_r_cl(&tmp_op1);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else {
+ panic!("unexpected op2 (not ireg not iimm): {}", op2);
+ }
+ } else {
+ panic!("unexpected op1 (not ireg not mem): {}", op1);
+ }
+ },
+ op::BinOp::Lshr => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_mem(op1) {
+ unimplemented!()
+ } else if self.match_ireg(op1) {
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+
+ if self.match_iimm(op2) {
+ let imm_op2 = self.node_iimm_to_i32(op2) as i8;
+
+ // shr op1, op2 -> op1
+ self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else if self.match_ireg(op2) {
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op2 -> cl
+ self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
+
+ // shr op1, cl -> op1
+ self.backend.emit_shr_r_cl(&tmp_op1);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else {
+ panic!("unexpected op2 (not ireg not iimm): {}", op2);
+ }
+ } else {
+ panic!("unexpected op1 (not ireg not mem): {}", op1);
+ }
+ },
+ op::BinOp::Ashr => {
+ let op1 = &ops[op1];
+ let op2 = &ops[op2];
+
+ if self.match_mem(op1) {
+ unimplemented!()
+ } else if self.match_ireg(op1) {
+ let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
+
+ if self.match_iimm(op2) {
+ let imm_op2 = self.node_iimm_to_i32(op2) as i8;
+
+ // sar op1, op2 -> op1
+ self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else if self.match_ireg(op2) {
+ let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+
+ // mov op2 -> cl
+ self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
+
+ // sar op1, cl -> op1
+ self.backend.emit_sar_r_cl(&tmp_op1);
+
+ // mov op1 -> result
+ self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
+ } else {
+ panic!("unexpected op2 (not ireg not iimm): {}", op2);
+ }
+ } else {
+ panic!("unexpected op1 (not ireg not mem): {}", op1);
+ }
+ },
+
+
+ // floating point
+ op::BinOp::FAdd => {
+ if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit add-fpreg-mem");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // add op2, res
+ self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2);
+ } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+ trace!("emit add-fpreg-fpreg");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+ // movsd op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // add op2 res
+ self.backend.emit_addsd_f64_f64(&res_tmp, ®_op2);
+ } else {
+ panic!("unexpected fadd: {}", node)
+ }
+ }
+
+ op::BinOp::FSub => {
+ if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit sub-fpreg-mem");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // sub op2 res
+ self.backend.emit_subsd_f64_mem64(&res_tmp, &mem_op2);
+ } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+ trace!("emit sub-fpreg-fpreg");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+ // movsd op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // sub op2 res
+ self.backend.emit_subsd_f64_f64(&res_tmp, ®_op2);
+ } else {
+ panic!("unexpected fsub: {}", node)
+ }
+ }
+
+ op::BinOp::FMul => {
+ if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit mul-fpreg-mem");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // mul op2 res
+ self.backend.emit_mulsd_f64_mem64(&res_tmp, &mem_op2);
+ } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+ trace!("emit mul-fpreg-fpreg");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+ // movsd op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // mul op2 res
+ self.backend.emit_mulsd_f64_f64(&res_tmp, ®_op2);
+ } else {
+ panic!("unexpected fmul: {}", node)
+ }
+ }
+
+ op::BinOp::FDiv => {
+ if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
+ trace!("emit div-fpreg-mem");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let mem_op2 = self.emit_mem(&ops[op2], vm);
+
+ // mov op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // div op2 res
+ self.backend.emit_divsd_f64_mem64(&res_tmp, &mem_op2);
+ } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+ trace!("emit div-fpreg-fpreg");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+ // movsd op1, res
+ self.backend.emit_movsd_f64_f64(&res_tmp, ®_op1);
+ // div op2 res
+ self.backend.emit_divsd_f64_f64(&res_tmp, ®_op2);
+ } else {
+ panic!("unexpected fdiv: {}", node)
+ }
+ }
+
+ op::BinOp::FRem => {
+ if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
+ trace!("emit frem-fpreg-fpreg");
+
+ let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
+ let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);
+
+ let reg_tmp = self.get_result_value(node);
+
+ self.emit_runtime_entry(&entrypoints::FREM, vec![reg_op1.clone(), reg_op2.clone()], Some(vec![reg_tmp.clone()]), Some(node), f_content, f_context, vm);
+ } else {
+ panic!("unexpected fdiv: {}", node)
+ }
+ }
+ }
+ }
+
fn emit_alloc_sequence (&mut self, tmp_allocator: P
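
Note: emit_binop is a verbatim extraction of the old BinOp arm so that BinOp and BinOpWithStatus share one lowering path. Every integer case follows the same two-address shape — x86-64 ALU instructions overwrite their destination, so op1 is first copied into the result register and op2 is then applied in place, leaving RFLAGS freshly set for any setcc the caller emits next. A minimal stand-in (hypothetical Emit trait, not the real CodeGenerator) showing that shape:

    trait Emit {
        fn mov_r_r(&mut self, dest: &str, src: &str);
        fn add_r_r(&mut self, dest: &str, src: &str);
    }

    fn lower_add<E: Emit>(e: &mut E, res: &str, op1: &str, op2: &str) {
        e.mov_r_r(res, op1); // res := op1 (mov leaves RFLAGS untouched)
        e.add_r_r(res, op2); // res := res + op2, RFLAGS updated
    }
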