
Commit fa95a9b4 authored by qinsoon

add pattern for const operands

parent 42e7c338
@@ -1608,8 +1608,8 @@ impl ASMCodeGen {
self.cur.take().unwrap()
}
/// emits an instruction (use 1 reg, define none)
fn internal_uniop_def_r(&mut self, inst: &str, op: &P<Value>) {
/// emits an instruction (use 0 regs, define 1 reg)
fn internal_uniop_def_nouse_r(&mut self, inst: &str, op: &P<Value>) {
trace!("emit: {} {}", inst, op);
let (reg, id, loc) = self.prepare_reg(op, inst.len() + 1);
@@ -1626,6 +1626,26 @@ impl ASMCodeGen {
)
}
/// emits an instruction (use 1 reg, define 1 reg)
fn internal_uniop_def_r(&mut self, inst: &str, op: &P<Value>) {
trace!("emit: {} {}", inst, op);
let (reg, id, loc) = self.prepare_reg(op, inst.len() + 1);
let asm = format!("{} {}", inst, reg);
self.add_asm_inst(
asm,
linked_hashmap!{
id => vec![loc.clone()]
},
linked_hashmap!{
id => vec![loc]
},
false
)
}
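// The two helpers above differ only in the def/use information they record
// for the register allocator: internal_uniop_def_nouse_r marks the operand
// as defined but not used (setcc overwrites its destination unconditionally,
// so the old value is dead), while internal_uniop_def_r marks it as both
// used and defined (inc/dec read the old value before writing). Roughly:
//
//     setz %al    ->  defines {al},  uses {}
//     inc  %rax   ->  defines {rax}, uses {rax}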
/// emits an instruction (use 2 regs, define none)
fn internal_binop_no_def_r_r(&mut self, inst: &str, op1: &P<Value>, op2: &P<Value>) {
let len = check_op_len(op1);
@@ -2608,46 +2628,46 @@ impl CodeGenerator for ASMCodeGen {
// set byte
fn emit_sets_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("sets", dest)
self.internal_uniop_def_nouse_r("sets", dest)
}
fn emit_setz_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("setz", dest)
self.internal_uniop_def_nouse_r("setz", dest)
}
fn emit_seto_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("seto", dest)
self.internal_uniop_def_nouse_r("seto", dest)
}
fn emit_setb_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("setb", dest)
self.internal_uniop_def_nouse_r("setb", dest)
}
fn emit_seta_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("seta", dest)
self.internal_uniop_def_nouse_r("seta", dest)
}
fn emit_setae_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setae", dest)
self.internal_uniop_def_nouse_r("setae", dest)
}
fn emit_setb_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setb", dest)
self.internal_uniop_def_nouse_r("setb", dest)
}
fn emit_setbe_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setbe", dest)
self.internal_uniop_def_nouse_r("setbe", dest)
}
fn emit_sete_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("sete", dest)
self.internal_uniop_def_nouse_r("sete", dest)
}
fn emit_setg_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setg", dest)
self.internal_uniop_def_nouse_r("setg", dest)
}
fn emit_setge_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setge", dest)
self.internal_uniop_def_nouse_r("setge", dest)
}
fn emit_setl_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setl", dest)
self.internal_uniop_def_nouse_r("setl", dest)
}
fn emit_setle_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setle", dest)
self.internal_uniop_def_nouse_r("setle", dest)
}
fn emit_setne_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setne", dest)
self.internal_uniop_def_nouse_r("setne", dest)
}
// cmov src -> dest
@@ -2824,6 +2844,20 @@ impl CodeGenerator for ASMCodeGen {
self.internal_binop_def_r_imm("sbb", dest, src)
}
// inc and dec
fn emit_inc_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("inc", dest)
}
fn emit_inc_mem(&mut self, dest: Mem) {
unimplemented!()
}
fn emit_dec_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("dec", dest)
}
fn emit_dec_mem(&mut self, dest: Mem) {
unimplemented!()
}
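// These inc/dec emitters back the new add/sub-by-one patterns in instruction
// selection (see emit_binop below). One x86 caveat worth noting: unlike
// `add $1, r` / `sub $1, r`, `inc` and `dec` leave CF unchanged, which only
// matters if a later instruction were to consume the carry flag from this op.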
fn emit_mul_r(&mut self, src: &P<Value>) {
let len = check_op_len(src);
@@ -3526,6 +3560,14 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_no_def_r_r("ucomiss", op1, op2);
}
// bitwise - float
fn emit_xorps_f32_f32(&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("xorps", &dest, &src)
}
fn emit_xorpd_f64_f64(&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("xorpd", &dest, &src)
}
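// xorps/xorpd XOR whole XMM registers. With src == dest they are the
// idiomatic, dependency-breaking way to zero an FP register; XORing against
// a sign-bit mask gives FP negation. A hypothetical call (names assumed)
// that zeroes an f64 temporary:
//
//     self.backend.emit_xorpd_f64_f64(&tmp, &tmp);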
// add - double
fn emit_addsd_f64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
......
@@ -178,6 +178,12 @@ pub trait CodeGenerator {
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32);
// inc and dec
fn emit_inc_r(&mut self, dest: Reg);
fn emit_inc_mem(&mut self, dest: Mem);
fn emit_dec_r(&mut self, dest: Reg);
fn emit_dec_mem(&mut self, dest: Mem);
// multiply
fn emit_mul_r(&mut self, src: Reg);
fn emit_mul_mem(&mut self, src: Mem);
@@ -320,6 +326,10 @@ pub trait CodeGenerator {
fn emit_comiss_f32_f32(&mut self, op1: Reg, op2: Reg);
fn emit_ucomiss_f32_f32(&mut self, op1: Reg, op2: Reg);
// fp bitwise
fn emit_xorps_f32_f32(&mut self, dest: Reg, src: Reg);
fn emit_xorpd_f64_f64(&mut self, dest: Reg, src: Reg);
// fp conversion
fn emit_cvtsi2sd_f64_r(&mut self, dest: Reg, src: Reg);
fn emit_cvtsd2si_r_f64(&mut self, dest: Reg, src: Reg);
@@ -332,7 +342,6 @@ pub trait CodeGenerator {
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg);
// used for unsigned int to fp conversion
fn emit_cvttsd2si_r_f64(&mut self, dest: Reg, src: Reg);
fn emit_cvttss2si_r_f32(&mut self, dest: Reg, src: Reg);
......
@@ -2384,8 +2384,52 @@ impl<'a> InstructionSelection {
})
}
/// emits code for binary operations (no status flags)
/// emits code for binary operations
fn emit_binop(
&mut self,
node: &TreeNode,
inst: &Instruction,
op: BinOp,
mut op1: OpIndex,
mut op2: OpIndex,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM
) {
let ref ops = inst.ops;
{
// symmetric operators: make sure that if either operand is to be treated
// specially, it ends up as op2. Check op1; if it is special, swap the
// operands.
let ref node_op1 = ops[op1];
let mut swap_operands = || {
let t = op1;
op1 = op2;
op2 = t;
};
match op {
op::BinOp::Add | op::BinOp::And | op::BinOp::Or | op::BinOp::Xor |
op::BinOp::Mul => {
if self.match_iconst_zero(node_op1) || self.match_iconst_one(node_op1) ||
self.match_iimm(node_op1) ||
self.match_mem(node_op1)
{
swap_operands();
}
}
_ => {}
}
}
self.emit_binop_internal(node, inst, op, op1, op2, f_content, f_context, vm)
}
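// A standalone sketch of the canonicalization above (free-function form and
// parameter names assumed, not part of this crate): for commutative
// operators, rotate a "special" operand (constant, immediate, or memory)
// into the second position so emit_binop_internal only pattern-matches op2.
//
//     fn canonicalize(op: op::BinOp, op1: &mut OpIndex, op2: &mut OpIndex,
//                     op1_is_special: bool) {
//         use op::BinOp::*;
//         match op {
//             Add | And | Or | Xor | Mul if op1_is_special => {
//                 std::mem::swap(op1, op2)
//             }
//             _ => {}
//         }
//     }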
/// emits code for binary operations with the assumption that op2 may be special
fn emit_binop_internal(
&mut self,
node: &TreeNode,
inst: &Instruction,
@@ -2399,44 +2443,64 @@ impl<'a> InstructionSelection {
let ref ops = inst.ops;
let res_tmp = self.get_result_value(node);
let ref op1 = ops[op1];
let ref op2 = ops[op2];
match op {
op::BinOp::Add => {
if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
if self.match_ireg(op1) && self.match_iconst_zero(op2) {
// add zero is nop
trace!("emit add-ireg-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg(op1) && self.match_iconst_one(op2) {
// add one is increment
trace!("emit add-ireg-1");
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
self.backend.emit_inc_r(&res_tmp);
} else if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit add-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let reg_op2 = self.node_iimm_to_i32(op2);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2, res
self.backend.emit_add_r_imm(&res_tmp, reg_op2);
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit add-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_mem(&ops[op2], vm);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let reg_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2, res
self.backend.emit_add_r_mem(&res_tmp, &reg_op2);
} else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit add-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2, res
self.backend.emit_add_r_r(&res_tmp, &reg_op2);
} else if self.match_ireg_ex(&ops[op1]) && self.match_ireg_ex(&ops[op2]) {
} else if self.match_ireg_ex(op1) && self.match_iconst_zero(op2) {
// add zero is nop
trace!("emit add-iregex-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit add-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(&ops[op1], f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(&ops[op2], f_content, f_context, vm);
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
......@@ -2454,41 +2518,59 @@ impl<'a> InstructionSelection {
}
}
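// Summary of the new constant patterns in the Add arm (sketch of the
// intended lowering, operand order as in the comments above):
//     x + 0  ->  mov x, res                 (the add is elided entirely)
//     x + 1  ->  mov x, res ; inc res
//     x + c  ->  mov x, res ; add $c, res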
op::BinOp::Sub => {
if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
if self.match_ireg(op1) && self.match_iconst_zero(op2) {
// sub zero is nop
trace!("emit sub-ireg-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg(op1) && self.match_iconst_one(op2) {
// sub one is decrement
trace!("emit sub-ireg-1");
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
self.backend.emit_dec_r(&res_tmp);
} else if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit sub-ireg-imm");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2, res
self.backend.emit_sub_r_imm(&res_tmp, imm_op2);
} else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit sub-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2], vm);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let mem_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2, res
self.backend.emit_sub_r_mem(&res_tmp, &mem_op2);
} else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit sub-ireg-ireg");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2, res
self.backend.emit_sub_r_r(&res_tmp, &reg_op2);
} else if self.match_ireg_ex(&ops[op1]) && self.match_ireg_ex(&ops[op2]) {
} else if self.match_ireg_ex(op1) && self.match_iconst_zero(op2) {
// sub zero is nop
trace!("emit sub-iregex-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit sub-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(&ops[op1], f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(&ops[op2], f_content, f_context, vm);
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
@@ -2506,10 +2588,12 @@ impl<'a> InstructionSelection {
}
}
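// The Sub arm mirrors Add (x - 0 elides the sub, x - 1 becomes dec), but
// subtraction is not commutative, so these patterns only fire when the
// constant is already op2: the operand swap in emit_binop deliberately
// leaves Sub out of its match.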
op::BinOp::And => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iconst_zero(op2) {
// and with zero sets the result to zero
trace!("emit and-ireg-0");
if self.match_ireg(op1) && self.match_iimm(op2) {
self.emit_clear_value(&res_tmp, f_context, vm);
} else if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit and-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
@@ -2539,6 +2623,11 @@ impl<'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res
self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_iconst_zero(op2) {
// and with zero sets the result to zero
trace!("emit and-iregex-0");
self.emit_clear_value(&res_tmp, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit and-iregex-iregex");
@@ -2561,9 +2650,12 @@ impl<'a> InstructionSelection {
}
}
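// Since x & 0 == 0, both the ireg and iregex zero cases skip emitting the
// operands and simply clear the result; emit_clear_value presumably lowers
// to the usual zeroing idiom (e.g. xor res, res or mov $0, res).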
op::BinOp::Or => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iconst_zero(op2) {
// or zero is nop
trace!("emit or-ireg-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit or-ireg-iimm");
@@ -2594,6 +2686,10 @@ impl<'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_iconst_zero(op2) {
// or zero is nop
trace!("emit or-iregex-0");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit or-iregex-iregex");
@@ -2616,9 +2712,6 @@ impl<'a> InstructionSelection {
}
}
op::BinOp::Xor => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit xor-ireg-iimm");
@@ -2671,109 +2764,121 @@ impl<'a> InstructionSelection {
}
}
op::BinOp::Mul => {
// mov op1 -> rax
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
trace!("emit mul");
// we need to emit both operands first, then move one into RAX
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// move op1 -> RAX
let mreg_op1 = match op_size {
8 => x86_64::RAX.clone(),
4 => x86_64::EAX.clone(),
2 => x86_64::AX.clone(),
1 => x86_64::AL.clone(),
_ => unimplemented!()
};
self.backend.emit_mov_r_r(&mreg_op1, &tmp_op1);
// special cases
if self.match_ireg(op1) && self.match_iconst_zero(op2) {
// MUL with zero is zero
trace!("emit mul-ireg-0");
self.emit_clear_value(&res_tmp, f_context, vm);
} else if self.match_ireg(op1) && self.match_iconst_one(op2) {
// MUL with one is the original value
trace!("emit mul-ireg-1");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_iconst_zero(op2) {
// MUL with zero is zero
trace!("emit mul-iregex-0");
self.emit_clear_value(&res_tmp, f_context, vm);
} else if self.match_ireg_ex(op1) && self.match_iconst_one(op2) {
// MUL with one is the original value
trace!("emit mul-iregex-1");
self.emit_move_node_to_value(&res_tmp, op1, f_content, f_context, vm);
} else {
// mov op1 -> rax
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
trace!("emit mul");
// we need to emit both operands first, then move one into RAX
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// move op1 -> RAX
let mreg_op1 = match op_size {
8 => x86_64::RAX.clone(),
4 => x86_64::EAX.clone(),
2 => x86_64::AX.clone(),
1 => x86_64::AL.clone(),
_ => unimplemented!()
};
self.backend.emit_mov_r_r(&mreg_op1, &tmp_op1);
// mul op2
self.backend.emit_mul_r(&tmp_op2);
// mul op2
self.backend.emit_mul_r(&tmp_op2);
// mov rax -> result
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
assert!(
res_size == op_size,
"op and res do not have matching type: {}",
node
);
// mov rax -> result
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
assert!(
res_size == op_size,
"op and res do not have matching type: {}",
node
);
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
2 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
1 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
_ => unimplemented!()
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
2 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
1 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
_ => unimplemented!()
}
}
}
16 => {
if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit mul128");
// (hi, lo)
// a b
// x c d
// ------------
// ad bd
// bc
// ------------
// t1 t2
// (hi, lo)
let (b, a) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (d, c) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// mov a -> t1
let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t1, &a);
// imul d, t1 -> t1
self.backend.emit_imul_r_r(&t1, &d);
// mul d, b -> (RDX:RAX) as (carry:t2)
self.backend.emit_mov_r_r(&x86_64::RAX, &d);
self.backend.emit_mul_r(&b);
let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t2, &x86_64::RAX);
// add t1, carry -> t1
self.backend.emit_add_r_r(&t1, &x86_64::RDX);
// mov c -> tt
let tt = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&tt, &c);
// imul b, tt -> tt
self.backend.emit_imul_r_r(&tt, &b);
// add t1, tt -> t1
self.backend.emit_add_r_r(&t1, &tt);
// result: t1(higher), t2(lower)
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &t2);
self.backend.emit_mov_r_r(&res_h, &t1);
} else {
panic!("unexpected op for node {:?}, expect int128 MUL", node)
16 => {
if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit mul128");
// (hi, lo)
// a b
// x c d
// ------------
// ad bd
// bc
// ------------
// t1 t2
// (hi, lo)
let (b, a) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (d, c) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// mov a -> t1
let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t1, &a);
// imul d, t1 -> t1
self.backend.emit_imul_r_r(&t1, &d);
// mul d, b -> (RDX:RAX) as (carry:t2)
self.backend.emit_mov_r_r(&x86_64::RAX, &d);
self.backend.emit_mul_r(&b);
let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t2, &x86_64::RAX);
// add t1, carry -> t1
self.backend.emit_add_r_r(&t1, &x86_64::RDX);
// mov c -> tt
let tt = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&tt, &c);
// imul b, tt -> tt
self.backend.emit_imul_r_r(&tt, &b);
// add t1, tt -> t1
self.backend.emit_add_r_r(&t1, &tt);
// result: t1(higher), t2(lower)
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &t2);
self.backend.emit_mov_r_r(&res_h, &t1);
} else {
panic!("unexpected op for node {:?}, expect int128 MUL", node)
}
}
_ => panic!("unsupported int size: {}", op_size)
}
_ => panic!("unsupported int size: {}", op_size)
}
}
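// The 128-bit path above is schoolbook multiplication on 64-bit halves:
// with op1 = a*2^64 + b and op2 = c*2^64 + d, the product mod 2^128 is
//     lo = lo64(b*d)
//     hi = a*d + b*c + hi64(b*d)     (a*c overflows 128 bits and is dropped)
// A hypothetical u128-based cross-check of the same decomposition:
//
//     fn mul128(a: u64, b: u64, c: u64, d: u64) -> (u64, u64) {
//         let bd = (b as u128) * (d as u128);
//         let t2 = bd as u64;                       // low half -> res_l
//         let t1 = a.wrapping_mul(d)                // imul d, t1
//             .wrapping_add(b.wrapping_mul(c))      // imul b, tt ; add tt, t1
//             .wrapping_add((bd >> 64) as u64);     // add carry (RDX), t1
//         (t1, t2)                                  // (hi, lo) -> (res_h, res_l)
//     }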
op::BinOp::Udiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
@@ -2812,9 +2917,6 @@ impl<'a> InstructionSelection {
}
}
op::BinOp::Sdiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
@@ -2849,9 +2951,6 @@ impl<'a> InstructionSelection {
}
}
op::BinOp::Urem => {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
@@ -2886,9 +2985,6 @@ impl<'a> InstructionSelection {
}
}
op::BinOp::Srem => {
let op1 = &ops[op1];
let op2 = &ops[op2];