GitLab will be upgraded on June 2nd 2020, from 2.00 pm (AEDT) to 3.00 pm (AEDT), to apply a critical security patch. During the update, the GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to the local GitLab admin team.

Commit db6a93a3 authored by qinsoon

add_u128 works

parent 8a86c453
...@@ -2439,6 +2439,17 @@ impl CodeGenerator for ASMCodeGen { ...@@ -2439,6 +2439,17 @@ impl CodeGenerator for ASMCodeGen {
self.internal_binop_def_r_mem("add", dest, src) self.internal_binop_def_r_mem("add", dest, src)
} }
// adc
/// Emit an `adc` (add-with-carry) with register destination and register
/// source, delegating to the generic register-register binop emitter.
fn emit_adc_r_r (&mut self, dest: Reg, src: Reg) {
    // mnemonic is the only thing specific to this wrapper
    let op = "adc";
    self.internal_binop_def_r_r(op, dest, src)
}
/// Emit an `adc` (add-with-carry) with register destination and memory
/// source, delegating to the generic register-memory binop emitter.
fn emit_adc_r_mem(&mut self, dest: Reg, src: Mem) {
    // mnemonic is the only thing specific to this wrapper
    let op = "adc";
    self.internal_binop_def_r_mem(op, dest, src)
}
/// Emit an `adc` (add-with-carry) with register destination and 32-bit
/// immediate source, delegating to the generic register-immediate emitter.
fn emit_adc_r_imm(&mut self, dest: Reg, src: i32) {
    // mnemonic is the only thing specific to this wrapper
    let op = "adc";
    self.internal_binop_def_r_imm(op, dest, src)
}
// sub // sub
fn emit_sub_r_imm(&mut self, dest: Reg, src: i32) { fn emit_sub_r_imm(&mut self, dest: Reg, src: i32) {
self.internal_binop_def_r_imm("sub", dest, src) self.internal_binop_def_r_imm("sub", dest, src)
......
...@@ -131,6 +131,11 @@ pub trait CodeGenerator { ...@@ -131,6 +131,11 @@ pub trait CodeGenerator {
fn emit_add_r_r (&mut self, dest: Reg, src: Reg); fn emit_add_r_r (&mut self, dest: Reg, src: Reg);
fn emit_add_r_mem(&mut self, dest: Reg, src: Mem); fn emit_add_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_add_r_imm(&mut self, dest: Reg, src: i32); fn emit_add_r_imm(&mut self, dest: Reg, src: i32);
// add with carry (adc): adds source, destination, and the CPU carry flag.
// Used after an `add` on the low word to propagate the carry into the high
// word of a multi-word addition (e.g. the 128-bit add in this commit).
fn emit_adc_r_r (&mut self, dest: Reg, src: Reg);
fn emit_adc_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_adc_r_imm(&mut self, dest: Reg, src: i32);
// sub // sub
fn emit_sub_r_r (&mut self, dest: Reg, src: Reg); fn emit_sub_r_r (&mut self, dest: Reg, src: Reg);
......
...@@ -1384,8 +1384,23 @@ impl <'a> InstructionSelection { ...@@ -1384,8 +1384,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &reg_op1); self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// add op2 res // add op2 res
self.backend.emit_add_r_r(&res_tmp, &reg_op2); self.backend.emit_add_r_r(&res_tmp, &reg_op2);
} else { } else if self.match_ireg_ex(&ops[op1]) && self.match_ireg_ex(&ops[op2]){
unimplemented!() trace!("emit add-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(&ops[op1], f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(&ops[op2], f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// add res_l op2_l -> res_l
self.backend.emit_add_r_r(&res_l, &op2_l);
// adc res_h op2_h -> res_h
self.backend.emit_adc_r_r(&res_h, &op2_h);
} }
}, },
op::BinOp::Sub => { op::BinOp::Sub => {
...@@ -3082,7 +3097,7 @@ impl <'a> InstructionSelection { ...@@ -3082,7 +3097,7 @@ impl <'a> InstructionSelection {
if gpr_ret_count + 1 < x86_64::RETURN_GPRs.len() { if gpr_ret_count + 1 < x86_64::RETURN_GPRs.len() {
let ret_gpr1 = x86_64::RETURN_GPRs[gpr_ret_count].clone(); let ret_gpr1 = x86_64::RETURN_GPRs[gpr_ret_count].clone();
let ret_gpr2 = x86_64::RETURN_GPRs[gpr_ret_count].clone(); let ret_gpr2 = x86_64::RETURN_GPRs[gpr_ret_count + 1].clone();
self.backend.emit_mov_r_r(&ret_gpr1, &ret_val1); self.backend.emit_mov_r_r(&ret_gpr1, &ret_val1);
self.backend.emit_mov_r_r(&ret_gpr2, &ret_val2); self.backend.emit_mov_r_r(&ret_gpr2, &ret_val2);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment