To protect your data, the CISO has suggested that users enable GitLab 2FA as soon as possible.

Commit a7654e7e authored by qinsoon's avatar qinsoon
Browse files

[wip] more int128 test. dylib for testing includes libmu

parent db6a93a3
...@@ -34,3 +34,4 @@ time = "0.1.34" ...@@ -34,3 +34,4 @@ time = "0.1.34"
maplit = "0.1.4" maplit = "0.1.4"
docopt = "0.6" docopt = "0.6"
petgraph = "0.4.1" petgraph = "0.4.1"
extprim = "*"
\ No newline at end of file
...@@ -2461,6 +2461,17 @@ impl CodeGenerator for ASMCodeGen { ...@@ -2461,6 +2461,17 @@ impl CodeGenerator for ASMCodeGen {
self.internal_binop_def_r_mem("sub", dest, src) self.internal_binop_def_r_mem("sub", dest, src)
} }
// sbb
// Emit `sbb dest, src` (register, register): on x86-64, subtract with
// borrow — dest = dest - src - CF. Used by the int128 Sub lowering for
// the high half (see emit_sbb_r_r call in the sub-iregex-iregex path).
fn emit_sbb_r_r (&mut self, dest: Reg, src: Reg) {
    // Delegates to the shared two-register binop emitter, same as add/sub.
    self.internal_binop_def_r_r("sbb", dest, src)
}
// Emit `sbb dest, src` (register, memory): subtract-with-borrow of a
// memory operand from a register. Delegates to the shared reg/mem binop
// emitter used by the other arithmetic instructions (e.g. emit_sub_r_mem).
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem) {
    self.internal_binop_def_r_mem("sbb", dest, src)
}
// Emit `sbb dest, imm` (register, 32-bit immediate): subtract-with-borrow
// of an immediate from a register. Delegates to the shared reg/imm binop
// emitter.
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32) {
    self.internal_binop_def_r_imm("sbb", dest, src)
}
fn emit_mul_r(&mut self, src: &P<Value>) { fn emit_mul_r(&mut self, src: &P<Value>) {
let len = check_op_len(src); let len = check_op_len(src);
...@@ -2508,6 +2519,10 @@ impl CodeGenerator for ASMCodeGen { ...@@ -2508,6 +2519,10 @@ impl CodeGenerator for ASMCodeGen {
unimplemented!() unimplemented!()
} }
// Emit two-operand `imul dest, src` (signed multiply): on x86-64,
// dest = dest * src, truncated to the operand width. Used by the int128
// Mul lowering to form the cross products of the high/low halves.
fn emit_imul_r_r(&mut self, dest: Reg, src: Reg) {
    self.internal_binop_def_r_r("imul", dest, src)
}
fn emit_div_r (&mut self, src: &P<Value>) { fn emit_div_r (&mut self, src: &P<Value>) {
let len = check_op_len(src); let len = check_op_len(src);
......
...@@ -142,10 +142,18 @@ pub trait CodeGenerator { ...@@ -142,10 +142,18 @@ pub trait CodeGenerator {
fn emit_sub_r_mem(&mut self, dest: Reg, src: Mem); fn emit_sub_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sub_r_imm(&mut self, dest: Reg, src: i32); fn emit_sub_r_imm(&mut self, dest: Reg, src: i32);
// sub with borrow
fn emit_sbb_r_r (&mut self, dest: Reg, src: Reg);
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32);
// multiply // multiply
fn emit_mul_r (&mut self, src: Reg); fn emit_mul_r (&mut self, src: Reg);
fn emit_mul_mem(&mut self, src: Mem); fn emit_mul_mem(&mut self, src: Mem);
// signed multiply
fn emit_imul_r_r(&mut self, dest: Reg, src: Reg);
// div // div
fn emit_div_r (&mut self, src: Reg); fn emit_div_r (&mut self, src: Reg);
fn emit_div_mem (&mut self, src: Mem); fn emit_div_mem (&mut self, src: Mem);
......
...@@ -1401,6 +1401,8 @@ impl <'a> InstructionSelection { ...@@ -1401,6 +1401,8 @@ impl <'a> InstructionSelection {
// adc res_h op2_h -> res_h // adc res_h op2_h -> res_h
self.backend.emit_adc_r_r(&res_h, &op2_h); self.backend.emit_adc_r_r(&res_h, &op2_h);
} else {
unimplemented!()
} }
}, },
op::BinOp::Sub => { op::BinOp::Sub => {
...@@ -1434,6 +1436,23 @@ impl <'a> InstructionSelection { ...@@ -1434,6 +1436,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &reg_op1); self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2 res // sub op2 res
self.backend.emit_sub_r_r(&res_tmp, &reg_op2); self.backend.emit_sub_r_r(&res_tmp, &reg_op2);
} else if self.match_ireg_ex(&ops[op1]) && self.match_ireg_ex(&ops[op2]){
trace!("emit sub-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(&ops[op1], f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(&ops[op2], f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// sub res_l op2_l -> res_l
self.backend.emit_sub_r_r(&res_l, &op2_l);
// sbb res_h op2_h -> res_h
self.backend.emit_sbb_r_r(&res_h, &op2_h);
} else { } else {
unimplemented!() unimplemented!()
} }
...@@ -1472,6 +1491,23 @@ impl <'a> InstructionSelection { ...@@ -1472,6 +1491,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res // and op2, res -> res
self.backend.emit_and_r_r(&res_tmp, &tmp_op2); self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit and-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// and res_l op2_l -> res_l
self.backend.emit_and_r_r(&res_l, &op2_l);
// and res_h op2_h -> res_h
self.backend.emit_and_r_r(&res_h, &op2_h);
} else { } else {
unimplemented!() unimplemented!()
} }
...@@ -1510,6 +1546,23 @@ impl <'a> InstructionSelection { ...@@ -1510,6 +1546,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res // Or op2, res -> res
self.backend.emit_or_r_r(&res_tmp, &tmp_op2); self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit or-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// or res_l op2_l -> res_l
self.backend.emit_or_r_r(&res_l, &op2_l);
// or res_h op2_h -> res_h
self.backend.emit_or_r_r(&res_h, &op2_h);
} else { } else {
unimplemented!() unimplemented!()
} }
...@@ -1548,6 +1601,23 @@ impl <'a> InstructionSelection { ...@@ -1548,6 +1601,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1); self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// xor op2, res -> res // xor op2, res -> res
self.backend.emit_xor_r_r(&res_tmp, &tmp_op2); self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit xor-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// xor res_l op2_l -> res_l
self.backend.emit_xor_r_r(&res_l, &op2_l);
// xor res_h op2_h -> res_h
self.backend.emit_xor_r_r(&res_h, &op2_h);
} else { } else {
unimplemented!() unimplemented!()
} }
...@@ -1555,12 +1625,21 @@ impl <'a> InstructionSelection { ...@@ -1555,12 +1625,21 @@ impl <'a> InstructionSelection {
op::BinOp::Mul => { op::BinOp::Mul => {
// mov op1 -> rax // mov op1 -> rax
let op1 = &ops[op1]; let op1 = &ops[op1];
let op2 = &ops[op2];
let mreg_op1 = match op1.clone_value().ty.get_int_length() { let op_len = match op1.clone_value().ty.get_int_length() {
Some(64) => x86_64::RAX.clone(), Some(len) => len,
Some(32) => x86_64::EAX.clone(), None => panic!("expected integer operand with MUL")
Some(16) => x86_64::AX.clone(), };
Some(8) => x86_64::AL.clone(), match op_len {
1...64 => {
trace!("emit mul");
let mreg_op1 = match op_len {
64 => x86_64::RAX.clone(),
32 => x86_64::EAX.clone(),
16 => x86_64::AX.clone(),
8 => x86_64::AL.clone(),
_ => unimplemented!() _ => unimplemented!()
}; };
...@@ -1581,7 +1660,6 @@ impl <'a> InstructionSelection { ...@@ -1581,7 +1660,6 @@ impl <'a> InstructionSelection {
} }
// mul op2 // mul op2
let op2 = &ops[op2];
if self.match_iimm(op2) { if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2); let imm_op2 = self.node_iimm_to_i32(op2);
...@@ -1603,34 +1681,104 @@ impl <'a> InstructionSelection { ...@@ -1603,34 +1681,104 @@ impl <'a> InstructionSelection {
} }
// mov rax -> result // mov rax -> result
match res_tmp.ty.get_int_length() { let res_len = res_tmp.ty.get_int_length().unwrap();
Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX), assert!(res_len == op_len, "op and res do not have matching type: {}", node);
Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX), match res_len {
Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL), 64 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
32 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
16 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
_ => unimplemented!() _ => unimplemented!()
} }
}
128 => {
if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit mul128");
// (hi, lo)
// a b
// x c d
// ------------
// ad bd
// ad bc
// ------------
// t1 t2
// (hi, lo)
let (b, a) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (d, c) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// mov a -> t1
let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t1, &a);
// imul d, t1 -> t1
self.backend.emit_imul_r_r(&t1, &d);
// mul d, b -> (RDX:RAX) as (carry:t2)
self.backend.emit_mov_r_r(&x86_64::RAX, &d);
self.backend.emit_mul_r(&b);
let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t2, &x86_64::RAX);
// add t1, carry -> t1
self.backend.emit_add_r_r(&t1, &x86_64::RDX);
// mov c -> tt
let tt = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&tt, &c);
// imul b, tt -> tt
self.backend.emit_imul_r_r(&tt, &b);
// add t1, tt -> t1
self.backend.emit_add_r_r(&t1, &tt);
// result: t1(higher), t2(lower)
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &t2);
self.backend.emit_mov_r_r(&res_h, &t1);
} else {
unimplemented!()
}
}
_ => unimplemented!()
}
}, },
op::BinOp::Udiv => { op::BinOp::Udiv => {
let op1 = &ops[op1]; let op1 = &ops[op1];
let op2 = &ops[op2]; let op2 = &ops[op2];
let op_len = match op1.clone_value().ty.get_int_length() {
Some(len) => len,
None => panic!("expect integer op in UDIV")
};
match op_len {
0...64 => {
self.emit_udiv(op1, op2, f_content, f_context, vm); self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rax -> result // mov rax -> result
match res_tmp.ty.get_int_length() { match res_tmp.ty.get_int_length() {
Some(64) => { Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
} Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
Some(32) => { Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX); _ => unimplemented!()
} }
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
} }
Some(8) => { 128 => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.emit_runtime_entry(&entrypoints::UDIV_U128,
vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
Some(vec![res_l.clone(), res_h.clone()]),
Some(node), f_content, f_context, vm);
} }
_ => unimplemented!() _ => unimplemented!()
} }
...@@ -1639,21 +1787,34 @@ impl <'a> InstructionSelection { ...@@ -1639,21 +1787,34 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1]; let op1 = &ops[op1];
let op2 = &ops[op2]; let op2 = &ops[op2];
let op_len = match op1.clone_value().ty.get_int_length() {
Some(len) => len,
None => panic!("expect integer op in SDIV")
};
match op_len {
0...64 => {
self.emit_idiv(op1, op2, f_content, f_context, vm); self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rax -> result // mov rax -> result
match res_tmp.ty.get_int_length() { match res_tmp.ty.get_int_length() {
Some(64) => { Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX); Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
} Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
Some(32) => { Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX); _ => unimplemented!()
} }
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
} }
Some(8) => { 128 => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL); let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.emit_runtime_entry(&entrypoints::SDIV_I128,
vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
Some(vec![res_l.clone(), res_h.clone()]),
Some(node), f_content, f_context, vm);
} }
_ => unimplemented!() _ => unimplemented!()
} }
...@@ -1662,21 +1823,34 @@ impl <'a> InstructionSelection { ...@@ -1662,21 +1823,34 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1]; let op1 = &ops[op1];
let op2 = &ops[op2]; let op2 = &ops[op2];
let op_len = match op1.clone_value().ty.get_int_length() {
Some(len) => len,
None => panic!("expect integer op in UREM")
};
match op_len {
0...64 => {
self.emit_udiv(op1, op2, f_content, f_context, vm); self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result // mov rdx -> result
match res_tmp.ty.get_int_length() { match res_tmp.ty.get_int_length() {
Some(64) => { Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
} Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX),
Some(32) => { Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); _ => unimplemented!()
} }
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
} }
Some(8) => { 128 => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.emit_runtime_entry(&entrypoints::UREM_U128,
vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
Some(vec![res_l.clone(), res_h.clone()]),
Some(node), f_content, f_context, vm);
} }
_ => unimplemented!() _ => unimplemented!()
} }
...@@ -1685,21 +1859,34 @@ impl <'a> InstructionSelection { ...@@ -1685,21 +1859,34 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1]; let op1 = &ops[op1];
let op2 = &ops[op2]; let op2 = &ops[op2];
let op_len = match op1.clone_value().ty.get_int_length() {
Some(len) => len,
None => panic!("expect integer op in SREM")
};
match op_len {
0...64 => {
self.emit_idiv(op1, op2, f_content, f_context, vm); self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result // mov rdx -> result
match res_tmp.ty.get_int_length() { match res_tmp.ty.get_int_length() {
Some(64) => { Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX); Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
} Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX),
Some(32) => { Some(8) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH),
self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX); _ => unimplemented!()
} }
Some(16) => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
} }
Some(8) => { 128 => {
self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH); let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.emit_runtime_entry(&entrypoints::SREM_I128,
vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
Some(vec![res_l.clone(), res_h.clone()]),
Some(node), f_content, f_context, vm);
} }
_ => unimplemented!() _ => unimplemented!()
} }
......
...@@ -8,6 +8,7 @@ extern crate stderrlog; ...@@ -8,6 +8,7 @@ extern crate stderrlog;
extern crate maplit; extern crate maplit;
#[macro_use] #[macro_use]
extern crate field_offset; extern crate field_offset;
extern crate extprim;
#[macro_use] #[macro_use]
pub extern crate ast; pub extern crate ast;
......
...@@ -128,6 +128,46 @@ lazy_static! { ...@@ -128,6 +128,46 @@ lazy_static! {
jit: RwLock::new(None) jit: RwLock::new(None)
}; };
// Runtime entrypoint for 128-bit unsigned division, called from the
// Udiv lowering when the operand width is 128. The four u64 arguments
// are (op1 low, op1 high, op2 low, op2 high) and the two u64 results
// are (quotient low, quotient high) — see the emit_runtime_entry call
// site in InstructionSelection. Resolves to the AOT symbol
// "muentry_udiv_u128" (presumably implemented via extprim/libmu —
// implementation not visible here).
pub static ref UDIV_U128 : RuntimeEntrypoint = RuntimeEntrypoint {
    sig: P(MuFuncSig {
        hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
        // two u64 return values: the 128-bit quotient split into halves
        ret_tys: vec![UINT64_TYPE.clone(); 2],
        // four u64 arguments: both 128-bit operands split into halves
        arg_tys: vec![UINT64_TYPE.clone(); 4]
    }),
    aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_udiv_u128")),
    // JIT location is filled in lazily; None until resolved
    jit: RwLock::new(None)
};
// Runtime entrypoint for 128-bit signed division, called from the Sdiv
// lowering when the operand width is 128. Arguments and results follow
// the same split-halves convention as UDIV_U128: args are
// (op1 low, op1 high, op2 low, op2 high), results are
// (quotient low, quotient high) — see the emit_runtime_entry call site.
// Resolves to the AOT symbol "muentry_sdiv_i128".
pub static ref SDIV_I128 : RuntimeEntrypoint = RuntimeEntrypoint {
    sig: P(MuFuncSig {
        hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
        // two u64 return values: the 128-bit quotient split into halves
        ret_tys: vec![UINT64_TYPE.clone(); 2],
        // four u64 arguments: both 128-bit operands split into halves
        arg_tys: vec![UINT64_TYPE.clone(); 4]
    }),
    aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_sdiv_i128")),
    // JIT location is filled in lazily; None until resolved
    jit: RwLock::new(None)
};