Commit a7654e7e authored by qinsoon's avatar qinsoon

[wip] more int128 test. dylib for testing includes libmu

parent db6a93a3
Pipeline #416 failed in 11 minutes and 3 seconds
......@@ -33,4 +33,5 @@ rustc-serialize = "*"
time = "0.1.34"
maplit = "0.1.4"
docopt = "0.6"
petgraph = "0.4.1"
extprim = "*"
\ No newline at end of file
......@@ -2460,6 +2460,17 @@ impl CodeGenerator for ASMCodeGen {
fn emit_sub_r_mem(&mut self, dest: Reg, src: Mem) {
self.internal_binop_def_r_mem("sub", dest, src)
}
// sbb
fn emit_sbb_r_r (&mut self, dest: Reg, src: Reg) {
self.internal_binop_def_r_r("sbb", dest, src)
}
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem) {
self.internal_binop_def_r_mem("sbb", dest, src)
}
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32) {
self.internal_binop_def_r_imm("sbb", dest, src)
}
fn emit_mul_r(&mut self, src: &P<Value>) {
let len = check_op_len(src);
......@@ -2508,6 +2519,10 @@ impl CodeGenerator for ASMCodeGen {
unimplemented!()
}
fn emit_imul_r_r(&mut self, dest: Reg, src: Reg) {
self.internal_binop_def_r_r("imul", dest, src)
}
fn emit_div_r (&mut self, src: &P<Value>) {
let len = check_op_len(src);
......
......@@ -142,10 +142,18 @@ pub trait CodeGenerator {
fn emit_sub_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sub_r_imm(&mut self, dest: Reg, src: i32);
// sub with borrow
fn emit_sbb_r_r (&mut self, dest: Reg, src: Reg);
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32);
// multiply
fn emit_mul_r (&mut self, src: Reg);
fn emit_mul_mem(&mut self, src: Mem);
// signed multiply
fn emit_imul_r_r(&mut self, dest: Reg, src: Reg);
// div
fn emit_div_r (&mut self, src: Reg);
fn emit_div_mem (&mut self, src: Mem);
......
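The new emit_sbb_* hooks mirror the existing emit_adc_* ones, so 128-bit add and sub can be lowered as a carry/borrow chain over two 64-bit halves. A minimal plain-Rust sketch (not Zebu code, just the semantics the add/adc and sub/sbb pairs implement):

    // Carry/borrow chains for a 128-bit value split into (low, high) u64 halves.
    fn add128(lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) {
        let (lo, carry) = lo1.overflowing_add(lo2);                 // add res_l, op2_l
        let hi = hi1.wrapping_add(hi2).wrapping_add(carry as u64);  // adc res_h, op2_h
        (lo, hi)
    }

    fn sub128(lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) {
        let (lo, borrow) = lo1.overflowing_sub(lo2);                // sub res_l, op2_l
        let hi = hi1.wrapping_sub(hi2).wrapping_sub(borrow as u64); // sbb res_h, op2_h
        (lo, hi)
    }

    fn main() {
        assert_eq!(add128(u64::max_value(), 0, 1, 0), (0, 1)); // same case as test_add_u128
        assert_eq!(sub128(0, 1, 1, 0), (u64::max_value(), 0)); // borrow propagates to the high half
    }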
......@@ -1401,6 +1401,8 @@ impl <'a> InstructionSelection {
// adc res_h op2_h -> res_h
self.backend.emit_adc_r_r(&res_h, &op2_h);
} else {
unimplemented!()
}
},
op::BinOp::Sub => {
......@@ -1434,6 +1436,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
// sub op2, res -> res
self.backend.emit_sub_r_r(&res_tmp, &reg_op2);
} else if self.match_ireg_ex(&ops[op1]) && self.match_ireg_ex(&ops[op2]){
trace!("emit sub-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(&ops[op1], f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(&ops[op2], f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// sub res_l op2_l -> res_l
self.backend.emit_sub_r_r(&res_l, &op2_l);
// sbb res_h op2_h -> res_h
self.backend.emit_sbb_r_r(&res_h, &op2_h);
} else {
unimplemented!()
}
......@@ -1472,6 +1491,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// and op2, res -> res
self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit and-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// and res_l op2_l -> res_l
self.backend.emit_and_r_r(&res_l, &op2_l);
// and res_h op2_h -> res_h
self.backend.emit_and_r_r(&res_h, &op2_h);
} else {
unimplemented!()
}
......@@ -1510,6 +1546,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit or-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// or res_l op2_l -> res_l
self.backend.emit_or_r_r(&res_l, &op2_l);
// or res_h op2_h -> res_h
self.backend.emit_or_r_r(&res_h, &op2_h);
} else {
unimplemented!()
}
......@@ -1548,6 +1601,23 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// xor op2, res -> res
self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2){
trace!("emit xor-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
// make result split
// mov op1 to res
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
self.backend.emit_mov_r_r(&res_l, &op1_l);
self.backend.emit_mov_r_r(&res_h, &op1_h);
// xor res_l op2_l -> res_l
self.backend.emit_xor_r_r(&res_l, &op2_l);
// xor res_h op2_h -> res_h
self.backend.emit_xor_r_r(&res_h, &op2_h);
} else {
unimplemented!()
}
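The 128-bit And/Or/Xor cases above are the simple ones: bitwise operations have no cross-half carry, so each lowers to two independent 64-bit instructions. Plain-Rust sketch (not Zebu code):

    // Each 128-bit bitwise op is the same op applied to each u64 half independently.
    fn and128(lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) { (lo1 & lo2, hi1 & hi2) }
    fn or128 (lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) { (lo1 | lo2, hi1 | hi2) }
    fn xor128(lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) { (lo1 ^ lo2, hi1 ^ hi2) }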
......@@ -1555,82 +1625,160 @@ impl <'a> InstructionSelection {
op::BinOp::Mul => {
    let op1 = &ops[op1];
    let op2 = &ops[op2];

    let op_len = match op1.clone_value().ty.get_int_length() {
        Some(len) => len,
        None => panic!("expected integer operand with MUL")
    };

    match op_len {
        1...64 => {
            trace!("emit mul");

            // mov op1 -> rax (or the matching sub-register)
            let mreg_op1 = match op_len {
                64 => x86_64::RAX.clone(),
                32 => x86_64::EAX.clone(),
                16 => x86_64::AX.clone(),
                8  => x86_64::AL.clone(),
                _  => unimplemented!()
            };

            if self.match_iimm(op1) {
                let imm_op1 = self.node_iimm_to_i32(op1);
                self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
            } else if self.match_mem(op1) {
                let mem_op1 = self.emit_mem(op1, vm);
                self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
            } else if self.match_ireg(op1) {
                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                self.backend.emit_mov_r_r(&mreg_op1, &reg_op1);
            } else {
                unimplemented!();
            }

            // mul op2
            if self.match_iimm(op2) {
                let imm_op2 = self.node_iimm_to_i32(op2);

                // put imm in a temporary
                // here we use result reg as temporary
                self.backend.emit_mov_r_imm(&res_tmp, imm_op2);
                self.backend.emit_mul_r(&res_tmp);
            } else if self.match_mem(op2) {
                let mem_op2 = self.emit_mem(op2, vm);
                self.backend.emit_mul_mem(&mem_op2);
            } else if self.match_ireg(op2) {
                let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                self.backend.emit_mul_r(&reg_op2);
            } else {
                unimplemented!();
            }

            let res_len = res_tmp.ty.get_int_length().unwrap();
            assert!(res_len == op_len, "op and res do not have matching type: {}", node);

            // mov rax -> result
            match res_len {
                64 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
                32 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
                16 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
                8  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
                _  => unimplemented!()
            }
        }
        128 => {
            if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
                trace!("emit mul128");

                // schoolbook multiplication on 64-bit halves, modulo 2^128
                // (the a*c term only affects bits >= 128 and is dropped):
                //
                //               a       b      op1 = (a: high, b: low)
                //       x       c       d      op2 = (c: high, d: low)
                //       --------------------
                //           hi(bd)  lo(bd)
                //     +     lo(ad)
                //     +     lo(bc)
                //       --------------------
                //               t1      t2     result = (t1: high, t2: low)

                let (b, a) = self.emit_ireg_ex(op1, f_content, f_context, vm);
                let (d, c) = self.emit_ireg_ex(op2, f_content, f_context, vm);

                // mov a -> t1
                let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                self.backend.emit_mov_r_r(&t1, &a);
                // imul d, t1 -> t1 (t1 = lo(a*d))
                self.backend.emit_imul_r_r(&t1, &d);

                // mul d, b -> (RDX:RAX) as (carry:t2)
                self.backend.emit_mov_r_r(&x86_64::RAX, &d);
                self.backend.emit_mul_r(&b);
                let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                self.backend.emit_mov_r_r(&t2, &x86_64::RAX);

                // add t1, carry -> t1 (t1 += hi(b*d))
                self.backend.emit_add_r_r(&t1, &x86_64::RDX);

                // mov c -> tt
                let tt = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                self.backend.emit_mov_r_r(&tt, &c);
                // imul b, tt -> tt (tt = lo(b*c))
                self.backend.emit_imul_r_r(&tt, &b);
                // add t1, tt -> t1
                self.backend.emit_add_r_r(&t1, &tt);

                // result: t1 (higher), t2 (lower)
                let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
                self.backend.emit_mov_r_r(&res_l, &t2);
                self.backend.emit_mov_r_r(&res_h, &t1);
            } else {
                unimplemented!()
            }
        }
        _ => unimplemented!()
    }
},
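To see why dropping the a*c term is safe, here is a plain-Rust sketch of the same t1/t2 decomposition (not Zebu code; the native u128 type is used only to cross-check the result):

    // Schoolbook 64x64 decomposition used by the mul128 lowering above:
    //   result_lo = lo(b*d)
    //   result_hi = hi(b*d) + lo(a*d) + lo(b*c)   (a*c only affects bits >= 128)
    fn mul128(b: u64, a: u64, d: u64, c: u64) -> (u64, u64) {
        // (a:b) * (c:d), where a and c are the high halves
        let bd = (b as u128) * (d as u128);
        let t2 = bd as u64;                                  // lo(b*d), i.e. RAX after `mul b`
        let t1 = a.wrapping_mul(d)                           // imul d, t1
            .wrapping_add((bd >> 64) as u64)                 // add t1, RDX (carry of b*d)
            .wrapping_add(b.wrapping_mul(c));                // imul b, tt; add t1, tt
        (t2, t1)
    }

    fn main() {
        // cross-check against native 128-bit multiplication
        let x: u128 = 0x0123_4567_89ab_cdef_0fed_cba9_8765_4321;
        let y: u128 = 0x2468_ace0_1357_9bdf_f0e1_d2c3_b4a5_9687;
        let p = x.wrapping_mul(y);
        assert_eq!(mul128(x as u64, (x >> 64) as u64, y as u64, (y >> 64) as u64),
                   (p as u64, (p >> 64) as u64));
    }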
op::BinOp::Udiv => {
    let op1 = &ops[op1];
    let op2 = &ops[op2];

    let op_len = match op1.clone_value().ty.get_int_length() {
        Some(len) => len,
        None => panic!("expect integer op in UDIV")
    };

    match op_len {
        0...64 => {
            self.emit_udiv(op1, op2, f_content, f_context, vm);

            // mov rax -> result
            match res_tmp.ty.get_int_length() {
                Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
                Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
                Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
                Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
                _ => unimplemented!()
            }
        }
        128 => {
            // call into the runtime for 128-bit unsigned division
            let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
            let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
            let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);

            self.emit_runtime_entry(&entrypoints::UDIV_U128,
                vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
                Some(vec![res_l.clone(), res_h.clone()]),
                Some(node), f_content, f_context, vm);
        }
        _ => unimplemented!()
    }
......@@ -1639,21 +1787,34 @@ impl <'a> InstructionSelection {
    let op1 = &ops[op1];
    let op2 = &ops[op2];

    let op_len = match op1.clone_value().ty.get_int_length() {
        Some(len) => len,
        None => panic!("expect integer op in SDIV")
    };

    match op_len {
        0...64 => {
            self.emit_idiv(op1, op2, f_content, f_context, vm);

            // mov rax -> result
            match res_tmp.ty.get_int_length() {
                Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
                Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
                Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
                Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
                _ => unimplemented!()
            }
        }
        128 => {
            // call into the runtime for 128-bit signed division
            let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
            let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
            let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);

            self.emit_runtime_entry(&entrypoints::SDIV_I128,
                vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
                Some(vec![res_l.clone(), res_h.clone()]),
                Some(node), f_content, f_context, vm);
        }
        _ => unimplemented!()
    }
......@@ -1662,21 +1823,34 @@ impl <'a> InstructionSelection {
    let op1 = &ops[op1];
    let op2 = &ops[op2];

    let op_len = match op1.clone_value().ty.get_int_length() {
        Some(len) => len,
        None => panic!("expect integer op in UREM")
    };

    match op_len {
        0...64 => {
            self.emit_udiv(op1, op2, f_content, f_context, vm);

            // mov rdx -> result
            match res_tmp.ty.get_int_length() {
                Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
                Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
                Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX),
                Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH),
                _ => unimplemented!()
            }
        }
        128 => {
            // call into the runtime for 128-bit unsigned remainder
            let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
            let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
            let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);

            self.emit_runtime_entry(&entrypoints::UREM_U128,
                vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
                Some(vec![res_l.clone(), res_h.clone()]),
                Some(node), f_content, f_context, vm);
        }
        _ => unimplemented!()
    }
......@@ -1685,21 +1859,34 @@ impl <'a> InstructionSelection {
    let op1 = &ops[op1];
    let op2 = &ops[op2];

    let op_len = match op1.clone_value().ty.get_int_length() {
        Some(len) => len,
        None => panic!("expect integer op in SREM")
    };

    match op_len {
        0...64 => {
            self.emit_idiv(op1, op2, f_content, f_context, vm);

            // mov rdx -> result
            match res_tmp.ty.get_int_length() {
                Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
                Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
                Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX),
                Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH),
                _ => unimplemented!()
            }
        }
        128 => {
            // call into the runtime for 128-bit signed remainder
            let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
            let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
            let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);

            self.emit_runtime_entry(&entrypoints::SREM_I128,
                vec![op1_l.clone(), op1_h.clone(), op2_l.clone(), op2_h.clone()],
                Some(vec![res_l.clone(), res_h.clone()]),
                Some(node), f_content, f_context, vm);
        }
        _ => unimplemented!()
    }
......
......@@ -8,6 +8,7 @@ extern crate stderrlog;
extern crate maplit;
#[macro_use]
extern crate field_offset;
extern crate extprim;
#[macro_use]
pub extern crate ast;
......
......@@ -128,6 +128,46 @@ lazy_static! {
jit: RwLock::new(None)
};
pub static ref UDIV_U128 : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![UINT64_TYPE.clone(); 2],
arg_tys: vec![UINT64_TYPE.clone(); 4]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_udiv_u128")),
jit: RwLock::new(None)
};
pub static ref SDIV_I128 : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![UINT64_TYPE.clone(); 2],
arg_tys: vec![UINT64_TYPE.clone(); 4]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_sdiv_i128")),
jit: RwLock::new(None)
};
pub static ref UREM_U128 : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![UINT64_TYPE.clone(); 2],
arg_tys: vec![UINT64_TYPE.clone(); 4]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_urem_u128")),
jit: RwLock::new(None)
};
pub static ref SREM_I128 : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![UINT64_TYPE.clone(); 2],
arg_tys: vec![UINT64_TYPE.clone(); 4]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_srem_i128")),
jit: RwLock::new(None)
};
// impl/decl: mod.rs
pub static ref PRINT_HEX : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
......
......@@ -12,4 +12,31 @@ pub extern fn muentry_frem_float(a: f32, b: f32) -> f32 {
use std::ops::Rem;
a.rem(b)
}
use extprim::u128::u128;
use extprim::i128::i128;
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_udiv_u128(a: u128, b: u128) -> u128 {
a.wrapping_div(b)
}
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_sdiv_i128(a: i128, b: i128) -> i128 {
a.wrapping_div(b)
}
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_urem_u128(a: u128, b: u128) -> u128 {
a.wrapping_rem(b)
}
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_srem_i128(a: i128, b: i128) -> i128 {
a.wrapping_rem(b)
}
\ No newline at end of file
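The entrypoint signatures registered earlier (UDIV_U128 and friends) describe these helpers as taking four u64 arguments (the low/high halves of each operand) and returning two u64s, matching how extprim lays out its u128/i128 as pairs of u64 fields. As an illustration only (not part of this commit, and the name is made up), an equivalent helper written against explicit halves, using Rust's built-in u128 purely for clarity:

    // Same wrapping-division semantics as muentry_udiv_u128, but with explicit halves.
    fn example_udiv_u128_parts(a_lo: u64, a_hi: u64, b_lo: u64, b_hi: u64) -> (u64, u64) {
        let a = ((a_hi as u128) << 64) | a_lo as u128;
        let b = ((b_hi as u128) << 64) | b_lo as u128;
        let q = a.wrapping_div(b);       // wrapping_div still panics when b == 0
        (q as u64, (q >> 64) as u64)     // (low, high) halves of the quotient
    }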
......@@ -64,6 +64,15 @@ fn link_dylib_internal (files: Vec<PathBuf>, lib: &Vec<String>, libpath: &Vec<St
let mut cc = Command::new(get_test_clang_path());
// include mu static lib
let libmu_path = if cfg!(debug_assertions) {
"target/debug/libmu.a"
} else {
"target/release/libmu.a"
};
let libmu = get_path_under_mu(libmu_path);
cc.arg(format!("{}", libmu.to_str().unwrap()));
// external libs
for path in libpath.iter() {
cc.arg(format!("-L{}", path));
......
extern crate libloading;
extern crate extprim;
use mu::ast::ir::*;
use mu::ast::inst::*;
......@@ -17,11 +18,17 @@ fn test_add_u128() {
let lib = testutil::compile_fnc("add_u128", &add_u128);
unsafe {
use std::u64;
let add_u128 : libloading::Symbol<unsafe extern fn(u64, u64, u64, u64) -> (u64, u64)> = lib.get(b"add_u128").unwrap();
let res = add_u128(1, 0, 1, 0);
println!("add_u128(1, 1) = {:?}", res);
assert!(res == (2, 0));
let res = add_u128(u64::MAX, 0, 1, 0);
println!("add_u128(u64::MAX, 1) = {:?}", res);
assert!(res == (0, 1));
}
}
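The compiled function passes and returns 128-bit values as (low, high) u64 pairs. A small helper sketch (assumed, not part of the test suite) for reading such a pair back as one number:

    // Combine the (low, high) pair returned by the compiled add_u128.
    fn to_u128(lo: u64, hi: u64) -> u128 {
        ((hi as u128) << 64) | lo as u128
    }

    fn main() {
        // the second assertion above: u64::MAX + 1 carries into the high half
        assert_eq!(to_u128(0, 1), (std::u64::MAX as u128) + 1);
    }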
......@@ -54,5 +61,112 @@ fn add_u128() -> VM {
define_func_ver!((vm) add_u128_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test]
fn test_mul_u128() {
let lib = testutil::compile_fnc("mul_u128", &mul_u128);
unsafe {
use std::u64;
let mul_u128 : libloading::Symbol<unsafe extern fn(u64, u64, u64, u64) -> (u64, u64)> = lib.get(b"mul_u128").unwrap();
let res = mul_u128(6, 0, 7, 0);
println!("mul_u128(6, 7) = {:?}", res);
assert!(res == (42, 0));
let res = mul_u128(6, 6, 7, 7);
println!("mul_u128(??, ??) = {:?}", res);
assert!(res == (42, 84));
}
}
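Worked check of that second assertion in plain Rust with the native u128 type (not part of the test): the operands are 6 + 6*2^64 and 7 + 7*2^64, and modulo 2^128 their product keeps only the low 42 term and the (42 + 42)*2^64 cross terms.

    fn main() {
        let a: u128 = 6 + (6u128 << 64);
        let b: u128 = 7 + (7u128 << 64);
        let p = a.wrapping_mul(b);
        // low half = 42, high half = 42 + 42 = 84, matching assert!(res == (42, 84))
        assert_eq!((p as u64, (p >> 64) as u64), (42, 84));
    }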
fn mul_u128() -> VM {
let vm = VM::new();
typedef! ((vm) u128 = mu_int(128));