WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

Commit 4d292d54 authored by qinsoon's avatar qinsoon
Browse files

lshr/ashr int128

parent 98157fa0
......@@ -1967,70 +1967,147 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_mem(op1) {
unimplemented!()
} else if self.match_ireg(op1) {
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit lshr-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// mov op1 -> res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// lshr result, op2 -> result
self.backend.emit_shr_r_imm8(&res_tmp, imm_op2 as i8);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit lshr-ireg-ireg");
// shr result, op2 -> result
self.backend.emit_shr_r_imm8(&res_tmp, imm_op2);
} else if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, unsafe {&tmp_op2.as_type(UINT8_TYPE.clone())});
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// shr result, cl -> result
self.backend.emit_shr_r_cl(&res_tmp);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
// lshr result, cl -> result
self.backend.emit_shr_r_cl(&res_tmp);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit lshr-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
// mov op2_l -> ecx (we do not care higher bits)
self.backend.emit_mov_r_r(&x86_64::ECX, unsafe {&op2_l.as_type(UINT32_TYPE.clone())});
// mov op1_l -> t1
let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t1, &op1_l);
// shrd op1_h, t1, cl -> t1
self.backend.emit_shrd_r_r_cl(&t1, &op1_h);
// mov op1_h -> t2
let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t2, &op1_h);
// shr t2, cl -> t2
self.backend.emit_shr_r_cl(&t2);
// clear res_h
self.backend.emit_mov_r_imm(&res_h, 0);
// test 64, cl
self.backend.emit_test_imm_r(64i32, &x86_64::CL);
// cmovne t2 -> t1
self.backend.emit_cmovne_r_r(&t1, &t2);
// cmove t2 -> res_h
self.backend.emit_cmove_r_r(&res_h, &t2);
// mov t1 -> res_l
self.backend.emit_mov_r_r(&res_l, &t1);
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
unimplemented!()
}
},
op::BinOp::Ashr => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_mem(op1) {
unimplemented!()
} else if self.match_ireg(op1) {
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit ashr-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// mov op1 -> res
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// sar result, op2 -> result
self.backend.emit_sar_r_imm8(&res_tmp, imm_op2 as i8);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit ashr-ireg-ireg");
// sar result, op2 -> result
self.backend.emit_sar_r_imm8(&res_tmp, imm_op2);
} else if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
// mov op2 -> cl
self.backend.emit_mov_r_r(&x86_64::CL, unsafe {&tmp_op2.as_type(UINT8_TYPE.clone())});
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// mov op1 -> result
self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
// sar result, cl -> result
self.backend.emit_sar_r_cl(&res_tmp);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
// sar result, cl -> result
self.backend.emit_sar_r_cl(&res_tmp);
} else if self.match_ireg_ex(op1) && self.match_ireg_ex(op2) {
trace!("emit ashr-iregex-iregex");
let (op1_l, op1_h) = self.emit_ireg_ex(op1, f_content, f_context, vm);
let (op2_l, op2_h) = self.emit_ireg_ex(op2, f_content, f_context, vm);
let (res_l, res_h) = self.split_int128(&res_tmp, f_context, vm);
// mov op2_l -> ecx
self.backend.emit_mov_r_r(&x86_64::ECX, unsafe {&op2_l.as_type(UINT32_TYPE.clone())});
// mov op1_l -> t1
let t1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t1, &op1_l);
// shrd op1_h, t1, cl -> t1
self.backend.emit_shrd_r_r_cl(&t1, &op1_h);
// mov op1_h -> t2
let t2 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t2, &op1_h);
// sar t2, cl -> t2
self.backend.emit_sar_r_cl(&t2);
// mov op1_h -> t3
let t3 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_r(&t3, &op1_h);
// sar t3, 63 -> t3
self.backend.emit_sar_r_imm8(&t3, 63i8);
// test 64 cl
self.backend.emit_test_imm_r(64i32, &x86_64::CL);
// cmovne t2 -> t1
self.backend.emit_cmovne_r_r(&t1, &t2);
// cmove t2 -> t3
self.backend.emit_cmove_r_r(&t3, &t2);
// t1 as lower, t3 as higher
self.backend.emit_mov_r_r(&res_l, &t1);
self.backend.emit_mov_r_r(&res_h, &t3);
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
unimplemented!()
}
},
......
......@@ -219,5 +219,103 @@ fn shl_u128() -> VM {
define_func_ver!((vm) shl_u128_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test]
fn test_lshr_u128() {
    let lib = testutil::compile_fnc("lshr_u128", &lshr_u128);
    unsafe {
        // The compiled Mu function takes a 128-bit operand and a 128-bit
        // shift amount, each passed as two u64s (low word first, then high
        // word), and returns the shifted value the same way.
        let lshr_u128 : libloading::Symbol<unsafe extern fn(u64, u64, u64, u64) -> (u64, u64)> = lib.get(b"lshr_u128").unwrap();

        // (2^64 + 1) >> 64 == 1, i.e. (low=1, high=1) becomes (low=1, high=0)
        let res = lshr_u128(1, 1, 64, 0);
        println!("lshr_u128(100000000000...0001, 64) = {:?}", res);
        assert!(res == (1, 0));

        // Logical shift by 64: the high word moves into the low word and the
        // high word is zero-filled (no sign extension).
        let res = lshr_u128(1, 0xffffffffffffffff, 64, 0);
        println!("lshr_u128(0xffffffffffffffff0000000000000001, 64) = {:?}", res);
        assert!(res == (0xffffffffffffffff, 0));
    }
}
/// Builds a Mu VM containing a single function `lshr_u128` that performs a
/// 128-bit logical (unsigned) shift right: `sum = LSHR a b`.
/// Used as the compilation input for `test_lshr_u128`.
fn lshr_u128() -> VM {
    let vm = VM::new();

    // 128-bit integer type and the (u128, u128) -> (u128) signature.
    typedef!    ((vm) u128 = mu_int(128));
    funcsig!    ((vm) sig = (u128, u128) -> (u128));
    funcdecl!   ((vm) <sig> lshr_u128);
    funcdef!    ((vm) <sig> lshr_u128 VERSION lshr_u128_v1);

    // Single entry block taking the operand `a` and shift amount `b`.
    block!      ((vm, lshr_u128_v1) blk_entry);
    ssa!        ((vm, lshr_u128_v1) <u128> a);
    ssa!        ((vm, lshr_u128_v1) <u128> b);

    // sum = LSHR %a %b  (logical shift right)
    ssa!        ((vm, lshr_u128_v1) <u128> sum);
    inst!       ((vm, lshr_u128_v1) blk_entry_lshr_u128:
        sum = BINOP (BinOp::Lshr) a b
    );

    // Return the shifted value.
    inst!       ((vm, lshr_u128_v1) blk_entry_ret:
        RET (sum)
    );

    define_block!   ((vm, lshr_u128_v1) blk_entry(a, b) {
        blk_entry_lshr_u128, blk_entry_ret
    });

    define_func_ver!((vm) lshr_u128_v1 (entry: blk_entry) {blk_entry});

    vm
}
#[test]
fn test_ashr_u128() {
    let lib = testutil::compile_fnc("ashr_u128", &ashr_u128);
    unsafe {
        // The compiled Mu function takes a 128-bit operand and a 128-bit
        // shift amount, each passed as two u64s (low word first, then high
        // word), and returns the shifted value the same way.
        let ashr_u128 : libloading::Symbol<unsafe extern fn(u64, u64, u64, u64) -> (u64, u64)> = lib.get(b"ashr_u128").unwrap();

        // Arithmetic shift by 64 of a negative value: the high word moves
        // into the low word and the high word is filled with the sign bit.
        let res = ashr_u128(1, 0xffffffffffffffff, 64, 0);
        println!("ashr_u128(0xffffffffffffffff0000000000000001, 64) = {:?}", res);
        assert!(res == (0xffffffffffffffff, 0xffffffffffffffff));
    }
}
/// Builds a Mu VM containing a single function `ashr_u128` that performs a
/// 128-bit arithmetic (sign-extending) shift right: `sum = ASHR a b`.
/// Used as the compilation input for `test_ashr_u128`.
fn ashr_u128() -> VM {
    let vm = VM::new();

    // 128-bit integer type and the (u128, u128) -> (u128) signature.
    typedef!    ((vm) u128 = mu_int(128));
    funcsig!    ((vm) sig = (u128, u128) -> (u128));
    funcdecl!   ((vm) <sig> ashr_u128);
    funcdef!    ((vm) <sig> ashr_u128 VERSION ashr_u128_v1);

    // Single entry block taking the operand `a` and shift amount `b`.
    block!      ((vm, ashr_u128_v1) blk_entry);
    ssa!        ((vm, ashr_u128_v1) <u128> a);
    ssa!        ((vm, ashr_u128_v1) <u128> b);

    // sum = ASHR %a %b  (arithmetic shift right)
    ssa!        ((vm, ashr_u128_v1) <u128> sum);
    inst!       ((vm, ashr_u128_v1) blk_entry_ashr_u128:
        sum = BINOP (BinOp::Ashr) a b
    );

    // Return the shifted value.
    inst!       ((vm, ashr_u128_v1) blk_entry_ret:
        RET (sum)
    );

    define_block!   ((vm, ashr_u128_v1) blk_entry(a, b) {
        blk_entry_ashr_u128, blk_entry_ret
    });

    define_func_ver!((vm) ashr_u128_v1 (entry: blk_entry) {blk_entry});

    vm
}
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment