Commit 967d053f authored by qinsoon

[wip] CmpOp as value. Going to massively redesign/refactor the code.

Currently we only use 64-bit registers, and extend/truncate the result if it
is not 64 bits. This is awful. Going to use all the register widths, for
example AL, AX, EAX, RAX, but will not use AH (so reg alloc is simpler).
parent 6c06ffd1
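For context, a minimal plain-Rust sketch (not from this commit) of the width
emulation the message describes: every value lives in a 64-bit register, so a
narrower result is normalized either by masking (zero extend) or by a
shift-left/arithmetic-shift-right pair (sign extend), mirroring the ZEXT/SEXT
lowering further down in this diff.

fn zext_in_u64(raw: u64, from_bits: u32) -> u64 {
    // keep only the low from_bits bits
    if from_bits >= 64 { raw } else { raw & ((1u64 << from_bits) - 1) }
}

fn sext_in_u64(raw: u64, from_bits: u32) -> u64 {
    // shift the value's sign bit up to bit 63, then arithmetic-shift back
    if from_bits >= 64 { return raw; }
    let shift = 64 - from_bits;
    (((raw << shift) as i64) >> shift) as u64
}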
@@ -583,25 +583,16 @@ impl Value {
pub fn is_int_const(&self) -> bool {
match self.v {
Value_::Constant(_) => {
let ty : &MuType = &self.ty;
match ty.v {
MuType_::Int(_) => true,
_ => false
}
}
Value_::Constant(Constant::Int(_)) => true,
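// a null reference is lowered as the integer constant 0, so it counts too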
Value_::Constant(Constant::NullRef) => true,
_ => false
}
}
pub fn extract_int_const(&self) -> u64 {
match self.v {
Value_::Constant(ref c) => {
match c {
&Constant::Int(val) => val,
_ => panic!("expect int const")
}
},
Value_::Constant(Constant::Int(val)) => val,
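// a null reference lowers to the integer 0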
Value_::Constant(Constant::NullRef) => 0,
_ => panic!("expect int const")
}
}
......
@@ -13,6 +13,10 @@ lazy_static! {
MuType::new(new_internal_id(), MuType_::int(POINTER_SIZE * 8))
);
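// 1-bit integer type, e.g. for CmpOp results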
pub static ref UINT1_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(1))
);
pub static ref UINT8_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(8))
);
......
@@ -1119,7 +1119,7 @@ fn op_postfix(op_len: usize) -> &'static str {
}
// general instruction emission
macro_rules! binop_no_def_r_r {
macro_rules! cmp_r_r {
($func_name: ident, $inst: expr, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
// with postfix
@@ -1134,9 +1134,17 @@ macro_rules! binop_no_def_r_r {
self.add_asm_inst(
asm,
hashmap!{},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
{
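// hashmap! cannot hold two entries for the same key, so when both
// operands are the same register, record both use locations under one id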
if id1 == id2 {
hashmap!{
id1 => vec![loc1, loc2]
}
} else {
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
}
}
},
false
)
@@ -1144,7 +1152,7 @@ macro_rules! binop_no_def_r_r {
}
}
macro_rules! binop_no_def_imm_r {
macro_rules! cmp_imm_r {
($func_name: ident, $inst: expr, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, op1: $imm_ty, op2: &P<Value>) {
let inst = $inst.to_string() + &op_postfix($op_len);
@@ -1167,7 +1175,7 @@ macro_rules! binop_no_def_imm_r {
}
}
macro_rules! binop_no_def_mem_r {
macro_rules! cmp_mem_r {
($func_name: ident, $inst:expr, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
let inst = $inst.to_string() + &op_postfix($op_len);
@@ -1212,9 +1220,17 @@ macro_rules! binop_def_r_r {
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
{
if id1 == id2 {
hashmap!{
id1 => vec![loc1, loc2]
}
} else {
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
}
}
},
false
)
@@ -1398,6 +1414,89 @@ macro_rules! mov_mem_imm {
}
}
/// conditional move: cmov only conditionally writes its dest, so the macros
/// below record dest as a use rather than a define
macro_rules! binop_no_def_r_r {
($func_name: ident, $inst: expr, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{},
{
if id1 == id2 {
hashmap!{
id1 => vec![loc1, loc2]
}
} else {
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
}
}
},
false
)
}
}
}
macro_rules! binop_no_def_r_imm {
($func_name: ident, $inst: expr, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, dest: &P<Value>, src: $imm_ty) {
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
let asm = format!("{} ${},{}", inst, src, reg1);
self.add_asm_inst(
asm,
hashmap!{},
hashmap!{
id1 => vec![loc1]
},
false
)
}
}
}
macro_rules! binop_no_def_r_mem {
($func_name: ident, $inst: expr, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
let (mem, mut uses) = self.prepare_mem(src, inst.len() + 1);
let (reg, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + mem.len() + 1);
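// dest may already appear in the memory operand (e.g. as a base register);
// merge its use locations instead of overwriting the existing entry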
if uses.contains_key(&id1) {
let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1.clone());
} else {
uses.insert(id1, vec![loc1.clone()]);
}
let asm = format!("{} {},{}", inst, mem, reg);
self.add_asm_inst(
asm,
hashmap!{},
uses,
true
)
}
}
}
macro_rules! emit_lea_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
@@ -1613,20 +1712,20 @@ impl CodeGenerator for ASMCodeGen {
}
// cmp
binop_no_def_r_r!(emit_cmp_r64_r64, "cmp", 64);
binop_no_def_r_r!(emit_cmp_r32_r32, "cmp", 32);
binop_no_def_r_r!(emit_cmp_r16_r16, "cmp", 16);
binop_no_def_r_r!(emit_cmp_r8_r8 , "cmp", 8 );
cmp_r_r!(emit_cmp_r64_r64, "cmp", 64);
cmp_r_r!(emit_cmp_r32_r32, "cmp", 32);
cmp_r_r!(emit_cmp_r16_r16, "cmp", 16);
cmp_r_r!(emit_cmp_r8_r8 , "cmp", 8 );
binop_no_def_imm_r!(emit_cmp_imm32_r64, "cmp", 64, i32);
binop_no_def_imm_r!(emit_cmp_imm32_r32, "cmp", 32, i32);
binop_no_def_imm_r!(emit_cmp_imm16_r16, "cmp", 16, i16);
binop_no_def_imm_r!(emit_cmp_imm8_r8 , "cmp", 8 , i8 );
cmp_imm_r!(emit_cmp_imm32_r64, "cmp", 64, i32);
cmp_imm_r!(emit_cmp_imm32_r32, "cmp", 32, i32);
cmp_imm_r!(emit_cmp_imm16_r16, "cmp", 16, i16);
cmp_imm_r!(emit_cmp_imm8_r8 , "cmp", 8 , i8 );
binop_no_def_mem_r!(emit_cmp_mem64_r64, "cmp", 64);
binop_no_def_mem_r!(emit_cmp_mem32_r32, "cmp", 32);
binop_no_def_mem_r!(emit_cmp_mem16_r16, "cmp", 16);
binop_no_def_mem_r!(emit_cmp_mem8_r8 , "cmp", 8 );
cmp_mem_r!(emit_cmp_mem64_r64, "cmp", 64);
cmp_mem_r!(emit_cmp_mem32_r32, "cmp", 32);
cmp_mem_r!(emit_cmp_mem16_r16, "cmp", 16);
cmp_mem_r!(emit_cmp_mem8_r8 , "cmp", 8 );
// mov
@@ -1659,45 +1758,35 @@ impl CodeGenerator for ASMCodeGen {
// cmov
mov_r_r!(emit_cmova_r64_r64, "cmova", 64);
mov_r_imm!(emit_cmova_r64_imm32, "cmova", 64, i32);
mov_r_mem!(emit_cmova_r64_mem64, "cmova", 64);
binop_no_def_r_r! (emit_cmova_r64_r64, "cmova", 64);
binop_no_def_r_mem!(emit_cmova_r64_mem64, "cmova", 64);
mov_r_r!(emit_cmovae_r64_r64, "cmovae", 64);
mov_r_imm!(emit_cmovae_r64_imm32, "cmovae", 64, i32);
mov_r_mem!(emit_cmovae_r64_mem64, "cmovae", 64);
binop_no_def_r_r! (emit_cmovae_r64_r64, "cmovae", 64);
binop_no_def_r_mem!(emit_cmovae_r64_mem64,"cmovae", 64);
mov_r_r!(emit_cmovb_r64_r64, "cmovb", 64);
mov_r_imm!(emit_cmovb_r64_imm32, "cmovb", 64, i32);
mov_r_mem!(emit_cmovb_r64_mem64, "cmovb", 64);
binop_no_def_r_r! (emit_cmovb_r64_r64, "cmovb", 64);
binop_no_def_r_mem!(emit_cmovb_r64_mem64, "cmovb", 64);
mov_r_r!(emit_cmovbe_r64_r64, "cmovbe", 64);
mov_r_imm!(emit_cmovbe_r64_imm32, "cmovbe", 64, i32);
mov_r_mem!(emit_cmovbe_r64_mem64, "cmovbe", 64);
binop_no_def_r_r! (emit_cmovbe_r64_r64, "cmovbe", 64);
binop_no_def_r_mem!(emit_cmovbe_r64_mem64,"cmovbe", 64);
mov_r_r!(emit_cmove_r64_r64, "cmove", 64);
mov_r_imm!(emit_cmove_r64_imm32, "cmove", 64, i32);
mov_r_mem!(emit_cmove_r64_mem64, "cmove", 64);
binop_no_def_r_r! (emit_cmove_r64_r64, "cmove", 64);
binop_no_def_r_mem!(emit_cmove_r64_mem64, "cmove", 64);
mov_r_r!(emit_cmovne_r64_r64, "cmovne", 64);
mov_r_imm!(emit_cmovne_r64_imm32, "cmovne", 64, i32);
mov_r_mem!(emit_cmovne_r64_mem64, "cmovne", 64);
binop_no_def_r_r! (emit_cmovne_r64_r64, "cmovne", 64);
binop_no_def_r_mem!(emit_cmovne_r64_mem64,"cmovne", 64);
mov_r_r!(emit_cmovg_r64_r64, "cmovg", 64);
mov_r_imm!(emit_cmovg_r64_imm32, "cmovg", 64, i32);
mov_r_mem!(emit_cmovg_r64_mem64, "cmovg", 64);
binop_no_def_r_r! (emit_cmovg_r64_r64, "cmovg", 64);
binop_no_def_r_mem!(emit_cmovg_r64_mem64, "cmovg", 64);
mov_r_r!(emit_cmovge_r64_r64, "cmovge", 64);
mov_r_imm!(emit_cmovge_r64_imm32, "cmovge", 64, i32);
mov_r_mem!(emit_cmovge_r64_mem64, "cmovge", 64);
binop_no_def_r_r! (emit_cmovge_r64_r64, "cmovge", 64);
binop_no_def_r_mem!(emit_cmovge_r64_mem64,"cmovge", 64);
mov_r_r!(emit_cmovl_r64_r64, "cmovl", 64);
mov_r_imm!(emit_cmovl_r64_imm32, "cmovl", 64, i32);
mov_r_mem!(emit_cmovl_r64_mem64, "cmovl", 64);
binop_no_def_r_r! (emit_cmovl_r64_r64, "cmovl", 64);
binop_no_def_r_mem!(emit_cmovl_r64_mem64, "cmovl", 64);
mov_r_r!(emit_cmovle_r64_r64, "cmovle", 64);
mov_r_imm!(emit_cmovle_r64_imm32, "cmovle", 64, i32);
mov_r_mem!(emit_cmovle_r64_mem64, "cmovle", 64);
binop_no_def_r_r! (emit_cmovle_r64_r64, "cmovle", 64);
binop_no_def_r_mem!(emit_cmovle_r64_mem64,"cmovle", 64);
// lea
mov_r_mem!(emit_lea_r64, "lea", 64);
@@ -1718,6 +1807,11 @@ impl CodeGenerator for ASMCodeGen {
binop_def_r_mem!(emit_and_r16_mem16, "and", 16);
binop_def_r_mem!(emit_and_r8_mem8 , "and", 8 );
// or
binop_def_r_r! (emit_or_r64_r64, "or", 64);
binop_def_r_imm!(emit_or_r64_imm32, "or", 64, i32);
binop_def_r_mem!(emit_or_r64_mem64, "or", 64);
// xor
binop_def_r_r!(emit_xor_r64_r64, "xor", 64);
binop_def_r_r!(emit_xor_r32_r32, "xor", 32);
......
@@ -76,43 +76,33 @@ pub trait CodeGenerator {
// gpr conditional move
fn emit_cmova_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmova_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmova_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovae_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovae_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovae_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovb_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovb_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovb_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovbe_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovbe_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovbe_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmove_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmove_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmove_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovg_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovg_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovg_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovge_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovge_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovge_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovl_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovl_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovl_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovle_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovle_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovle_r64_mem64(&mut self, dest: Reg, src: Mem); // load
fn emit_cmovne_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_cmovne_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_cmovne_r64_mem64(&mut self, dest: Reg, src: Mem); // load
// lea
@@ -168,6 +158,11 @@ pub trait CodeGenerator {
fn emit_add_r8_r8 (&mut self, dest: Reg, src: Reg);
fn emit_add_r8_mem8(&mut self, dest: Reg, src: Mem);
fn emit_add_r8_imm8(&mut self, dest: Reg, src: i8);
// or
fn emit_or_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_or_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_or_r64_mem64(&mut self, dest: Reg, src: Mem);
// sub
fn emit_sub_r64_r64 (&mut self, dest: Reg, src: Reg);
......
@@ -258,6 +258,42 @@ impl <'a> InstructionSelection {
}
},
Instruction_::CmpOp(op, op1, op2) => {
let ops = inst.ops.read().unwrap();
let ref op1 = ops[op1];
let ref op2 = ops[op2];
if self.match_ireg(op1) {
debug_assert!(self.match_ireg(op2));
let tmp_res = self.get_result_value(node);
// set result to 0
self.backend.emit_xor_r64_r64(&tmp_res, &tmp_res);
// set tmp_1 to 1 (cmov does not allow an immediate operand)
let tmp_1 = self.make_temporary(f_context, UINT1_TYPE.clone(), vm);
self.backend.emit_mov_r64_imm32(&tmp_1, 1);
// cmov 1 to result
match self.emit_cmp_res(node, f_content, f_context, vm) {
op::CmpOp::EQ => self.backend.emit_cmove_r64_r64 (&tmp_res, &tmp_1),
op::CmpOp::NE => self.backend.emit_cmovne_r64_r64(&tmp_res, &tmp_1),
op::CmpOp::SGE => self.backend.emit_cmovge_r64_r64(&tmp_res, &tmp_1),
op::CmpOp::SGT => self.backend.emit_cmovg_r64_r64 (&tmp_res, &tmp_1),
op::CmpOp::SLE => self.backend.emit_cmovle_r64_r64(&tmp_res, &tmp_1),
op::CmpOp::SLT => self.backend.emit_cmovl_r64_r64 (&tmp_res, &tmp_1),
op::CmpOp::UGE => self.backend.emit_cmovae_r64_r64(&tmp_res, &tmp_1),
op::CmpOp::UGT => self.backend.emit_cmova_r64_r64 (&tmp_res, &tmp_1),
op::CmpOp::ULE => self.backend.emit_cmovbe_r64_r64(&tmp_res, &tmp_1),
op::CmpOp::ULT => self.backend.emit_cmovb_r64_r64 (&tmp_res, &tmp_1),
_ => panic!("expecting integer comparison op with int values")
}
} else {
unimplemented!()
}
}
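A sketch of what this arm produces for, e.g., SGT (the cmp is emitted by
emit_cmp_res, which runs when the match scrutinee is evaluated, so it lands
between the mov and the cmov), modeled in plain Rust:

// emitted sequence (AT&T order, sketch):
//   xorq   %res, %res      ; res = 0
//   movq   $1, %tmp1       ; tmp1 = 1
//   cmpq   op2, op1        ; flags from op1 - op2 (via emit_cmp_res)
//   cmovgq %tmp1, %res     ; res = tmp1 if op1 > op2 (signed)
fn sgt_as_value(op1: i64, op2: i64) -> u64 {
    let mut res = 0u64;
    let tmp1 = 1u64;
    if op1 > op2 { res = tmp1; }
    res
}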
Instruction_::Branch1(ref dest) => {
let ops = inst.ops.read().unwrap();
@@ -470,6 +506,44 @@ impl <'a> InstructionSelection {
unimplemented!()
}
},
op::BinOp::Or => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) && self.match_iimm(op2) {
trace!("emit or-ireg-iimm");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let imm_op2 = self.node_iimm_to_i32(op2);
// mov op1 -> res
self.backend.emit_mov_r64_r64(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r64_imm32(&res_tmp, imm_op2);
} else if self.match_ireg(op1) && self.match_mem(op2) {
trace!("emit or-ireg-mem");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let mem_op2 = self.emit_mem(op2, vm);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r64_mem64(&res_tmp, &mem_op2);
} else if self.match_ireg(op1) && self.match_ireg(op2) {
trace!("emit or-ireg-ireg");
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op1, res
self.backend.emit_mov_r64_r64(&res_tmp, &tmp_op1);
// Or op2, res -> res
self.backend.emit_or_r64_r64(&res_tmp, &tmp_op2);
} else {
unimplemented!()
}
},
op::BinOp::Xor => {
let op1 = &ops[op1];
let op2 = &ops[op2];
@@ -773,29 +847,38 @@ impl <'a> InstructionSelection {
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
let tmp_res = self.get_result_value(node);
if from_ty_len < 32 {
let mask = match to_ty_len {
8 => 0xFFi32,
16 => 0xFFFFi32,
_ => unimplemented!()
};
// mov op -> result
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_op);
// and mask result -> result
self.backend.emit_and_r64_imm32(&tmp_res, mask);
} else if from_ty_len == 32 {
let tmp_mask = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r64_imm64(&tmp_mask, 0xFFFFFFFF as i64);
// mov op -> result
if from_ty_len == to_ty_len {
// same width: nothing to mask
// just a simple move (and hopefully it will get removed later)
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_op);
// and mask result -> result
self.backend.emit_and_r64_r64(&tmp_res, &tmp_mask);
return;
} else {
unimplemented!()
// fake a zero extend by masking out the higher bits
if from_ty_len < 32 {
let mask = match from_ty_len {
1 => 0x1i32,
8 => 0xFFi32,
16 => 0xFFFFi32,
_ => unimplemented!()
};
// mov op -> result
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_op);
// and mask result -> result
self.backend.emit_and_r64_imm32(&tmp_res, mask);
} else if from_ty_len == 32 {
let tmp_mask = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r64_imm64(&tmp_mask, 0xFFFFFFFF as i64);
// mov op -> result
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_op);
// and mask result -> result
self.backend.emit_and_r64_r64(&tmp_res, &tmp_mask);
} else {
unimplemented!()
}
}
} else {
panic!("unexpected op (expect ireg): {}", op);
@@ -804,11 +887,6 @@ impl <'a> InstructionSelection {
op::ConvOp::SEXT => {
// currently we only use 64-bit registers
// we shift the value left, then arithmetic-shift it right back
let from_ty_len = extract_int_len(from_ty);
let to_ty_len = extract_int_len(to_ty);
let shift : i8 = (to_ty_len - from_ty_len) as i8;
if self.match_ireg(op) {
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
let tmp_res = self.get_result_value(node);
@@ -816,11 +894,7 @@ impl <'a> InstructionSelection {
// mov op -> result
self.backend.emit_mov_r64_r64(&tmp_res, &tmp_op);
// shl result, shift -> result
self.backend.emit_shl_r64_imm8(&tmp_res, shift);
// sar result, shift -> result
self.backend.emit_sar_r64_imm8(&tmp_res, shift);
self.emit_sign_extend_operand(from_ty, to_ty, &tmp_res);
} else {
panic!("unexpected op (expect ireg): {}", op)
}
@@ -1282,6 +1356,7 @@ impl <'a> InstructionSelection {
if to_ty_len < 32 {
// ignoring from_ty for now (we use 64-bit registers for everything)
let mask = match to_ty_len {
1 => 0x1i32,
8 => 0xFFi32,
16 => 0xFFFFi32,
_ => unimplemented!()
@@ -1995,8 +2070,31 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
if op::is_int_cmp(op) {
if self.match_ireg(op1) && self.match_iimm(op2) {
if op::is_int_cmp(op) {
if self.match_iimm(op1) && self.match_iimm(op2) {
let tmp_op1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
let ref ty_op1 = op1.clone_value().ty;
let iimm_op1 = self.node_iimm_to_i32(op1);
self.backend.emit_mov_r64_imm32(&tmp_op1, iimm_op1);
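// only signed compares need op1 sign-extended to 64 bits; for unsigned
// compares the zero-extended bit pattern already produces the right flags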
match op {
op::CmpOp::SGE
| op::CmpOp::SGT
| op::CmpOp::SLE
| op::CmpOp::SLT => {
self.emit_sign_extend_operand(ty_op1, &tmp_op1.ty, &tmp_op1);
},
_ => {}
}
let iimm_op2 = self.node_iimm_to_i32(op2);
self.backend.emit_cmp_imm32_r64(iimm_op2, &tmp_op1);
return op;
} else if self.match_ireg(op1) && self.match_iimm(op2) {
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let iimm_op2 = self.node_iimm_to_i32(op2);
......
@@ -182,6 +182,7 @@ impl RegGroup {
pub fn get(ty: &P<MuType>) -> RegGroup {
match ty.v {
// for now, only use 64-bit registers
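// 1-bit ints (e.g. CmpOp results) live in a GPR as well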
MuType_::Int(len) if len == 1 => RegGroup::GPR,
MuType_::Int(len) if len == 8 => RegGroup::GPR,
MuType_::Int(len) if len == 16 => RegGroup::GPR,
MuType_::Int(len) if len == 32 => RegGroup::GPR,
......
@@ -175,6 +175,21 @@ macro_rules! inst {
});
};
// CONVOP
(($vm: expr, $fv: ident) $name: ident: $value: ident = CONVOP ($operation: expr) <$ty1: ident $ty2: ident> $operand: ident) => {
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: Some(vec![$value.clone_value()]),
ops: RwLock::new(vec![$operand.clone()]),
v: Instruction_::ConvOp{
operation: $operation,
from_ty: $ty1.clone(),
to_ty: $ty2.clone(),
operand: 0
}
});
};
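A hypothetical invocation of this new CONVOP arm (identifiers invented for
illustration, following the style of the test below):

inst! ((vm, fv) blk_entry_inst_zext:
    res = CONVOP (ConvOp::ZEXT) <int8 int64> op
);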
// SELECT
(($vm: expr, $fv: ident) $name: ident: $value: ident = SELECT $cond: ident $op_true: ident $op_false:ident) => {
let $name = $fv.new_inst(Instruction{
......
@@ -322,5 +322,129 @@ fn select_sge_zero() -> VM {
define_func_ver!((vm) select_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test]
fn test_sgt_value() {
let lib = testutil::compile_fnc("sgt_value", &sgt_value);
unsafe {
let sgt_value : libloading::Symbol<unsafe extern fn(i64, i64) -> u64> = lib.get(b"sgt_value").unwrap();
let res = sgt_value(255, 0);
println!("sgt_value(255, 0) = {}", res);
assert!(res == 1);
let res = sgt_value(255, 255);
println!("sgt_value(255, 255) = {}", res);
assert!(res == 0);
let res = sgt_value(0, 255);
println!("sgt_value(0, 255) = {}", res);
assert!(res == 0);
}
}
fn sgt_value() -> VM {
let vm = VM::new();
typedef! ((vm) int64 = mu_int(64));
typedef! ((vm) int1 = mu_int(1));
constdef!((vm) <int64> int64_0 = Constant::Int(0));
constdef!((vm) <int64> int64_1 = Constant::Int(1));
funcsig! ((vm) sig = (int64, int64) -> (int1));
funcdecl!((vm) <sig> sgt_value);
funcdef! ((vm) <sig> sgt_value VERSION sgt_value_v1);
// blk entry
block! ((vm, sgt_value_v1) blk_entry);
ssa! ((vm, sgt_value_v1) <int64> blk_entry_op1);
ssa! ((vm, sgt_value_v1) <int64> blk_entry_op2);
ssa! ((vm, sgt_value_v1) <int1> blk_entry_cond);
inst! ((vm, sgt_value_v1) blk_entry_inst_cmp:
blk_entry_cond = CMPOP (CmpOp::SGT) blk_entry_op1 blk_entry_op2
);
inst! ((vm, sgt_value_v1) blk_entry_inst_ret:
RET (blk_entry_cond)
);
define_block! ((vm, sgt_value_v1) blk_entry(blk_entry_op1, blk_entry_op2){
blk_entry_inst_cmp, blk_entry_inst_ret
});
define_func_ver!((vm) sgt_value_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test]
fn test_sgt_u8_value() {