Commit 6d9f2504 authored by qinsoon

Immediates will be cast to the proper operand size.

parent 0ed3ad3e
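The change hinges on Rust's narrowing-then-widening casts: op as i16 as i32 keeps only the low 16 bits of the immediate and sign-extends them back to i32 (likewise for 8 bits). A minimal, standalone sketch of that behaviour follows; narrow_imm is an illustrative stand-in for the prepare_imm helper added below, not code from this repository:

    // Sketch of the cast chain prepare_imm relies on: truncate an i32
    // immediate to the operand width, then sign-extend it back to i32.
    fn narrow_imm(op: i32, len: usize) -> i32 {
        match len {
            64 | 32 => op,
            16 => op as i16 as i32, // keep low 16 bits, sign-extend
            8  => op as i8  as i32, // keep low 8 bits, sign-extend
            _  => unimplemented!(),
        }
    }

    fn main() {
        assert_eq!(narrow_imm(0x1_0000, 16), 0); // high bits dropped
        assert_eq!(narrow_imm(0xFF, 8), -1);     // 0xFF reads as -1 at 8 bits
        assert_eq!(narrow_imm(0x7F, 8), 0x7F);   // fits, unchanged
    }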
@@ -1004,6 +1004,16 @@ impl ASMCodeGen {
         (result_str, uses)
     }
 
+    fn prepare_imm(&self, op: i32, len: usize) -> i32 {
+        match len {
+            64 => op,
+            32 => op,
+            16 => op as i16 as i32,
+            8 => op as i8 as i32,
+            _ => unimplemented!()
+        }
+    }
+
     fn asm_reg_op(&self, op: &P<Value>) -> String {
         let id = op.extract_ssa_id().unwrap();
@@ -1147,9 +1157,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} {}", inst, op1, op2);
 
-        let (reg2, id2, loc2) = self.prepare_reg(op2, inst.len() + 1 + 1 + op1.to_string().len() + 1);
+        let imm = self.prepare_imm(op1, len);
+        let (reg2, id2, loc2) = self.prepare_reg(op2, inst.len() + 1 + 1 + imm.to_string().len() + 1);
 
-        let asm = format!("{} ${},{}", inst, op1, reg2);
+        let asm = format!("{} ${},{}", inst, imm, reg2);
 
         self.add_asm_inst(
             asm,
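This hunk and the next three repeat one pattern: the second argument to prepare_reg (or prepare_mem) is the column at which the register or memory operand starts in the emitted line, and once the immediate is re-cast its printed width can change, so the offset is recomputed from imm instead of the raw operand. A rough sketch of that arithmetic, assuming the emitted line has the shape "inst $imm,reg"; the helper name is hypothetical:

    // Column where the register operand starts in "<inst> $<imm>,<reg>":
    // instruction, a space, '$', the printed immediate, then ','.
    fn reg_column(inst: &str, imm: i32) -> usize {
        inst.len() + 1 + 1 + imm.to_string().len() + 1
    }

    fn main() {
        // "addl $-1,%eax": the register operand begins at column 9
        assert_eq!(reg_column("addl", -1), 9);
        // a wider immediate pushes the register operand further right
        assert_eq!(reg_column("addl", 1000), 11);
    }

The two + 1 terms in the middle appear to account for the space after the mnemonic and the '$' prefix, and the final + 1 for the ',' separator, matching the "{} ${},{}" format string used above.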
@@ -1277,9 +1288,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
 
-        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
 
-        let asm = format!("{} ${},{}", inst, src, reg1);
+        let asm = format!("{} ${},{}", inst, imm, reg1);
 
         self.add_asm_inst(
             asm,
@@ -1371,9 +1383,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} -> {}", inst, src, dest);
 
-        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
 
-        let asm = format!("{} ${},{}", inst, src, reg1);
+        let asm = format!("{} ${},{}", inst, imm, reg1);
 
         self.add_asm_inst(
             asm,
@@ -1440,9 +1453,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} -> {}", inst, src, dest);
 
-        let (mem, uses) = self.prepare_mem(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (mem, uses) = self.prepare_mem(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
 
-        let asm = format!("{} ${},{}", inst, src, mem);
+        let asm = format!("{} ${},{}", inst, imm, mem);
 
         self.add_asm_inst(
             asm,
@@ -884,9 +884,6 @@ impl <'a> InstructionSelection {
         match operation {
             op::ConvOp::TRUNC => {
-                // currently only use 64bits register
-                // so only keep what is needed in the register (set others to 0)
-
                 if self.match_ireg(op) {
                     let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
                     let tmp_res = self.get_result_value(node);
@@ -898,35 +895,39 @@ impl <'a> InstructionSelection {
                 }
             }
             op::ConvOp::ZEXT => {
-                let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
-                let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
+                if self.match_ireg(op) {
+                    let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
+                    let tmp_res = self.get_result_value(node);
 
-                if from_ty_size != to_ty_size {
-                    if self.match_ireg(op) {
-                        let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
-                        let tmp_res = self.get_result_value(node);
-                        // movz op -> result
+                    let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
+                    let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
+                    // movz op -> result
+                    if from_ty_size != to_ty_size {
                         self.backend.emit_movz_r_r(&tmp_res, &tmp_op);
                     } else {
-                        panic!("unexpected op (expect ireg): {}", op);
+                        self.backend.emit_mov_r_r(&tmp_res, &tmp_op);
                     }
+                } else {
+                    panic!("unexpected op (expect ireg): {}", op);
                 }
             },
             op::ConvOp::SEXT => {
-                let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
-                let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
+                if self.match_ireg(op) {
+                    let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
+                    let tmp_res = self.get_result_value(node);
 
-                if from_ty_size != to_ty_size {
-                    if self.match_ireg(op) {
-                        let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
-                        let tmp_res = self.get_result_value(node);
-                        // movs op -> result
+                    let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
+                    let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
+                    // movs op -> result
+                    if from_ty_size != to_ty_size {
                         self.backend.emit_movs_r_r(&tmp_res, &tmp_op);
                     } else {
-                        panic!("unexpected op (expect ireg): {}", op)
+                        self.backend.emit_mov_r_r(&tmp_res, &tmp_op);
                     }
+                } else {
+                    panic!("unexpected op (expect ireg): {}", op)
                }
            }
            op::ConvOp::REFCAST | op::ConvOp::PTRCAST => {
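The reworked ZEXT and SEXT arms now fall back to a plain mov when the source and destination widths already match, and emit movz/movs only when they differ; the distinction matters because zero-extension and sign-extension disagree on negative values. A small Rust analogue, purely illustrative:

    // Illustrative analogue of movz (zero-extend) vs movs (sign-extend)
    // for an 8-bit value widened to 64 bits.
    fn zext8_to_64(v: u8) -> u64 {
        v as u64 // unsigned source: upper bits filled with zeros
    }

    fn sext8_to_64(v: u8) -> i64 {
        v as i8 as i64 // reinterpret as signed first, then widen
    }

    fn main() {
        let v: u8 = 0xFF;
        assert_eq!(zext8_to_64(v), 0xFF); // 255
        assert_eq!(sext8_to_64(v), -1);   // sign bit propagated
        // equal widths: nothing to extend, a plain move keeps the bits
        let moved: u8 = v;
        assert_eq!(moved, v);
    }

In the hunk above, emit_mov_r_r plays the role of the plain move, while emit_movz_r_r and emit_movs_r_r play the roles of the extending moves.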
@@ -2102,7 +2103,15 @@ impl <'a> InstructionSelection {
         if op::is_int_cmp(op) {
             if self.match_iimm(op1) && self.match_iimm(op2) {
-                let tmp_op1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
+                let ty : &P<MuType> = match op1.clone_value().ty.get_int_length() {
+                    Some(64) => &UINT64_TYPE,
+                    Some(32) => &UINT32_TYPE,
+                    Some(16) => &UINT16_TYPE,
+                    Some(8) => &UINT8_TYPE,
+                    _ => unimplemented!()
+                };
+                let tmp_op1 = self.make_temporary(f_context, ty.clone(), vm);
+
                 let ref ty_op1 = op1.clone_value().ty;
                 let iimm_op1 = self.node_iimm_to_i32(op1);
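Choosing the temporary's type from the operand's integer length, rather than always using UINT64_TYPE, matters because the same bit pattern can compare differently depending on the width at which it is interpreted. A self-contained illustration, not code from this repository:

    // The same source bits give different answers if they are widened
    // with the wrong assumption about their width.
    fn main() {
        let bits: u8 = 0xFF;

        // Interpreted at 8 bits as a signed value, 0xFF is -1, so it is < 0.
        let as_signed_8 = bits as i8 as i64;
        assert!(as_signed_8 < 0);

        // Widened as if it were already a wide unsigned value, it is 255.
        let as_wide = bits as i64;
        assert!(as_wide > 0);

        assert_ne!(as_signed_8, as_wide);
    }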