Commit 4451f7fa authored by qinsoon

BITCAST for x86_64

parent 2b0f49ce
...
@@ -138,6 +138,14 @@ impl MuType {
         }
     }
 
+    /// is this type an integer type?
+    pub fn is_int(&self) -> bool {
+        match self.v {
+            MuType_::Int(_) => true,
+            _ => false
+        }
+    }
+
     /// is this type a floating point type? (float/double)
     pub fn is_fp(&self) -> bool {
         match self.v {
...
...
@@ -1896,6 +1896,48 @@ impl ASMCodeGen {
         )
     }
 
+    /// emits a move instruction (reg64/32 -> fpr)
+    fn internal_mov_bitcast_fpr_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: {} {} -> {}", inst, src, dest);
+
+        let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
+        let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
+
+        let asm = format!("{} {},{}", inst, reg1, reg2);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id2 => vec![loc2]
+            },
+            linked_hashmap!{
+                id1 => vec![loc1]
+            },
+            false
+        )
+    }
+
+    /// emits a move instruction (fpr -> reg64/32)
+    fn internal_mov_bitcast_r_fpr(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: {} {} -> {}", inst, src, dest);
+
+        let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
+        let (reg2, id2, loc2) = self.prepare_reg(dest, inst.len() + 1 + reg1.len() + 1);
+
+        let asm = format!("{} {},{}", inst, reg1, reg2);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id2 => vec![loc2]
+            },
+            linked_hashmap!{
+                id1 => vec![loc1]
+            },
+            false
+        )
+    }
+
     /// emits a move instruction (reg -> reg)
     fn internal_mov_r_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
         let len = check_op_len(dest);
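
For orientation, the two new helpers above assemble the instruction text with `format!("{} {},{}", inst, reg1, reg2)`, i.e. AT&T-style `mnemonic src,dest`. A minimal sketch of the resulting string, with hypothetical operand names standing in for whatever `prepare_reg`/`prepare_fpreg` actually return:

fn main() {
    // Mirrors internal_mov_bitcast_fpr_r's format string for a 64-bit GPR -> XMM move.
    let inst = "movq";
    let (reg1, reg2) = ("%rax", "%xmm0"); // hypothetical operand strings
    assert_eq!(format!("{} {},{}", inst, reg1, reg2), "movq %rax,%xmm0");
}
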
...
@@ -2469,23 +2511,19 @@ impl CodeGenerator for ASMCodeGen {
     }
 
     fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg) {
-        trace!("emit: movq {} -> {}", src, dest);
-
-        let (reg1, id1, loc1) = self.prepare_reg(src, 5);
-        let (reg2, id2, loc2) = self.prepare_fpreg(dest, 5 + reg1.len() + 1);
-
-        let asm = format!("movq {},{}", reg1, reg2);
-
-        self.add_asm_inst(
-            asm,
-            linked_hashmap!{
-                id2 => vec![loc2]
-            },
-            linked_hashmap!{
-                id1 => vec![loc1]
-            },
-            false
-        )
+        self.internal_mov_bitcast_fpr_r("movq", dest, src)
     }
 
+    fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg) {
+        self.internal_mov_bitcast_fpr_r("movd", dest, src)
+    }
+
+    fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg) {
+        self.internal_mov_bitcast_r_fpr("movq", dest, src)
+    }
+
+    fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg) {
+        self.internal_mov_bitcast_r_fpr("movd", dest, src)
+    }
+
     fn emit_mov_r_imm(&mut self, dest: &P<Value>, src: i32) {
...
...
@@ -70,8 +70,11 @@ pub trait CodeGenerator {
     // mov imm64 to r64
     fn emit_mov_r64_imm64(&mut self, dest: Reg, src: i64);
 
-    // mov r64 to fpr
+    // bitcast between int and floating point of the same length
     fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg);
+    fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg);
+    fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg);
+    fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg);
 
     fn emit_mov_r_imm(&mut self, dest: Reg, src: i32);
     fn emit_mov_r_mem(&mut self, dest: Reg, src: Mem); // load
...
...
@@ -1358,14 +1358,14 @@ impl<'a> InstructionSelection {
                     op
                 );
                 let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
-                if tmp_op.ty.is_double() && tmp_res.ty.is_float() {
+                if from_ty.is_double() && to_ty.is_float() {
                     self.backend.emit_cvtsd2ss_f32_f64(&tmp_res, &tmp_op);
                 } else {
                     panic!(
                         "FPTRUNC from {} to {} is not supported \
                          (only support FPTRUNC from double to float)",
-                        tmp_op.ty,
-                        tmp_res.ty
+                        from_ty,
+                        to_ty
                     );
                 }
             }
...
@@ -1377,18 +1377,66 @@ impl<'a> InstructionSelection {
                     op
                 );
                 let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
-                if tmp_op.ty.is_float() && tmp_res.ty.is_double() {
+                if from_ty.is_float() && to_ty.is_double() {
                     self.backend.emit_cvtss2sd_f64_f32(&tmp_res, &tmp_op);
                 } else {
                     panic!(
                         "FPEXT from {} to {} is not supported \
                          (only support FPEXT from float to double)",
-                        tmp_op.ty,
-                        tmp_res.ty
+                        from_ty,
+                        to_ty
                     );
                 }
             }
-            _ => unimplemented!()
+            op::ConvOp::BITCAST => {
+                let tmp_res = self.get_result_value(node);
+                let tmp_op = if self.match_fpreg(op) {
+                    self.emit_fpreg(op, f_content, f_context, vm)
+                } else if self.match_ireg(op) {
+                    self.emit_ireg(op, f_content, f_context, vm)
+                } else {
+                    panic!("expected op for BITCAST (expected ireg/fpreg): {}", op)
+                };
+
+                let ref from_ty = tmp_op.ty;
+                let ref to_ty = tmp_res.ty;
+
+                let from_ty_size = vm.get_backend_type_size(from_ty.id());
+                let to_ty_size = vm.get_backend_type_size(to_ty.id());
+
+                assert!(
+                    from_ty_size == to_ty_size,
+                    "BITCAST only works between int/fp of same length"
+                );
+                assert!(
+                    from_ty_size == 8 || from_ty_size == 4,
+                    "BITCAST only works for int32/float or int64/double"
+                );
+
+                if from_ty.is_fp() && to_ty.is_int() {
+                    if from_ty_size == 8 {
+                        self.backend.emit_mov_r64_fpr(&tmp_res, &tmp_op);
+                    } else if from_ty_size == 4 {
+                        self.backend.emit_mov_r32_fpr(&tmp_res, &tmp_op);
+                    } else {
+                        unreachable!()
+                    }
+                } else if from_ty.is_int() && to_ty.is_fp() {
+                    if from_ty_size == 8 {
+                        self.backend.emit_mov_fpr_r64(&tmp_res, &tmp_op);
+                    } else if from_ty_size == 4 {
+                        self.backend.emit_mov_fpr_r32(&tmp_res, &tmp_op);
+                    } else {
+                        unreachable!()
+                    }
+                } else {
+                    panic!(
+                        "expected BITCAST between int and fp, \
+                         found {} and {}",
+                        from_ty,
+                        to_ty
+                    )
+                }
+            }
         }
     }
...
...
@@ -114,3 +114,57 @@ fn truncate_then_call() -> VM {
 
     vm
 }
 
#[test]
fn test_bitcast_f32_to_u32() {
    let lib = linkutils::aot::compile_fnc("bitcast_f32_to_u32", &bitcast_f32_to_u32);

    unsafe {
        use std::f32;

        let bitcast_f32_to_u32: libloading::Symbol<unsafe extern "C" fn(f32) -> u32> =
            lib.get(b"bitcast_f32_to_u32").unwrap();

        let res = bitcast_f32_to_u32(f32::MAX);
        println!("bitcast_f32_to_u32(f32::MAX) = {}", res);
        assert!(res == 2139095039u32);

        let res = bitcast_f32_to_u32(3.1415926f32);
        println!("bitcast_f32_to_u32(PI) = {}", res);
        assert!(res == 1078530010u32);
    }
}

fn bitcast_f32_to_u32() -> VM {
    let vm = VM::new();

    typedef! ((vm) float = mu_float);
    typedef! ((vm) u32 = mu_int(32));

    funcsig! ((vm) sig = (float) -> (u32));
    funcdecl! ((vm) <sig> bitcast_f32_to_u32);
    funcdef! ((vm) <sig> bitcast_f32_to_u32 VERSION bitcast_f32_to_u32_v1);

    // blk entry
    block! ((vm, bitcast_f32_to_u32_v1) blk_entry);
    ssa! ((vm, bitcast_f32_to_u32_v1) <float> f);
    ssa! ((vm, bitcast_f32_to_u32_v1) <u32> i);

    inst! ((vm, bitcast_f32_to_u32_v1) blk_entry_bitcast:
        i = CONVOP (ConvOp::BITCAST) <float u32> f
    );

    inst! ((vm, bitcast_f32_to_u32_v1) blk_entry_ret:
        RET (i)
    );

    define_block!((vm, bitcast_f32_to_u32_v1) blk_entry(f) {
        blk_entry_bitcast, blk_entry_ret
    });

    define_func_ver!((vm) bitcast_f32_to_u32_v1 (entry: blk_entry) {
        blk_entry
    });

    vm
}
\ No newline at end of file
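
As a quick cross-check of the constants asserted in the test above: they are simply the IEEE-754 bit patterns of the two inputs, which plain Rust can confirm with `f32::to_bits`. This standalone snippet is an illustration only, not part of the commit:

fn main() {
    // f32::MAX is 0x7F7FFFFF; the f32 nearest to 3.1415926 is 0x40490FDA.
    assert_eq!(f32::MAX.to_bits(), 2139095039u32);
    assert_eq!(3.1415926f32.to_bits(), 1078530010u32);
    println!("bit patterns match the values expected by test_bitcast_f32_to_u32");
}
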