
Commit 364ece82 authored by qinsoon

[wip] implementing most operations for float type

parent db99809f
......@@ -37,6 +37,10 @@ lazy_static! {
MuType::new(new_internal_id(), MuType_::double())
);
pub static ref FLOAT_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::float())
);
pub static ref VOID_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::void())
);
......@@ -49,6 +53,7 @@ lazy_static! {
UINT32_TYPE.clone(),
UINT64_TYPE.clone(),
DOUBLE_TYPE.clone(),
FLOAT_TYPE.clone(),
VOID_TYPE.clone()
];
}
......@@ -179,6 +184,20 @@ impl MuType {
_ => None
}
}
pub fn is_float(&self) -> bool {
match self.v {
MuType_::Float => true,
_ => false
}
}
pub fn is_double(&self) -> bool {
match self.v {
MuType_::Double => true,
_ => false
}
}
}
pub type StructTag = MuName;
......
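Editorial note: a minimal, hedged sketch of how the new FLOAT_TYPE static and the is_float()/is_double() predicates fit together, assuming the crate's P<T> pointer dereferences to MuType as the existing statics imply (not part of the commit):

// sketch only; relies on the statics and predicates added above
let ty = FLOAT_TYPE.clone();
assert!(ty.is_float());
assert!(!ty.is_double());
assert!(DOUBLE_TYPE.is_double());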
......@@ -1710,6 +1710,26 @@ impl ASMCodeGen {
)
}
fn internal_fp_mov_f_f(&mut self, inst: &str, dest: Reg, src: Reg) {
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
fn internal_fp_mov_f_mem(&mut self, inst: &str, dest: Reg, src: Mem,
is_spill_related: bool
) {
......@@ -1837,6 +1857,52 @@ impl ASMCodeGen {
unimplemented!()
}
fn internal_gpr_to_fpr(&mut self, inst: &str, dest: Reg, src: Reg) {
let len = check_op_len(src);
let inst = inst.to_string() + &op_postfix(len);
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
fn internal_fpr_to_gpr(&mut self, inst: &str, dest: Reg, src: Reg) {
let len = check_op_len(dest);
let inst = inst.to_string() + &op_postfix(len);
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg (dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
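Editorial note: the second argument to prepare_reg/prepare_fpreg in the helpers above is the column at which the operand will start in the emitted asm string, i.e. the mnemonic length plus one for the separating space, then the first operand and the comma for the second operand. A small self-contained sketch of that arithmetic, with hypothetical register names (not the crate's types):

fn main() {
    // mnemonic with operand-size postfix, as built by internal_gpr_to_fpr
    let inst = "cvtsi2sdq";
    let (reg1, reg2) = ("%rax", "%xmm0");
    let asm = format!("{} {},{}", inst, reg1, reg2);
    // first operand begins right after "<inst> "
    assert_eq!(&asm[inst.len() + 1..inst.len() + 1 + reg1.len()], reg1);
    // second operand begins after the first operand and the comma
    let loc2 = inst.len() + 1 + reg1.len() + 1;
    assert_eq!(&asm[loc2..], reg2);
}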
fn emit_spill_store_gpr(&mut self, dest: Mem, src: Reg) {
self.internal_mov_mem_r("mov", dest, src, true)
}
......@@ -2925,36 +2991,36 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_movsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: movsd {} -> {}", src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, 5 + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, 5 + 1 + reg1.len() + 1);
// mov - double
let asm = format!("movsd {},{}", reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
fn emit_movsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_f_f("movsd", dest, src)
}
// load
fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_f_mem("movsd", dest, src, false)
}
// store
fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_mem_f("movsd", dest, src, false)
}
// mov - float
fn emit_movss_f32_f32 (&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_f_f("movss", dest, src)
}
// load
fn emit_movss_f32_mem32(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_f_mem("movss", dest, src, false)
}
// store
fn emit_movss_mem32_f32(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_mov_mem_f("movss", dest, src, false)
}
// compare - double
fn emit_comisd_f64_f64 (&mut self, op1: Reg, op2: Reg) {
self.internal_fp_binop_no_def_r_r("comisd", op1, op2);
}
......@@ -2962,14 +3028,35 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_no_def_r_r("ucomisd", op1, op2);
}
// compare - float
fn emit_comiss_f32_f32 (&mut self, op1: Reg, op2: Reg) {
self.internal_fp_binop_no_def_r_r("comiss", op1, op2);
}
fn emit_ucomiss_f32_f32 (&mut self, op1: Reg, op2: Reg) {
self.internal_fp_binop_no_def_r_r("ucomiss", op1, op2);
}
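Editorial note on the ordered vs. unordered compares added here: comiss/comisd raise an invalid-operation exception for any NaN operand, while ucomiss/ucomisd do so only for signalling NaNs; both report "unordered" in EFLAGS when a NaN is involved. Rust's partial_cmp gives a rough flavour of the unordered case (illustrative only, not the backend's code):

let nan = f32::NAN;
assert_eq!(nan.partial_cmp(&1.0f32), None); // unordered, as ucomiss would report
assert_eq!(1.0f32.partial_cmp(&2.0f32), Some(std::cmp::Ordering::Less)); // ordered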
// add - double
fn emit_addsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_binop_def_r_r("addsd", dest, src);
}
fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_binop_def_r_mem("addsd", dest, src);
}
// add - float
fn emit_addss_f32_f32 (&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_binop_def_r_r("addss", dest, src);
}
fn emit_addss_f32_mem32(&mut self, dest: &P<Value>, src: &P<Value>) {
self.internal_fp_binop_def_r_mem("addss", dest, src);
}
// sub - double
fn emit_subsd_f64_f64 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("subsd", dest, src);
}
......@@ -2977,6 +3064,17 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_def_r_mem("subsd", dest, src);
}
// sub - float
fn emit_subss_f32_f32 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("subss", dest, src);
}
fn emit_subss_f32_mem32(&mut self, dest: Reg, src: Mem) {
self.internal_fp_binop_def_r_mem("subss", dest, src);
}
// div - double
fn emit_divsd_f64_f64 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("divsd", dest, src);
}
......@@ -2984,6 +3082,17 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_def_r_mem("divsd", dest, src);
}
// div - float
fn emit_divss_f32_f32 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("divss", dest, src);
}
fn emit_divss_f32_mem32(&mut self, dest: Reg, src: Mem) {
self.internal_fp_binop_def_r_mem("divss", dest, src);
}
// mul - double
fn emit_mulsd_f64_f64 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("mulsd", dest, src);
}
......@@ -2991,50 +3100,31 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_def_r_mem("mulsd", dest, src);
}
fn emit_cvtsi2sd_f64_r (&mut self, dest: Reg, src: Reg) {
let len = check_op_len(src);
let inst = "cvtsi2sd".to_string() + &op_postfix(len);
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_reg (src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
fn emit_cvtsd2si_r_f64 (&mut self, dest: Reg, src: Reg) {
let len = check_op_len(dest);
let inst = "cvtsd2si".to_string() + &op_postfix(len);
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg (dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
// mul - float
fn emit_mulss_f32_f32 (&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("mulss", dest, src);
}
fn emit_mulss_f32_mem32(&mut self, dest: Reg, src: Mem) {
self.internal_fp_binop_def_r_mem("mulss", dest, src);
}
// convert - double
fn emit_cvtsi2sd_f64_r (&mut self, dest: Reg, src: Reg) {
self.internal_gpr_to_fpr("cvtsi2sd", dest, src);
}
fn emit_cvtsd2si_r_f64 (&mut self, dest: Reg, src: Reg) {
self.internal_fpr_to_gpr("cvtsd2si", dest, src);
}
// convert - single
fn emit_cvtsi2ss_f32_r (&mut self, dest: Reg, src: Reg) {
self.internal_gpr_to_fpr("cvtsi2ss", dest, src);
}
fn emit_cvtss2si_r_f32 (&mut self, dest: Reg, src: Reg) {
self.internal_fpr_to_gpr("cvtss2si", dest, src);
}
// unpack low data - interleave low byte
......
......@@ -188,33 +188,56 @@ pub trait CodeGenerator {
fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_movss_f32_f32 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_movss_f32_mem32(&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_movss_mem32_f32(&mut self, dest: &P<Value>, src: &P<Value>); // store
// fp add
fn emit_addsd_f64_f64 (&mut self, dest: Reg, src: Reg);
fn emit_addsd_f64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_addss_f32_f32 (&mut self, dest: Reg, src: Reg);
fn emit_addss_f32_mem32(&mut self, dest: Reg, src: Mem);
// fp sub
fn emit_subsd_f64_f64 (&mut self, dest: Reg, src: Reg);
fn emit_subsd_f64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_subss_f32_f32 (&mut self, dest: Reg, src: Reg);
fn emit_subss_f32_mem32(&mut self, dest: Reg, src: Mem);
// fp div
fn emit_divsd_f64_f64 (&mut self, dest: Reg, src: Reg);
fn emit_divsd_f64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_divss_f32_f32 (&mut self, dest: Reg, src: Reg);
fn emit_divss_f32_mem32(&mut self, dest: Reg, src: Mem);
// fp mul
fn emit_mulsd_f64_f64 (&mut self, dest: Reg, src: Reg);
fn emit_mulsd_f64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_mulss_f32_f32 (&mut self, dest: Reg, src: Reg);
fn emit_mulss_f32_mem32(&mut self, dest: Reg, src: Mem);
// fp comparison
fn emit_comisd_f64_f64 (&mut self, op1: Reg, op2: Reg);
fn emit_ucomisd_f64_f64 (&mut self, op1: Reg, op2: Reg);
fn emit_comiss_f32_f32 (&mut self, op1: Reg, op2: Reg);
fn emit_ucomiss_f32_f32 (&mut self, op1: Reg, op2: Reg);
// fp conversion
fn emit_cvtsi2sd_f64_r (&mut self, dest: Reg, src: Reg);
fn emit_cvtsd2si_r_f64 (&mut self, dest: Reg, src: Reg);
fn emit_cvttsd2si_r_f64 (&mut self, dest: Reg, src: Reg);
fn emit_cvtsi2ss_f32_r (&mut self, dest: Reg, src: Reg);
fn emit_cvtss2si_r_f32 (&mut self, dest: Reg, src: Reg);
// used for unsigned int to fp conversion
fn emit_cvttsd2si_r_f64 (&mut self, dest: Reg, src: Reg);
// unpack low data - interleave low byte
fn emit_punpckldq_f64_mem128(&mut self, dest: Reg, src: Mem);
// subtract packed double-fp
......
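Editorial note for readers less familiar with the SSE mnemonics in this trait: the *sd forms are scalar double-precision (f64) and the *ss forms scalar single-precision (f32), which is why each double emitter now gets a float twin. A tiny illustration of the precision difference in plain Rust (not part of the backend):

let single = 0.1f32 + 0.2f32; // what addss would compute
let double = 0.1f64 + 0.2f64; // what addsd would compute
assert_ne!(single as f64, double); // the two roundings differ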
......@@ -13,6 +13,7 @@ use compiler::backend::reg_alloc::validate::exact_liveness::*;
const VERIFY_SPILLING : bool = false;
#[allow(unused_variables)]
pub fn validate_regalloc(cf: &CompiledFunction,
reg_assigned: LinkedHashMap<MuID, MuID>,
spill_scratch_regs: LinkedHashMap<MuID, MuID>)
......
......@@ -108,13 +108,23 @@ lazy_static! {
};
// impl/decl: math.rs
pub static ref FREM : RuntimeEntrypoint = RuntimeEntrypoint {
pub static ref FREM_DOUBLE : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![DOUBLE_TYPE.clone()],
arg_tys: vec![DOUBLE_TYPE.clone(), DOUBLE_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_frem")),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_frem_double")),
jit: RwLock::new(None)
};
pub static ref FREM_FLOAT : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![FLOAT_TYPE.clone()],
arg_tys: vec![FLOAT_TYPE.clone(), FLOAT_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_frem_float")),
jit: RwLock::new(None)
};
......
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_frem(a: f64, b: f64) -> f64 {
pub extern fn muentry_frem_double(a: f64, b: f64) -> f64 {
use std::ops::Rem;
a.rem(b)
}
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_frem_float(a: f32, b: f32) -> f32 {
use std::ops::Rem;
a.rem(b)
}
\ No newline at end of file
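Editorial note: a short usage sketch of the two runtime helpers above; Rust's Rem on floats is the same operation as the % operator, so the expected results can be checked by hand:

// 5.5 % 2.0 == 1.5 exactly, for both widths
assert_eq!(muentry_frem_double(5.5f64, 2.0f64), 1.5f64);
assert_eq!(muentry_frem_float(5.5f32, 2.0f32), 1.5f32);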
......@@ -8,6 +8,10 @@ macro_rules! typedef {
let $name = $vm.declare_type(MuEntityHeader::named($vm.next_id(), Mu(stringify!($name))), MuType_::double());
$vm.set_name($name.as_entity());
};
(($vm: expr) $name: ident = mu_float) => {
let $name = $vm.declare_type(MuEntityHeader::named($vm.next_id(), Mu(stringify!($name))), MuType_::float());
$vm.set_name($name.as_entity());
};
// ref, iref, ptr
(($vm: expr) $name: ident = mu_ref($ty: ident)) => {
......
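Editorial note: the new mu_float arm expands exactly like the mu_double arm above it; `typedef! ((vm) f = mu_float)` becomes roughly:

let f = vm.declare_type(MuEntityHeader::named(vm.next_id(), Mu("f")), MuType_::float());
vm.set_name(f.as_entity());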
......@@ -13,49 +13,99 @@ use mu::utils::LinkedHashMap;
use std::sync::RwLock;
#[test]
fn test_fp_add() {
let lib = testutil::compile_fnc("fp_add", &fp_add);
fn test_double_add() {
let lib = testutil::compile_fnc("double_add", &double_add);
unsafe {
let fp_add : libloading::Symbol<unsafe extern fn(f64, f64) -> f64> = lib.get(b"fp_add").unwrap();
let double_add : libloading::Symbol<unsafe extern fn(f64, f64) -> f64> = lib.get(b"double_add").unwrap();
let fp_add_1_1 = fp_add(1f64, 1f64);
println!("fp_add(1, 1) = {}", fp_add_1_1);
assert!(fp_add_1_1 == 2f64);
let double_add_1_1 = double_add(1f64, 1f64);
println!("double_add(1, 1) = {}", double_add_1_1);
assert!(double_add_1_1 == 2f64);
}
}
fn fp_add() -> VM {
fn double_add() -> VM {
let vm = VM::new();
typedef! ((vm) double = mu_double);
funcsig! ((vm) fp_add_sig = (double, double) -> (double));
funcdecl! ((vm) <fp_add_sig> fp_add);
funcdef! ((vm) <fp_add_sig> fp_add VERSION fp_add_v1);
funcsig! ((vm) double_add_sig = (double, double) -> (double));
funcdecl! ((vm) <double_add_sig> double_add);
funcdef! ((vm) <double_add_sig> double_add VERSION double_add_v1);
// %entry(<@double> %a, <@double> %b):
block! ((vm, fp_add_v1) blk_entry);
ssa! ((vm, fp_add_v1) <double> a);
ssa! ((vm, fp_add_v1) <double> b);
block! ((vm, double_add_v1) blk_entry);
ssa! ((vm, double_add_v1) <double> a);
ssa! ((vm, double_add_v1) <double> b);
// %r = FADD %a %b
ssa! ((vm, fp_add_v1) <double> r);
inst! ((vm, fp_add_v1) blk_entry_fadd:
ssa! ((vm, double_add_v1) <double> r);
inst! ((vm, double_add_v1) blk_entry_fadd:
r = BINOP (BinOp::FAdd) a b
);
// RET %r
inst! ((vm, fp_add_v1) blk_entry_ret:
inst! ((vm, double_add_v1) blk_entry_ret:
RET (r)
);
define_block! ((vm, fp_add_v1) blk_entry(a, b) {
define_block! ((vm, double_add_v1) blk_entry(a, b) {
blk_entry_fadd,
blk_entry_ret
});
define_func_ver!((vm) fp_add_v1(entry: blk_entry) {
define_func_ver!((vm) double_add_v1(entry: blk_entry) {
blk_entry
});
vm
}
#[test]
fn test_float_add() {
let lib = testutil::compile_fnc("float_add", &float_add);
unsafe {
let float_add : libloading::Symbol<unsafe extern fn(f32, f32) -> f32> = lib.get(b"float_add").unwrap();
let float_add_1_1 = float_add(1f32, 1f32);
println!("float_add(1, 1) = {}", float_add_1_1);
assert!(float_add_1_1 == 2f32);
}
}
fn float_add() -> VM {
let vm = VM::new();
typedef! ((vm) float = mu_float);
funcsig! ((vm) float_add_sig = (float, float) -> (float));
funcdecl! ((vm) <float_add_sig> float_add);
funcdef! ((vm) <float_add_sig> float_add VERSION float_add_v1);
// %entry(<@float> %a, <@float> %b):
block! ((vm, float_add_v1) blk_entry);
ssa! ((vm, float_add_v1) <float> a);
ssa! ((vm, float_add_v1) <float> b);
// %r = FADD %a %b
ssa! ((vm, float_add_v1) <float> r);
inst! ((vm, float_add_v1) blk_entry_fadd:
r = BINOP (BinOp::FAdd) a b
);
// RET %r
inst! ((vm, float_add_v1) blk_entry_ret:
RET (r)
);
define_block! ((vm, float_add_v1) blk_entry(a, b) {
blk_entry_fadd,
blk_entry_ret
});
define_func_ver!((vm) float_add_v1(entry: blk_entry) {
blk_entry
});
......
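Editorial note on the assertions in these tests: exact == is safe here because 1.0 + 1.0 is exactly representable; for results that are not, an epsilon comparison is the usual pattern (a generic sketch, not part of this commit):

fn approx_eq(a: f32, b: f32, eps: f32) -> bool {
    (a - b).abs() <= eps
}
assert!(approx_eq(0.1f32 + 0.2f32, 0.3f32, 1e-6));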