
Commit d84ffada authored by qinsoon

fix. float type should work now

parent 364ece82
@@ -2158,6 +2158,10 @@ impl CodeGenerator for ASMCodeGen {
         self.internal_binop_no_def_mem_r("cmp", op1, op2)
     }
 
+    fn emit_test_r_r(&mut self, op1: &P<Value>, op2: &P<Value>) {
+        self.internal_binop_no_def_r_r("test", op1, op2)
+    }
+
     // mov
     fn emit_mov_r64_imm64 (&mut self, dest: &P<Value>, src: i64) {
@@ -2869,7 +2873,14 @@ impl CodeGenerator for ASMCodeGen {
         let asm = format!("jle {}", symbol(self.mangle_block_label(dest_name.clone())));
         self.add_asm_branch2(asm, dest_name);
     }
 
+    fn emit_js(&mut self, dest_name: MuName) {
+        trace!("emit: js {}", dest_name);
+
+        let asm = format!("js {}", symbol(self.mangle_block_label(dest_name.clone())));
+        self.add_asm_branch2(asm, dest_name);
+    }
+
     #[cfg(target_os = "macos")]
     fn emit_call_near_rel32(&mut self, callsite: String, func: MuName, pe: Option<MuName>) -> ValueLocation {
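Note: emit_test_r_r and emit_js are the two new emitters here, and they work as a pair in the UITOFP lowering further down: `test r, r` ANDs a register with itself purely to set flags, and `js` then branches if the sign flag came out set. A minimal Rust equivalent of the check they encode (the helper name is mine, not from the commit):

fn sign_bit_set(x: u64) -> bool {
    // `testq %x, %x ; js ...` takes the branch exactly when this is true
    (x as i64) < 0
}

fn main() {
    assert!(sign_bit_set(1u64 << 63));
    assert!(!sign_bit_set(1));
}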
@@ -2996,6 +3007,9 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_movsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>) {
         self.internal_fp_mov_f_f("movsd", dest, src)
     }
+    fn emit_movapd_f64_f64 (&mut self, dest: Reg, src: Reg) {
+        self.internal_fp_mov_f_f("movapd", dest, src);
+    }
 
     // load
     fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
         self.internal_fp_mov_f_mem("movsd", dest, src, false)
@@ -3010,6 +3024,9 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_movss_f32_f32 (&mut self, dest: &P<Value>, src: &P<Value>) {
         self.internal_fp_mov_f_f("movss", dest, src)
     }
+    fn emit_movaps_f32_f32 (&mut self, dest: Reg, src: Reg) {
+        self.internal_fp_mov_f_f("movaps", dest, src);
+    }
 
     // load
     fn emit_movss_f32_mem32(&mut self, dest: &P<Value>, src: &P<Value>) {
         self.internal_fp_mov_f_mem("movss", dest, src, false)
@@ -3117,6 +3134,9 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_cvtsd2si_r_f64 (&mut self, dest: Reg, src: Reg) {
         self.internal_fpr_to_gpr("cvtsd2si", dest, src);
     }
+    fn emit_cvttsd2si_r_f64 (&mut self, dest: Reg, src: Reg) {
+        self.internal_fpr_to_gpr("cvttsd2si", dest, src);
+    }
 
     // convert - single
@@ -3126,6 +3146,9 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_cvtss2si_r_f32 (&mut self, dest: Reg, src: Reg) {
         self.internal_fpr_to_gpr("cvtss2si", dest, src);
     }
+    fn emit_cvttss2si_r_f32 (&mut self, dest: Reg, src: Reg) {
+        self.internal_fpr_to_gpr("cvttss2si", dest, src);
+    }
 
     // unpack low data - interleave low byte
     fn emit_punpckldq_f64_mem128(&mut self, dest: Reg, src: Mem) {
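Note: the second `t` in cvttsd2si and cvttss2si stands for truncation. Unlike cvtsd2si/cvtss2si, which round according to the current MXCSR rounding mode, these always round toward zero, which is the behaviour FPTOUI needs. Rust's `as` casts truncate the same way, so a quick sanity check:

fn main() {
    // truncating float-to-int conversion, as cvttsd2si performs it
    assert_eq!(2.9f64 as i64, 2);
    assert_eq!(-2.9f64 as i64, -2);
}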
@@ -3217,48 +3240,6 @@ impl CodeGenerator for ASMCodeGen {
             true
         )
     }
 
-    fn emit_movapd_f64_f64 (&mut self, dest: Reg, src: Reg) {
-        trace!("emit movapd {} -> {}", src, dest);
-
-        let (reg1, id1, loc1) = self.prepare_fpreg(src, 6 + 1);
-        let (reg2, id2, loc2) = self.prepare_fpreg(dest, 6 + 1 + reg1.len() + 1);
-
-        let asm = format!("movapd {},{}", reg1, reg2);
-
-        self.add_asm_inst(
-            asm,
-            linked_hashmap!{
-                id2 => vec![loc2.clone()]
-            },
-            linked_hashmap!{
-                id1 => vec![loc1.clone()]
-            },
-            false
-        )
-    }
-
-    fn emit_cvttsd2si_r_f64 (&mut self, dest: Reg, src: Reg) {
-        let len = check_op_len(dest);
-
-        let inst = "cvttsd2si".to_string() + &op_postfix(len);
-        trace!("emit: {} {} -> {}", inst, src, dest);
-
-        let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
-        let (reg2, id2, loc2) = self.prepare_reg (dest, inst.len() + 1 + reg1.len() + 1);
-
-        let asm = format!("{} {},{}", inst, reg1, reg2);
-
-        self.add_asm_inst(
-            asm,
-            linked_hashmap!{
-                id2 => vec![loc2]
-            },
-            linked_hashmap!{
-                id1 => vec![loc1]
-            },
-            false
-        )
-    }
 }
 
 use compiler::backend::code_emission::create_emit_directory;
@@ -39,6 +39,8 @@ pub trait CodeGenerator {
     fn emit_cmp_imm_r(&mut self, op1: i32, op2: Reg);
     fn emit_cmp_mem_r(&mut self, op1: Reg, op2: Reg);
 
+    fn emit_test_r_r (&mut self, op1: Reg, op2: Reg);
+
     // gpr move
     // mov imm64 to r64
@@ -172,6 +174,8 @@ pub trait CodeGenerator {
     fn emit_jge(&mut self, dest: MuName);
     fn emit_jl(&mut self, dest: MuName);
     fn emit_jle(&mut self, dest: MuName);
+    fn emit_js(&mut self, dest: MuName);
+
     fn emit_call_near_rel32(&mut self, callsite: String, func: MuName, pe: Option<MuName>) -> ValueLocation;
     fn emit_call_near_r64 (&mut self, callsite: String, func: &P<Value>, pe: Option<MuName>) -> ValueLocation;
@@ -237,6 +241,7 @@ pub trait CodeGenerator {
     // used for unsigned int to fp conversion
     fn emit_cvttsd2si_r_f64 (&mut self, dest: Reg, src: Reg);
+    fn emit_cvttss2si_r_f32 (&mut self, dest: Reg, src: Reg);
 
     // unpack low data - interleave low byte
     fn emit_punpckldq_f64_mem128(&mut self, dest: Reg, src: Mem);
@@ -248,4 +253,6 @@ pub trait CodeGenerator {
     // move aligned packed double-precision fp values
     fn emit_movapd_f64_mem128(&mut self, dest: Reg, src: Mem);
     fn emit_movapd_f64_f64 (&mut self, dest: Reg, src: Reg);
+
+    fn emit_movaps_f32_f32 (&mut self, dest: Reg, src: Reg);
 }
@@ -81,11 +81,17 @@ lazy_static! {
         ]))
     });
 
-    pub static ref FPTOUI_C : P<Value> = P(Value{
-        hdr: MuEntityHeader::named(new_internal_id(), Mu("FPTOUI_C")),
+    pub static ref FPTOUI_C_DOUBLE : P<Value> = P(Value{
+        hdr: MuEntityHeader::named(new_internal_id(), Mu("FPTOUI_C_DOUBLE")),
         ty : UINT64_TYPE.clone(),
         v  : Value_::Constant(Constant::Int(4890909195324358656u64))
     });
+
+    pub static ref FPTOUI_C_FLOAT : P<Value> = P(Value{
+        hdr: MuEntityHeader::named(new_internal_id(), Mu("FPTOUI_C_FLOAT")),
+        ty : UINT32_TYPE.clone(),
+        v  : Value_::Constant(Constant::Int(1593835520u64))
+    });
 }
 
 const INLINE_FASTPATH : bool = false;
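Note: the two magic integers are not arbitrary. Both are the raw IEEE-754 bit patterns of 2^63, the pivot that the FPTOUI lowering below subtracts so the upper half of the unsigned range folds into the signed range. A standalone check (my sketch, not part of the commit):

fn main() {
    // 2^63 as an f64: biased exponent 63 + 1023 = 1086, zero mantissa
    assert_eq!((2f64).powi(63).to_bits(), 4890909195324358656u64);
    // 2^63 as an f32: biased exponent 63 + 127 = 190, zero mantissa
    assert_eq!((2f32).powi(63).to_bits(), 1593835520u32);
}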
@@ -758,109 +764,227 @@ impl <'a> InstructionSelection {
             op::ConvOp::UITOFP => {
                 let tmp_res = self.get_result_value(node);
 
-                // FIXME:
-                assert!(to_ty.is_double(), "only support uitofp (double)");
                 assert!(self.match_ireg(op), "unexpected op (expected ireg): {}", op);
                 let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
 
                 let op_ty_size = vm.get_backend_type_info(tmp_op.ty.id()).size;
 
-                match op_ty_size {
-                    8 => {
-                        // movd/movq op -> res
-                        self.backend.emit_mov_fpr_r64(&tmp_res, &tmp_op);
-
-                        // punpckldq UITOFP_C0, tmp_res -> tmp_res
-                        // (interleaving low bytes: xmm = xmm[0] mem[0] xmm[1] mem[1])
-                        let mem_c0 = self.get_mem_for_const(UITOFP_C0.clone(), vm);
-                        self.backend.emit_punpckldq_f64_mem128(&tmp_res, &mem_c0);
-
-                        // subpd UITOFP_C1, tmp_res -> tmp_res
-                        let mem_c1 = self.get_mem_for_const(UITOFP_C1.clone(), vm);
-                        self.backend.emit_subpd_f64_mem128(&tmp_res, &mem_c1);
-
-                        // haddpd tmp_res, tmp_res -> tmp_res
-                        self.backend.emit_haddpd_f64_f64(&tmp_res, &tmp_res);
-                    }
-                    4 => {
-                        let tmp = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
-
-                        // movl op -> tmp(32)
-                        let tmp32 = unsafe { tmp.as_type(UINT32_TYPE.clone()) };
-                        self.backend.emit_mov_r_r(&tmp32, &tmp_op);
-
-                        // cvtsi2sd %tmp(64) -> %tmp_res
-                        self.backend.emit_cvtsi2sd_f64_r(&tmp_res, &tmp);
-                    }
-                    2 | 1 => {
-                        let tmp_op32 = unsafe { tmp_op.as_type(UINT32_TYPE.clone()) };
-                        self.backend.emit_cvtsi2sd_f64_r(&tmp_res, &tmp_op32);
-                    }
-                    _ => panic!("not implemented int length {}", op_ty_size)
-                }
+                if to_ty.is_double() {
+                    match op_ty_size {
+                        8 => {
+                            // movd/movq op -> res
+                            self.backend.emit_mov_fpr_r64(&tmp_res, &tmp_op);
+
+                            // punpckldq UITOFP_C0, tmp_res -> tmp_res
+                            // (interleaving low bytes: xmm = xmm[0] mem[0] xmm[1] mem[1])
+                            let mem_c0 = self.get_mem_for_const(UITOFP_C0.clone(), vm);
+                            self.backend.emit_punpckldq_f64_mem128(&tmp_res, &mem_c0);
+
+                            // subpd UITOFP_C1, tmp_res -> tmp_res
+                            let mem_c1 = self.get_mem_for_const(UITOFP_C1.clone(), vm);
+                            self.backend.emit_subpd_f64_mem128(&tmp_res, &mem_c1);
+
+                            // haddpd tmp_res, tmp_res -> tmp_res
+                            self.backend.emit_haddpd_f64_f64(&tmp_res, &tmp_res);
+                        }
+                        4 => {
+                            let tmp = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
+
+                            // movl op -> tmp(32)
+                            let tmp32 = unsafe { tmp.as_type(UINT32_TYPE.clone()) };
+                            self.backend.emit_mov_r_r(&tmp32, &tmp_op);
+
+                            // cvtsi2sd %tmp(64) -> %tmp_res
+                            self.backend.emit_cvtsi2sd_f64_r(&tmp_res, &tmp);
+                        }
+                        2 | 1 => {
+                            let tmp_op32 = unsafe { tmp_op.as_type(UINT32_TYPE.clone()) };
+                            self.backend.emit_cvtsi2sd_f64_r(&tmp_res, &tmp_op32);
+                        }
+                        _ => panic!("not implemented int length {}", op_ty_size)
+                    }
+                } else if to_ty.is_float() {
+                    match op_ty_size {
+                        8 => {
+                            // movl %tmp_op -> %tmp1
+                            let tmp1 = self.make_temporary(f_context, UINT32_TYPE.clone(), vm);
+                            self.backend.emit_mov_r_r(&tmp1, unsafe { &tmp_op.as_type(UINT32_TYPE.clone()) });
+
+                            // andl %tmp1 $1 -> %tmp1
+                            self.backend.emit_and_r_imm(&tmp1, 1);
+
+                            // testq %tmp_op %tmp_op
+                            self.backend.emit_test_r_r(&tmp_op, &tmp_op);
+
+                            let blk_if_signed     = format!("{}_{}_uitofp_float_if_signed", self.current_fv_id, node.id());
+                            let blk_if_not_signed = format!("{}_{}_uitofp_float_if_not_signed", self.current_fv_id, node.id());
+                            let blk_done          = format!("{}_{}_uitofp_float_done", self.current_fv_id, node.id());
+
+                            // js %if_signed
+                            self.backend.emit_js(blk_if_signed.clone());
+                            self.finish_block();
+
+                            // blk_if_not_signed:
+                            self.start_block(blk_if_not_signed);
+
+                            // cvtsi2ss %tmp_op -> %tmp_res
+                            self.backend.emit_cvtsi2ss_f32_r(&tmp_res, &tmp_op);
+
+                            // jmp blk_done
+                            self.backend.emit_jmp(blk_done.clone());
+                            self.finish_block();
+
+                            // blk_if_signed:
+                            self.start_block(blk_if_signed);
+
+                            // shr %tmp_op $1 -> %tmp_op
+                            self.backend.emit_shr_r_imm8(&tmp_op, 1);
+
+                            // or %tmp_op %tmp1 -> %tmp1
+                            self.backend.emit_or_r_r(unsafe { &tmp1.as_type(UINT64_TYPE.clone()) }, &tmp_op);
+
+                            // cvtsi2ss %tmp1 -> %tmp_res
+                            self.backend.emit_cvtsi2ss_f32_r(&tmp_res, &tmp1);
+
+                            // addss %tmp_res %tmp_res -> %tmp_res
+                            self.backend.emit_addss_f32_f32(&tmp_res, &tmp_res);
+                            self.finish_block();
+
+                            self.start_block(blk_done);
+                        }
+                        4 => {
+                            // movl %tmp_op -> %tmp1
+                            let tmp1 = self.make_temporary(f_context, UINT32_TYPE.clone(), vm);
+                            self.backend.emit_mov_r_r(&tmp1, &tmp_op);
+
+                            // cvtsi2ssq %tmp1(64) -> %tmp_res
+                            self.backend.emit_cvtsi2ss_f32_r(&tmp_res, unsafe { &tmp1.as_type(UINT64_TYPE.clone()) });
+                        }
+                        2 | 1 => {
+                            let tmp_op32 = unsafe { tmp_op.as_type(UINT32_TYPE.clone()) };
+
+                            // cvtsi2ss %tmp_op32 -> %tmp_res
+                            self.backend.emit_cvtsi2ss_f32_r(&tmp_res, &tmp_op32);
+                        }
+                        _ => panic!("not implemented int length {}", op_ty_size)
+                    }
+                } else {
+                    panic!("expect double or float")
+                }
             }
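Note: the 8-byte float case above is the usual unsigned-to-float trick: cvtsi2ss only accepts signed inputs, so when the top bit of the u64 is set the value is halved with the dropped low bit OR-ed back in as a sticky rounding bit, converted, and then doubled with addss. A scalar sketch of the same logic under round-to-nearest casts (the helper name is mine):

fn u64_to_f32(x: u64) -> f32 {
    if (x as i64) >= 0 {
        // top bit clear: the signed convert is already exact
        x as i64 as f32
    } else {
        // halve, but keep the dropped bit so rounding still sees it
        let halved = (x >> 1) | (x & 1);
        let f = halved as i64 as f32;
        f + f // addss doubles back to the original magnitude
    }
}

fn main() {
    assert_eq!(u64_to_f32(0), 0f32);
    assert_eq!(u64_to_f32(1), 1f32);
    assert_eq!(u64_to_f32(u64::max_value()), u64::max_value() as f32);
}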
             op::ConvOp::FPTOUI => {
                 let tmp_res = self.get_result_value(node);
 
-                // FIXME:
-                assert!(from_ty.is_double(), "only support fptoui (double)");
                 assert!(self.match_fpreg(op), "unexpected op (expected fpreg): {}", op);
                 let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
 
                 let res_ty_size = vm.get_backend_type_info(tmp_res.ty.id()).size;
 
-                match res_ty_size {
-                    8 => {
-                        let tmp1 = self.make_temporary(f_context, DOUBLE_TYPE.clone(), vm);
-                        let tmp2 = self.make_temporary(f_context, DOUBLE_TYPE.clone(), vm);
-
-                        // movsd FPTOUI_C -> %tmp1
-                        let mem_c = self.get_mem_for_const(FPTOUI_C.clone(), vm);
-                        self.backend.emit_movsd_f64_mem64(&tmp1, &mem_c);
-
-                        // movapd %tmp_op -> %tmp2
-                        self.backend.emit_movapd_f64_f64(&tmp2, &tmp_op);
-
-                        // subsd %tmp1, %tmp2 -> %tmp2
-                        self.backend.emit_subsd_f64_f64(&tmp2, &tmp1);
-
-                        // cvttsd2si %tmp2 -> %tmp_res
-                        self.backend.emit_cvttsd2si_r_f64(&tmp_res, &tmp2);
-
-                        let tmp_const = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
-
-                        // mov 0x8000000000000000 -> %tmp_const
-                        self.backend.emit_mov_r64_imm64(&tmp_const, -9223372036854775808i64);
-
-                        // xor %tmp_res, %tmp_const -> %tmp_const
-                        self.backend.emit_xor_r_r(&tmp_const, &tmp_res);
-
-                        // cvttsd2si %tmp_op -> %tmp_res
-                        self.backend.emit_cvttsd2si_r_f64(&tmp_res, &tmp_op);
-
-                        // ucomisd %tmp_op %tmp1
-                        self.backend.emit_ucomisd_f64_f64(&tmp1, &tmp_op);
-
-                        // cmovaeq %tmp_const -> %tmp_res
-                        self.backend.emit_cmovae_r_r(&tmp_res, &tmp_const);
-                    }
-                    4 => {
-                        let tmp_res64 = unsafe { tmp_res.as_type(UINT64_TYPE.clone()) };
-
-                        // cvttsd2si %tmp_op -> %tmp_res(64)
-                        self.backend.emit_cvttsd2si_r_f64(&tmp_res64, &tmp_op);
-                    }
-                    2 | 1 => {
-                        let tmp_res32 = unsafe { tmp_res.as_type(UINT32_TYPE.clone()) };
-
-                        // cvttsd2si %tmp_op -> %tmp_res(32)
-                        self.backend.emit_cvttsd2si_r_f64(&tmp_res32, &tmp_op);
-
-                        // movz %tmp_res -> %tmp_res(32)
-                        self.backend.emit_movz_r_r(&tmp_res32, &tmp_res);
-                    }
-                    _ => panic!("not implemented int length {}", res_ty_size)
-                }
+                if from_ty.is_double() {
+                    match res_ty_size {
+                        8 => {
+                            let tmp1 = self.make_temporary(f_context, DOUBLE_TYPE.clone(), vm);
+                            let tmp2 = self.make_temporary(f_context, DOUBLE_TYPE.clone(), vm);
+
+                            // movsd FPTOUI_C_DOUBLE -> %tmp1
+                            let mem_c = self.get_mem_for_const(FPTOUI_C_DOUBLE.clone(), vm);
+                            self.backend.emit_movsd_f64_mem64(&tmp1, &mem_c);
+
+                            // movapd %tmp_op -> %tmp2
+                            self.backend.emit_movapd_f64_f64(&tmp2, &tmp_op);
+
+                            // subsd %tmp1, %tmp2 -> %tmp2
+                            self.backend.emit_subsd_f64_f64(&tmp2, &tmp1);
+
+                            // cvttsd2si %tmp2 -> %tmp_res
+                            self.backend.emit_cvttsd2si_r_f64(&tmp_res, &tmp2);
+
+                            let tmp_const = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
+
+                            // mov 0x8000000000000000 -> %tmp_const
+                            self.backend.emit_mov_r64_imm64(&tmp_const, -9223372036854775808i64);
+
+                            // xor %tmp_res, %tmp_const -> %tmp_const
+                            self.backend.emit_xor_r_r(&tmp_const, &tmp_res);
+
+                            // cvttsd2si %tmp_op -> %tmp_res
+                            self.backend.emit_cvttsd2si_r_f64(&tmp_res, &tmp_op);
+
+                            // ucomisd %tmp_op %tmp1
+                            self.backend.emit_ucomisd_f64_f64(&tmp1, &tmp_op);
+
+                            // cmovaeq %tmp_const -> %tmp_res
+                            self.backend.emit_cmovae_r_r(&tmp_res, &tmp_const);
+                        }
+                        4 => {
+                            let tmp_res64 = unsafe { tmp_res.as_type(UINT64_TYPE.clone()) };
+
+                            // cvttsd2si %tmp_op -> %tmp_res(64)
+                            self.backend.emit_cvttsd2si_r_f64(&tmp_res64, &tmp_op);
+                        }
+                        2 | 1 => {
+                            let tmp_res32 = unsafe { tmp_res.as_type(UINT32_TYPE.clone()) };
+
+                            // cvttsd2si %tmp_op -> %tmp_res(32)
+                            self.backend.emit_cvttsd2si_r_f64(&tmp_res32, &tmp_op);
+
+                            // movz %tmp_res -> %tmp_res(32)
+                            self.backend.emit_movz_r_r(&tmp_res32, &tmp_res);
+                        }
+                        _ => panic!("not implemented int length {}", res_ty_size)
+                    }
+                } else if from_ty.is_float() {
+                    match res_ty_size {
+                        8 => {
+                            let tmp1 = self.make_temporary(f_context, FLOAT_TYPE.clone(), vm);
+                            let tmp2 = self.make_temporary(f_context, FLOAT_TYPE.clone(), vm);
+
+                            // movss FPTOUI_C_FLOAT -> %tmp1
+                            let mem_c = self.get_mem_for_const(FPTOUI_C_FLOAT.clone(), vm);
+                            self.backend.emit_movss_f32_mem32(&tmp1, &mem_c);
+
+                            // movaps %tmp_op -> %tmp2
+                            self.backend.emit_movaps_f32_f32(&tmp2, &tmp_op);
+
+                            // subss %tmp1, %tmp2 -> %tmp2
+                            self.backend.emit_subss_f32_f32(&tmp2, &tmp1);
+
+                            // cvttss2si %tmp2 -> %tmp_res
+                            self.backend.emit_cvttss2si_r_f32(&tmp_res, &tmp2);
+
+                            let tmp_const = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
+
+                            // mov 0x8000000000000000 -> %tmp_const
+                            self.backend.emit_mov_r64_imm64(&tmp_const, -9223372036854775808i64);
+
+                            // xor %tmp_res, %tmp_const -> %tmp_const
+                            self.backend.emit_xor_r_r(&tmp_const, &tmp_res);
+
+                            // cvttss2si %tmp_op -> %tmp_res
+                            self.backend.emit_cvttss2si_r_f32(&tmp_res, &tmp_op);
+
+                            // ucomiss %tmp_op %tmp1
+                            self.backend.emit_ucomiss_f32_f32(&tmp1, &tmp_op);
+
+                            // cmovaeq %tmp_const -> %tmp_res
+                            self.backend.emit_cmovae_r_r(&tmp_res, &tmp_const);
+                        }
+                        4 => {
+                            let tmp_res64 = unsafe { tmp_res.as_type(UINT64_TYPE.clone()) };
+
+                            // cvttss2si %tmp_op -> %tmp_res(64)
+                            self.backend.emit_cvttss2si_r_f32(&tmp_res64, &tmp_op);
+                        }
+                        2 | 1 => {
+                            let tmp_res32 = unsafe { tmp_res.as_type(UINT32_TYPE.clone()) };
+
+                            // cvttss2si %tmp_op -> %tmp_res(32)
+                            self.backend.emit_cvttss2si_r_f32(&tmp_res32, &tmp_op);
+
+                            // movz %tmp_res(32) -> %tmp_res
+                            self.backend.emit_movz_r_r(&tmp_res32, &tmp_res);
+                        }
+                        _ => panic!("not implemented int length {}", res_ty_size)
+                    }
+                } else {
+                    panic!("expect double or float")
+                }
             }
 
             _ => unimplemented!()
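Note: FPTOUI uses the same 2^63 pivot in the other direction. cvttsd2si/cvttss2si only produce signed results, so the code computes both candidates, the direct convert and the convert of (x - 2^63) with the sign bit XOR-ed back on, and lets ucomisd/ucomiss plus cmovae pick the right one branchlessly. A scalar sketch for the double case, assuming an in-range, non-negative, non-NaN input (helper name mine):

fn f64_to_u64(x: f64) -> u64 {
    const TWO_63: f64 = 9223372036854775808.0; // the value FPTOUI_C_DOUBLE encodes
    if x < TWO_63 {
        // below 2^63 the signed truncating convert is already correct
        x as i64 as u64
    } else {
        // shift into signed range, convert, then flip the top bit back on
        ((x - TWO_63) as i64 as u64) ^ (1u64 << 63)
    }
}

fn main() {
    assert_eq!(f64_to_u64(1.5), 1);
    assert_eq!(f64_to_u64(9223372036854775808.0), 1u64 << 63);
}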
@@ -888,7 +1012,17 @@ impl <'a> InstructionSelection {
                 let resolved_loc = self.emit_node_addr_to_value(loc_op, f_content, f_context, vm);
                 let res_temp = self.get_result_value(node);
 
-                self.emit_move_value_to_value(&res_temp, &resolved_loc);
+                if self.match_ireg(node) {
+                    self.backend.emit_mov_r_mem(&res_temp, &resolved_loc);
+                } else if self.match_fpreg(node) {
+                    match res_temp.ty.v {
+                        MuType_::Double => self.backend.emit_movsd_f64_mem64(&res_temp, &resolved_loc),
+                        MuType_::Float  => self.backend.emit_movss_f32_mem32(&res_temp, &resolved_loc),
+                        _ => panic!("expect double or float")
+                    }
+                } else {
+                    unimplemented!()
+                }
             }
 
             Instruction_::Store{is_ptr, order, mem_loc, value} => {
@@ -3769,9 +3903,14 @@ impl <'a> InstructionSelection {
         }
     }
 
+    // FIXME: need to make sure dest and src have the same type,
+    // which is not true all the time, especially when memory operands are involved
     fn emit_move_value_to_value(&mut self, dest: &P<Value>, src: &P<Value>) {
         let ref src_ty = src.ty;
 
+        debug!("source type: {}", src_ty);
+        debug!("dest type: {}", dest.ty);
+
         if types::is_scalar(src_ty) && !types::is_fp(src_ty) {
             // gpr mov
             if dest.is_int_reg() && src.is_int_reg() {
@@ -67,11 +67,11 @@ fn test_float_add() {
     let lib = testutil::compile_fnc("float_add", &float_add);
 
     unsafe {
-        let float_add : libloading::Symbol<unsafe extern fn(f64, f64) -> f64> = lib.get(b"float_add").unwrap();
+        let float_add : libloading::Symbol<unsafe extern fn(f32, f32) -> f32> = lib.get(b"float_add").unwrap();
 
-        let float_add_1_1 = float_add(1f64, 1f64);
+        let float_add_1_1 = float_add(1f32, 1f32);
         println!("float_add(1, 1) = {}", float_add_1_1);
 
-        assert!(float_add_1_1 == 2f64);
+        assert!(float_add_1_1 == 2f32);
     }
 }
@@ -318,6 +318,55 @@ fn ui64tofp() -> VM {
     vm
 }
 
+#[test]
+fn test_ui64tofp_float() {
+    let lib = testutil::compile_fnc("ui64tofp_float", &ui64tofp_float);
+
+    unsafe {
+        let ui64tofp_float : libloading::Symbol<unsafe extern fn(u64) -> f32> = lib.get(b"ui64tofp_float").unwrap();
+
+        let res = ui64tofp_float(0u64);
+        println!("ui64tofp_float(0) = {}", res);
+        assert!(res == 0f32);
+
+        let res = ui64tofp_float(1u64);
+        println!("ui64tofp_float(1) = {}", res);
+        assert!(res == 1f32);
+    }
+}
+
+fn ui64tofp_float() -> VM {
+    let vm = VM::new();
+
+    typedef! ((vm) int64 = mu_int(64));
+    typedef! ((vm) float = mu_float);
+
+    funcsig! ((vm) sig = (int64) -> (float));
+    funcdecl! ((vm) <sig> ui64tofp_float);
+    funcdef! ((vm) <sig> ui64tofp_float VERSION ui64tofp_float_v1);
+
+    // blk entry
+    block! ((vm, ui64tofp_float_v1) blk_entry);
+    ssa! ((vm, ui64tofp_float_v1) <int64> x);
+
+    ssa! ((vm, ui64tofp_float_v1) <float> res);
+    inst! ((vm, ui64tofp_float_v1) blk_entry_conv:
+        res = CONVOP (ConvOp::UITOFP) <int64 float> x
+    );
+
+    inst! ((vm, ui64tofp_float_v1) blk_entry_ret:
+        RET (res)
+    );
+
+    define_block!((vm, ui64tofp_float_v1) blk_entry(x){
+        blk_entry_conv, blk_entry_ret
+    });
+
+    define_func_ver!((vm) ui64tofp_float_v1 (entry: blk_entry) {blk_entry});
+
+    vm
+}
+
 #[test]
 fn test_ui32tofp() {
     let lib = testutil::compile_fnc("ui32tofp", &ui32tofp);
@@ -1405,7 +1405,7 @@ def test_float():
     assert res.returncode == 0, res.err
     assert res.out == '(0.893876, 1.000000, 0.447179)\n'
 
 @pytest.mark.xfail(reason='int128 not implemented')
 @may_spawn_proc
 def test_RPySOM():
     from som.vm.universe import main, Exit