
Commit 443d866a authored by qinsoon

fp add with two constants

currently we mov the constant as an immediate into a GPR, then movq
from the GPR to an XMM register. A better approach would be to put
the immediate in memory and load it from there.
parent c075dec1
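
As an illustration (not from the commit itself): to materialise the double constant 1.5, whose IEEE-754 bit pattern is 0x3FF8000000000000, the current lowering emits two instructions, while the suggested alternative would be a single load from a constant-pool entry (.LC0 is a hypothetical label):

    # current lowering
    mov   $0x3ff8000000000000, %rax    # bit pattern into a GPR
    movq  %rax, %xmm0                  # GPR -> XMM

    # suggested alternative
    # .LC0: .quad 0x3ff8000000000000
    movsd .LC0(%rip), %xmm0            # load the constant directly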
@@ -585,6 +585,8 @@ impl Value {
                    false
                }
            },
            Value_::Constant(Constant::Double(_)) => true,
            Value_::Constant(Constant::Float(_)) => true,
            _ => false
        }
    }
@@ -1696,6 +1696,26 @@ impl CodeGenerator for ASMCodeGen {
        self.internal_mov_r64_imm64("mov", dest, src)
    }

    fn emit_mov_fpr_r64 (&mut self, dest: Reg, src: Reg) {
        trace!("emit: movq {} -> {}", src, dest);

        // the second argument to prepare_reg/prepare_fpreg is the operand's
        // byte offset in the asm string: 5 == "movq ".len(), and the second
        // operand starts past the first operand and the comma
        let (reg1, id1, loc1) = self.prepare_reg(src, 5);
        let (reg2, id2, loc2) = self.prepare_fpreg(dest, 5 + reg1.len() + 1);

        let asm = format!("movq {},{}", reg1, reg2);

        self.add_asm_inst(
            asm,
            hashmap!{
                // defined: the XMM destination
                id2 => vec![loc2]
            },
            hashmap!{
                // used: the GPR source
                id1 => vec![loc1]
            },
            false
        )
    }

    fn emit_mov_r_imm (&mut self, dest: &P<Value>, src: i32) {
        self.internal_mov_r_imm("mov", dest, src)
    }
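
For illustration (assuming the backend's usual AT&T register spelling), a call with src = rax and dest = xmm0 emits the line:

    movq %rax,%xmm0

The first hashmap records the XMM destination as defined and the second records the GPR source as used, so the register allocator sees the move's def and use sites.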
@@ -32,7 +32,11 @@ pub trait CodeGenerator {
    fn emit_cmp_mem_r(&mut self, op1: Reg, op2: Reg);

    // gpr move
    // mov imm64 to r64
    fn emit_mov_r64_imm64 (&mut self, dest: Reg, src: i64);
    // mov r64 to fpr
    fn emit_mov_fpr_r64 (&mut self, dest: Reg, src: Reg);
    fn emit_mov_r_imm (&mut self, dest: Reg, src: i32);
    fn emit_mov_r_mem (&mut self, dest: Reg, src: Mem); // load
@@ -2328,6 +2328,25 @@ impl <'a> InstructionSelection {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => pv.clone(),
                    Value_::Constant(Constant::Double(val)) => {
                        use std::mem;
                        // reinterpret the f64's bits as a u64
                        let val_u64 : u64 = unsafe { mem::transmute(val) };

                        // mov val_u64 -> tmp_int
                        let tmp_int = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                        self.backend.emit_mov_r64_imm64(&tmp_int, val_u64 as i64);

                        // movq tmp_int -> tmp_fp
                        let tmp_fp = self.make_temporary(f_context, DOUBLE_TYPE.clone(), vm);
                        self.backend.emit_mov_fpr_r64(&tmp_fp, &tmp_int);

                        tmp_fp
                    }
                    Value_::Constant(Constant::Float(val)) => {
                        // single-precision constants are not lowered yet
                        unimplemented!()
                    },
                    _ => panic!("expected fpreg")
                }
            }
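
As a standalone reference for the bit-reinterpretation step above, a minimal sketch; f64::to_bits is the safe equivalent of the unsafe mem::transmute used in the commit:

    fn double_to_imm64(val: f64) -> i64 {
        // reinterpret the IEEE-754 bit pattern, then cast for the imm64 mov
        val.to_bits() as i64
    }

    // double_to_imm64(1.5) == 0x3FF8000000000000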
@@ -357,6 +357,11 @@ impl <'a> GraphColoring<'a> {
trace!("Coalescing on {}", self.display_move(m));
// if they are not from the same register group, we cannot coalesce them
if self.ig.get_group_of(m.from) != self.ig.get_group_of(m.to) {
return;
}
let x = self.get_alias(m.from);
let y = self.get_alias(m.to);
trace!("resolve alias: from {} to {}", self.display_node(x), self.display_node(y));
@@ -443,8 +448,6 @@ impl <'a> GraphColoring<'a> {
    }

    fn conservative(&self, u: NodeIndex, v: NodeIndex) -> bool {
        debug_assert!(self.ig.get_group_of(u) == self.ig.get_group_of(v));

        let adj_u = self.adjacent(u);
        let adj_v = self.adjacent(v);
        let nodes = {
@@ -461,7 +464,7 @@ impl <'a> GraphColoring<'a> {
            }
        }

-        k < self.n_regs_for_node(u)
+        k < self.n_regs_for_node(u) && k < self.n_regs_for_node(v)
    }

    fn combine(&mut self, u: NodeIndex, v: NodeIndex) {
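The condition changed above is the Briggs conservative-coalescing test: merging u and v is safe only if the number k of their combined significant-degree neighbours stays below the number of available registers, and the fix checks that bound for both nodes. A minimal standalone sketch (briggs_ok is a hypothetical free function; k is the neighbour count computed in the elided loop):

    fn briggs_ok(k: usize, n_regs_u: usize, n_regs_v: usize) -> bool {
        // the merged node must remain conservatively colorable with
        // respect to both nodes' register bounds
        k < n_regs_u && k < n_regs_v
    }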