GitLab will be upgraded from 11.4.5-ce.0 on November 25th 2019 from 4.00pm (AEDT) to 5.00pm (AEDT) due to Critical Security Patch Availability. During the update, GitLab and Mattermost services will not be available.

Commit 751795b1 authored by qinsoon's avatar qinsoon

fixed a few problems

1. asm call no longer uses all argument registers (otherwise it would keep
them alive)
2. spilling a register that is used and defined in one instruction will
result in creating one new temporary, instead of two
3. spilling now deals with floating point
4. SELECT with int8 is implemented using conditional jump (cmov cannot
take reg8)
5. postcall convention now deals correctly with fp return values
6. reg alloc conservative() was broken a few commits ago; fixed it
7. in liveness analysis, when finding a move between a temp and a
register, find the color for the register (such as RAX for EAX)
parent 0fba59ee
Pipeline #193 failed with stage
in 24 minutes and 31 seconds
...@@ -15,6 +15,7 @@ use utils::vec_utils; ...@@ -15,6 +15,7 @@ use utils::vec_utils;
use utils::string_utils; use utils::string_utils;
use ast::ptr::P; use ast::ptr::P;
use ast::ir::*; use ast::ir::*;
use ast::types::*;
use std::collections::HashMap; use std::collections::HashMap;
use std::str; use std::str;
...@@ -778,13 +779,14 @@ impl ASMCodeGen { ...@@ -778,13 +779,14 @@ impl ASMCodeGen {
fn add_asm_call(&mut self, code: String) { fn add_asm_call(&mut self, code: String) {
// a call instruction will use all the argument registers // a call instruction will use all the argument registers
// do not need
let mut uses : HashMap<MuID, Vec<ASMLocation>> = HashMap::new(); let mut uses : HashMap<MuID, Vec<ASMLocation>> = HashMap::new();
for reg in x86_64::ARGUMENT_GPRs.iter() { // for reg in x86_64::ARGUMENT_GPRs.iter() {
uses.insert(reg.id(), vec![]); // uses.insert(reg.id(), vec![]);
} // }
for reg in x86_64::ARGUMENT_FPRs.iter() { // for reg in x86_64::ARGUMENT_FPRs.iter() {
uses.insert(reg.id(), vec![]); // uses.insert(reg.id(), vec![]);
} // }
// defines: return registers // defines: return registers
let mut defines : HashMap<MuID, Vec<ASMLocation>> = HashMap::new(); let mut defines : HashMap<MuID, Vec<ASMLocation>> = HashMap::new();
...@@ -2822,14 +2824,23 @@ pub fn spill_rewrite( ...@@ -2822,14 +2824,23 @@ pub fn spill_rewrite(
vm: &VM) -> Vec<P<Value>> vm: &VM) -> Vec<P<Value>>
{ {
trace!("spill rewrite for x86_64 asm backend"); trace!("spill rewrite for x86_64 asm backend");
trace!("code before spilling");
cf.mc().trace_mc();
let mut new_nodes = vec![]; let mut new_nodes = vec![];
// record code and their insertion point, so we can do the copy/insertion all at once // record code and their insertion point, so we can do the copy/insertion all at once
let mut spill_code_before: HashMap<usize, Vec<Box<ASMCode>>> = HashMap::new(); let mut spill_code_before: HashMap<usize, Vec<Box<ASMCode>>> = HashMap::new();
let mut spill_code_after: HashMap<usize, Vec<Box<ASMCode>>> = HashMap::new(); let mut spill_code_after: HashMap<usize, Vec<Box<ASMCode>>> = HashMap::new();
// map from old to new
let mut temp_for_cur_inst : HashMap<MuID, P<Value>> = HashMap::new();
// iterate through all instructions // iterate through all instructions
for i in 0..cf.mc().number_of_insts() { for i in 0..cf.mc().number_of_insts() {
temp_for_cur_inst.clear();
trace!("---Inst {}---", i); trace!("---Inst {}---", i);
// find use of any register that gets spilled // find use of any register that gets spilled
{ {
...@@ -2843,7 +2854,7 @@ pub fn spill_rewrite( ...@@ -2843,7 +2854,7 @@ pub fn spill_rewrite(
// generate a random new temporary // generate a random new temporary
let temp_ty = val_reg.ty.clone(); let temp_ty = val_reg.ty.clone();
let temp = func.new_ssa(vm.next_id(), temp_ty).clone_value(); let temp = func.new_ssa(vm.next_id(), temp_ty.clone()).clone_value();
vec_utils::add_unique(&mut new_nodes, temp.clone()); vec_utils::add_unique(&mut new_nodes, temp.clone());
trace!("reg {} used in Inst{} is replaced as {}", val_reg, i, temp); trace!("reg {} used in Inst{} is replaced as {}", val_reg, i, temp);
...@@ -2851,7 +2862,12 @@ pub fn spill_rewrite( ...@@ -2851,7 +2862,12 @@ pub fn spill_rewrite(
let code = { let code = {
let mut codegen = ASMCodeGen::new(); let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence(); codegen.start_code_sequence();
codegen.emit_mov_r_mem(&temp, spill_mem);
if is_fp(&temp_ty) {
codegen.emit_movsd_f64_mem64(&temp, spill_mem);
} else {
codegen.emit_mov_r_mem(&temp, spill_mem);
}
codegen.finish_code_sequence_asm() codegen.finish_code_sequence_asm()
}; };
...@@ -2865,6 +2881,8 @@ pub fn spill_rewrite( ...@@ -2865,6 +2881,8 @@ pub fn spill_rewrite(
// replace register reg with temp // replace register reg with temp
cf.mc_mut().replace_use_tmp_for_inst(reg, temp.id(), i); cf.mc_mut().replace_use_tmp_for_inst(reg, temp.id(), i);
temp_for_cur_inst.insert(reg, temp.clone());
} }
} }
} }
...@@ -2878,15 +2896,26 @@ pub fn spill_rewrite( ...@@ -2878,15 +2896,26 @@ pub fn spill_rewrite(
let spill_mem = spills.get(&reg).unwrap(); let spill_mem = spills.get(&reg).unwrap();
let temp_ty = val_reg.ty.clone(); let temp = if temp_for_cur_inst.contains_key(&reg) {
let temp = func.new_ssa(vm.next_id(), temp_ty).clone_value(); temp_for_cur_inst.get(&reg).unwrap().clone()
vec_utils::add_unique(&mut new_nodes, temp.clone()); } else {
let temp_ty = val_reg.ty.clone();
let temp = func.new_ssa(vm.next_id(), temp_ty.clone()).clone_value();
vec_utils::add_unique(&mut new_nodes, temp.clone());
temp
};
trace!("reg {} defined in Inst{} is replaced as {}", val_reg, i, temp); trace!("reg {} defined in Inst{} is replaced as {}", val_reg, i, temp);
let code = { let code = {
let mut codegen = ASMCodeGen::new(); let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence(); codegen.start_code_sequence();
codegen.emit_mov_mem_r(spill_mem, &temp);
if is_fp(&temp.ty) {
codegen.emit_movsd_mem64_f64(spill_mem, &temp);
} else {
codegen.emit_mov_mem_r(spill_mem, &temp);
}
codegen.finish_code_sequence_asm() codegen.finish_code_sequence_asm()
}; };
...@@ -2914,5 +2943,9 @@ pub fn spill_rewrite( ...@@ -2914,5 +2943,9 @@ pub fn spill_rewrite(
cf.mc = Some(new_mc); cf.mc = Some(new_mc);
trace!("spill rewrite done"); trace!("spill rewrite done");
trace!("code after spilling");
cf.mc().trace_mc();
new_nodes new_nodes
} }
\ No newline at end of file
...@@ -359,6 +359,9 @@ impl <'a> GraphColoring<'a> { ...@@ -359,6 +359,9 @@ impl <'a> GraphColoring<'a> {
// if they are not from the same register group, we cannot coalesce them // if they are not from the same register group, we cannot coalesce them
if self.ig.get_group_of(m.from) != self.ig.get_group_of(m.to) { if self.ig.get_group_of(m.from) != self.ig.get_group_of(m.to) {
info!("a move instruction of two temporaries of different reigsters group");
info!("from: {:?}, to: {:?}", m.from, m.to);
return; return;
} }
...@@ -407,7 +410,10 @@ impl <'a> GraphColoring<'a> { ...@@ -407,7 +410,10 @@ impl <'a> GraphColoring<'a> {
} }
} else if (precolored_u && self.ok(u, v)) } else if (precolored_u && self.ok(u, v))
|| (!precolored_u && self.conservative(u, v)) { || (!precolored_u && self.conservative(u, v)) {
trace!("precolored_u&&ok(u,v) || !precolored_u&&conserv(u,v), coalesce and combine the move"); trace!("ok(u, v) = {}", self.ok(u, v));
trace!("conservative(u, v) = {}", self.conservative(u, v));
trace!("precolored_u&&ok(u,v) || !precolored_u&&conserv(u,v), coalesce and combine the move");
self.coalesced_moves.insert(m); self.coalesced_moves.insert(m);
self.combine(u, v); self.combine(u, v);
if !precolored_u { if !precolored_u {
...@@ -458,8 +464,7 @@ impl <'a> GraphColoring<'a> { ...@@ -458,8 +464,7 @@ impl <'a> GraphColoring<'a> {
let mut k = 0; let mut k = 0;
for n in nodes.iter() { for n in nodes.iter() {
// if self.precolored.contains(n) || self.degree(*n) >= self.n_regs_for_node(*n) { if self.precolored.contains(n) || self.degree(*n) >= self.n_regs_for_node(*n) {
if self.degree(*n) >= self.n_regs_for_node(*n) {
k += 1; k += 1;
} }
} }
...@@ -597,11 +602,17 @@ impl <'a> GraphColoring<'a> { ...@@ -597,11 +602,17 @@ impl <'a> GraphColoring<'a> {
trace!("Assigning color to {}", self.display_node(n)); trace!("Assigning color to {}", self.display_node(n));
let mut ok_colors : LinkedHashSet<MuID> = self.colors.get(&self.ig.get_group_of(n)).unwrap().clone(); let mut ok_colors : LinkedHashSet<MuID> = self.colors.get(&self.ig.get_group_of(n)).unwrap().clone();
trace!("all the colors for this temp: {:?}", ok_colors);
for w in self.ig.outedges_of(n) { for w in self.ig.outedges_of(n) {
let w = self.get_alias(w); let w_alias = self.get_alias(w);
match self.ig.get_color_of(w) { match self.ig.get_color_of(w_alias) {
None => {}, // do nothing None => {}, // do nothing
Some(color) => {ok_colors.remove(&color);} Some(color) => {
trace!("color {} is used for its neighbor {:?} (aliasing to {:?})", color, self.display_node(w), self.display_node(w_alias));
ok_colors.remove(&color);
}
} }
} }
trace!("available colors: {:?}", ok_colors); trace!("available colors: {:?}", ok_colors);
......
...@@ -92,6 +92,27 @@ impl InterferenceGraph { ...@@ -92,6 +92,27 @@ impl InterferenceGraph {
} }
/// Records a move between two interference-graph nodes.
///
/// Precolored (machine-register) nodes are first normalised to the node of
/// their color — e.g. a move involving EAX is recorded against RAX — so that
/// coalescing always sees canonical register nodes. Ordinary temporaries pass
/// through unchanged.
///
/// NOTE(review): ids below MACHINE_ID_END are treated as machine registers
/// here — confirm that invariant against where temp ids are allocated.
fn add_move(&mut self, src: NodeIndex, dst: NodeIndex) {
    let src = {
        let temp_src = self.get_temp_of(src);
        if temp_src < MACHINE_ID_END {
            // reuse temp_src rather than calling get_temp_of() a second time
            let alias = backend::get_color_for_precolored(temp_src);
            self.get_node(alias)
        } else {
            src
        }
    };

    let dst = {
        let temp_dst = self.get_temp_of(dst);
        if temp_dst < MACHINE_ID_END {
            // reuse temp_dst rather than calling get_temp_of() a second time
            let alias = backend::get_color_for_precolored(temp_dst);
            self.get_node(alias)
        } else {
            dst
        }
    };

    self.moves.insert(Move{from: src, to: dst});
}
...@@ -126,11 +147,8 @@ impl InterferenceGraph { ...@@ -126,11 +147,8 @@ impl InterferenceGraph {
} }
pub fn is_interferenced_with(&self, node1: NodeIndex, node2: NodeIndex) -> bool { pub fn is_interferenced_with(&self, node1: NodeIndex, node2: NodeIndex) -> bool {
trace!("trying to find edge between {:?} and {:?}", node1, node2);
let edge = self.graph.find_edge(node1, node2); let edge = self.graph.find_edge(node1, node2);
trace!("edge: {:?}", edge);
edge.is_some() edge.is_some()
} }
......
...@@ -263,6 +263,64 @@ fn select_eq_zero() -> VM { ...@@ -263,6 +263,64 @@ fn select_eq_zero() -> VM {
vm vm
} }
#[test]
fn test_select_u8_eq_zero() {
    // Compile the IR builder below into a shared library and load it.
    let lib = testutil::compile_fnc("select_u8_eq_zero", &select_u8_eq_zero);
    unsafe {
        let func: libloading::Symbol<unsafe extern fn(u8) -> u8> =
            lib.get(b"select_u8_eq_zero").unwrap();

        // argument 0: the condition (n == 0) holds, SELECT yields 1
        let zero_case = func(0);
        println!("select_u8_eq_zero(0) = {}", zero_case);
        assert_eq!(zero_case, 1);

        // argument 1: the condition fails, SELECT yields 0
        let one_case = func(1);
        println!("select_u8_eq_zero(1) = {}", one_case);
        assert_eq!(one_case, 0);
    }
}
/// Builds a VM containing `select_u8_eq_zero`: signature (int8) -> (int8),
/// returning 1 when the argument equals 0 and 0 otherwise.
/// Exercises SELECT on an 8-bit value — per the commit message, cmov cannot
/// take reg8, so the backend is expected to lower this SELECT with a
/// conditional jump.
fn select_u8_eq_zero() -> VM {
let vm = VM::new();
// Types: 8-bit operand/result and 1-bit comparison result.
typedef! ((vm) int8 = mu_int(8));
typedef! ((vm) int1 = mu_int(1));
// int8 constants 0 and 1: 0 is the comparison operand, both are SELECT arms.
constdef!((vm) <int8> int8_0 = Constant::Int(0));
constdef!((vm) <int8> int8_1 = Constant::Int(1));
// Declare the function: one int8 parameter, one int8 return value.
funcsig! ((vm) sig = (int8) -> (int8));
funcdecl!((vm) <sig> select_u8_eq_zero);
funcdef! ((vm) <sig> select_u8_eq_zero VERSION select_u8_eq_zero_v1);
// blk entry
block! ((vm, select_u8_eq_zero_v1) blk_entry);
// SSA variables: n (the parameter) and cond (the comparison result).
ssa! ((vm, select_u8_eq_zero_v1) <int8> blk_entry_n);
ssa! ((vm, select_u8_eq_zero_v1) <int1> blk_entry_cond);
// Function-local handles to the constants defined above.
consta!((vm, select_u8_eq_zero_v1) int8_0_local = int8_0);
consta!((vm, select_u8_eq_zero_v1) int8_1_local = int8_1);
// cond = (n == 0)
inst! ((vm, select_u8_eq_zero_v1) blk_entry_inst_cmp:
blk_entry_cond = CMPOP (CmpOp::EQ) blk_entry_n int8_0_local
);
// ret = cond ? 1 : 0
ssa! ((vm, select_u8_eq_zero_v1) <int8> blk_entry_ret);
inst! ((vm, select_u8_eq_zero_v1) blk_entry_inst_select:
blk_entry_ret = SELECT blk_entry_cond int8_1_local int8_0_local
);
inst! ((vm, select_u8_eq_zero_v1) blk_entry_inst_ret:
RET (blk_entry_ret)
);
// Assemble the entry block (parameter: n) and register the function version.
define_block! ((vm, select_u8_eq_zero_v1) blk_entry(blk_entry_n){
blk_entry_inst_cmp, blk_entry_inst_select, blk_entry_inst_ret
});
define_func_ver!((vm) select_u8_eq_zero_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test] #[test]
fn test_select_sge_zero() { fn test_select_sge_zero() {
let lib = testutil::compile_fnc("select_sge_zero", &select_sge_zero); let lib = testutil::compile_fnc("select_sge_zero", &select_sge_zero);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment