
Commit 8dc8cb77 authored by qinsoon

regalloc should use the usable register count to decide whether a register
should be spilled. Add a simple mechanism to stop a possible infinite loop
in regalloc.
parent 7f88d839
Pipeline #711 passed with stages in 28 minutes and 36 seconds
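In essence, the first change makes the allocator's significant-degree test compare against the number of registers it can actually hand out for a node's register group, rather than the total machine-register count. A minimal standalone sketch of that test, with a hypothetical helper name (the real code calls backend::number_of_usable_regs_in_group, as shown in the diff below):

// Hypothetical sketch, not the project's exact code: a node becomes a
// spill candidate once its interference-graph degree reaches K, where K is
// the number of *usable* (allocatable) registers in its group -- reserved
// registers such as the frame pointer and link register are excluded.
fn is_spill_candidate(degree: usize, usable_regs_in_group: usize) -> bool {
    degree >= usable_regs_in_group
}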
@@ -3747,7 +3747,6 @@ impl <'a> InstructionSelection {
// Note: the stack pointer should now be what it was when the function was called
self.backend.emit_ret(&LR); // return to the Link Register
//self.backend.emit_fake_ret();
self.finish_block();
}

@@ -667,9 +667,7 @@ lazy_static! {
D15.clone()
];
// put caller saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = vec![
pub static ref ALL_USABLE_GPRS : Vec<P<Value>> = vec![
X0.clone(),
X1.clone(),
X2.clone(),
@@ -702,7 +700,9 @@ lazy_static! {
X28.clone(),
//X29.clone(), // Frame Pointer
//X30.clone(), // Link Register
];
pub static ref ALL_USABLE_FPRS : Vec<P<Value>> = vec![
D0.clone(),
D1.clone(),
D2.clone(),
@@ -738,6 +738,14 @@ lazy_static! {
D14.clone(),
D15.clone(),
];
// put caller saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = {
let mut ret = vec![];
ret.extend_from_slice(&ALL_USABLE_GPRS);
ret.extend_from_slice(&ALL_USABLE_FPRS);
ret
};
}
pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
@@ -749,7 +757,7 @@ pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
}
}
pub fn number_of_regs_in_group(group: RegGroup) -> usize {
pub fn number_of_usable_regs_in_group(group: RegGroup) -> usize {
match group {
RegGroup::GPR => ALL_GPRS.len(),
RegGroup::FPR => ALL_FPRS.len(),

@@ -436,11 +436,11 @@ lazy_static! {
map
};
/// all the usable registers for register allocators to assign
/// all the usable general purpose registers for reg allocator to assign
// order matters here (since register allocator will prioritize assigning temporaries
// to a register that appears early)
// we put caller saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = vec![
pub static ref ALL_USABLE_GPRS : Vec<P<Value>> = vec![
// caller saved registers
RAX.clone(),
RCX.clone(),
@@ -457,6 +457,13 @@ lazy_static! {
R13.clone(),
R14.clone(),
R15.clone(),
];
/// all the usable floating point registers for reg allocator to assign
// order matters here (since register allocator will prioritize assigning temporaries
// to a register that appears early)
// we put caller saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_FPRS : Vec<P<Value>> = vec![
// floating point registers
XMM0.clone(),
XMM1.clone(),
@@ -475,6 +482,17 @@ lazy_static! {
XMM14.clone(),
XMM15.clone()
];
/// all the usable registers for register allocators to assign
// order matters here (since register allocator will prioritize assigning temporaries
// to a register that appears early)
// we put caller saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = {
let mut ret = vec![];
ret.extend_from_slice(&ALL_USABLE_GPRS);
ret.extend_from_slice(&ALL_USABLE_FPRS);
ret
};
}
/// creates context for each machine register in FunctionContext
@@ -488,11 +506,11 @@ pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
}
/// gets the number of registers in a certain register group
pub fn number_of_regs_in_group(group: RegGroup) -> usize {
pub fn number_of_usable_regs_in_group(group: RegGroup) -> usize {
match group {
RegGroup::GPR => ALL_GPRS.len(),
RegGroup::GPREX => ALL_GPRS.len(),
RegGroup::FPR => ALL_FPRS.len()
RegGroup::GPR => ALL_USABLE_GPRS.len(),
RegGroup::GPREX => ALL_USABLE_GPRS.len(),
RegGroup::FPR => ALL_USABLE_FPRS.len()
}
}

@@ -62,7 +62,7 @@ pub use compiler::backend::x86_64::is_aliased;
pub use compiler::backend::x86_64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_regs_in_group;
pub use compiler::backend::x86_64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_all_regs;
@@ -137,7 +137,7 @@ pub use compiler::backend::aarch64::is_aliased;
pub use compiler::backend::aarch64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_regs_in_group;
pub use compiler::backend::aarch64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_all_regs;

@@ -32,6 +32,7 @@ use compiler::backend::reg_alloc::graph_coloring::liveness::Move;
use compiler::backend::reg_alloc::graph_coloring::petgraph::graph::NodeIndex;
const COALESCING : bool = true;
const MAX_REWRITE_ITERATIONS_ALLOWED : usize = 10;
/// GraphColoring algorithm
/// based on Appel's book section 11.4
@@ -42,15 +43,16 @@ pub struct GraphColoring<'a> {
pub vm: &'a VM,
pub ig: InterferenceGraph,
/// how many coloring iterations have we done?
/// In case a bug makes the coloring iterate endlessly, we use this count to stop
iteration_count: usize,
/// machine registers, preassigned a color
precolored: LinkedHashSet<NodeIndex>,
/// all colors available
colors: LinkedHashMap<backend::RegGroup, LinkedHashSet<MuID>>,
/// temporaries, not precolored and not yet processed
initial: Vec<NodeIndex>,
/// whether a temp is spillable
// FIXME: not used at the moment
spillable: LinkedHashMap<MuID, bool>,
/// list of low-degree non-move-related nodes
worklist_simplify: LinkedHashSet<NodeIndex>,
@@ -96,14 +98,20 @@ pub struct GraphColoring<'a> {
impl <'a> GraphColoring<'a> {
/// starts coloring
pub fn start (func: &'a mut MuFunctionVersion, cf: &'a mut CompiledFunction, vm: &'a VM) -> GraphColoring<'a> {
GraphColoring::start_with_spill_history(LinkedHashMap::new(), LinkedHashMap::new(), func, cf, vm)
GraphColoring::start_with_spill_history(LinkedHashMap::new(), LinkedHashMap::new(), 0, func, cf, vm)
}
/// restarts coloring with spill history
fn start_with_spill_history(spill_history: LinkedHashMap<MuID, P<Value>>,
spill_scratch_temps: LinkedHashMap<MuID, MuID>,
iteration_count: usize,
func: &'a mut MuFunctionVersion, cf: &'a mut CompiledFunction, vm: &'a VM) -> GraphColoring<'a>
{
assert!(iteration_count < MAX_REWRITE_ITERATIONS_ALLOWED,
"reach graph coloring max rewrite iterations ({}), probably something is going wrong",
MAX_REWRITE_ITERATIONS_ALLOWED);
let iteration_count = iteration_count + 1;
trace!("Initializing coloring allocator...");
cf.mc().trace_mc();
@@ -114,6 +122,7 @@ impl <'a> GraphColoring<'a> {
cf: cf,
vm: vm,
ig: ig,
iteration_count: iteration_count,
precolored: LinkedHashSet::new(),
colors: {
let mut map = LinkedHashMap::new();
@@ -132,7 +141,6 @@ impl <'a> GraphColoring<'a> {
constrained_moves: LinkedHashSet::new(),
alias: LinkedHashMap::new(),
worklist_spill: Vec::new(),
spillable: LinkedHashMap::new(),
spilled_nodes: Vec::new(),
spill_history: spill_history,
spill_scratch_temps: spill_scratch_temps,
@@ -235,7 +243,7 @@ impl <'a> GraphColoring<'a> {
self.rewrite_program();
// recursively redo graph coloring
return GraphColoring::start_with_spill_history(self.spill_history.clone(), self.spill_scratch_temps.clone(), self.func, self.cf, self.vm);
return GraphColoring::start_with_spill_history(self.spill_history.clone(), self.spill_scratch_temps.clone(), self.iteration_count, self.func, self.cf, self.vm);
}
self
@@ -282,12 +290,22 @@ impl <'a> GraphColoring<'a> {
}
fn n_regs_for_node(&self, node: NodeIndex) -> usize {
backend::number_of_regs_in_group(self.ig.get_group_of(node))
backend::number_of_usable_regs_in_group(self.ig.get_group_of(node))
}
fn is_move_related(&mut self, node: NodeIndex) -> bool {
!self.node_moves(node).is_empty()
}
fn is_spillable(&self, temp: MuID) -> bool {
// if a temporary is created as scratch temp for a spilled temporary, we
// should not spill it again (infinite loop otherwise)
if self.spill_scratch_temps.contains_key(&temp) {
false
} else {
true
}
}
fn node_moves(&mut self, node: NodeIndex) -> LinkedHashSet<Move> {
let mut moves = LinkedHashSet::new();
@@ -632,15 +650,7 @@ impl <'a> GraphColoring<'a> {
} else if {
// m is not none
let temp = self.ig.get_temp_of(m.unwrap());
let spillable = {match self.spillable.get(&temp) {
None => {
//by default, it's spillable
true
},
Some(b) => *b
}};
!spillable
!self.is_spillable(temp)
} {
m = Some(n);
} else if (self.ig.get_spill_cost(n) / (self.degree(n) as f32))
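The second change, the guard against endless spill-and-rewrite cycles, boils down to threading an iteration count through the allocator restarts and asserting on it. A minimal standalone sketch of that pattern with hypothetical function names (the real code threads iteration_count through GraphColoring::start_with_spill_history, as in the diff above):

// Hypothetical sketch of the rewrite cap: each restart after spilling bumps
// the counter, and the allocator aborts loudly instead of looping forever
// if a bug keeps producing new spills.
const MAX_REWRITE_ITERATIONS_ALLOWED: usize = 10;

fn color_with_restarts(mut iteration_count: usize) {
    loop {
        assert!(
            iteration_count < MAX_REWRITE_ITERATIONS_ALLOWED,
            "reached graph coloring max rewrite iterations ({})",
            MAX_REWRITE_ITERATIONS_ALLOWED
        );
        iteration_count += 1;
        // hypothetical pass: returns true if new spills were introduced
        let spilled = run_one_coloring_pass();
        if !spilled {
            break; // coloring succeeded without new spills
        }
        // otherwise: rewrite the program with spill code and go around again
    }
}

fn run_one_coloring_pass() -> bool {
    // placeholder for the real build / simplify / coalesce / select pipeline
    false
}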