
Commit fdec1e4f authored by qinsoon

Add a mov coalescing test. Some code rewriting for liveness analysis,

changed a condition on coalescing. Fixed a bug in spilling rewrite.
Currently test_extern_Func is broken
parent 606b9453
Pipeline #172 passed with stage in 18 minutes and 15 seconds
@@ -74,7 +74,7 @@ impl ASMCode {
false
}
-fn is_block_end(&self, inst: usize) -> bool {
+fn is_last_inst_in_block(&self, inst: usize) -> bool {
for block in self.blocks.values() {
if block.end_inst == inst + 1 {
return true;
@@ -108,6 +108,7 @@ impl ASMCode {
insert_before: HashMap<usize, Vec<Box<ASMCode>>>,
insert_after: HashMap<usize, Vec<Box<ASMCode>>>) -> Box<ASMCode>
{
trace!("insert spilling code");
let mut ret = ASMCode {
name: self.name.clone(),
code: vec![],
@@ -124,8 +125,11 @@ impl ASMCode {
let mut location_map : HashMap<usize, usize> = HashMap::new();
for i in 0..self.number_of_insts() {
trace!("Inst{}", i);
if self.is_block_start(i) {
cur_block_start = i + inst_offset;
trace!(" block start is shifted to {}", cur_block_start);
}
// insert code before this instruction
@@ -133,6 +137,7 @@ impl ASMCode {
for insert in insert_before.get(&i).unwrap() {
ret.append_code_sequence_all(insert);
inst_offset += insert.number_of_insts();
trace!(" inserted {} insts before", insert.number_of_insts());
}
}
@@ -141,6 +146,7 @@ impl ASMCode {
// old ith inst is now the (i + inst_offset)th instruction
location_map.insert(i, i + inst_offset);
trace!(" Inst{} is now Inst{}", i, i + inst_offset);
// this instruction has been offset by several instructions ('inst_offset')
// update its info
@@ -169,19 +175,28 @@ impl ASMCode {
for insert in insert_after.get(&i).unwrap() {
ret.append_code_sequence_all(insert);
inst_offset += insert.number_of_insts();
trace!(" inserted {} insts after", insert.number_of_insts());
}
}
-if self.is_block_end(i) {
-    let cur_block_end = i + inst_offset;
+if self.is_last_inst_in_block(i) {
+    let cur_block_end = i + 1 + inst_offset;
// copy the block
let (name, block) = self.get_block_by_inst(i);
-let mut new_block = block.clone();
-new_block.start_inst = cur_block_start;
+let mut new_block = ASMBlock{
+    start_inst: cur_block_start,
+    end_inst: cur_block_end,
+    livein: vec![],
+    liveout: vec![]
+};
+trace!("  old block: {:?}", block);
+trace!("  new block: {:?}", new_block);
+cur_block_start = usize::MAX;
-new_block.end_inst = cur_block_end;
// add to the new code
ret.blocks.insert(name.clone(), new_block);
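A quick worked example of the off-by-one this fixes (indices hypothetical): suppose a block covers old instructions 3..6, so end_inst == 6 (exclusive) and its last instruction is i == 5, and suppose inst_offset == 2 spill instructions have been inserted earlier. The old code set the new end to i + inst_offset == 7, silently dropping the block's final instruction; the correct exclusive end is i + 1 + inst_offset == 8, which is what cur_block_end now computes.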
@@ -780,18 +795,11 @@ impl ASMCodeGen {
}
fn add_asm_ret(&mut self, code: String) {
-let uses : HashMap<MuID, Vec<ASMLocation>> = {
-    let mut ret = HashMap::new();
-    for reg in x86_64::RETURN_GPRs.iter() {
-        ret.insert(reg.id(), vec![]);
-    }
-    for reg in x86_64::RETURN_FPRs.iter() {
-        ret.insert(reg.id(), vec![]);
-    }
-    ret
-};
-self.add_asm_inst(code, hashmap!{}, uses, false);
+// the return instruction does not use anything (in particular, not RETURN REGS);
+// otherwise it would keep RETURN REGS alive,
+// and if there is no actual move into RETURN REGS, they would stay alive for very long
+// and prevent anything else from using those registers
+self.add_asm_inst(code, hashmap!{}, hashmap!{}, false);
}
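The comment above follows directly from the liveness equation used by the pass below, in[n] = use[n] ∪ (out[n] − def[n]): if ret were modelled as using, say, RAX, then RAX would enter in[ret], and in a function with no def of RAX the set would propagate backwards through every predecessor up to the entry, keeping the register live (and unavailable to the allocator) across the whole function.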
fn add_asm_branch(&mut self, code: String, target: MuName) {
......
@@ -390,6 +390,8 @@ impl <'a> GraphColoring<'a> {
self.add_worklist(u);
}
} else if precolored_v || self.ig.is_adj(u, v) {
trace!("precolored_v: {}", precolored_v);
trace!("is_adj(u, v): {}", self.ig.is_adj(u, v));
trace!("v is precolored or u,v is adjacent, the move is constrained");
self.constrained_moves.insert(m);
if !precolored_u {
@@ -430,11 +432,11 @@ impl <'a> GraphColoring<'a> {
fn ok(&self, u: NodeIndex, v: NodeIndex) -> bool {
for t in self.adjacent(v).iter() {
let t = *t;
-if !self.precolored.contains(&t)
-    || self.degree(t) < self.n_regs_for_node(t)
-    || self.ig.is_adj(t, u) {
+if !(self.degree(t) < self.n_regs_for_node(t)
+    || self.precolored.contains(&t)
+    || self.ig.is_adj(t, u)) {
return false;
}
}
}
true
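The change above is a precedence fix in George's coalescing test: a neighbour t of v is acceptable if it has insignificant degree, or is precolored, or already interferes with u; the old code negated only the precolored clause. A minimal standalone sketch (ok_old/ok_new and pre/low_degree/adj are hypothetical stand-ins for the precolored check, the degree comparison, and is_adj):

// old: `if !pre || low_degree || adj { return false; }`
fn ok_old(pre: bool, low_degree: bool, adj: bool) -> bool {
    !(!pre || low_degree || adj)
}

// new: `if !(low_degree || pre || adj) { return false; }`
fn ok_new(pre: bool, low_degree: bool, adj: bool) -> bool {
    low_degree || pre || adj
}

fn main() {
    // a low-degree, non-precolored neighbour is safe per George's test,
    // but the old predicate rejected it, blocking almost all coalescing
    assert!(ok_new(false, true, false));
    assert!(!ok_old(false, true, false));
}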
@@ -453,7 +455,8 @@ impl <'a> GraphColoring<'a> {
let mut k = 0;
for n in nodes.iter() {
-if self.precolored.contains(n) || self.degree(*n) >= self.n_regs_for_node(*n) {
+// if self.precolored.contains(n) || self.degree(*n) >= self.n_regs_for_node(*n) {
+if self.degree(*n) >= self.n_regs_for_node(*n) {
k += 1;
}
}
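This appears to be the "changed condition on coalescing" from the commit message: the Briggs-style conservative test counts the neighbours of significant degree (k) and allows the coalesce only while k stays below the number of available registers. The commented-out line also counted every precolored neighbour towards k regardless of its degree; the new condition counts a neighbour only when its degree is actually significant, so a low-degree precolored neighbour no longer blocks the coalesce.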
......
use compiler::machine_code::CompiledFunction;
use ast::ir::*;
use compiler::backend;
-use utils::vec_utils;
+use utils::LinkedHashSet;
use std::collections::{HashMap, HashSet};
@@ -101,7 +100,12 @@ impl InterferenceGraph {
}
pub fn is_interferenced_with(&self, node1: NodeIndex, node2: NodeIndex) -> bool {
-self.graph.find_edge(node1, node2).is_some()
+trace!("trying to find edge between {:?} and {:?}", node1, node2);
+let edge = self.graph.find_edge(node1, node2);
+trace!("edge: {:?}", edge);
+edge.is_some()
}
pub fn color_node(&mut self, node: NodeIndex, color: MuID) {
@@ -160,6 +164,9 @@ impl InterferenceGraph {
}
pub fn print(&self, context: &FunctionContext) {
use compiler::backend::reg_alloc::graph_coloring::petgraph::dot::Dot;
use compiler::backend::reg_alloc::graph_coloring::petgraph::dot::Config;
debug!("");
debug!("Interference Graph");
@@ -175,72 +182,74 @@ impl InterferenceGraph {
}
debug!("graph:");
debug!("{:?}", self.graph);
debug!("\n\n{:?}\n", Dot::with_config(&self.graph, &[Config::EdgeNoLabel]));
debug!("");
}
}
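The output of petgraph's Dot::with_config(&self.graph, &[Config::EdgeNoLabel]) is ordinary Graphviz dot syntax with edge labels suppressed, so the logged graph can be copied out of the trace and rendered with, for example, `dot -Tpng graph.dot -o graph.png`.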
#[allow(unused_variables)]
-fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
+fn build_live_set (cf: &mut CompiledFunction) {
info!("start building live set");
let n_insts = cf.mc().number_of_insts();
-let mut livein : Vec<Vec<MuID>> = vec![vec![]; n_insts];
-let mut liveout : Vec<Vec<MuID>> = vec![vec![]; n_insts];
+let mut livein : Vec<LinkedHashSet<MuID>> = vec![LinkedHashSet::new(); n_insts];
+let mut liveout : Vec<LinkedHashSet<MuID>> = vec![LinkedHashSet::new(); n_insts];
let mut is_changed = true;
while is_changed {
// reset
is_changed = false;
for n in 0..n_insts {
-let in_set_old = livein[n].to_vec(); // copy to new vec
-let out_set_old = liveout[n].to_vec();
-// in[n] <- use[n] + (out[n] - def[n])
-// (1) in[n] = use[n]
-let mut in_set_new = vec![];
-in_set_new.extend_from_slice(&cf.mc().get_inst_reg_uses(n));
-// (2) diff = out[n] - def[n]
-let mut diff = liveout[n].to_vec();
-for def in cf.mc().get_inst_reg_defines(n) {
-    vec_utils::remove_value(&mut diff, def);
+let in_set_old = livein[n].clone();
+let out_set_old = liveout[n].clone();
+// in[n] <- use[n] + (out[n] - def[n])
+{
+    let ref mut inset = livein[n];
+    inset.clear();
+    // (1) in[n] = use[n]
+    inset.add_from_vec(cf.mc().get_inst_reg_uses(n));
+    // (2) + out[n]
+    inset.add_all(liveout[n].clone());
+    // (3) - def[n]
+    for def in cf.mc().get_inst_reg_defines(n) {
+        inset.remove(&def);
+    }
+}
-// (3) in[n] = in[n] + diff
-vec_utils::append_unique(&mut in_set_new, &mut diff);
-// update livein[n]
-livein[n].clear();
-livein[n].extend_from_slice(&in_set_new);
// out[n] <- union(in[s] for every successor s of n)
-let mut union = vec![];
-for s in cf.mc().get_succs(n) {
-    vec_utils::append_clone_unique(&mut union, &livein[*s]);
+{
+    let ref mut outset = liveout[n];
+    outset.clear();
+    for s in cf.mc().get_succs(n) {
+        outset.add_all(livein[*s].clone());
+    }
+}
-// update liveout[n]
-liveout[n].clear();
-liveout[n].extend_from_slice(&union);
-let n_changed = !vec_utils::is_identical_ignore_order(&livein[n], &in_set_old)
-    || !vec_utils::is_identical_ignore_order(&liveout[n], &out_set_old);
+// is in/out changed in this iteration?
+let n_changed = !in_set_old.equals(&livein[n]) || !out_set_old.equals(&liveout[n]);
is_changed = is_changed || n_changed;
}
}
for block in cf.mc().get_all_blocks().to_vec() {
let start_inst = cf.mc().get_block_range(&block).unwrap().start;
-cf.mc_mut().set_ir_block_livein(&block, livein[start_inst].to_vec());
+cf.mc_mut().set_ir_block_livein(&block, livein[start_inst].clone().to_vec());
let end_inst = cf.mc().get_block_range(&block).unwrap().end;
-cf.mc_mut().set_ir_block_liveout(&block, liveout[end_inst].to_vec());
+cf.mc_mut().set_ir_block_liveout(&block, liveout[end_inst].clone().to_vec());
}
}
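For reference, the same backwards fixed point as a self-contained sketch over plain HashSets (the uses/defs/succs tables are hypothetical stand-ins for cf.mc()'s per-instruction queries). Note the order of operations: defs are removed from the copy of out[n] before uses are added, so a register that an instruction both uses and defines still shows up live-in:

use std::collections::HashSet;

// in[n] = use[n] ∪ (out[n] − def[n]); out[n] = ∪ in[s] over successors s.
fn live_sets(uses: &[Vec<usize>], defs: &[Vec<usize>], succs: &[Vec<usize>])
             -> (Vec<HashSet<usize>>, Vec<HashSet<usize>>) {
    let n = uses.len();
    let mut livein = vec![HashSet::new(); n];
    let mut liveout = vec![HashSet::new(); n];
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..n {
            // out[i] = union of in[s] for every successor s
            let out: HashSet<usize> =
                succs[i].iter().flat_map(|&s| livein[s].iter().copied()).collect();
            // in[i] = use[i] ∪ (out[i] − def[i]): remove defs first, then add uses
            let mut inn = out.clone();
            for d in &defs[i] { inn.remove(d); }
            inn.extend(uses[i].iter().copied());
            if inn != livein[i] || out != liveout[i] {
                livein[i] = inn;
                liveout[i] = out;
                changed = true;
            }
        }
    }
    (livein, liveout)
}

The commit swaps the old Vec-based sets for LinkedHashSet, which gives constant-time membership tests while keeping a deterministic iteration order.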
// from Tailoring Graph-coloring Register Allocation For Runtime Compilation, Figure 4
pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion) -> InterferenceGraph {
-build_live_set(cf, func);
+build_live_set(cf);
let mut ig = InterferenceGraph::new();
......
@@ -24,12 +24,40 @@ impl<K: Hash + Eq> LinkedHashSet<K> {
ret
}
pub fn to_vec(mut self) -> Vec<K> {
let mut ret = vec![];
while !self.is_empty() {
ret.push(self.pop_front().unwrap());
}
ret
}
pub fn clear(&mut self) {
self.0.clear();
}
}
impl<K: Hash + Eq, S: BuildHasher> LinkedHashSet<K, S> {
pub fn len(&self) -> usize {
self.0.len()
}
pub fn pop_front(&mut self) -> Option<K> {
match self.0.pop_front() {
Some((k, _)) => Some(k),
None => None
}
}
pub fn pop_back(&mut self) -> Option<K> {
match self.0.pop_back() {
Some((k, _)) => Some(k),
None => None
}
}
pub fn insert(&mut self, k: K) -> Option<()> {
self.0.insert(k, ())
}
@@ -56,19 +84,32 @@ impl<K: Hash + Eq, S: BuildHasher> LinkedHashSet<K, S> {
self.0.is_empty()
}
-pub fn pop_front(&mut self) -> Option<K> {
-    match self.0.pop_front() {
-        Some((k, _)) => Some(k),
-        None => None
-    }
-}
pub fn add_all(&mut self, mut other: Self) {
while !other.is_empty() {
let entry = other.pop_front().unwrap();
self.insert(entry);
}
}
pub fn add_from_vec(&mut self, mut vec: Vec<K>) {
while !vec.is_empty() {
self.insert(vec.pop().unwrap());
}
}
pub fn equals(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
for ele in self.iter() {
if !other.contains(ele) {
return false;
}
}
true
}
}
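A small usage sketch of the new helpers (assuming utils::LinkedHashSet from this crate): add_from_vec drains its Vec from the back, add_all consumes the other set, and equals compares membership only, ignoring insertion order:

use utils::LinkedHashSet;

fn main() {
    let mut a: LinkedHashSet<u32> = LinkedHashSet::new();
    a.add_from_vec(vec![1, 2, 3]);  // pops from the back: inserts 3, 2, 1

    let mut b: LinkedHashSet<u32> = LinkedHashSet::new();
    b.insert(1);
    b.insert(2);
    b.insert(3);

    assert!(a.equals(&b));          // same members, different insertion order

    a.add_all(b);                   // consumes b; re-inserted keys don't grow the set
    assert_eq!(a.len(), 3);
}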
impl<K: Hash + Eq + Clone> Clone for LinkedHashSet<K> {
......
@@ -260,7 +260,7 @@ macro_rules! inst {
};
// RET
-(($vm: expr, $fv: ident) $name: ident: RET ($($val: ident), *)) => {
+(($vm: expr, $fv: ident) $name: ident: RET ($($val: ident), +)) => {
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: None,
@@ -271,4 +271,13 @@ macro_rules! inst {
})
});
};
// RET (no value)
(($vm: expr, $fv: ident) $name: ident: RET) => {
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: None,
ops: RwLock::new(vec![]),
v: Instruction_::Return(vec![])
});
};
}
\ No newline at end of file
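The parenthesised RET arm now requires at least one operand (+ instead of *), with a separate arm for a bare RET, so a value-less return no longer has to match the value-returning pattern. Hypothetical usage, mirroring the new test below (vm, fv, and x assumed in scope):

inst!((vm, fv) ret_with_value: RET (x));  // expands to Instruction_::Return with one operand
inst!((vm, fv) ret_no_value:   RET);      // expands to Instruction_::Return(vec![])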
@@ -564,5 +564,85 @@ fn create_simple_spill() -> VM {
vm.define_func_version(func_ver);
vm
}
#[test]
#[cfg(target_arch = "x86_64")]
fn test_coalesce_branch_moves() {
VM::start_logging_trace();
let vm = Arc::new(coalesce_branch_moves());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
let func_id = vm.id_of("coalesce_branch_moves");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
// check
let fv_id = func_ver.id();
let cfs = vm.compiled_funcs().read().unwrap();
let cf = cfs.get(&fv_id).unwrap().read().unwrap();
let mut n_mov_insts = 0;
let mc = cf.mc();
for i in 0..mc.number_of_insts() {
if mc.is_move(i) {
n_mov_insts += 1;
}
}
assert!(n_mov_insts == 1, "The function should not yield any mov instructions other than mov %rsp -> %rbp (otherwise some expected coalescing failed)");
}
}
fn coalesce_branch_moves() -> VM {
let vm = VM::new();
typedef! ((vm) int64 = mu_int(64));
funcsig! ((vm) sig = (int64, int64, int64, int64) -> ());
funcdecl!((vm) <sig> coalesce_branch_moves);
funcdef! ((vm) <sig> coalesce_branch_moves VERSION coalesce_branch_moves_v1);
// blk entry
block! ((vm, coalesce_branch_moves_v1) blk_entry);
ssa! ((vm, coalesce_branch_moves_v1) <int64> arg0);
ssa! ((vm, coalesce_branch_moves_v1) <int64> arg1);
ssa! ((vm, coalesce_branch_moves_v1) <int64> arg2);
ssa! ((vm, coalesce_branch_moves_v1) <int64> arg3);
block! ((vm, coalesce_branch_moves_v1) blk1);
inst! ((vm, coalesce_branch_moves_v1) blk_entry_branch:
BRANCH blk1 (arg0, arg1, arg2, arg3)
);
define_block!((vm, coalesce_branch_moves_v1) blk_entry (arg0, arg1, arg2, arg3) {blk_entry_branch});
ssa! ((vm, coalesce_branch_moves_v1) <int64> blk1_arg0);
ssa! ((vm, coalesce_branch_moves_v1) <int64> blk1_arg1);
ssa! ((vm, coalesce_branch_moves_v1) <int64> blk1_arg2);
ssa! ((vm, coalesce_branch_moves_v1) <int64> blk1_arg3);
inst! ((vm, coalesce_branch_moves_v1) blk1_ret:
RET
);
define_block!((vm, coalesce_branch_moves_v1) blk1 (blk1_arg0, blk1_arg1, blk1_arg2, blk1_arg3) {
blk1_ret
});
define_func_ver!((vm) coalesce_branch_moves_v1 (entry: blk_entry){
blk_entry, blk1
});
vm
}
\ No newline at end of file