Commit a1a433f4 authored by qinsoon

[wip] take TraceHint into consideration when scheduling trace

parent a5943a79
Pipeline #738 failed with stages in 28 minutes and 41 seconds
@@ -480,6 +480,12 @@ impl fmt::Debug for Block {
}
}
impl fmt::Display for Block {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.name())
}
}
impl Block {
pub fn new(entity: MuEntityHeader) -> Block {
Block{hdr: entity, content: None, trace_hint: TraceHint::None, control_flow: ControlFlow::default()}
@@ -503,7 +509,12 @@ impl Block {
}
/// TraceHint is a hint for the compiler to generate a better trace for this block
#[derive(Clone)]
// Note: for a sequence of blocks that form a fast/slow path, only mark the first
// block with a TraceHint, and let the trace scheduler lay out the remaining blocks
// normally. Otherwise, the scheduler takes every TraceHint into consideration and
// may not generate the expected trace.
// FIXME: Issue #18
#[derive(Clone, PartialEq)]
pub enum TraceHint {
/// no hint provided. Trace scheduler should use its own heuristics to decide
None,
@@ -516,8 +527,7 @@ pub enum TraceHint {
}
/// ControlFlow stores compilation info about the control flow of a block
// FIXME: Issue #18
// FIXME: Issue #18
#[derive(Debug, Clone)]
pub struct ControlFlow {
pub preds : Vec<MuID>,
......
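The note above asks callers to hint only the head of a fast/slow-path sequence. A minimal standalone sketch of that convention (the DemoBlock type and the block names are hypothetical, not Zebu's Block API; only the TraceHint variants mirror the enum in this hunk):

#[derive(Clone, PartialEq)]
enum TraceHint { None, FastPath, SlowPath, ReturnSink }

struct DemoBlock { name: &'static str, trace_hint: TraceHint }

fn main() {
    // a three-block slow path: only its head carries the SlowPath hint;
    // the two follow-up blocks stay TraceHint::None so the scheduler
    // lays them out with its normal heuristics, as the note requires
    let blocks = vec![
        DemoBlock { name: "entry",     trace_hint: TraceHint::None },
        DemoBlock { name: "fast",      trace_hint: TraceHint::FastPath },
        DemoBlock { name: "slow_head", trace_hint: TraceHint::SlowPath },
        DemoBlock { name: "slow_mid",  trace_hint: TraceHint::None },
        DemoBlock { name: "slow_tail", trace_hint: TraceHint::None },
        DemoBlock { name: "ret_sink",  trace_hint: TraceHint::ReturnSink },
    ];
    for b in &blocks {
        println!("{:<9} hinted: {}", b.name, b.trace_hint != TraceHint::None);
    }
}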
@@ -90,6 +90,7 @@ impl CompilerPass for RetSink {
let mut new_body = vec![];
for node in block_content.body.iter() {
trace!("{}", node);
match node.v {
TreeNode_::Instruction(Instruction {ref ops, v: Instruction_::Return(ref arg_index), ..}) => {
let branch_to_sink = func.new_inst(Instruction {
@@ -101,6 +102,7 @@ impl CompilerPass for RetSink {
args: arg_index.iter().map(|i| DestArg::Normal(*i)).collect()
})
});
trace!(">> rewrite ret to {}", branch_to_sink);
new_body.push(branch_to_sink);
}
_ => new_body.push(node.clone())
......
@@ -15,7 +15,7 @@
use ast::ir::*;
use vm::VM;
use compiler::CompilerPass;
use utils::LinkedHashSet;
use std::any::Any;
pub struct TraceGen {
@@ -28,6 +28,8 @@ impl TraceGen {
}
}
const LOG_TRACE_SCHEDULE : bool = true;
impl CompilerPass for TraceGen {
fn name(&self) -> &'static str {
self.name
@@ -37,60 +39,139 @@ impl CompilerPass for TraceGen {
self
}
#[allow(unused_variables)]
#[allow(unused_variables)] // vm is not used here
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
// we put the highest-probability edge into the hot trace and the others onto cold paths,
// and traverse the cold paths later
let trace = {
let mut trace : Vec<MuID> = vec![];
let mut work_stack : Vec<MuID> = vec![];
let entry = func.content.as_ref().unwrap().entry;
work_stack.push(entry);
// main work stack
let mut work_stack : LinkedHashSet<MuID> = LinkedHashSet::new();
// slow path queue (they are scheduled after main work stack is finished)
let mut slowpath_queue : LinkedHashSet<MuID> = LinkedHashSet::new();
// return sink (always schedule this after all blocks)
let mut ret_sink : Option<MuID> = None;
let f_content = func.content.as_ref().unwrap();
let entry = f_content.entry;
work_stack.insert(entry);
while !work_stack.is_empty() {
let cur = work_stack.pop().unwrap();
let cur_block = func.content.as_ref().unwrap().get_block(cur);
trace!("check block {}", cur);
trace!("add {:?} to trace", cur);
trace.push(cur);
// get hot path
let hot_edge = {
match cur_block.control_flow.get_hottest_succ() {
Some(tag) => tag,
None => continue
}
while !work_stack.is_empty() || !slowpath_queue.is_empty() {
let cur_block : &Block = {
let ret = if let Some(b) = work_stack.pop_back() {
b
} else if let Some(b) = slowpath_queue.pop_front() {
b
} else {
unreachable!()
};
f_content.get_block(ret)
};
// push cold paths (that are not in the trace and not in the work_stack) to work_stack
let mut cold_edges = cur_block.control_flow.succs.clone();
cold_edges.retain(|x| !x.target.eq(&hot_edge) && !trace.contains(&x.target) && !work_stack.contains(&x.target));
let mut cold_edge_tags = cold_edges.iter().map(|x| x.target).collect::<Vec<MuID>>();
trace!("push cold edges {:?} to work stack", cold_edge_tags);
work_stack.append(&mut cold_edge_tags);
trace_if!(LOG_TRACE_SCHEDULE, "---check block {}---", cur_block);
// append current block to the trace
trace_if!(LOG_TRACE_SCHEDULE, "add {} to trace", cur_block);
trace.push(cur_block.id());
// trying to find next block
let next_block : MuID = match find_next_block(cur_block, func) {
Some(id) => id,
None => continue
};
trace_if!(LOG_TRACE_SCHEDULE, "find next block as {}", f_content.get_block(next_block));
// put other succeeding blocks to different work stacks
let mut all_successors : LinkedHashSet<MuID> =
LinkedHashSet::from_vec(cur_block.control_flow.succs.iter().map(|x| x.target).collect());
// remove next block from it
all_successors.remove(&next_block);
// push other successors to different work queues
for succ_id in all_successors.iter() {
let succ = f_content.get_block(*succ_id);
match succ.trace_hint {
TraceHint::None => {
trace_if!(LOG_TRACE_SCHEDULE, "push {} to work stack", succ);
work_stack.insert(*succ_id);
}
TraceHint::SlowPath => {
trace_if!(LOG_TRACE_SCHEDULE, "push {} to slow path", succ);
slowpath_queue.insert(*succ_id);
}
TraceHint::ReturnSink => {
assert!(ret_sink.is_none(), "cannot have more than one return sink");
trace_if!(LOG_TRACE_SCHEDULE, "set {} as return sink", succ);
ret_sink = Some(*succ_id);
}
TraceHint::FastPath => {
panic!("trying to delay the insertion of a block with fastpath hint: {}. \
Either we missed to pick it as next block, or the current checking \
block has several succeeding blocks with fastpath hint which is \
not reasonable", succ);
}
}
}
// if the next block is not yet in the trace or the work stack, push it to the work stack
if !trace.contains(&hot_edge) && !work_stack.contains(&hot_edge) {
trace!("push hot edge {:?} to work stack", hot_edge);
work_stack.push(hot_edge);
if !trace.contains(&next_block) && !work_stack.contains(&next_block) {
trace_if!(LOG_TRACE_SCHEDULE, "push hot edge {:?} to work stack", next_block);
work_stack.insert(next_block);
} else {
trace!("hot edge {:?} already in trace, ignore", hot_edge);
trace_if!(LOG_TRACE_SCHEDULE, "hot edge {:?} already in trace, ignore", next_block);
}
trace!("");
trace_if!(LOG_TRACE_SCHEDULE, "");
}
// add return sink
if let Some(ret_sink) = ret_sink {
assert!(!trace.contains(&ret_sink), "return sink should not already be scheduled");
trace.push(ret_sink);
}
trace
};
func.block_trace = Some(trace);
}
#[allow(unused_variables)]
#[allow(unused_variables)] // vm is not used here
fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
debug!("trace for {}", func);
debug!("{:?}", func.block_trace.as_ref().unwrap());
}
}
/// returns the next block to schedule after the current block.
/// We first look at trace hints; if no hint indicates the next block,
/// we lay out the successor with the highest probability next (in case of a tie,
/// the first such successor is returned). If the current block has no successors,
/// returns None.
fn find_next_block(cur_block: &Block, func: &MuFunctionVersion) -> Option<MuID> {
let f_content = func.content.as_ref().unwrap();
let ref succs = cur_block.control_flow.succs;
let has_fastpath = succs.iter()
.find(|edge| f_content.get_block(edge.target).trace_hint == TraceHint::FastPath);
if let Some(edge) = has_fastpath {
Some(edge.target)
} else {
// we need to find next path by examining probability
if succs.is_empty() {
None
} else {
let mut hot_blk = succs[0].target;
let mut hot_prob = succs[0].probability;
for edge in succs.iter() {
if edge.probability > hot_prob {
hot_blk = edge.target;
hot_prob = edge.probability;
}
}
Some(hot_blk)
}
}
}
\ No newline at end of file
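Putting the pieces together: the pass prefers a FastPath-hinted successor (falling back to the hottest edge), defers SlowPath-hinted blocks until the main work stack drains, and schedules the ReturnSink block last. The standalone sketch below models that priority order on a toy CFG; the Hint enum, the schedule() helper and the graph are made up for illustration (the real pass works on MuFunctionVersion, uses LinkedHashSet and logs with trace_if!), and the visited-set bookkeeping is deliberately simpler than the pass's trace/work-stack checks.

use std::collections::{BTreeMap, HashSet, VecDeque};

#[derive(Clone, Copy, PartialEq)]
enum Hint { None, FastPath, SlowPath, ReturnSink }

// succs maps a block to (successor, branch probability); hints gives each block's trace hint
fn schedule(entry: &'static str,
            succs: &BTreeMap<&'static str, Vec<(&'static str, f32)>>,
            hints: &BTreeMap<&'static str, Hint>) -> Vec<&'static str> {
    let mut trace = vec![];
    let mut seen: HashSet<&str> = HashSet::new();
    let mut work: Vec<&'static str> = vec![entry];          // main work stack (LIFO)
    let mut slow: VecDeque<&'static str> = VecDeque::new(); // slow paths, scheduled once work drains
    let mut ret_sink: Option<&'static str> = None;          // always scheduled last
    seen.insert(entry);

    while let Some(cur) = work.pop().or_else(|| slow.pop_front()) {
        trace.push(cur);
        let edges = &succs[cur];
        // next block: a FastPath-hinted successor wins, otherwise the hottest edge
        let next = edges.iter()
            .find(|(t, _)| hints[t] == Hint::FastPath)
            .or_else(|| edges.iter().max_by(|a, b| a.1.partial_cmp(&b.1).unwrap()))
            .map(|(t, _)| *t);
        // defer the remaining successors according to their hints
        for (t, _) in edges {
            if Some(*t) == next || !seen.insert(*t) { continue; }
            match hints[t] {
                Hint::SlowPath   => slow.push_back(*t),
                Hint::ReturnSink => ret_sink = Some(*t),
                _                => work.push(*t),
            }
        }
        if let Some(n) = next {
            if seen.insert(n) { work.push(n); }
        }
    }
    if let Some(r) = ret_sink { trace.push(r); }
    trace
}

fn main() {
    // entry -> check -> {fast 60%, slow 30%, ret 10%}; fast -> ret; slow -> ret
    let succs = BTreeMap::from([
        ("entry", vec![("check", 1.0)]),
        ("check", vec![("fast", 0.6), ("slow", 0.3), ("ret", 0.1)]),
        ("fast",  vec![("ret", 1.0)]),
        ("slow",  vec![("ret", 1.0)]),
        ("ret",   vec![]),
    ]);
    let hints = BTreeMap::from([
        ("entry", Hint::None),
        ("check", Hint::None),
        ("fast",  Hint::FastPath),
        ("slow",  Hint::SlowPath),
        ("ret",   Hint::ReturnSink),
    ]);
    // prints ["entry", "check", "fast", "slow", "ret"]:
    // fast follows check immediately, slow is deferred, ret goes last
    println!("{:?}", schedule("entry", &succs, &hints));
}

Using a FIFO for the slow-path queue and a LIFO for the main stack mirrors the LinkedHashSet pop_front / pop_back usage in the diff, and scheduling the return sink only after both drain is what the assertion at the end of visit_function relies on.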