Commit 6c64ef82 authored by qinsoon's avatar qinsoon

rewrite liveness analysis. Do local LA first then do global LA.

No regression, but the new tests still don't run. I marked them
as expected-failure or skip, and will work on them later.
parent cf103d2f
...@@ -265,7 +265,7 @@ impl ASMCode { ...@@ -265,7 +265,7 @@ impl ASMCode {
} }
fn control_flow_analysis(&mut self) { fn control_flow_analysis(&mut self) {
const TRACE_CFA : bool = false; const TRACE_CFA : bool = true;
// control flow analysis // control flow analysis
let n_insts = self.number_of_insts(); let n_insts = self.number_of_insts();
...@@ -276,6 +276,9 @@ impl ASMCode { ...@@ -276,6 +276,9 @@ impl ASMCode {
let block_start = { let block_start = {
let mut ret = vec![]; let mut ret = vec![];
for block in blocks.values() { for block in blocks.values() {
if TRACE_CFA {
trace!("Block starts at {}", block.start_inst);
}
ret.push(block.start_inst); ret.push(block.start_inst);
} }
ret ret
...@@ -285,19 +288,38 @@ impl ASMCode { ...@@ -285,19 +288,38 @@ impl ASMCode {
if TRACE_CFA { if TRACE_CFA {
trace!("---inst {}---", i); trace!("---inst {}---", i);
} }
// determine predecessor - if cur is not block start, its predecessor is previous insts
let is_block_start = block_start.contains(&i); // skip symbol
if !is_block_start { if asm[i].is_symbol {
if i > 0 { continue;
if TRACE_CFA { }
trace!("inst {}: not a block start", i);
trace!("inst {}: set PREDS as previous inst {}", i, i - 1); // determine predecessor
// we check if it is a fallthrough block
if i != 0 {
let last_inst = ASMCode::find_prev_inst(i, asm);
match last_inst {
Some(last_inst) => {
let last_inst_branch = asm[last_inst].branch.clone();
match last_inst_branch {
// if it is a fallthrough, we set its preds as last inst
ASMBranchTarget::None => {
if !asm[i].preds.contains(&last_inst) {
asm[i].preds.push(last_inst);
if TRACE_CFA {
trace!("inst {}: set PREDS as previous inst - fallthrough {}", i, last_inst);
}
}
}
// otherwise do nothing
_ => {}
}
} }
asm[i].preds.push(i - 1); None => {}
} }
} else {
// if cur is a branch target, we already set its predecessor
// if cur is a fall-through block, we set it in a sanity check pass
} }
// determine successor // determine successor
...@@ -324,7 +346,7 @@ impl ASMCode { ...@@ -324,7 +346,7 @@ impl ASMCode {
// branch to target // branch to target
let target_n = self.blocks.get(target).unwrap().start_inst; let target_n = self.blocks.get(target).unwrap().start_inst;
// cur insts' succ is target and next inst // cur insts' succ is target
asm[i].succs.push(target_n); asm[i].succs.push(target_n);
if TRACE_CFA { if TRACE_CFA {
...@@ -333,18 +355,25 @@ impl ASMCode { ...@@ -333,18 +355,25 @@ impl ASMCode {
trace!("inst {}: set SUCCS as branch target {}", i, target_n); trace!("inst {}: set SUCCS as branch target {}", i, target_n);
} }
if i < n_insts - 1 {
if TRACE_CFA {
trace!("inst {}: set SUCCS as next inst", i + 1);
}
asm[i].succs.push(i + 1);
}
// target's pred is cur // target's pred is cur
asm[target_n].preds.push(i); asm[target_n].preds.push(i);
if TRACE_CFA { if TRACE_CFA {
trace!("inst {}: set PREDS as {}", target_n, i); trace!("inst {}: set PREDS as {}", target_n, i);
} }
if let Some(next_inst) = ASMCode::find_next_inst(i, asm) {
// cur succ is next inst
asm[i].succs.push(next_inst);
// next inst's pred is cur
asm[next_inst].preds.push(i);
if TRACE_CFA {
trace!("inst {}: SET SUCCS as c-branch fallthrough target {}", i, next_inst);
}
} else {
panic!("conditional branch does not have a fallthrough target");
}
}, },
ASMBranchTarget::Return => { ASMBranchTarget::Return => {
if TRACE_CFA { if TRACE_CFA {
...@@ -357,21 +386,52 @@ impl ASMCode { ...@@ -357,21 +386,52 @@ impl ASMCode {
if TRACE_CFA { if TRACE_CFA {
trace!("inst {}: not a branch inst", i); trace!("inst {}: not a branch inst", i);
} }
if i < n_insts - 1 { if let Some(next_inst) = ASMCode::find_next_inst(i, asm) {
if TRACE_CFA { if TRACE_CFA {
trace!("inst {}: set SUCCS as next inst {}", i, i + 1); trace!("inst {}: set SUCCS as next inst {}", i, next_inst);
} }
asm[i].succs.push(i + 1); asm[i].succs.push(next_inst);
} }
} }
} }
} }
}
/// Returns the index of the nearest non-symbol (real) instruction strictly
/// before instruction `i`, searching backwards, or `None` if every earlier
/// entry is a symbol (or `i` is 0).
///
/// Used by control_flow_analysis to find the fallthrough predecessor of a
/// block-start instruction, skipping over symbol pseudo-instructions.
fn find_prev_inst(i: usize, asm: &Vec<ASMInst>) -> Option<usize> {
    // Scan i-1, i-2, ..., 0 (inclusive of index 0). The previous version
    // looped `while cur != 0` and therefore never examined index 0: for
    // i == 1 it always returned None, and a real instruction at slot 0 was
    // never reported as a predecessor, losing the fallthrough CFG edge.
    (0..i).rev().find(|&idx| !asm[idx].is_symbol)
}
// a sanity check for fallthrough blocks fn find_next_inst(i: usize, asm: &Vec<ASMInst>) -> Option<usize> {
for i in 0..n_insts { if i >= asm.len() - 1 {
if i != 0 && asm[i].preds.len() == 0 { None
asm[i].preds.push(i - 1); } else {
let mut cur = i + 1;
while cur < asm.len() {
if !asm[cur].is_symbol {
return Some(cur);
}
cur += 1;
} }
None
} }
} }
...@@ -667,6 +727,8 @@ struct ASMInst { ...@@ -667,6 +727,8 @@ struct ASMInst {
uses: LinkedHashMap<MuID, Vec<ASMLocation>>, uses: LinkedHashMap<MuID, Vec<ASMLocation>>,
is_mem_op_used: bool, is_mem_op_used: bool,
is_symbol: bool,
preds: Vec<usize>, preds: Vec<usize>,
succs: Vec<usize>, succs: Vec<usize>,
branch: ASMBranchTarget branch: ASMBranchTarget
...@@ -679,6 +741,7 @@ impl ASMInst { ...@@ -679,6 +741,7 @@ impl ASMInst {
defines: LinkedHashMap::new(), defines: LinkedHashMap::new(),
uses: LinkedHashMap::new(), uses: LinkedHashMap::new(),
is_mem_op_used: false, is_mem_op_used: false,
is_symbol: true,
preds: vec![], preds: vec![],
succs: vec![], succs: vec![],
branch: ASMBranchTarget::None branch: ASMBranchTarget::None
...@@ -697,6 +760,7 @@ impl ASMInst { ...@@ -697,6 +760,7 @@ impl ASMInst {
code: inst, code: inst,
defines: defines, defines: defines,
uses: uses, uses: uses,
is_symbol: false,
is_mem_op_used: is_mem_op_used, is_mem_op_used: is_mem_op_used,
preds: vec![], preds: vec![],
succs: vec![], succs: vec![],
...@@ -709,6 +773,7 @@ impl ASMInst { ...@@ -709,6 +773,7 @@ impl ASMInst {
code: "".to_string(), code: "".to_string(),
defines: LinkedHashMap::new(), defines: LinkedHashMap::new(),
uses: LinkedHashMap::new(), uses: LinkedHashMap::new(),
is_symbol: false,
is_mem_op_used: false, is_mem_op_used: false,
preds: vec![], preds: vec![],
succs: vec![], succs: vec![],
......
...@@ -356,7 +356,8 @@ impl <'a> InstructionSelection { ...@@ -356,7 +356,8 @@ impl <'a> InstructionSelection {
} }
// jcc // jcc
_ => { _ => {
let blk_true = format!("{}_select_true", node.id()); let blk_true = format!("{}_select_true", node.id());
let blk_false = format!("{}_select_false", node.id());
let blk_end = format!("{}_select_end", node.id()); let blk_end = format!("{}_select_end", node.id());
// jump to blk_true if true // jump to blk_true if true
...@@ -382,6 +383,14 @@ impl <'a> InstructionSelection { ...@@ -382,6 +383,14 @@ impl <'a> InstructionSelection {
_ => unimplemented!() _ => unimplemented!()
} }
// finishing current block
let cur_block = self.current_block.as_ref().unwrap().clone();
self.backend.end_block(cur_block.clone());
// blk_false:
self.current_block = Some(blk_false.clone());
self.backend.start_block(blk_false.clone());
// mov false result here // mov false result here
self.emit_move_node_to_value(&tmp_res, &false_val, f_content, f_context, vm); self.emit_move_node_to_value(&tmp_res, &false_val, f_content, f_context, vm);
...@@ -505,6 +514,9 @@ impl <'a> InstructionSelection { ...@@ -505,6 +514,9 @@ impl <'a> InstructionSelection {
} else { } else {
panic!("expecting ireg cond to be either iimm or ireg: {}", cond); panic!("expecting ireg cond to be either iimm or ireg: {}", cond);
} }
self.finish_block();
self.start_block(format!("{}_switch_not_met_case_{}", node.id(), case_op_index));
} }
// emit default // emit default
...@@ -1729,6 +1741,9 @@ impl <'a> InstructionSelection { ...@@ -1729,6 +1741,9 @@ impl <'a> InstructionSelection {
self.backend.emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size); self.backend.emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size);
self.backend.emit_jg(blk_alloc_large.clone()); self.backend.emit_jg(blk_alloc_large.clone());
self.finish_block();
self.start_block(format!("{}_allocsmall", node.id()));
// alloc small here // alloc small here
let tmp_res = self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_content, f_context, vm); let tmp_res = self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_content, f_context, vm);
...@@ -1827,6 +1842,10 @@ impl <'a> InstructionSelection { ...@@ -1827,6 +1842,10 @@ impl <'a> InstructionSelection {
let slowpath = format!("{}_allocslow", node.id()); let slowpath = format!("{}_allocslow", node.id());
self.backend.emit_jg(slowpath.clone()); self.backend.emit_jg(slowpath.clone());
// finish current block
self.finish_block();
self.start_block(format!("{}_updatecursor", node.id()));
// update cursor // update cursor
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset] // ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm); self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
...@@ -3449,6 +3468,16 @@ impl <'a> InstructionSelection { ...@@ -3449,6 +3468,16 @@ impl <'a> InstructionSelection {
const_mem_val const_mem_val
} }
} }
/// Ends the block currently being emitted by telling the backend to close it.
///
/// Panics if there is no current block (`self.current_block` is `None`) —
/// callers must only invoke this between start_block/finish_block pairs.
fn finish_block(&mut self) {
    // Clone out of the Option so we don't hold a borrow of self while
    // calling into self.backend; the String is then moved into end_block
    // (the extra .clone() at the call site was redundant — cur_block was
    // already an owned value used nowhere else).
    let cur_block = self.current_block.as_ref().unwrap().clone();
    self.backend.end_block(cur_block);
}
/// Begins emitting a new block: notifies the backend and records the block
/// name as the current one.
fn start_block(&mut self, block: String) {
    // The backend gets its own copy of the name; the original String is
    // then moved into current_block. (backend cannot observe current_block,
    // so the order of these two statements is immaterial.)
    self.backend.start_block(block.clone());
    self.current_block = Some(block);
}
} }
impl CompilerPass for InstructionSelection { impl CompilerPass for InstructionSelection {
......
...@@ -175,7 +175,7 @@ pub trait MachineCode { ...@@ -175,7 +175,7 @@ pub trait MachineCode {
fn set_ir_block_liveout(&mut self, block: &str, set: Vec<MuID>); fn set_ir_block_liveout(&mut self, block: &str, set: Vec<MuID>);
fn get_all_blocks(&self) -> Vec<MuName>; fn get_all_blocks(&self) -> Vec<MuName>;
// returns [start_inst, end_inst), inclusive at both end // returns [start_inst, end_inst) // end_inst not included
fn get_block_range(&self, block: &str) -> Option<ops::Range<usize>>; fn get_block_range(&self, block: &str) -> Option<ops::Range<usize>>;
// functions for rewrite // functions for rewrite
......
...@@ -85,6 +85,7 @@ impl Default for CompilerPolicy { ...@@ -85,6 +85,7 @@ impl Default for CompilerPolicy {
} }
} }
// rewrite parts of the hprof crates to print via log (instead of print!())
use self::hprof::ProfileNode; use self::hprof::ProfileNode;
use std::rc::Rc; use std::rc::Rc;
......
...@@ -142,8 +142,8 @@ impl Inlining { ...@@ -142,8 +142,8 @@ impl Inlining {
let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap(); let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
let inlined_fv_guard = inlined_fv_lock.read().unwrap(); let inlined_fv_guard = inlined_fv_lock.read().unwrap();
trace!("QINSOON_DEBUG: orig_content: {:?}", inlined_fv_guard.get_orig_ir().unwrap()); trace!("orig_content: {:?}", inlined_fv_guard.get_orig_ir().unwrap());
trace!("QINSOON_DEBUG: content : {:?}", inlined_fv_guard.content.as_ref().unwrap()); trace!("content : {:?}", inlined_fv_guard.content.as_ref().unwrap());
let new_inlined_entry_id = vm.next_id(); let new_inlined_entry_id = vm.next_id();
......
...@@ -1016,7 +1016,7 @@ def test_make_boot_image_simple(): ...@@ -1016,7 +1016,7 @@ def test_make_boot_image_simple():
assert res.returncode == 0, res.err assert res.returncode == 0, res.err
assert res.out == '%s\nabc\n123\n' % exe assert res.out == '%s\nabc\n123\n' % exe
@pytest.mark.xfail(reason='unimplemented')
@may_spawn_proc @may_spawn_proc
def test_rpytarget_print_argv(): def test_rpytarget_print_argv():
from rpython.translator.interactive import Translation from rpython.translator.interactive import Translation
...@@ -1040,7 +1040,7 @@ def test_rpytarget_print_argv(): ...@@ -1040,7 +1040,7 @@ def test_rpytarget_print_argv():
assert res.out == '[%s, abc, 123]\n' % exe assert res.out == '[%s, abc, 123]\n' % exe
@pytest.mark.xfail(reason='not implemented yet') @pytest.mark.skipif("True")
@may_spawn_proc @may_spawn_proc
def test_rpytarget_sha1sum(): def test_rpytarget_sha1sum():
john1 = \ john1 = \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment