Commit dcefcea6 authored by qinsoon

[wip] inlining quicksort not working

parent 3741fdca
@@ -96,16 +96,16 @@ if [ "$OS" == "linux" ]; then
     if [ $# -eq 0 ]; then
         RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v
     else
-        RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v -k $@
+        RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest -v $@
     fi
 elif [ "$OS" == "Darwin" ]; then
     if [ $# -eq 0 ]; then
         RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v
     else
-        RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v -k $@
+        RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest -v $@
     fi
 else
     echo "unknown OS. do not use this script to run"
     exit
 fi
\ No newline at end of file
@@ -104,7 +104,10 @@ pub struct MuFunctionVersion {
     pub func_id: MuID,
     pub sig: P<MuFuncSig>,
 
+    pub orig_content: Option<FunctionContent>,
     pub content: Option<FunctionContent>,
     pub context: FunctionContext,
 
     pub force_inline: bool,
@@ -142,6 +145,7 @@ impl MuFunctionVersion {
             hdr: MuEntityHeader::unnamed(id),
             func_id: func,
             sig: sig,
+            orig_content: None,
             content: None,
             context: FunctionContext::new(),
             block_trace: None,
@@ -150,6 +154,7 @@ impl MuFunctionVersion {
     }
 
     pub fn define(&mut self, content: FunctionContent) {
+        self.orig_content = Some(content.clone());
         self.content = Some(content);
     }
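
Note on the new orig_content field: define() now snapshots the function body before the working copy in content gets rewritten by later passes. A minimal standalone sketch of the same pattern; SimpleFn and Body below are made-up illustration types, not Zebu's MuFunctionVersion or FunctionContent.

    // Standalone sketch: keep an untouched copy of the IR alongside the mutable one.
    #[derive(Clone, Debug)]
    struct Body {
        insts: Vec<String>,
    }

    struct SimpleFn {
        orig_content: Option<Body>, // snapshot taken at define time, never mutated
        content: Option<Body>,      // working copy that optimisation passes rewrite
    }

    impl SimpleFn {
        fn new() -> SimpleFn {
            SimpleFn { orig_content: None, content: None }
        }

        fn define(&mut self, body: Body) {
            // same shape as MuFunctionVersion::define in the hunk above
            self.orig_content = Some(body.clone());
            self.content = Some(body);
        }
    }

    fn main() {
        let mut f = SimpleFn::new();
        f.define(Body { insts: vec!["add".to_string(), "ret".to_string()] });
        // a pass mutates the working copy; the original snapshot stays intact
        f.content.as_mut().unwrap().insts.push("branch".to_string());
        assert_eq!(f.orig_content.as_ref().unwrap().insts.len(), 2);
        assert_eq!(f.content.as_ref().unwrap().insts.len(), 3);
    }
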
@@ -469,4 +469,52 @@ pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
         },
         _ => false
     }
 }
+
+use ast::inst::*;
+
+pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
+    use ast::inst::Instruction_::*;
+
+    match inst.v {
+        // simple
+        BinOp(_, _, _) => 1,
+        CmpOp(_, _, _) => 1,
+        ConvOp{..} => 0,
+
+        // control flow
+        Branch1(_) => 1,
+        Branch2{..} => 1,
+        Select{..} => 2,
+        Watchpoint{..} => 1,
+        WPBranch{..} => 2,
+        Switch{..} => 3,
+
+        // call
+        ExprCall{..} | ExprCCall{..} | Call{..} | CCall{..} => 5,
+        Return(_) => 1,
+        TailCall(_) => 1,
+
+        // memory access
+        Load{..} | Store{..} => 1,
+        CmpXchg{..} => 1,
+        AtomicRMW{..} => 1,
+        AllocA(_) => 1,
+        AllocAHybrid(_, _) => 1,
+        Fence(_) => 1,
+
+        // memory addressing
+        GetIRef(_) | GetFieldIRef{..} | GetElementIRef{..} | ShiftIRef{..} | GetVarPartIRef{..} => 0,
+
+        // runtime
+        New(_) | NewHybrid(_, _) => 10,
+        NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
+        ThreadExit => 10,
+        Throw(_) => 10,
+        SwapStack{..} => 10,
+        CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
+
+        // others
+        Move(_) => 0,
+        ExnInstruction{ref inner, ..} => estimate_insts_for_ir(&inner)
+    }
+}
\ No newline at end of file
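
estimate_insts_for_ir above is a rough, hand-tuned cost table: cheap ALU and control-flow ops count as 1, calls as 5, runtime operations (allocation, thread/stack ops) as 10, and pure addressing or moves as 0 on the assumption they fold into other instructions. A self-contained sketch of the same technique, with a made-up Inst enum standing in for ast::inst::Instruction:

    // Illustrative only: `Inst` is a toy IR, not Zebu's ast::inst::Instruction.
    #[derive(Debug)]
    enum Inst {
        BinOp,
        Branch,
        Call,
        New,
        GetFieldIRef,
    }

    // Rough per-instruction weight, mirroring the shape of estimate_insts_for_ir.
    fn weight(inst: &Inst) -> usize {
        match *inst {
            Inst::BinOp => 1,        // simple ALU op
            Inst::Branch => 1,       // control flow
            Inst::Call => 5,         // a call expands to several machine instructions
            Inst::New => 10,         // runtime entry points are expensive
            Inst::GetFieldIRef => 0, // addressing usually folds into the memory op
        }
    }

    fn estimated_size(body: &[Inst]) -> usize {
        body.iter().map(weight).sum()
    }

    fn main() {
        let body = vec![Inst::GetFieldIRef, Inst::BinOp, Inst::Branch, Inst::Call, Inst::New];
        // 0 + 1 + 1 + 5 + 10 = 17 estimated machine instructions
        println!("estimated size = {}", estimated_size(&body));
    }
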
@@ -23,6 +23,8 @@ pub type Mem<'a> = &'a P<Value>;
 #[path = "arch/x86_64/mod.rs"]
 pub mod x86_64;
 
+#[cfg(target_arch = "x86_64")]
+pub use compiler::backend::x86_64::estimate_insts_for_ir;
 #[cfg(target_arch = "x86_64")]
 pub use compiler::backend::x86_64::init_machine_regs_for_func;
 #[cfg(target_arch = "x86_64")]
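
The backend module re-exports the x86_64 implementation behind a cfg(target_arch) gate, so callers can write compiler::backend::estimate_insts_for_ir without naming the architecture. A minimal sketch of that pattern; the module layout and stub bodies below are illustrative, not the real crate layout.

    // Illustrative: per-arch implementations re-exported under one flat path.
    mod backend {
        #[cfg(target_arch = "x86_64")]
        pub mod x86_64 {
            // stub standing in for the real cost function
            pub fn estimate_insts_for_ir() -> usize { 1 }
        }
        #[cfg(target_arch = "x86_64")]
        pub use self::x86_64::estimate_insts_for_ir;

        #[cfg(target_arch = "aarch64")]
        pub mod aarch64 {
            pub fn estimate_insts_for_ir() -> usize { 1 }
        }
        #[cfg(target_arch = "aarch64")]
        pub use self::aarch64::estimate_insts_for_ir;
    }

    fn main() {
        // Callers never mention the architecture.
        println!("{}", backend::estimate_insts_for_ir());
    }
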
@@ -25,6 +25,8 @@ impl Inlining {
     fn check(&mut self, vm: &VM, func: &mut MuFunctionVersion) -> bool {
         debug!("check inline");
 
+        self.should_inline.clear();
+
         let mut inline_something = false;
 
         for func_id in func.get_static_call_edges().values() {
@@ -78,20 +80,20 @@ impl Inlining {
         // some heuristics here to decide if we should inline the function
         // to be more precise. we should be target specific
         let n_params = fv.sig.arg_tys.len();
-        let n_insts = fv.content.as_ref().unwrap().blocks.values().fold(0usize, |mut sum, ref block| {sum += block.number_of_irs(); sum});
+        let n_insts = estimate_insts(&fv);
         let out_calls = fv.get_static_call_edges();
         let has_throw = fv.has_throw();
 
         // now we use a simple heuristic here:
         // insts fewer than 10, no static out calls, no throw
-        let should_inline = n_insts <= 10 && out_calls.len() == 0 && !has_throw;
+        let should_inline = n_insts <= 25 && out_calls.len() == 0 && !has_throw;
 
-        trace!("func has {} insts", n_insts);
+        trace!("func {} has {} insts (estimated)", callee, n_insts);
         trace!(" has {} out calls", out_calls.len());
         trace!(" has throws? {}", has_throw);
         trace!("SO func should be inlined? {}", should_inline);
 
-        self.should_inline.insert(fv_id, should_inline);
+        self.should_inline.insert(callee, should_inline);
 
         should_inline
     }
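
The check above now compares an estimated machine-instruction count (rather than a raw IR node count) against a threshold of 25, and caches the per-callee verdict keyed by the callee's function id. A standalone sketch of that decision, assuming the inputs are already computed; CalleeInfo and its field names are made up for illustration.

    use std::collections::HashMap;

    // Illustrative input record; the real pass reads these off MuFunctionVersion.
    struct CalleeInfo {
        id: usize, // callee function id
        estimated_insts: usize,
        static_out_calls: usize,
        has_throw: bool,
    }

    // Same heuristic shape: inline only small, leaf, non-throwing callees,
    // and remember the decision per callee id.
    fn should_inline(info: &CalleeInfo, cache: &mut HashMap<usize, bool>) -> bool {
        let decision = info.estimated_insts <= 25
            && info.static_out_calls == 0
            && !info.has_throw;
        cache.insert(info.id, decision);
        decision
    }

    fn main() {
        let mut cache = HashMap::new();
        let leaf = CalleeInfo { id: 7, estimated_insts: 12, static_out_calls: 0, has_throw: false };
        let big  = CalleeInfo { id: 8, estimated_insts: 40, static_out_calls: 2, has_throw: false };
        assert!(should_inline(&leaf, &mut cache));
        assert!(!should_inline(&big, &mut cache));
        println!("{:?}", cache);
    }
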
@@ -116,109 +118,120 @@ impl Inlining {
         trace!("check inst: {}", inst);
         let inst_id = inst.id();
         if call_edges.contains_key(&inst_id) {
+            let call_target = call_edges.get(&inst_id).unwrap();
+            if self.should_inline.contains_key(call_target) && *self.should_inline.get(call_target).unwrap() {
                 trace!("inserting inlined function at {}", inst);
 
                 // from TreeNode into Inst (we do not need old TreeNode)
                 let inst = inst.into_inst().unwrap();
 
                 // (inline expansion)
                 let inlined_func = *call_edges.get(&inst.id()).unwrap();
                 trace!("function being inlined is {}", inlined_func);
 
                 let inlined_fvid = match vm.get_cur_version_of(inlined_func) {
                     Some(fvid) => fvid,
                     None => panic!("cannot resolve current version of Func {}, which is supposed to be inlined", inlined_func)
                 };
 
                 let inlined_fvs_guard = vm.func_vers().read().unwrap();
                 let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
                 let inlined_fv_guard = inlined_fv_lock.read().unwrap();
 
-                let inlined_entry = inlined_fv_guard.content.as_ref().unwrap().entry;
+                let new_inlined_entry_id = vm.next_id();
 
                 // change current call insts to a branch
                 trace!("turning CALL instruction into a branch");
                 let ops = inst.ops.read().unwrap();
 
                 match inst.v {
                     Instruction_::ExprCall {ref data, ..} => {
                         let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
                         let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
 
                         let branch = TreeNode::new_boxed_inst(Instruction{
                             hdr: inst.hdr.clone(),
                             value: None,
                             ops: RwLock::new(arg_nodes.clone()),
                             v: Instruction_::Branch1(Destination{
-                                target: inlined_entry,
+                                // this block doesnt exist yet, we will fix it later
+                                target: new_inlined_entry_id,
                                 args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
                             })
                         });
 
                         trace!("branch inst: {}", branch);
 
                         // add branch to current block
                         cur_block.content.as_mut().unwrap().body.push(branch);
 
                         // finish current block
                         new_blocks.push(cur_block.clone());
                         let old_name = cur_block.name().unwrap();
 
                         // start a new block
                         cur_block = Block::new(vm.next_id());
                         cur_block.content = Some(BlockContent{
                             args: {
                                 if inst.value.is_none() {
                                     vec![]
                                 } else {
                                     inst.value.unwrap()
                                 }
                             },
                             exn_arg: None,
                             body: vec![],
                             keepalives: None
                         });
                         let new_name = format!("{}_cont_after_inline_{}", old_name, inst_id);
                         trace!("create continue block for EXPRCALL/CCALL: {}", &new_name);
                         vm.set_name(cur_block.as_entity(), new_name);
 
                         // deal with the inlined function
-                        copy_inline_blocks(&mut new_blocks, cur_block.id(), inlined_fv_guard.content.as_ref().unwrap());
+                        copy_inline_blocks(&mut new_blocks, cur_block.id(),
+                                           inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
+                                           vm);
                         copy_inline_context(f_context, &inlined_fv_guard.context);
                     },
 
                     Instruction_::Call {ref data, ref resume} => {
                         let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
                         let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
 
                         let branch = Instruction{
                             hdr: inst.hdr.clone(),
                             value: None,
                             ops: RwLock::new(arg_nodes),
                             v: Instruction_::Branch1(Destination{
-                                target: inlined_entry,
+                                target: new_inlined_entry_id,
                                 args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
                             })
                         };
 
                         // add branch to current block
                         cur_block.content.as_mut().unwrap().body.push(TreeNode::new_boxed_inst(branch));
 
                         // if normal_dest expects different number of arguments
                         // other than the inlined function returns, we need an intermediate block to pass extra arguments
                         if resume.normal_dest.args.len() != inlined_fv_guard.sig.ret_tys.len() {
                             unimplemented!()
                         }
 
                         // deal with inlined function
                         let next_block = resume.normal_dest.target;
 
-                        copy_inline_blocks(&mut new_blocks, next_block, inlined_fv_guard.content.as_ref().unwrap());
+                        copy_inline_blocks(&mut new_blocks, next_block,
                                           inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
+                                           vm);
                         copy_inline_context(f_context, &inlined_fv_guard.context);
                     },
 
                     _ => panic!("unexpected callsite: {}", inst)
                 }
+            } else {
+                cur_block.content.as_mut().unwrap().body.push(inst.clone());
+            }
         } else {
             cur_block.content.as_mut().unwrap().body.push(inst.clone());
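
The rewrite above turns each inlinable call site into an unconditional branch whose target id is reserved up front with vm.next_id(), before the callee's blocks have been copied; copy_inline_blocks later materialises a block with exactly that id. A much-simplified standalone model of that two-step dance, with toy Block and Inst types rather than Zebu's IR:

    // Toy model of "branch now, create the target block later".
    #[derive(Debug)]
    enum Inst {
        Call { callee: &'static str },
        Branch { target: usize },
        Ret,
    }

    #[derive(Debug)]
    struct Block {
        id: usize,
        body: Vec<Inst>,
    }

    fn main() {
        let mut next_id = 100;
        let mut alloc_id = || { next_id += 1; next_id };

        // caller block ends in a call we want to inline
        let mut caller = Block { id: 1, body: vec![Inst::Call { callee: "quicksort" }] };

        // step 1: reserve an id for the inlined entry block and branch to it,
        // even though no block with that id exists yet
        let inlined_entry = alloc_id();
        caller.body.pop();
        caller.body.push(Inst::Branch { target: inlined_entry });

        // step 2: copy the callee body into a block that uses the reserved id
        let inlined = Block { id: inlined_entry, body: vec![Inst::Ret] };

        println!("{:?}\n{:?}", caller, inlined);
    }
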
@@ -235,10 +248,41 @@ impl Inlining {
     }
 }
 
-fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent) {
+fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent, entry_block: MuID, vm: &VM) {
     trace!("trying to copy inlined function blocks to caller");
 
+    // old id -> new id
+    let mut block_map : HashMap<MuID, MuID> = HashMap::new();
+
+    for block in callee.blocks.values() {
+        if block.id() == callee.entry {
+            block_map.insert(block.id(), entry_block);
+        } else {
+            block_map.insert(block.id(), vm.next_id());
+        }
+    }
+
+    let fix_dest = |dest : Destination| {
+        Destination {
+            target: *block_map.get(&dest.target).unwrap(),
+            args: dest.args
+        }
+    };
+
+    let fix_resume = |resume : ResumptionData| {
+        ResumptionData {
+            normal_dest: fix_dest(resume.normal_dest),
+            exn_dest: fix_dest(resume.exn_dest)
+        }
+    };
+
     for block in callee.blocks.values() {
-        let mut block = block.clone();
+        let new_id = *block_map.get(&block.id()).unwrap();
+
+        let mut block = Block {
+            hdr: MuEntityHeader::named(new_id, format!("IB{}_for_{}", new_id, block.id())),
+            content: block.content.clone(),
+            control_flow: ControlFlow::default()
+        };
 
         // check its last instruction
         {
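
copy_inline_blocks now clones the callee's blocks under fresh ids: the callee entry maps to the id the caller already branched to, every other block gets a new id from the VM, and fix_dest/fix_resume rewrite branch targets through that map. A self-contained sketch of the remapping step, using plain usizes instead of MuID and a stub id allocator:

    use std::collections::HashMap;

    // Illustrative: remap callee block ids so the copied blocks don't collide with
    // the caller's, while the callee entry reuses the id the caller branched to.
    fn remap_blocks(
        callee_blocks: &[usize],
        callee_entry: usize,
        reserved_entry_id: usize,
        next_id: &mut usize,
    ) -> HashMap<usize, usize> {
        let mut block_map = HashMap::new();
        for &old_id in callee_blocks {
            if old_id == callee_entry {
                block_map.insert(old_id, reserved_entry_id);
            } else {
                *next_id += 1;
                block_map.insert(old_id, *next_id);
            }
        }
        block_map
    }

    // Analogue of fix_dest: rewrite a branch target through the map.
    fn fix_target(block_map: &HashMap<usize, usize>, old_target: usize) -> usize {
        *block_map.get(&old_target).unwrap()
    }

    fn main() {
        let mut next_id = 500;
        let map = remap_blocks(&[10, 11, 12], 10, 300, &mut next_id);
        assert_eq!(fix_target(&map, 10), 300); // entry goes to the reserved id
        assert_ne!(fix_target(&map, 11), 11);  // other blocks get fresh ids
        println!("{:?}", map);
    }
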
@@ -268,6 +312,79 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent) {
                         block_content.body.push(TreeNode::new_boxed_inst(branch));
                     },
 
+                    // fix destination
+                    Instruction_::Branch1(dest) => {
+                        let branch = Instruction {
+                            hdr: hdr,
+                            value: value,
+                            ops: ops,
+                            v: Instruction_::Branch1(fix_dest(dest))
+                        };
+
+                        block_content.body.push(TreeNode::new_boxed_inst(branch));
+                    }
+                    Instruction_::Branch2{cond, true_dest, false_dest, true_prob} => {
+                        let branch2 = Instruction {
+                            hdr: hdr,
+                            value: value,
+                            ops: ops,
+                            v: Instruction_::Branch2 {
+                                cond: cond,
+                                true_dest: fix_dest(true_dest),
+                                false_dest: fix_dest(false_dest),
+                                true_prob: true_prob
+                            }
+                        };
+
+                        block_content.body.push(TreeNode::new_boxed_inst(branch2));
+                    }
+                    Instruction_::Call{data, resume} => {
+                        let call = Instruction{
+                            hdr: hdr,
+                            value: value,
+                            ops: ops,
+                            v: Instruction_::Call {
+                                data: data,
+                                resume: fix_resume(resume)
+                            }
+                        };
+
+                        block_content.body.push(TreeNode::new_boxed_inst(call));
+                    }
+                    Instruction_::CCall{data, resume} => {
+                        let call = Instruction{
+                            hdr: hdr,
+                            value: value,
+                            ops: ops,
+                            v: Instruction_::CCall {
+                                data: data,
+                                resume: fix_resume(resume)
+                            }
+                        };
+
+                        block_content.body.push(TreeNode::new_boxed_inst(call));
+                    }
+                    Instruction_::Switch {cond, default, mut branches} => {
+                        let switch = Instruction {
+                            hdr: hdr,
+                            value: value,
+                            ops: ops,
+                            v: Instruction_::Switch {
+                                cond: cond,
+                                default: fix_dest(default),
+                                branches: branches.drain(..).map(|(op, dest)| (op, fix_dest(dest))).collect()
+                            }
+                        };
+
+                        block_content.body.push(TreeNode::new_boxed_inst(switch));
+                    }
+
+                    Instruction_::Watchpoint{..}
+                    | Instruction_::WPBranch{..}
+                    | Instruction_::SwapStack{..}
+                    | Instruction_::ExnInstruction{..} => unimplemented!(),
+
                     _ => {block_content.body.push(last_inst_clone);}
                 }
             },
@@ -289,6 +406,27 @@ fn copy_inline_context(caller: &mut FunctionContext, callee: &FunctionContext) {
     }
 }
 
+fn estimate_insts(fv: &MuFunctionVersion) -> usize {
+    let f_content = fv.content.as_ref().unwrap();
+
+    let mut insts = 0;
+
+    for block in f_content.blocks.values() {
+        let ref body = block.content.as_ref().unwrap().body;
+
+        for inst in body.iter() {
+            use compiler::backend;
+
+            match inst.v {
+                TreeNode_::Value(_) => unreachable!(),
+                TreeNode_::Instruction(ref inst) => {insts += backend::estimate_insts_for_ir(inst);}
+            }
+        }
+    }
+
+    insts
+}
+
 impl CompilerPass for Inlining {
     fn name(&self) -> &'static str {
         self.name
@@ -68,7 +68,7 @@ pub fn compile_fnc<'a>(fnc_name: &'static str, build_fnc: &'a Fn() -> VM) -> ll::Library {
 }
 
 pub fn compile_fncs<'a>(entry: &'static str, fnc_names: Vec<&'static str>, build_fnc: &'a Fn() -> VM) -> ll::Library {
-    VM::start_logging_trace;
+    VM::start_logging_trace();
 
     let vm = Arc::new(build_fnc());
     let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
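
The one-character fix above matters because `VM::start_logging_trace;` is just a path expression statement that names the function and discards it, so logging was never switched on; only `VM::start_logging_trace()` actually calls it. A tiny standalone illustration of that pitfall; the init_logging function below is made up.

    // Illustrative only: shows why the parentheses matter.
    fn init_logging() {
        println!("logging enabled");
    }

    fn main() {
        init_logging;   // a no-op: evaluates the function item and discards it
                        // (rustc warns: "path statement with no effect")
        init_logging(); // actually runs the function
    }
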
@@ -1147,6 +1147,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
             hdr: hdr,
             func_id: func_id,
             sig: impl_sig,
+            orig_content: Some(ctn.clone()),
             content: Some(ctn),
             context: fcb.ctx,