Commit dcefcea6 authored by qinsoon
[wip] inlining quicksort not working

parent 3741fdca
......@@ -96,16 +96,16 @@ if [ "$OS" == "linux" ]; then
if [ $# -eq 0 ]; then
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v
else
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v -k $@
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest -v $@
fi
elif [ "$OS" == "Darwin" ]; then
if [ $# -eq 0 ]; then
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v
else
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v -k $@
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest -v $@
fi
else
echo "unknown OS. do not use this script to run"
exit
fi
\ No newline at end of file
fi
......@@ -104,7 +104,10 @@ pub struct MuFunctionVersion {
pub func_id: MuID,
pub sig: P<MuFuncSig>,
pub orig_content: Option<FunctionContent>,
pub content: Option<FunctionContent>,
pub context: FunctionContext,
pub force_inline: bool,
......@@ -142,6 +145,7 @@ impl MuFunctionVersion {
hdr: MuEntityHeader::unnamed(id),
func_id: func,
sig: sig,
orig_content: None,
content: None,
context: FunctionContext::new(),
block_trace: None,
......@@ -150,6 +154,7 @@ impl MuFunctionVersion {
}
pub fn define(&mut self, content: FunctionContent) {
self.orig_content = Some(content.clone());
self.content = Some(content);
}
......
......@@ -469,4 +469,52 @@ pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
},
_ => false
}
}
use ast::inst::*;
pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
use ast::inst::Instruction_::*;
match inst.v {
// simple
BinOp(_, _, _) => 1,
CmpOp(_, _, _) => 1,
ConvOp{..} => 0,
// control flow
Branch1(_) => 1,
Branch2{..} => 1,
Select{..} => 2,
Watchpoint{..} => 1,
WPBranch{..} => 2,
Switch{..} => 3,
// call
ExprCall{..} | ExprCCall{..} | Call{..} | CCall{..} => 5,
Return(_) => 1,
TailCall(_) => 1,
// memory access
Load{..} | Store{..} => 1,
CmpXchg{..} => 1,
AtomicRMW{..} => 1,
AllocA(_) => 1,
AllocAHybrid(_, _) => 1,
Fence(_) => 1,
// memory addressing
GetIRef(_) | GetFieldIRef{..} | GetElementIRef{..} | ShiftIRef{..} | GetVarPartIRef{..} => 0,
// runtime
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
ThreadExit => 10,
Throw(_) => 10,
SwapStack{..} => 10,
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
// others
Move(_) => 0,
ExnInstruction{ref inner, ..} => estimate_insts_for_ir(&inner)
}
}
\ No newline at end of file
......@@ -23,6 +23,8 @@ pub type Mem<'a> = &'a P<Value>;
#[path = "arch/x86_64/mod.rs"]
pub mod x86_64;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
#[cfg(target_arch = "x86_64")]
......
......@@ -25,6 +25,8 @@ impl Inlining {
fn check(&mut self, vm: &VM, func: &mut MuFunctionVersion) -> bool {
debug!("check inline");
self.should_inline.clear();
let mut inline_something = false;
for func_id in func.get_static_call_edges().values() {
......@@ -78,20 +80,20 @@ impl Inlining {
// some heuristics to decide whether we should inline the function
// to be more precise, this should be target-specific
let n_params = fv.sig.arg_tys.len();
let n_insts = fv.content.as_ref().unwrap().blocks.values().fold(0usize, |mut sum, ref block| {sum += block.number_of_irs(); sum});
let n_insts = estimate_insts(&fv);
let out_calls = fv.get_static_call_edges();
let has_throw = fv.has_throw();
// now we use a simple heuristic here:
// at most 25 estimated insts, no static out calls, no throw
let should_inline = n_insts <= 10 && out_calls.len() == 0 && !has_throw;
let should_inline = n_insts <= 25 && out_calls.len() == 0 && !has_throw;
trace!("func has {} insts", n_insts);
trace!("func {} has {} insts (estimated)", callee, n_insts);
trace!(" has {} out calls", out_calls.len());
trace!(" has throws? {}", has_throw);
trace!("SO func should be inlined? {}", should_inline);
self.should_inline.insert(fv_id, should_inline);
self.should_inline.insert(callee, should_inline);
should_inline
}
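The heuristic above reduces to a single predicate over three properties of the callee. The sketch below restates it as a free-standing Rust function purely for clarity; the name should_inline_callee and the standalone form are illustrative and not part of this commit:

// Illustrative restatement of the inlining heuristic above (hypothetical name):
// inline only small callees (estimated cost <= 25 machine instructions) that
// make no static out-calls and contain no throws.
fn should_inline_callee(estimated_insts: usize, n_out_calls: usize, has_throw: bool) -> bool {
    estimated_insts <= 25 && n_out_calls == 0 && !has_throw
}

fn main() {
    assert!(should_inline_callee(12, 0, false));  // small leaf function: inline
    assert!(!should_inline_callee(30, 0, false)); // too large after estimation
    assert!(!should_inline_callee(5, 2, false));  // makes calls of its own
    assert!(!should_inline_callee(5, 0, true));   // may throw
}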
......@@ -116,109 +118,120 @@ impl Inlining {
trace!("check inst: {}", inst);
let inst_id = inst.id();
if call_edges.contains_key(&inst_id) {
trace!("inserting inlined function at {}", inst);
// from TreeNode into Inst (we do not need old TreeNode)
let inst = inst.into_inst().unwrap();
// (inline expansion)
let inlined_func = *call_edges.get(&inst.id()).unwrap();
trace!("function being inlined is {}", inlined_func);
let inlined_fvid = match vm.get_cur_version_of(inlined_func) {
Some(fvid) => fvid,
None => panic!("cannot resolve current version of Func {}, which is supposed to be inlined", inlined_func)
};
let inlined_fvs_guard = vm.func_vers().read().unwrap();
let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
let inlined_fv_guard = inlined_fv_lock.read().unwrap();
let inlined_entry = inlined_fv_guard.content.as_ref().unwrap().entry;
// change current call insts to a branch
trace!("turning CALL instruction into a branch");
let ops = inst.ops.read().unwrap();
match inst.v {
Instruction_::ExprCall {ref data, ..} => {
let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
let branch = TreeNode::new_boxed_inst(Instruction{
hdr: inst.hdr.clone(),
value: None,
ops: RwLock::new(arg_nodes.clone()),
v: Instruction_::Branch1(Destination{
target: inlined_entry,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
});
trace!("branch inst: {}", branch);
// add branch to current block
cur_block.content.as_mut().unwrap().body.push(branch);
// finish current block
new_blocks.push(cur_block.clone());
let old_name = cur_block.name().unwrap();
// start a new block
cur_block = Block::new(vm.next_id());
cur_block.content = Some(BlockContent{
args: {
if inst.value.is_none() {
vec![]
} else {
inst.value.unwrap()
}
},
exn_arg: None,
body: vec![],
keepalives: None
});
let new_name = format!("{}_cont_after_inline_{}", old_name, inst_id);
trace!("create continue block for EXPRCALL/CCALL: {}", &new_name);
vm.set_name(cur_block.as_entity(), new_name);
// deal with the inlined function
copy_inline_blocks(&mut new_blocks, cur_block.id(), inlined_fv_guard.content.as_ref().unwrap());
copy_inline_context(f_context, &inlined_fv_guard.context);
},
Instruction_::Call {ref data, ref resume} => {
let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
let branch = Instruction{
hdr: inst.hdr.clone(),
value: None,
ops: RwLock::new(arg_nodes),
v: Instruction_::Branch1(Destination{
target: inlined_entry,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
};
// add branch to current block
cur_block.content.as_mut().unwrap().body.push(TreeNode::new_boxed_inst(branch));
// if normal_dest expects a different number of arguments
// than the inlined function returns, we need an intermediate block to pass the extra arguments
if resume.normal_dest.args.len() != inlined_fv_guard.sig.ret_tys.len() {
unimplemented!()
}
// deal with inlined function
let next_block = resume.normal_dest.target;
copy_inline_blocks(&mut new_blocks, next_block, inlined_fv_guard.content.as_ref().unwrap());
copy_inline_context(f_context, &inlined_fv_guard.context);
},
_ => panic!("unexpected callsite: {}", inst)
let call_target = call_edges.get(&inst_id).unwrap();
if self.should_inline.contains_key(call_target) && *self.should_inline.get(call_target).unwrap() {
trace!("inserting inlined function at {}", inst);
// from TreeNode into Inst (we do not need old TreeNode)
let inst = inst.into_inst().unwrap();
// (inline expansion)
let inlined_func = *call_edges.get(&inst.id()).unwrap();
trace!("function being inlined is {}", inlined_func);
let inlined_fvid = match vm.get_cur_version_of(inlined_func) {
Some(fvid) => fvid,
None => panic!("cannot resolve current version of Func {}, which is supposed to be inlined", inlined_func)
};
let inlined_fvs_guard = vm.func_vers().read().unwrap();
let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
let inlined_fv_guard = inlined_fv_lock.read().unwrap();
let new_inlined_entry_id = vm.next_id();
// change current call insts to a branch
trace!("turning CALL instruction into a branch");
let ops = inst.ops.read().unwrap();
match inst.v {
Instruction_::ExprCall {ref data, ..} => {
let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
let branch = TreeNode::new_boxed_inst(Instruction{
hdr: inst.hdr.clone(),
value: None,
ops: RwLock::new(arg_nodes.clone()),
v: Instruction_::Branch1(Destination{
// this block doesn't exist yet; we will fix it later
target: new_inlined_entry_id,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
});
trace!("branch inst: {}", branch);
// add branch to current block
cur_block.content.as_mut().unwrap().body.push(branch);
// finish current block
new_blocks.push(cur_block.clone());
let old_name = cur_block.name().unwrap();
// start a new block
cur_block = Block::new(vm.next_id());
cur_block.content = Some(BlockContent{
args: {
if inst.value.is_none() {
vec![]
} else {
inst.value.unwrap()
}
},
exn_arg: None,
body: vec![],
keepalives: None
});
let new_name = format!("{}_cont_after_inline_{}", old_name, inst_id);
trace!("create continue block for EXPRCALL/CCALL: {}", &new_name);
vm.set_name(cur_block.as_entity(), new_name);
// deal with the inlined function
copy_inline_blocks(&mut new_blocks, cur_block.id(),
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
Instruction_::Call {ref data, ref resume} => {
let arg_nodes : Vec<P<TreeNode>> = data.args.iter().map(|x| ops[*x].clone()).collect();
let arg_indices: Vec<OpIndex> = (0..arg_nodes.len()).collect();
let branch = Instruction{
hdr: inst.hdr.clone(),
value: None,
ops: RwLock::new(arg_nodes),
v: Instruction_::Branch1(Destination{
target: new_inlined_entry_id,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
};
// add branch to current block
cur_block.content.as_mut().unwrap().body.push(TreeNode::new_boxed_inst(branch));
// if normal_dest expects a different number of arguments
// than the inlined function returns, we need an intermediate block to pass the extra arguments
if resume.normal_dest.args.len() != inlined_fv_guard.sig.ret_tys.len() {
unimplemented!()
}
// deal with inlined function
let next_block = resume.normal_dest.target;
copy_inline_blocks(&mut new_blocks, next_block,
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
_ => panic!("unexpected callsite: {}", inst)
}
} else {
cur_block.content.as_mut().unwrap().body.push(inst.clone());
}
} else {
cur_block.content.as_mut().unwrap().body.push(inst.clone());
......@@ -235,10 +248,41 @@ impl Inlining {
}
}
fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent) {
fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent, entry_block: MuID, vm: &VM) {
trace!("trying to copy inlined function blocks to caller");
// old id -> new id
let mut block_map : HashMap<MuID, MuID> = HashMap::new();
for block in callee.blocks.values() {
if block.id() == callee.entry {
block_map.insert(block.id(), entry_block);
} else {
block_map.insert(block.id(), vm.next_id());
}
}
let fix_dest = |dest : Destination| {
Destination {
target: *block_map.get(&dest.target).unwrap(),
args: dest.args
}
};
let fix_resume = |resume : ResumptionData| {
ResumptionData {
normal_dest: fix_dest(resume.normal_dest),
exn_dest: fix_dest(resume.exn_dest)
}
};
for block in callee.blocks.values() {
let mut block = block.clone();
let new_id = *block_map.get(&block.id()).unwrap();
let mut block = Block {
hdr: MuEntityHeader::named(new_id, format!("IB{}_for_{}", new_id, block.id())),
content: block.content.clone(),
control_flow: ControlFlow::default()
};
// check its last instruction
{
......@@ -268,6 +312,79 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
block_content.body.push(TreeNode::new_boxed_inst(branch));
},
// fix destination
Instruction_::Branch1(dest) => {
let branch = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Branch1(fix_dest(dest))
};
block_content.body.push(TreeNode::new_boxed_inst(branch));
}
Instruction_::Branch2{cond, true_dest, false_dest, true_prob} => {
let branch2 = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Branch2 {
cond: cond,
true_dest: fix_dest(true_dest),
false_dest: fix_dest(false_dest),
true_prob: true_prob
}
};
block_content.body.push(TreeNode::new_boxed_inst(branch2));
}
Instruction_::Call{data, resume} => {
let call = Instruction{
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Call {
data: data,
resume: fix_resume(resume)
}
};
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::CCall{data, resume} => {
let call = Instruction{
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::CCall {
data: data,
resume: fix_resume(resume)
}
};
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::Switch {cond, default, mut branches} => {
let switch = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Switch {
cond: cond,
default: fix_dest(default),
branches: branches.drain(..).map(|(op, dest)| (op, fix_dest(dest))).collect()
}
};
block_content.body.push(TreeNode::new_boxed_inst(switch));
}
Instruction_::Watchpoint{..}
| Instruction_::WPBranch{..}
| Instruction_::SwapStack{..}
| Instruction_::ExnInstruction{..} => unimplemented!(),
_ => {block_content.body.push(last_inst_clone);}
}
},
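The copy_inline_blocks changes above follow a "copy with fresh IDs" pattern: every callee block is given a new ID except the entry block, which reuses the ID the caller's new branch already targets, and branch destinations in the copied instructions are then rewritten through that map (fix_dest/fix_resume). Below is a minimal self-contained sketch of the mapping step, with hypothetical names and a plain usize standing in for MuID:

use std::collections::HashMap;

type BlockId = usize; // stand-in for MuID in this sketch

// Map each callee block to a fresh ID, except the entry block, which reuses
// the ID that the caller branched to before the blocks were copied.
fn remap_block_ids(callee_blocks: &[BlockId],
                   callee_entry: BlockId,
                   inlined_entry_id: BlockId,
                   mut fresh_id: impl FnMut() -> BlockId) -> HashMap<BlockId, BlockId> {
    let mut map = HashMap::new();
    for &old in callee_blocks {
        let new = if old == callee_entry { inlined_entry_id } else { fresh_id() };
        map.insert(old, new);
    }
    map
}

fn main() {
    let mut next = 100;
    let map = remap_block_ids(&[1, 2, 3], 1, 42, || { next += 1; next });
    assert_eq!(map[&1], 42);      // entry block keeps the pre-allocated ID
    assert_ne!(map[&2], map[&3]); // other blocks receive distinct fresh IDs
    // Branch1/Branch2/Call/Switch destinations in the copied instructions are
    // then rewritten through this map, which is what fix_dest and fix_resume do.
}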
......@@ -289,6 +406,27 @@ fn copy_inline_context(caller: &mut FunctionContext, callee: &FunctionContext) {
}
}
fn estimate_insts(fv: &MuFunctionVersion) -> usize {
let f_content = fv.content.as_ref().unwrap();
let mut insts = 0;
for block in f_content.blocks.values() {
let ref body = block.content.as_ref().unwrap().body;
for inst in body.iter() {
use compiler::backend;
match inst.v {
TreeNode_::Value(_) => unreachable!(),
TreeNode_::Instruction(ref inst) => {insts += backend::estimate_insts_for_ir(inst);}
}
}
}
insts
}
impl CompilerPass for Inlining {
fn name(&self) -> &'static str {
self.name
......
......@@ -68,7 +68,7 @@ pub fn compile_fnc<'a>(fnc_name: &'static str, build_fnc: &'a Fn() -> VM) -> ll:
}
pub fn compile_fncs<'a>(entry: &'static str, fnc_names: Vec<&'static str>, build_fnc: &'a Fn() -> VM) -> ll::Library {
VM::start_logging_trace;
VM::start_logging_trace();
let vm = Arc::new(build_fnc());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
......
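The one-character fix in compile_fncs above is easy to miss: VM::start_logging_trace; is a bare path statement, which evaluates the function item and discards it without calling it (rustc only warns, via the path_statements lint). A minimal sketch of the pitfall, using a hypothetical free function in place of the VM associated function:

fn start_logging_trace() {
    println!("logging initialised");
}

#[allow(path_statements)] // silence the lint so the no-op line compiles without warnings
fn main() {
    start_logging_trace;   // compiles, but does nothing: the function is only named
    start_logging_trace(); // the parentheses make it an actual call
}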
......@@ -1147,6 +1147,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
hdr: hdr,
func_id: func_id,
sig: impl_sig,
orig_content: Some(ctn.clone()),
content: Some(ctn),
context: fcb.ctx,
block_trace: None,
......
......@@ -316,7 +316,7 @@ macro_rules! inst {
// CALL
(($vm: expr, $fv: ident) $name: ident: $res: ident = EXPRCALL ($cc: expr, is_abort: $is_abort: expr) $func: ident ($($val: ident), +)) => {
let ops = vec![$func, $($val.clone()), *];
let ops = vec![$func.clone(), $($val.clone()), *];
let ops_len = ops.len();
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
......
......@@ -81,5 +81,85 @@ fn inline_add() -> VM {
define_func_ver!((vm) add_trampoline_v1 (entry: tramp_blk_entry) {tramp_blk_entry});
}
vm
}
#[test]
fn test_inline_add_twice() {
let lib = testutil::compile_fncs("add_twice", vec!["add_twice", "add"], &inline_add_twice);
unsafe {
let add_twice : libloading::Symbol<unsafe extern fn(u64, u64, u64) -> u64> = lib.get(b"add_twice").unwrap();
let res = add_twice(1, 1, 1);
println!("add(1, 1, 1) = {}", res);
assert!(res == 3);
}
}
fn inline_add_twice() -> VM {
let vm = VM::new();
typedef! ((vm) int64 = mu_int(64));
funcsig! ((vm) sig = (int64, int64) -> (int64));
funcdecl! ((vm) <sig> add);
{
// add
funcdef! ((vm) <sig> add VERSION add_v1);
block! ((vm, add_v1) blk_entry);
ssa! ((vm, add_v1) <int64> x);
ssa! ((vm, add_v1) <int64> y);
ssa! ((vm, add_v1) <int64> res);
inst! ((vm, add_v1) blk_entry_add:
res = BINOP (BinOp::Add) x y
);
inst! ((vm, add_v1) blk_entry_ret:
RET (res)
);
define_block! ((vm, add_v1) blk_entry(x, y) {blk_entry_add, blk_entry_ret});
define_func_ver!((