WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO has recommended that users enable 2FA as soon as possible.
Currently 2.2% of users enabled 2FA.

Commit dcefcea6 authored by qinsoon's avatar qinsoon
Browse files

[wip] inlining quicksort not working

parent 3741fdca
......@@ -96,14 +96,14 @@ if [ "$OS" == "linux" ]; then
if [ $# -eq 0 ]; then
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v
else
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest . -v -k $@
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang-3.8 python -m pytest -v $@
fi
elif [ "$OS" == "Darwin" ]; then
if [ $# -eq 0 ]; then
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v
else
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest . -v -k $@
RUST_BACKTRACE=1 PYTHONPATH=$project_path/mu-client-pypy MU_RUST=$project_path CC=clang python2 -m pytest -v $@
fi
else
echo "unknown OS. do not use this script to run"
......
......@@ -104,7 +104,10 @@ pub struct MuFunctionVersion {
pub func_id: MuID,
pub sig: P<MuFuncSig>,
pub orig_content: Option<FunctionContent>,
pub content: Option<FunctionContent>,
pub context: FunctionContext,
pub force_inline: bool,
......@@ -142,6 +145,7 @@ impl MuFunctionVersion {
hdr: MuEntityHeader::unnamed(id),
func_id: func,
sig: sig,
orig_content: None,
content: None,
context: FunctionContext::new(),
block_trace: None,
......@@ -150,6 +154,7 @@ impl MuFunctionVersion {
}
pub fn define(&mut self, content: FunctionContent) {
self.orig_content = Some(content.clone());
self.content = Some(content);
}
......
......@@ -470,3 +470,51 @@ pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
_ => false
}
}
use ast::inst::*;
pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
use ast::inst::Instruction_::*;
match inst.v {
// simple
BinOp(_, _, _) => 1,
CmpOp(_, _, _) => 1,
ConvOp{..} => 0,
// control flow
Branch1(_) => 1,
Branch2{..} => 1,
Select{..} => 2,
Watchpoint{..} => 1,
WPBranch{..} => 2,
Switch{..} => 3,
// call
ExprCall{..} | ExprCCall{..} | Call{..} | CCall{..} => 5,
Return(_) => 1,
TailCall(_) => 1,
// memory access
Load{..} | Store{..} => 1,
CmpXchg{..} => 1,
AtomicRMW{..} => 1,
AllocA(_) => 1,
AllocAHybrid(_, _) => 1,
Fence(_) => 1,
// memory addressing
GetIRef(_) | GetFieldIRef{..} | GetElementIRef{..} | ShiftIRef{..} | GetVarPartIRef{..} => 0,
// runtime
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
ThreadExit => 10,
Throw(_) => 10,
SwapStack{..} => 10,
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
// others
Move(_) => 0,
ExnInstruction{ref inner, ..} => estimate_insts_for_ir(&inner)
}
}
\ No newline at end of file
......@@ -23,6 +23,8 @@ pub type Mem<'a> = &'a P<Value>;
#[path = "arch/x86_64/mod.rs"]
pub mod x86_64;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
#[cfg(target_arch = "x86_64")]
......
......@@ -25,6 +25,8 @@ impl Inlining {
fn check(&mut self, vm: &VM, func: &mut MuFunctionVersion) -> bool {
debug!("check inline");
self.should_inline.clear();
let mut inline_something = false;
for func_id in func.get_static_call_edges().values() {
......@@ -78,20 +80,20 @@ impl Inlining {
// some heuristics here to decide if we should inline the function
// to be more precise. we should be target specific
let n_params = fv.sig.arg_tys.len();
let n_insts = fv.content.as_ref().unwrap().blocks.values().fold(0usize, |mut sum, ref block| {sum += block.number_of_irs(); sum});
let n_insts = estimate_insts(&fv);
let out_calls = fv.get_static_call_edges();
let has_throw = fv.has_throw();
// now we use a simple heuristic here:
// insts fewer than 10, no static out calls, no throw
let should_inline = n_insts <= 10 && out_calls.len() == 0 && !has_throw;
let should_inline = n_insts <= 25 && out_calls.len() == 0 && !has_throw;
trace!("func has {} insts", n_insts);
trace!("func {} has {} insts (estimated)", callee, n_insts);
trace!(" has {} out calls", out_calls.len());
trace!(" has throws? {}", has_throw);
trace!("SO func should be inlined? {}", should_inline);
self.should_inline.insert(fv_id, should_inline);
self.should_inline.insert(callee, should_inline);
should_inline
}
......@@ -116,6 +118,9 @@ impl Inlining {
trace!("check inst: {}", inst);
let inst_id = inst.id();
if call_edges.contains_key(&inst_id) {
let call_target = call_edges.get(&inst_id).unwrap();
if self.should_inline.contains_key(call_target) && *self.should_inline.get(call_target).unwrap() {
trace!("inserting inlined function at {}", inst);
// from TreeNode into Inst (we do not need old TreeNode)
......@@ -135,7 +140,7 @@ impl Inlining {
let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
let inlined_fv_guard = inlined_fv_lock.read().unwrap();
let inlined_entry = inlined_fv_guard.content.as_ref().unwrap().entry;
let new_inlined_entry_id = vm.next_id();
// change current call insts to a branch
trace!("turning CALL instruction into a branch");
......@@ -151,7 +156,8 @@ impl Inlining {
value: None,
ops: RwLock::new(arg_nodes.clone()),
v: Instruction_::Branch1(Destination{
target: inlined_entry,
// this block doesnt exist yet, we will fix it later
target: new_inlined_entry_id,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
});
......@@ -184,7 +190,9 @@ impl Inlining {
vm.set_name(cur_block.as_entity(), new_name);
// deal with the inlined function
copy_inline_blocks(&mut new_blocks, cur_block.id(), inlined_fv_guard.content.as_ref().unwrap());
copy_inline_blocks(&mut new_blocks, cur_block.id(),
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
......@@ -197,7 +205,7 @@ impl Inlining {
value: None,
ops: RwLock::new(arg_nodes),
v: Instruction_::Branch1(Destination{
target: inlined_entry,
target: new_inlined_entry_id,
args: arg_indices.iter().map(|x| DestArg::Normal(*x)).collect()
})
};
......@@ -214,7 +222,9 @@ impl Inlining {
// deal with inlined function
let next_block = resume.normal_dest.target;
copy_inline_blocks(&mut new_blocks, next_block, inlined_fv_guard.content.as_ref().unwrap());
copy_inline_blocks(&mut new_blocks, next_block,
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
......@@ -223,6 +233,9 @@ impl Inlining {
} else {
cur_block.content.as_mut().unwrap().body.push(inst.clone());
}
} else {
cur_block.content.as_mut().unwrap().body.push(inst.clone());
}
}
new_blocks.push(cur_block);
......@@ -235,10 +248,41 @@ impl Inlining {
}
}
fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent) {
fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &FunctionContent, entry_block: MuID, vm: &VM) {
trace!("trying to copy inlined function blocks to caller");
// old id -> new id
let mut block_map : HashMap<MuID, MuID> = HashMap::new();
for block in callee.blocks.values() {
let mut block = block.clone();
if block.id() == callee.entry {
block_map.insert(block.id(), entry_block);
} else {
block_map.insert(block.id(), vm.next_id());
}
}
let fix_dest = |dest : Destination| {
Destination {
target: *block_map.get(&dest.target).unwrap(),
args: dest.args
}
};
let fix_resume = |resume : ResumptionData| {
ResumptionData {
normal_dest: fix_dest(resume.normal_dest),
exn_dest: fix_dest(resume.exn_dest)
}
};
for block in callee.blocks.values() {
let new_id = *block_map.get(&block.id()).unwrap();
let mut block = Block {
hdr: MuEntityHeader::named(new_id, format!("IB{}_for_{}", new_id, block.id())),
content: block.content.clone(),
control_flow: ControlFlow::default()
};
// check its last instruction
{
......@@ -268,6 +312,79 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
block_content.body.push(TreeNode::new_boxed_inst(branch));
},
// fix destination
Instruction_::Branch1(dest) => {
let branch = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Branch1(fix_dest(dest))
};
block_content.body.push(TreeNode::new_boxed_inst(branch));
}
Instruction_::Branch2{cond, true_dest, false_dest, true_prob} => {
let branch2 = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Branch2 {
cond: cond,
true_dest: fix_dest(true_dest),
false_dest: fix_dest(false_dest),
true_prob: true_prob
}
};
block_content.body.push(TreeNode::new_boxed_inst(branch2));
}
Instruction_::Call{data, resume} => {
let call = Instruction{
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Call {
data: data,
resume: fix_resume(resume)
}
};
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::CCall{data, resume} => {
let call = Instruction{
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::CCall {
data: data,
resume: fix_resume(resume)
}
};
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::Switch {cond, default, mut branches} => {
let switch = Instruction {
hdr: hdr,
value: value,
ops: ops,
v: Instruction_::Switch {
cond: cond,
default: fix_dest(default),
branches: branches.drain(..).map(|(op, dest)| (op, fix_dest(dest))).collect()
}
};
block_content.body.push(TreeNode::new_boxed_inst(switch));
}
Instruction_::Watchpoint{..}
| Instruction_::WPBranch{..}
| Instruction_::SwapStack{..}
| Instruction_::ExnInstruction{..} => unimplemented!(),
_ => {block_content.body.push(last_inst_clone);}
}
},
......@@ -289,6 +406,27 @@ fn copy_inline_context(caller: &mut FunctionContext, callee: &FunctionContext) {
}
}
/// Sums the estimated machine-instruction cost of every instruction in the
/// function version, delegating per-instruction costs to the target backend.
/// Panics if the function version has no content (must be defined first).
fn estimate_insts(fv: &MuFunctionVersion) -> usize {
    use compiler::backend;

    let f_content = fv.content.as_ref().unwrap();

    f_content
        .blocks
        .values()
        .flat_map(|block| block.content.as_ref().unwrap().body.iter())
        .map(|node| match node.v {
            // a block body should only ever contain instructions, never bare values
            TreeNode_::Value(_) => unreachable!(),
            TreeNode_::Instruction(ref inst) => backend::estimate_insts_for_ir(inst)
        })
        .sum()
}
impl CompilerPass for Inlining {
fn name(&self) -> &'static str {
self.name
......
......@@ -68,7 +68,7 @@ pub fn compile_fnc<'a>(fnc_name: &'static str, build_fnc: &'a Fn() -> VM) -> ll:
}
pub fn compile_fncs<'a>(entry: &'static str, fnc_names: Vec<&'static str>, build_fnc: &'a Fn() -> VM) -> ll::Library {
VM::start_logging_trace;
VM::start_logging_trace();
let vm = Arc::new(build_fnc());
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
......
......@@ -1147,6 +1147,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
hdr: hdr,
func_id: func_id,
sig: impl_sig,
orig_content: Some(ctn.clone()),
content: Some(ctn),
context: fcb.ctx,
block_trace: None,
......
......@@ -316,7 +316,7 @@ macro_rules! inst {
// CALL
(($vm: expr, $fv: ident) $name: ident: $res: ident = EXPRCALL ($cc: expr, is_abort: $is_abort: expr) $func: ident ($($val: ident), +)) => {
let ops = vec![$func, $($val.clone()), *];
let ops = vec![$func.clone(), $($val.clone()), *];
let ops_len = ops.len();
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
......
......@@ -83,3 +83,83 @@ fn inline_add() -> VM {
vm
}
#[test]
fn test_inline_add_twice() {
    // Compile both functions; `add` should be inlined into `add_twice`.
    let lib = testutil::compile_fncs("add_twice", vec!["add_twice", "add"], &inline_add_twice);

    unsafe {
        // add_twice(x, y, z) computes x + y + z via two (inlined) calls to add()
        let add_twice : libloading::Symbol<unsafe extern fn(u64, u64, u64) -> u64> = lib.get(b"add_twice").unwrap();

        let res = add_twice(1, 1, 1);
        // fixed: the message previously (and misleadingly) said "add(1, 1, 1)"
        println!("add_twice(1, 1, 1) = {}", res);

        assert_eq!(res, 3);
    }
}
// Builds a VM containing two Mu functions:
//   add(x, y)          -> x + y
//   add_twice(x, y, z) -> add(add(x, y), z), i.e. x + y + z
// Both call sites in add_twice are small, static, throw-free calls and are
// therefore expected to be candidates for the inlining pass under test.
fn inline_add_twice() -> VM {
let vm = VM::new();
// shared 64-bit integer type and the (i64, i64) -> i64 signature for `add`
typedef! ((vm) int64 = mu_int(64));
funcsig! ((vm) sig = (int64, int64) -> (int64));
funcdecl! ((vm) <sig> add);
{
// add
funcdef! ((vm) <sig> add VERSION add_v1);
// single entry block: res = x + y; return res
block! ((vm, add_v1) blk_entry);
ssa! ((vm, add_v1) <int64> x);
ssa! ((vm, add_v1) <int64> y);
ssa! ((vm, add_v1) <int64> res);
inst! ((vm, add_v1) blk_entry_add:
res = BINOP (BinOp::Add) x y
);
inst! ((vm, add_v1) blk_entry_ret:
RET (res)
);
define_block! ((vm, add_v1) blk_entry(x, y) {blk_entry_add, blk_entry_ret});
define_func_ver!((vm) add_v1 (entry: blk_entry) {blk_entry});
}
{
// add_twice
// a funcref constant so `add` can be called via EXPRCALL below
typedef! ((vm) funcref_to_sig = mu_funcref(sig));
constdef! ((vm) <funcref_to_sig> funcref_add = Constant::FuncRef(add));
funcsig! ((vm) add_twice_sig = (int64, int64, int64) -> (int64));
funcdecl! ((vm) <add_twice_sig> add_twice);
funcdef! ((vm) <add_twice_sig> add_twice VERSION add_twice_v1);
block! ((vm, add_twice_v1) blk_entry);
ssa! ((vm, add_twice_v1) <int64> x);
ssa! ((vm, add_twice_v1) <int64> y);
ssa! ((vm, add_twice_v1) <int64> z);
consta! ((vm, add_twice_v1) funcref_add_local = funcref_add);
// res1 = add(x, y)
ssa! ((vm, add_twice_v1) <int64> add_twice_res1);
inst! ((vm, add_twice_v1) call:
add_twice_res1 = EXPRCALL (CallConvention::Mu, is_abort: false) funcref_add_local (x, y)
);
// res2 = add(res1, z)
ssa! ((vm, add_twice_v1) <int64> add_twice_res2);
inst! ((vm, add_twice_v1) call2:
add_twice_res2 = EXPRCALL (CallConvention::Mu, is_abort: false) funcref_add_local (add_twice_res1, z)
);
inst! ((vm, add_twice_v1) ret:
RET (add_twice_res2)
);
define_block! ((vm, add_twice_v1) blk_entry(x, y, z) {call, call2, ret});
define_func_ver!((vm) add_twice_v1 (entry: blk_entry) {blk_entry});
}
vm
}
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment