To protect your data, the CISO has suggested that users enable 2FA as soon as possible.
Currently, 2.7% of users have enabled 2FA.

Commit 61d3bf99 authored by John Zhang
Browse files

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents 65655a3e 47e9b088
......@@ -266,7 +266,10 @@ pub enum Instruction_ {
// common inst
CommonInst_GetThreadLocal,
CommonInst_SetThreadLocal(OpIndex)
CommonInst_SetThreadLocal(OpIndex),
// internal use: mov from ops[0] to value
Move(OpIndex)
}
impl Instruction_ {
......@@ -379,8 +382,13 @@ impl Instruction_ {
&Instruction_::ExnInstruction{ref inner, ref resume} => {
format!("{} {}", inner.debug_str(ops), resume.debug_str(ops))
},
// common inst
&Instruction_::CommonInst_GetThreadLocal => format!("COMMONINST GetThreadLocal"),
&Instruction_::CommonInst_SetThreadLocal(op) => format!("COMMONINST SetThreadLocal {}", ops[op])
&Instruction_::CommonInst_SetThreadLocal(op) => format!("COMMONINST SetThreadLocal {}", ops[op]),
// move
&Instruction_::Move(from) => format!("MOVE {}", ops[from])
}
}
}
......
......@@ -568,6 +568,14 @@ impl Value {
}
}
pub unsafe fn as_type(&self, ty: P<MuType>) -> P<Value> {
P(Value{
hdr: self.hdr.clone(),
ty: ty,
v: self.v.clone()
})
}
pub fn is_fp_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
......
......@@ -28,7 +28,8 @@ pub fn is_terminal_inst(inst: &Instruction_) -> bool {
| &Select{..}
| &Fence(_)
| &CommonInst_GetThreadLocal
| &CommonInst_SetThreadLocal(_) => false,
| &CommonInst_SetThreadLocal(_)
| &Move(_) => false,
&Return(_)
| &ThreadExit
| &Throw(_)
......@@ -91,5 +92,6 @@ pub fn has_side_effect(inst: &Instruction_) -> bool {
&ExnInstruction{..} => true,
&CommonInst_GetThreadLocal => true,
&CommonInst_SetThreadLocal(_) => true,
&Move(_) => false,
}
}
......@@ -59,7 +59,9 @@ pub enum OpCode {
GetVarPartIRef,
CommonInst_GetThreadLocal,
CommonInst_SetThreadLocal
CommonInst_SetThreadLocal,
Move
}
pub fn pick_op_code_for_ssa(ty: &P<MuType>) -> OpCode {
......@@ -285,6 +287,7 @@ pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
Instruction_::Switch{..} => OpCode::Switch,
Instruction_::ExnInstruction{..} => OpCode::ExnInstruction,
Instruction_::CommonInst_GetThreadLocal => OpCode::CommonInst_GetThreadLocal,
Instruction_::CommonInst_SetThreadLocal(_) => OpCode::CommonInst_SetThreadLocal
Instruction_::CommonInst_SetThreadLocal(_) => OpCode::CommonInst_SetThreadLocal,
Instruction_::Move(_) => OpCode::Move,
}
}
#![allow(unused_variables)]
use compiler::backend;
use compiler::backend::AOT_EMIT_CONTEXT_FILE;
use compiler::backend::AOT_EMIT_DIR;
use compiler::backend::RegGroup;
......
......@@ -73,6 +73,7 @@ impl <'a> InstructionSelection {
TreeNode_::Instruction(ref inst) => {
match inst.v {
Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
trace!("instsel on BRANCH2");
// 'branch_if_true' == true, we emit cjmp the same as CmpOp (je for EQ, jne for NE)
// 'branch_if_true' == false, we emit opposite cjmp as CmpOp (jne for EQ, je for NE)
let (fallthrough_dest, branch_dest, branch_if_true) = {
......@@ -93,7 +94,7 @@ impl <'a> InstructionSelection {
let ref cond = ops[cond];
if self.match_cmp_res(cond) {
trace!("emit cmp_eq-branch2");
trace!("emit cmp_res-branch2");
match self.emit_cmp_res(cond, f_content, f_context, vm) {
op::CmpOp::EQ => {
if branch_if_true {
......@@ -182,6 +183,7 @@ impl <'a> InstructionSelection {
},
Instruction_::Select{cond, true_val, false_val} => {
trace!("instsel on SELECT");
let ops = inst.ops.read().unwrap();
let ref cond = ops[cond];
......@@ -252,6 +254,7 @@ impl <'a> InstructionSelection {
},
Instruction_::CmpOp(op, op1, op2) => {
trace!("instsel on CMPOP");
let ops = inst.ops.read().unwrap();
let ref op1 = ops[op1];
let ref op2 = ops[op2];
......@@ -292,6 +295,7 @@ impl <'a> InstructionSelection {
}
Instruction_::Branch1(ref dest) => {
trace!("instsel on BRANCH1");
let ops = inst.ops.read().unwrap();
self.process_dest(&ops, dest, f_content, f_context, vm);
......@@ -304,6 +308,7 @@ impl <'a> InstructionSelection {
},
Instruction_::Switch{cond, ref default, ref branches} => {
trace!("instsel on SWITCH");
let ops = inst.ops.read().unwrap();
let ref cond = ops[cond];
......@@ -350,6 +355,8 @@ impl <'a> InstructionSelection {
}
Instruction_::ExprCall{ref data, is_abort} => {
trace!("instsel on EXPRCALL");
if is_abort {
unimplemented!()
}
......@@ -363,6 +370,8 @@ impl <'a> InstructionSelection {
},
Instruction_::Call{ref data, ref resume} => {
trace!("instsel on CALL");
self.emit_mu_call(
inst,
data,
......@@ -372,6 +381,8 @@ impl <'a> InstructionSelection {
},
Instruction_::ExprCCall{ref data, is_abort} => {
trace!("instsel on EXPRCCALL");
if is_abort {
unimplemented!()
}
......@@ -380,16 +391,22 @@ impl <'a> InstructionSelection {
}
Instruction_::CCall{ref data, ref resume} => {
trace!("instsel on CCALL");
self.emit_c_call_ir(inst, data, Some(resume), node, f_content, f_context, vm);
}
Instruction_::Return(_) => {
trace!("instsel on RETURN");
self.emit_common_epilogue(inst, f_content, f_context, vm);
self.backend.emit_ret();
},
Instruction_::BinOp(op, op1, op2) => {
trace!("instsel on BINOP");
let ops = inst.ops.read().unwrap();
let res_tmp = self.get_result_value(node);
......@@ -871,17 +888,12 @@ impl <'a> InstructionSelection {
}
Instruction_::ConvOp{operation, ref from_ty, ref to_ty, operand} => {
trace!("instsel on CONVOP");
let ops = inst.ops.read().unwrap();
let ref op = ops[operand];
let extract_int_len = |x: &P<MuType>| {
match x.v {
MuType_::Int(len) => len,
_ => panic!("only expect int types, found: {}", x)
}
};
match operation {
op::ConvOp::TRUNC => {
if self.match_ireg(op) {
......@@ -904,7 +916,19 @@ impl <'a> InstructionSelection {
let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
if from_ty_size != to_ty_size {
if from_ty_size == 4 && to_ty_size == 8 {
// zero extend from 32 bits to 64 bits is a mov instruction
// x86 does not have movzlq (32 to 64)
// tmp_op is int32, but tmp_res is int64
// we want to force a 32-to-32 mov, so high bits of the destination will be zeroed
let tmp_res32 = unsafe {tmp_res.as_type(UINT32_TYPE.clone())};
self.backend.emit_mov_r_r(&tmp_res32, &tmp_op);
} else {
self.backend.emit_movz_r_r(&tmp_res, &tmp_op);
}
} else {
self.backend.emit_mov_r_r(&tmp_res, &tmp_op);
}
......@@ -949,6 +973,8 @@ impl <'a> InstructionSelection {
// load on x64 generates mov inst (no matter what order is specified)
// https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction_::Load{is_ptr, order, mem_loc} => {
trace!("instsel on LOAD");
let ops = inst.ops.read().unwrap();
let ref loc_op = ops[mem_loc];
......@@ -975,6 +1001,8 @@ impl <'a> InstructionSelection {
}
Instruction_::Store{is_ptr, order, mem_loc, value} => {
trace!("instsel on STORE");
let ops = inst.ops.read().unwrap();
let ref loc_op = ops[mem_loc];
let ref val_op = ops[value];
......@@ -1016,6 +1044,8 @@ impl <'a> InstructionSelection {
| Instruction_::GetFieldIRef{..}
| Instruction_::GetVarPartIRef{..}
| Instruction_::ShiftIRef{..} => {
trace!("instsel on GET/FIELD/VARPARTIREF, SHIFTIREF");
let mem_addr = self.emit_get_mem_from_inst(node, f_content, f_context, vm);
let tmp_res = self.get_result_value(node);
......@@ -1023,6 +1053,7 @@ impl <'a> InstructionSelection {
}
Instruction_::ThreadExit => {
trace!("instsel on THREADEXIT");
// emit a call to swap_back_to_native_stack(sp_loc: Address)
// get thread local and add offset to get sp_loc
......@@ -1033,6 +1064,7 @@ impl <'a> InstructionSelection {
}
Instruction_::CommonInst_GetThreadLocal => {
trace!("instsel on GETTHREADLOCAL");
// get thread local
let tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
......@@ -1042,6 +1074,8 @@ impl <'a> InstructionSelection {
self.emit_load_base_offset(&tmp_res, &tl, *thread::USER_TLS_OFFSET as i32, vm);
}
Instruction_::CommonInst_SetThreadLocal(op) => {
trace!("instsel on SETTHREADLOCAL");
let ops = inst.ops.read().unwrap();
let ref op = ops[op];
......@@ -1056,7 +1090,20 @@ impl <'a> InstructionSelection {
self.emit_store_base_offset(&tl, *thread::USER_TLS_OFFSET as i32, &tmp_op, vm);
}
Instruction_::Move(op) => {
trace!("instsel on MOVE (internal IR)");
let ops = inst.ops.read().unwrap();
let ref op = ops[op];
let tmp_res = self.get_result_value(node);
self.emit_move_node_to_value(&tmp_res, op, f_content, f_context, vm);
}
Instruction_::New(ref ty) => {
trace!("instsel on NEW");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => panic!("cannot use NEW for hybrid, use NEWHYBRID instead"),
......@@ -1074,6 +1121,8 @@ impl <'a> InstructionSelection {
}
Instruction_::NewHybrid(ref ty, var_len) => {
trace!("instsel on NEWHYBRID");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => {},
......@@ -1162,6 +1211,8 @@ impl <'a> InstructionSelection {
}
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
let ops = inst.ops.read().unwrap();
let ref exception_obj = ops[op_index];
......@@ -2136,14 +2187,10 @@ impl <'a> InstructionSelection {
};
let tmp_op1 = self.make_temporary(f_context, ty.clone(), vm);
let ref ty_op1 = op1.clone_value().ty;
let iimm_op1 = self.node_iimm_to_i32(op1);
self.backend.emit_mov_r_imm(&tmp_op1, iimm_op1);
let iimm_op2 = self.node_iimm_to_i32(op2);
self.backend.emit_cmp_imm_r(iimm_op2, &tmp_op1);
return op;
......@@ -2248,6 +2295,7 @@ impl <'a> InstructionSelection {
if x86_64::is_valid_x86_imm(pv) {
let val = self.value_iimm_to_i32(&pv);
debug!("tmp's ty: {}", tmp.ty);
self.backend.emit_mov_r_imm(&tmp, val)
} else {
self.backend.emit_mov_r64_imm64(&tmp, val as i64);
......
......@@ -436,9 +436,9 @@ pub fn is_callee_saved(reg_id: MuID) -> bool {
}
pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
use std::i32;
use std::u32;
match op.v {
Value_::Constant(Constant::Int(val)) if val <= i32::MAX as u64 => {
Value_::Constant(Constant::Int(val)) if val <= u32::MAX as u64 => {
true
},
_ => false
......
......@@ -6,7 +6,6 @@ use compiler::backend;
use utils::vec_utils;
use utils::LinkedHashSet;
use std::collections::LinkedList;
use std::collections::{HashMap, HashSet};
use self::nalgebra::DMatrix;
......
......@@ -67,6 +67,7 @@ impl Default for CompilerPolicy {
// ir level passes
passes.push(Box::new(passes::DefUse::new()));
passes.push(Box::new(passes::TreeGen::new()));
passes.push(Box::new(passes::GenMovPhi::new()));
passes.push(Box::new(passes::ControlFlowAnalysis::new()));
passes.push(Box::new(passes::TraceGen::new()));
......
use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use vm::VM;
use compiler::CompilerPass;
use std::any::Any;
use std::sync::RwLock;
/// Compiler pass that lowers block-argument passing (phi semantics) into
/// explicit `Move` instructions: for each destination that carries arguments,
/// a new intermediate block is inserted that moves the arguments into the
/// target block's parameters before branching to it.
pub struct GenMovPhi {
    // display name of this pass, returned by CompilerPass::name()
    name: &'static str,
}
impl GenMovPhi {
    /// Creates a new instance of the phi-move generation pass.
    pub fn new() -> GenMovPhi {
        let name = "Generate Phi Moves";
        GenMovPhi { name }
    }
}
/// Records an intermediate block that must be created and inserted into the
/// function once the main rewrite loop has finished iterating the blocks
/// (the block map cannot be mutated while it is being traversed).
struct IntermediateBlockInfo {
    // preallocated ID for the new intermediate block
    blk_id: MuID,
    // ID of the original branch target; the new block ends with a Branch1 to it
    target: MuID,
    // argument nodes the branch passed; each is Move'd into the target's args
    from_args : Vec<P<TreeNode>>
}
impl CompilerPass for GenMovPhi {
fn name(&self) -> &'static str {
self.name
}
fn as_any(&self) -> &Any {
self
}
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
let mut f_content = func.content.take().unwrap();
let mut new_blocks_to_insert : Vec<IntermediateBlockInfo> = vec![];
// iteratio blocks
for (blk_id, mut block) in f_content.blocks.iter_mut() {
trace!("block: {}", blk_id);
// old block content
let block_content = block.content.as_ref().unwrap().clone();
let mut new_body = vec![];
let mut i = 0;
let i_last = block_content.body.len() - 1;
for node in block_content.body.iter() {
// check if this is the last element
if i != i_last {
new_body.push(node.clone());
} else {
trace!("last instruction is {}", node);
let last_inst = node.clone();
match last_inst.v {
TreeNode_::Instruction(inst) => {
let ops = inst.ops.read().unwrap();
match inst.v {
Instruction_::Branch2{cond, true_dest, false_dest, true_prob} => {
let true_dest = process_dest(true_dest, &mut new_blocks_to_insert, &ops, vm);
let false_dest = process_dest(false_dest, &mut new_blocks_to_insert, &ops, vm);
let new_inst = func.new_inst(Instruction{
hdr: inst.hdr.clone(),
value: inst.value.clone(),
ops: RwLock::new(ops.to_vec()),
v: Instruction_::Branch2 {
cond: cond,
true_dest: true_dest,
false_dest: false_dest,
true_prob: true_prob
}
});
trace!("rewrite to {}", new_inst);
new_body.push(new_inst);
}
Instruction_::Call{data, resume} => {
let norm_dest = process_dest(resume.normal_dest, &mut new_blocks_to_insert, &ops, vm);
let exn_dest = process_dest(resume.exn_dest, &mut new_blocks_to_insert, &ops, vm);
let new_inst = func.new_inst(Instruction{
hdr: inst.hdr.clone(),
value: inst.value.clone(),
ops: RwLock::new(ops.to_vec()),
v: Instruction_::Call {
data: data.clone(),
resume: ResumptionData{
normal_dest: norm_dest,
exn_dest: exn_dest
}
}
});
trace!("rewrite to {}", new_inst);
new_body.push(new_inst);
}
Instruction_::CCall{data, resume} => {
let norm_dest = process_dest(resume.normal_dest, &mut new_blocks_to_insert, &ops, vm);
let exn_dest = process_dest(resume.exn_dest, &mut new_blocks_to_insert, &ops, vm);
let new_inst = func.new_inst(Instruction{
hdr: inst.hdr.clone(),
value: inst.value.clone(),
ops: RwLock::new(ops.to_vec()),
v: Instruction_::Call {
data: data.clone(),
resume: ResumptionData{
normal_dest: norm_dest,
exn_dest: exn_dest
}
}
});
trace!("rewrite to {}", new_inst);
new_body.push(new_inst);
},
Instruction_::Switch{cond, default, mut branches} => {
let default_dest = process_dest(default, &mut new_blocks_to_insert, &ops, vm);
let new_branches = branches.drain(..).map(|pair| {
let dest = process_dest(pair.1, &mut new_blocks_to_insert, &ops, vm);
(pair.0, dest)
}).collect();
let new_inst = func.new_inst(Instruction{
hdr: inst.hdr.clone(),
value: inst.value.clone(),
ops: RwLock::new(ops.to_vec()),
v: Instruction_::Switch {
cond: cond,
default: default_dest,
branches: new_branches
}
});
trace!("rewrite to {}", new_inst);
new_body.push(new_inst);
}
Instruction_::Watchpoint{..} => {
unimplemented!()
},
Instruction_::WPBranch{..} => {
unimplemented!()
},
Instruction_::SwapStack{..} => {
unimplemented!()
},
Instruction_::ExnInstruction{..} => {
unimplemented!()
},
_ => {
trace!("no rewrite");
new_body.push(node.clone())
}
}
}
_ => panic!("expect a terminal instruction")
}
}
i += 1;
}
block.content = Some(BlockContent{
args : block_content.args.to_vec(),
exn_arg : block_content.exn_arg.clone(),
body : new_body,
keepalives: block_content.keepalives.clone()
});
}
// insert new blocks here
for block_info in new_blocks_to_insert {
let block = {
let mut ret = Block::new(block_info.blk_id);
let target_id = block_info.target;
let name = format!("intermediate_block_{}_to_{}", block_info.blk_id, target_id);
vm.set_name(ret.as_entity(), name);
let target_block = f_content.get_block(target_id);
assert!(target_block.content.is_some());
let ref target_args = target_block.content.as_ref().unwrap().args;
ret.content = Some(BlockContent{
args: vec![],
exn_arg: None,
body: {
let mut vec = vec![];
// move every from_arg to target_arg
let mut i = 0;
for arg in block_info.from_args.iter() {
let m = func.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![target_args[i].clone()]),
ops: RwLock::new(vec![arg.clone()]),
v: Instruction_::Move(0)
});
vec.push(m);
i += 1;
}
// branch to target
let b = func.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![]),
v: Instruction_::Branch1(Destination{
target: target_id,
args: vec![]
})
});
vec.push(b);
vec
},
keepalives: None
});
trace!("inserting new intermediate block: {:?}", ret);
ret
};
f_content.blocks.insert(block.id(), block);
}
func.define(f_content);
}
}
fn process_dest(dest: Destination, blocks_to_insert: &mut Vec<IntermediateBlockInfo>, ops: &Vec<P<TreeNode>>, vm: &VM) -> Destination {