To protect your data, the CISO has suggested that users enable GitLab 2FA as soon as possible.

Commit 3288e8b7 authored by Javad Ebrahimian Amiri's avatar Javad Ebrahimian Amiri
Browse files

Merge remote-tracking branch 'remotes/origin/develop' into sel4-rumprun-devel

parents 7ca01d63 a184e147
......@@ -29,8 +29,9 @@ are not compliant to Mu spec.
## Building
You will need:
* rust version 1.18 (03fc9d622 2017-06-06)
* rust version 1.19 (0ade33941 2017-07-17)
* clang 4.0+
* cmake 3.8+ (we do not depend on cmake, but some Rust crates use it)
* internet connection (as Rust will download dependencies)
To build Zebu with release build,
......@@ -107,6 +108,17 @@ The header also includes Zebu-specific APIs, such as `mu_fastimpl_new()`.
Zebu allows the user to set options when creating a new instance.
The options can be found in [vm_options.rs](src/vm/vm_options.rs).
## Contribution
#### Coding style
Zebu code base uses `rustfmt-nightly` default style (as defined in [Rust Style Guide](https://github.com/rust-lang-nursery/fmt-rfcs/blob/master/guide/guide.md))
with one exception - no trailing comma (see [rustfmt.toml](rustfmt.toml)). The CI
server marks commits as failed if the code is not compliant with the style.
The CI server is using:
* `rust` nightly-2017-07-19
* `rustfmt-nightly` 0.1.9-nightly
## Bug reports
As Zebu is still in its early development, we expect bugs and
......
......@@ -337,12 +337,13 @@ impl fmt::Display for Instruction {
if self.value.is_some() {
write!(
f,
"{} = {}",
"{} = {} [{}]",
vec_utils::as_str(self.value.as_ref().unwrap()),
self.v.debug_str(ops)
self.v.debug_str(ops),
self.hdr
)
} else {
write!(f, "{}", self.v.debug_str(ops))
write!(f, "{} [{}]", self.v.debug_str(ops), self.hdr)
}
}
}
......
......@@ -813,6 +813,15 @@ impl BlockContent {
ret
}
/// Returns a copy of this `BlockContent` that preserves the block's
/// interface — arguments, exception argument, and keepalives — but has
/// an empty instruction body for the caller to fill in.
pub fn clone_empty(&self) -> BlockContent {
    BlockContent {
        args: self.args.clone(),
        exn_arg: self.exn_arg.clone(),
        // fresh, empty instruction list
        body: vec![],
        keepalives: self.keepalives.clone()
    }
}
}
/// TreeNode represents a node in the AST, it could either be an instruction,
......
......@@ -19,7 +19,8 @@ use utils::POINTER_SIZE;
use utils::vec_utils;
use std;
use rodal;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::ptr;
use std::fmt;
use std::collections::HashMap;
use std::sync::RwLock;
......@@ -138,6 +139,14 @@ impl MuType {
}
}
/// is this type an integer type?
pub fn is_int(&self) -> bool {
    // true exactly for the Int(_) variant, regardless of bit width
    matches!(self.v, MuType_::Int(_))
}
/// is this type a floating point type? (float/double)
pub fn is_fp(&self) -> bool {
match self.v {
......@@ -467,13 +476,24 @@ impl fmt::Display for MuType_ {
}
}
#[no_mangle]
pub static STRUCT_TAG_MAP_LOC: Option<AtomicPtr<RwLock<HashMap<StructTag, StructType_>>>> = None;
#[no_mangle]
pub static HYBRID_TAG_MAP_LOC: Option<AtomicPtr<RwLock<HashMap<HybridTag, HybridType_>>>> = None;
lazy_static! {
/// storing a map from MuName to StructType_
pub static ref STRUCT_TAG_MAP : RwLock<HashMap<StructTag, StructType_>> =
rodal::try_load_asm_name_move("STRUCT_TAG_MAP").unwrap_or(RwLock::new(HashMap::new()));
match &STRUCT_TAG_MAP_LOC {
&Some(ref ptr) => unsafe{ptr::read(ptr.load(Ordering::Relaxed))},
&None => RwLock::new(HashMap::new())
};
/// storing a map from MuName to HybridType_
pub static ref HYBRID_TAG_MAP : RwLock<HashMap<HybridTag, HybridType_>> =
rodal::try_load_asm_name_move("HYBRID_TAG_MAP").unwrap_or(RwLock::new(HashMap::new()));
match &HYBRID_TAG_MAP_LOC {
&Some(ref ptr) => unsafe{ptr::read(ptr.load(Ordering::Relaxed))},
&None => RwLock::new(HashMap::new())
};
}
rodal_struct!(StructType_{tys});
......
......@@ -365,8 +365,7 @@ impl<'a> InstructionSelection {
self.finish_block();
let block_name = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
format!("switch_not_met_case_{}", case_op_index).as_str()
);
self.start_block(block_name);
......@@ -829,21 +828,11 @@ impl<'a> InstructionSelection {
// The size of the aarch64 register
let to_ty_reg_size = check_op_len(&tmp_res.ty);
if to_ty_size != to_ty_reg_size {
let blk_positive = make_block_name(
&self.current_fv_name,
node.id(),
"positive"
);
let blk_negative = make_block_name(
&self.current_fv_name,
node.id(),
"negative"
);
let blk_end = make_block_name(
&self.current_fv_name,
node.id(),
"end"
);
let blk_positive =
make_block_name(&node.name(), "positive");
let blk_negative =
make_block_name(&node.name(), "negative");
let blk_end = make_block_name(&node.name(), "end");
let tmp = make_temporary(f_context, to_ty.clone(), vm);
self.backend.emit_tbnz(
......@@ -1019,11 +1008,8 @@ impl<'a> InstructionSelection {
self.finish_block();
let blk_load_start = make_block_name(
&self.current_fv_name,
node.id(),
"load_start"
);
let blk_load_start =
make_block_name(&node.name(), "load_start");
// load_start:
self.start_block(blk_load_start.clone());
......@@ -1168,11 +1154,8 @@ impl<'a> InstructionSelection {
self.finish_block();
let blk_store_start = make_block_name(
&self.current_fv_name,
node.id(),
"store_start"
);
let blk_store_start =
make_block_name(&node.name(), "store_start");
// store_start:
self.start_block(blk_store_start.clone());
......@@ -1278,12 +1261,10 @@ impl<'a> InstructionSelection {
let res_value = self.get_result_value(node, 0);
let res_success = self.get_result_value(node, 1);
let blk_cmpxchg_start =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_start");
let blk_cmpxchg_failed =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_failed");
let blk_cmpxchg_start = make_block_name(&node.name(), "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(&node.name(), "cmpxchg_failed");
let blk_cmpxchg_succeded =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_succeded");
make_block_name(&node.name(), "cmpxchg_succeded");
self.finish_block();
......@@ -3420,9 +3401,8 @@ impl<'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end =
make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
let blk_alloc_large = make_block_name(&node.name(), "alloc_large");
let blk_alloc_large_end = make_block_name(&node.name(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -3451,7 +3431,7 @@ impl<'a> InstructionSelection {
self.backend.emit_b_cond("GT", blk_alloc_large.clone());
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
let block_name = make_block_name(&node.name(), "allocsmall");
self.start_block(block_name);
self.emit_alloc_sequence_small(
tmp_allocator.clone(),
......@@ -5615,8 +5595,7 @@ impl<'a> InstructionSelection {
let ret = {
if cur_node.is_some() {
make_block_name(
&self.current_fv_name,
cur_node.unwrap().id(),
&cur_node.unwrap().name(),
format!("callsite_{}", self.current_callsite_id).as_str()
)
} else {
......
......@@ -1897,6 +1897,48 @@ impl ASMCodeGen {
)
}
/// emits a move instruction (reg64/32 -> fpr)
fn internal_mov_bitcast_fpr_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
    trace!("emit: {} {} -> {}", inst, src, dest);

    // Column offsets into the emitted operand string "<inst> <src>,<dest>".
    let src_off = inst.len() + 1;
    let (src_str, src_id, src_loc) = self.prepare_reg(src, src_off);
    let dest_off = src_off + src_str.len() + 1;
    let (dest_str, dest_id, dest_loc) = self.prepare_fpreg(dest, dest_off);

    self.add_asm_inst(
        format!("{} {},{}", inst, src_str, dest_str),
        // dest register map first, src register map second — same
        // define/use ordering as the sibling internal_mov helpers
        linked_hashmap!{ dest_id => vec![dest_loc] },
        linked_hashmap!{ src_id => vec![src_loc] },
        false
    )
}
/// emits a move instruction (fpr -> reg64/32)
fn internal_mov_bitcast_r_fpr(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
    trace!("emit: {} {} -> {}", inst, src, dest);

    // Column offsets into the emitted operand string "<inst> <src>,<dest>".
    let src_off = inst.len() + 1;
    let (src_str, src_id, src_loc) = self.prepare_fpreg(src, src_off);
    let dest_off = src_off + src_str.len() + 1;
    let (dest_str, dest_id, dest_loc) = self.prepare_reg(dest, dest_off);

    self.add_asm_inst(
        format!("{} {},{}", inst, src_str, dest_str),
        // dest register map first, src register map second — same
        // define/use ordering as the sibling internal_mov helpers
        linked_hashmap!{ dest_id => vec![dest_loc] },
        linked_hashmap!{ src_id => vec![src_loc] },
        false
    )
}
/// emits a move instruction (reg -> reg)
fn internal_mov_r_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
let len = check_op_len(dest);
......@@ -2242,6 +2284,28 @@ impl ASMCodeGen {
)
}
/// emits an fp-width conversion instruction (fpreg -> fpreg); despite the
/// name it serves both truncation (cvtsd2ss) and extension (cvtss2sd) —
/// see emit_cvtsd2ss_f32_f64 / emit_cvtss2sd_f64_f32
fn internal_fp_trunc(&mut self, inst: &str, dest: Reg, src: Reg) {
    // NOTE: the previous version shadowed `inst` with `inst.to_string()`,
    // a needless allocation — `&str` supports `.len()` and `format!` directly.
    trace!("emit: {} {} -> {}", inst, src, dest);

    // operand string layout: "<inst> <src>,<dest>"
    let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
    let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);

    let asm = format!("{} {},{}", inst, reg1, reg2);

    self.add_asm_inst(
        asm,
        linked_hashmap!{
            id2 => vec![loc2]
        },
        linked_hashmap!{
            id1 => vec![loc1]
        },
        false
    )
}
/// emits a store instruction to store a spilled register
fn emit_spill_store_gpr(&mut self, dest: Mem, src: Reg) {
self.internal_mov_mem_r("mov", dest, src, true, false)
......@@ -2448,23 +2512,19 @@ impl CodeGenerator for ASMCodeGen {
}
/// bitcast move: 64-bit GPR -> FP register, via movq
fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg) {
    // NOTE(review): internal_mov_bitcast_fpr_r also traces — this trace
    // looks duplicated; confirm against the intended post-merge version
    trace!("emit: movq {} -> {}", src, dest);
    self.internal_mov_bitcast_fpr_r("movq", dest, src)
}
let (reg1, id1, loc1) = self.prepare_reg(src, 5);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, 5 + reg1.len() + 1);
/// bitcast move: 32-bit GPR -> FP register, via movd
fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg) {
    self.internal_mov_bitcast_fpr_r("movd", dest, src)
}
let asm = format!("movq {},{}", reg1, reg2);
/// bitcast move: FP register -> 64-bit GPR, via movq
fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg) {
    self.internal_mov_bitcast_r_fpr("movq", dest, src)
}
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
/// bitcast move: FP register -> 32-bit GPR, via movd
fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg) {
    self.internal_mov_bitcast_r_fpr("movd", dest, src)
}
fn emit_mov_r_imm(&mut self, dest: &P<Value>, src: i32) {
......@@ -3509,6 +3569,15 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fpr_to_gpr("cvttss2si", dest, src);
}
// convert - fp trunc
/// fp truncation: double (f64) -> float (f32), via cvtsd2ss
fn emit_cvtsd2ss_f32_f64(&mut self, dest: Reg, src: Reg) {
    self.internal_fp_trunc("cvtsd2ss", dest, src)
}
/// fp extension: float (f32) -> double (f64), via cvtss2sd
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg) {
    self.internal_fp_trunc("cvtss2sd", dest, src)
}
// unpack low data - interleave low byte
fn emit_punpckldq_f64_mem128(&mut self, dest: Reg, src: Mem) {
trace!("emit: punpckldq {} {} -> {}", src, dest, dest);
......
......@@ -70,8 +70,11 @@ pub trait CodeGenerator {
// mov imm64 to r64
fn emit_mov_r64_imm64(&mut self, dest: Reg, src: i64);
// mov r64 to fpr
// bitcast between int and floatpoint of same length
fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg);
fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg);
fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg);
fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg);
fn emit_mov_r_imm(&mut self, dest: Reg, src: i32);
fn emit_mov_r_mem(&mut self, dest: Reg, src: Mem); // load
......@@ -302,6 +305,10 @@ pub trait CodeGenerator {
fn emit_cvtsi2ss_f32_r(&mut self, dest: Reg, src: Reg);
fn emit_cvtss2si_r_f32(&mut self, dest: Reg, src: Reg);
// fp trunc
fn emit_cvtsd2ss_f32_f64(&mut self, dest: Reg, src: Reg);
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg);
// used for unsigned int to fp conversion
fn emit_cvttsd2si_r_f64(&mut self, dest: Reg, src: Reg);
......
......@@ -365,21 +365,9 @@ impl<'a> InstructionSelection {
}
// jcc - for 8-bits integer
_ => {
let blk_true = make_block_name(
&self.current_fv_name,
node.id(),
"select_true"
);
let blk_false = make_block_name(
&self.current_fv_name,
node.id(),
"select_false"
);
let blk_end = make_block_name(
&self.current_fv_name,
node.id(),
"select_end"
);
let blk_true = make_block_name(&node.name(), "select_true");
let blk_false = make_block_name(&node.name(), "select_false");
let blk_end = make_block_name(&node.name(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -442,12 +430,9 @@ impl<'a> InstructionSelection {
} else if self.match_fpreg(true_val) {
let tmp_res = self.get_result_value(node);
let blk_true =
make_block_name(&self.current_fv_name, node.id(), "select_true");
let blk_false =
make_block_name(&self.current_fv_name, node.id(), "select_false");
let blk_end =
make_block_name(&self.current_fv_name, node.id(), "select_end");
let blk_true = make_block_name(&node.name(), "select_true");
let blk_false = make_block_name(&node.name(), "select_false");
let blk_end = make_block_name(&node.name(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -603,8 +588,7 @@ impl<'a> InstructionSelection {
self.finish_block();
let block_name = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
format!("switch_not_met_case_{}", case_op_index).as_str()
);
self.start_block(block_name);
......@@ -1098,18 +1082,15 @@ impl<'a> InstructionSelection {
self.backend.emit_test_r_r(&tmp_op, &tmp_op);
let blk_if_signed = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
"uitofp_float_if_signed"
);
let blk_if_not_signed = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
"uitofp_float_if_not_signed"
);
let blk_done = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
"uitofp_float_done"
);
......@@ -1349,7 +1330,94 @@ impl<'a> InstructionSelection {
panic!("expect double or float")
}
}
_ => unimplemented!()
op::ConvOp::FPTRUNC => {
let tmp_res = self.get_result_value(node);
assert!(
self.match_fpreg(op),
"unexpected op (expected fpreg): {}",
op
);
let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
if from_ty.is_double() && to_ty.is_float() {
self.backend.emit_cvtsd2ss_f32_f64(&tmp_res, &tmp_op);
} else {
panic!(
"FPTRUNC from {} to {} is not supported \
(only support FPTRUNC from double to float)",
from_ty,
to_ty
);
}
}
op::ConvOp::FPEXT => {
let tmp_res = self.get_result_value(node);
assert!(
self.match_fpreg(op),
"unexpected op (expected fpreg): {}",
op
);
let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
if from_ty.is_float() && to_ty.is_double() {
self.backend.emit_cvtss2sd_f64_f32(&tmp_res, &tmp_op);
} else {
panic!(
"FPEXT from {} to {} is not supported\
(only support FPEXT from float to double)",
from_ty,
to_ty
);
}
}
op::ConvOp::BITCAST => {
let tmp_res = self.get_result_value(node);
let tmp_op = if self.match_fpreg(op) {
self.emit_fpreg(op, f_content, f_context, vm)
} else if self.match_ireg(op) {
self.emit_ireg(op, f_content, f_context, vm)
} else {
panic!("expected op for BITCAST (expected ireg/fpreg): {}", op)
};
let ref from_ty = tmp_op.ty;
let ref to_ty = tmp_res.ty;
let from_ty_size = vm.get_backend_type_size(from_ty.id());
let to_ty_size = vm.get_backend_type_size(to_ty.id());
assert!(
from_ty_size == to_ty_size,
"BITCAST only works between int/fp of same length"
);
assert!(
from_ty_size == 8 || from_ty_size == 4,
"BITCAST only works for int32/float or int64/double"
);
if from_ty.is_fp() && to_ty.is_int() {
if from_ty_size == 8 {
self.backend.emit_mov_r64_fpr(&tmp_res, &tmp_op);
} else if from_ty_size == 4 {
self.backend.emit_mov_r32_fpr(&tmp_res, &tmp_op);
} else {
unreachable!()
}
} else if from_ty.is_int() && to_ty.is_fp() {
if from_ty_size == 8 {
self.backend.emit_mov_fpr_r64(&tmp_res, &tmp_op);
} else if from_ty_size == 4 {
self.backend.emit_mov_fpr_r32(&tmp_res, &tmp_op);
} else {
unreachable!()
}
} else {
panic!(
"expected BITCAST between int and fp,\
found {} and {}",
from_ty,
to_ty
)
}
}
}
}
......@@ -2997,9 +3065,8 @@ impl<'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end =
make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
let blk_alloc_large = make_block_name(&node.name(), "alloc_large");
let blk_alloc_large_end = make_block_name(&node.name(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
// if the header size is not zero, we need to calculate a total size to alloc
......@@ -3016,7 +3083,7 @@ impl<'a> InstructionSelection {
self.backend.emit_jg(blk_alloc_large.clone());
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
let block_name = make_block_name(&node.name(), "allocsmall");
self.start_block(block_name);
// alloc small here
......@@ -3172,12 +3239,12 @@ impl<'a> InstructionSelection {
// branch to slow path if end > limit (end - limit > 0)
// ASM: jg alloc_slow
let slowpath = make_block_name(&self.current_fv_name, node.id(), "allocslow");
let slowpath = make_block_name(&node.name(), "allocslow");
self.backend.emit_jg(slowpath.clone());
// finish current block
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), "updatecursor");
let block_name = make_block_name(&node.name(), "updatecursor");
self.start_block(block_name);
// update cursor
......@@ -3195,7 +3262,7 @@ impl<'a> InstructionSelection {
}
// ASM jmp alloc_end
let allocend = make_block_name(&self.current_fv_name, node.id(), "alloc_small_end");
let allocend = make_block_name(&node.name(), "alloc_small_end");
self.backend.emit_jmp(allocend.clone());
// finishing current block
......@@ -3856,7 +3923,7 @@ impl<'a> InstructionSelection {
inst: &Instruction,
calldata: &CallData,
resumption: Option<&ResumptionData>,
cur_node: &TreeNode,
node: &TreeNode,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM
......@@ -3918,7 +3985,7 @@ impl<'a> InstructionSelection {