Commit 3288e8b7 authored by Javad Ebrahimian Amiri's avatar Javad Ebrahimian Amiri

Merge remote-tracking branch 'remotes/origin/develop' into sel4-rumprun-devel

parents 7ca01d63 a184e147
......@@ -29,8 +29,9 @@ are not compliant to Mu spec.
## Building
You will need:
* rust version 1.18 (03fc9d622 2017-06-06)
* rust version 1.19 (0ade33941 2017-07-17)
* clang 4.0+
* cmake 3.8+ (we do not depend on cmake, but some Rust crates use it)
* internet connection (as Rust will download dependencies)
To build Zebu with release build,
......@@ -107,6 +108,17 @@ The header also includes Zebu-specific APIs, such as `mu_fastimpl_new()`.
Zebu allows the user to set options when creating a new instance.
The options can be found in [vm_options.rs](src/vm/vm_options.rs).
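For illustration, an embedder might pass options as a single command-line-style string when creating a VM instance; the constructor name `VM::new_with_opts` and the crate path below are assumptions, and only option names that appear elsewhere in this tree (`--log-level`, `--aot-emit-dir`) are used:
```rust
// Hedged sketch: `mu::vm::VM::new_with_opts` is assumed here; consult
// src/vm/vm_options.rs for the options the current build actually accepts.
extern crate mu;
use mu::vm::VM;

fn main() {
    // Options are parsed from one whitespace-separated string.
    let vm = VM::new_with_opts("--log-level=info --aot-emit-dir=emit");
    // ... build Mu IR bundles and compile them with `vm` ...
    let _ = vm;
}
```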
## Contribution
#### Coding style
The Zebu code base uses the `rustfmt-nightly` default style (as defined in the [Rust Style Guide](https://github.com/rust-lang-nursery/fmt-rfcs/blob/master/guide/guide.md))
with one exception - no trailing comma (see [rustfmt.toml](rustfmt.toml)). The CI
server marks commits as failed if the code is not compliant with the style.
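A minimal illustration of that one deviation (the struct below is made up purely to show the formatting):
```rust
// rustfmt defaults apply, except that the trailing comma after the
// last element is omitted (illustrative struct only).
struct Options {
    log_level: String,
    emit_dir: String // <- no trailing comma
}

fn main() {
    let opts = Options {
        log_level: "info".to_string(),
        emit_dir: "emit".to_string() // <- no trailing comma
    };
    println!("{} {}", opts.log_level, opts.emit_dir);
}
```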
The CI server is using:
* `rust` nightly-2017-07-19
* `rustfmt-nightly` 0.1.9-nightly
## Bug reports
As Zebu is still in its early development, we expect bugs and
......
......@@ -337,12 +337,13 @@ impl fmt::Display for Instruction {
if self.value.is_some() {
write!(
f,
"{} = {}",
"{} = {} [{}]",
vec_utils::as_str(self.value.as_ref().unwrap()),
self.v.debug_str(ops)
self.v.debug_str(ops),
self.hdr
)
} else {
write!(f, "{}", self.v.debug_str(ops))
write!(f, "{} [{}]", self.v.debug_str(ops), self.hdr)
}
}
}
......
......@@ -813,6 +813,15 @@ impl BlockContent {
ret
}
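/// returns a copy of this BlockContent that keeps the block arguments,
/// exception argument and keepalives, but has an empty body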
pub fn clone_empty(&self) -> BlockContent {
BlockContent {
args: self.args.clone(),
exn_arg: self.exn_arg.clone(),
body: vec![],
keepalives: self.keepalives.clone()
}
}
}
/// TreeNode represents a node in the AST, it could either be an instruction,
......
......@@ -19,7 +19,8 @@ use utils::POINTER_SIZE;
use utils::vec_utils;
use std;
use rodal;
use std::sync::atomic::{Ordering, AtomicPtr};
use std::ptr;
use std::fmt;
use std::collections::HashMap;
use std::sync::RwLock;
......@@ -138,6 +139,14 @@ impl MuType {
}
}
/// is this type an integer type?
pub fn is_int(&self) -> bool {
match self.v {
MuType_::Int(_) => true,
_ => false
}
}
/// is this type a floating point type? (float/double)
pub fn is_fp(&self) -> bool {
match self.v {
......@@ -467,13 +476,24 @@ impl fmt::Display for MuType_ {
}
}
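// set by the boot image's C entry stub, which points these at the tag maps
// persisted in the image before the lazy statics below are first dereferenced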
#[no_mangle]
pub static STRUCT_TAG_MAP_LOC: Option<AtomicPtr<RwLock<HashMap<StructTag, StructType_>>>> = None;
#[no_mangle]
pub static HYBRID_TAG_MAP_LOC: Option<AtomicPtr<RwLock<HashMap<HybridTag, HybridType_>>>> = None;
lazy_static! {
/// storing a map from MuName to StructType_
pub static ref STRUCT_TAG_MAP : RwLock<HashMap<StructTag, StructType_>> =
rodal::try_load_asm_name_move("STRUCT_TAG_MAP").unwrap_or(RwLock::new(HashMap::new()));
match &STRUCT_TAG_MAP_LOC {
&Some(ref ptr) => unsafe{ptr::read(ptr.load(Ordering::Relaxed))},
&None => RwLock::new(HashMap::new())
};
/// storing a map from MuName to HybridType_
pub static ref HYBRID_TAG_MAP : RwLock<HashMap<HybridTag, HybridType_>> =
rodal::try_load_asm_name_move("HYBRID_TAG_MAP").unwrap_or(RwLock::new(HashMap::new()));
match &HYBRID_TAG_MAP_LOC {
&Some(ref ptr) => unsafe{ptr::read(ptr.load(Ordering::Relaxed))},
&None => RwLock::new(HashMap::new())
};
}
rodal_struct!(StructType_{tys});
......
......@@ -365,8 +365,7 @@ impl<'a> InstructionSelection {
self.finish_block();
let block_name = make_block_name(
&self.current_fv_name,
node.id(),
&node.name(),
format!("switch_not_met_case_{}", case_op_index).as_str()
);
self.start_block(block_name);
......@@ -829,21 +828,11 @@ impl<'a> InstructionSelection {
// The size of the aarch64 register
let to_ty_reg_size = check_op_len(&tmp_res.ty);
if to_ty_size != to_ty_reg_size {
let blk_positive = make_block_name(
&self.current_fv_name,
node.id(),
"positive"
);
let blk_negative = make_block_name(
&self.current_fv_name,
node.id(),
"negative"
);
let blk_end = make_block_name(
&self.current_fv_name,
node.id(),
"end"
);
let blk_positive =
make_block_name(&node.name(), "positive");
let blk_negative =
make_block_name(&node.name(), "negative");
let blk_end = make_block_name(&node.name(), "end");
let tmp = make_temporary(f_context, to_ty.clone(), vm);
self.backend.emit_tbnz(
......@@ -1019,11 +1008,8 @@ impl<'a> InstructionSelection {
self.finish_block();
let blk_load_start = make_block_name(
&self.current_fv_name,
node.id(),
"load_start"
);
let blk_load_start =
make_block_name(&node.name(), "load_start");
// load_start:
self.start_block(blk_load_start.clone());
......@@ -1168,11 +1154,8 @@ impl<'a> InstructionSelection {
self.finish_block();
let blk_store_start = make_block_name(
&self.current_fv_name,
node.id(),
"store_start"
);
let blk_store_start =
make_block_name(&node.name(), "store_start");
// store_start:
self.start_block(blk_store_start.clone());
......@@ -1278,12 +1261,10 @@ impl<'a> InstructionSelection {
let res_value = self.get_result_value(node, 0);
let res_success = self.get_result_value(node, 1);
let blk_cmpxchg_start =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_start");
let blk_cmpxchg_failed =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_failed");
let blk_cmpxchg_start = make_block_name(&node.name(), "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(&node.name(), "cmpxchg_failed");
let blk_cmpxchg_succeded =
make_block_name(&self.current_fv_name, node.id(), "cmpxchg_succeded");
make_block_name(&node.name(), "cmpxchg_succeded");
self.finish_block();
......@@ -3420,9 +3401,8 @@ impl<'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end =
make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
let blk_alloc_large = make_block_name(&node.name(), "alloc_large");
let blk_alloc_large_end = make_block_name(&node.name(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -3451,7 +3431,7 @@ impl<'a> InstructionSelection {
self.backend.emit_b_cond("GT", blk_alloc_large.clone());
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
let block_name = make_block_name(&node.name(), "allocsmall");
self.start_block(block_name);
self.emit_alloc_sequence_small(
tmp_allocator.clone(),
......@@ -5615,8 +5595,7 @@ impl<'a> InstructionSelection {
let ret = {
if cur_node.is_some() {
make_block_name(
&self.current_fv_name,
cur_node.unwrap().id(),
&cur_node.unwrap().name(),
format!("callsite_{}", self.current_callsite_id).as_str()
)
} else {
......
......@@ -1897,6 +1897,48 @@ impl ASMCodeGen {
)
}
/// emits a move instruction (reg64/32 -> fpr)
fn internal_mov_bitcast_fpr_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
/// emits a move instruction (fpr -> reg64/32)
fn internal_mov_bitcast_r_fpr(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
/// emits a move instruction (reg -> reg)
fn internal_mov_r_r(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
let len = check_op_len(dest);
......@@ -2242,6 +2284,28 @@ impl ASMCodeGen {
)
}
/// emits a truncate instruction (fpreg -> fpreg)
fn internal_fp_trunc(&mut self, inst: &str, dest: Reg, src: Reg) {
let inst = inst.to_string();
trace!("emit: {} {} -> {}", inst, src, dest);
let (reg1, id1, loc1) = self.prepare_fpreg(src, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
}
/// emits a store instruction to store a spilled register
fn emit_spill_store_gpr(&mut self, dest: Mem, src: Reg) {
self.internal_mov_mem_r("mov", dest, src, true, false)
......@@ -2448,23 +2512,19 @@ impl CodeGenerator for ASMCodeGen {
}
fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg) {
trace!("emit: movq {} -> {}", src, dest);
self.internal_mov_bitcast_fpr_r("movq", dest, src)
}
let (reg1, id1, loc1) = self.prepare_reg(src, 5);
let (reg2, id2, loc2) = self.prepare_fpreg(dest, 5 + reg1.len() + 1);
fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg) {
self.internal_mov_bitcast_fpr_r("movd", dest, src)
}
let asm = format!("movq {},{}", reg1, reg2);
fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg) {
self.internal_mov_bitcast_r_fpr("movq", dest, src)
}
self.add_asm_inst(
asm,
linked_hashmap!{
id2 => vec![loc2]
},
linked_hashmap!{
id1 => vec![loc1]
},
false
)
fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg) {
self.internal_mov_bitcast_r_fpr("movd", dest, src)
}
fn emit_mov_r_imm(&mut self, dest: &P<Value>, src: i32) {
......@@ -3509,6 +3569,15 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fpr_to_gpr("cvttss2si", dest, src);
}
// convert - fp trunc
fn emit_cvtsd2ss_f32_f64(&mut self, dest: Reg, src: Reg) {
self.internal_fp_trunc("cvtsd2ss", dest, src)
}
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg) {
self.internal_fp_trunc("cvtss2sd", dest, src)
}
// unpack low data - interleave low doublewords
fn emit_punpckldq_f64_mem128(&mut self, dest: Reg, src: Mem) {
trace!("emit: punpckldq {} {} -> {}", src, dest, dest);
......
......@@ -70,8 +70,11 @@ pub trait CodeGenerator {
// mov imm64 to r64
fn emit_mov_r64_imm64(&mut self, dest: Reg, src: i64);
// mov r64 to fpr
// bitcast between int and floating point of the same length
fn emit_mov_fpr_r64(&mut self, dest: Reg, src: Reg);
fn emit_mov_fpr_r32(&mut self, dest: Reg, src: Reg);
fn emit_mov_r64_fpr(&mut self, dest: Reg, src: Reg);
fn emit_mov_r32_fpr(&mut self, dest: Reg, src: Reg);
fn emit_mov_r_imm(&mut self, dest: Reg, src: i32);
fn emit_mov_r_mem(&mut self, dest: Reg, src: Mem); // load
......@@ -302,6 +305,10 @@ pub trait CodeGenerator {
fn emit_cvtsi2ss_f32_r(&mut self, dest: Reg, src: Reg);
fn emit_cvtss2si_r_f32(&mut self, dest: Reg, src: Reg);
// fp trunc
fn emit_cvtsd2ss_f32_f64(&mut self, dest: Reg, src: Reg);
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg);
// used for unsigned int to fp conversion
fn emit_cvttsd2si_r_f64(&mut self, dest: Reg, src: Reg);
......
......@@ -25,7 +25,7 @@ use std;
use utils::ByteSize;
use utils::math::align_up;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
use runtime::mm::common::gctype::{GCType, RefPattern, GCTYPE_INIT_ID};
use num::integer::lcm;
/// for ahead-of-time compilation (boot image making), the file contains a persisted VM,
......@@ -536,6 +536,6 @@ impl RegGroup {
}
}
fn make_block_name(fv_name: &String, id: MuID, label: &str) -> MuName {
format!("{}.#{}:{}", fv_name, id, label)
fn make_block_name(inst: &MuName, label: &str) -> MuName {
format!("{}:{}", inst, label)
}
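A self-contained sketch of the new naming scheme (the instruction name below is made up); block labels are now derived from the owning instruction's name rather than from the function-version name plus a numeric node id:
```rust
// Standalone illustration; in Zebu, MuName is the VM's name type.
type MuName = String;

fn make_block_name(inst: &MuName, label: &str) -> MuName {
    format!("{}:{}", inst, label)
}

fn main() {
    let inst: MuName = "my_fn.v1.entry.#7".to_string(); // hypothetical instruction name
    assert_eq!(
        make_block_name(&inst, "alloc_large"),
        "my_fn.v1.entry.#7:alloc_large"
    );
}
```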
......@@ -311,7 +311,7 @@ pub fn start_trace(
lo_space: Arc<FreeListSpace>
) {
// creates root deque
let (mut worker, stealer) = deque();
let (worker, stealer) = deque();
while !work_stack.is_empty() {
worker.push(work_stack.pop().unwrap());
......
......@@ -165,6 +165,17 @@ fn link_executable_internal(
cc.arg("-lm");
cc.arg("-lpthread");
cc.arg("-lz");
} else if cfg!(target_os = "macos") {
cc.arg("-liconv");
cc.arg("-framework");
cc.arg("Security");
cc.arg("-framework");
cc.arg("CoreFoundation");
cc.arg("-lz");
cc.arg("-lSystem");
cc.arg("-lresolv");
cc.arg("-lc");
cc.arg("-lm");
}
// all the source code
......
......@@ -27,7 +27,15 @@ extern void* rodal_realloc(void*, size_t);
extern uint32_t mu_retval;
extern void* STRUCT_TAG_MAP;
extern void* HYBRID_TAG_MAP;
extern void* STRUCT_TAG_MAP_LOC;
extern void* HYBRID_TAG_MAP_LOC;
int main(int argc, char** argv) {
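/* point the Rust side at the tag maps persisted in the boot image before
   any Rust code dereferences the corresponding lazy statics */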
STRUCT_TAG_MAP_LOC = &STRUCT_TAG_MAP;
HYBRID_TAG_MAP_LOC = &HYBRID_TAG_MAP;
rodal_init_deallocate();
mu_main(&RODAL_END, &vm, argc, argv);
return (int)mu_retval;
......
......@@ -177,4 +177,4 @@ use vm::built_info;
#[no_mangle]
pub extern "C" fn mu_get_version() -> *const c_char {
built_info::ZEBU_VERSION_C_STR.as_ptr()
}
\ No newline at end of file
}
#!/bin/bash
$(ls -t ./target/debug/lib-* | head -1) "$@"
#!/bin/bash
./target/release/lib-* "$@"
$(ls -t ./target/release/lib-* | head -1) "$@"
......@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_macros)]
macro_rules! typedef {
// int, floating point
(($vm: expr) $name: ident = mu_int($len: expr)) => {
......
......@@ -4,4 +4,4 @@ use std::ffi::CStr;
#[test]
fn test_mu_get_version() {
println!("{:?}", unsafe { CStr::from_ptr(mu_get_version()) });
}
\ No newline at end of file
}
......@@ -29,4 +29,4 @@ mod test_mem_inst;
mod test_inline;
mod test_convop;
mod test_int128;
mod test_misc;
\ No newline at end of file
mod test_misc;
......@@ -114,3 +114,57 @@ fn truncate_then_call() -> VM {
vm
}
#[test]
fn test_bitcast_f32_to_u32() {
let lib = linkutils::aot::compile_fnc("bitcast_f32_to_u32", &bitcast_f32_to_u32);
unsafe {
use std::f32;
let bitcast_f32_to_u32: libloading::Symbol<unsafe extern "C" fn(f32) -> u32> =
lib.get(b"bitcast_f32_to_u32").unwrap();
let res = bitcast_f32_to_u32(f32::MAX);
println!("bitcast_f32_to_u32(f32::MAX) = {}", res);
assert!(res == 2139095039u32);
let res = bitcast_f32_to_u32(3.1415926f32);
println!("bitcast_f32_to_u32(PI) = {}", res);
assert!(res == 1078530010u32);
}
}
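The expected constants can be double-checked outside Zebu with plain Rust, since `f32::to_bits` performs the same reinterpreting bitcast (a quick sanity sketch, not part of the test suite):
```rust
fn main() {
    // f32::MAX has bit pattern 0x7F7F_FFFF
    assert_eq!(f32::MAX.to_bits(), 2139095039u32);
    // 3.1415926f32 rounds to the f32 with bit pattern 0x4049_0FDA
    assert_eq!(3.1415926f32.to_bits(), 1078530010u32);
    println!("bit patterns match the expected test values");
}
```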
fn bitcast_f32_to_u32() -> VM {
let vm = VM::new();
typedef! ((vm) float = mu_float);
typedef! ((vm) u32 = mu_int(32));
funcsig! ((vm) sig = (float) -> (u32));
funcdecl! ((vm) <sig> bitcast_f32_to_u32);
funcdef! ((vm) <sig> bitcast_f32_to_u32 VERSION bitcast_f32_to_u32_v1);
// blk entry
block! ((vm, bitcast_f32_to_u32_v1) blk_entry);
ssa! ((vm, bitcast_f32_to_u32_v1) <float> f);
ssa! ((vm, bitcast_f32_to_u32_v1) <u32> i);
inst! ((vm, bitcast_f32_to_u32_v1) blk_entry_bitcast:
i = CONVOP (ConvOp::BITCAST) <float u32> f
);
inst! ((vm, bitcast_f32_to_u32_v1) blk_entry_ret:
RET (i)
);
define_block!((vm, bitcast_f32_to_u32_v1) blk_entry(f) {
blk_entry_bitcast, blk_entry_ret
});
define_func_ver!((vm) bitcast_f32_to_u32_v1 (entry: blk_entry) {
blk_entry
});
vm
}
......@@ -875,3 +875,113 @@ fn fp_arraysum() -> VM {
vm
}
#[test]
fn test_double_to_float() {
let lib = linkutils::aot::compile_fnc("double_to_float", &double_to_float);
unsafe {
use std::f64;
let double_to_float: libloading::Symbol<unsafe extern "C" fn(f64) -> f32> =
lib.get(b"double_to_float").unwrap();
let res = double_to_float(0f64);
println!("double_fo_float(0) = {}", res);
assert!(res == 0f32);
let res = double_to_float(1f64);
println!("double_fo_float(1) = {}", res);
assert!(res == 1f32);
let res = double_to_float(f64::MAX);
println!("double_to_float(f64::MAX) = {}", res);
assert!(res.is_infinite());
}
}
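The `f64::MAX` expectation mirrors Rust's own narrowing-cast semantics, where an out-of-range value truncates to infinity (again a sanity sketch independent of Zebu):
```rust
fn main() {
    // Narrowing f64 -> f32 maps out-of-range values to infinity,
    // matching the FPTRUNC expectation in the test above.
    assert!((f64::MAX as f32).is_infinite());
    assert_eq!(1f64 as f32, 1f32);
}
```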
fn double_to_float() -> VM {
let vm = VM::new();
typedef! ((vm) double = mu_double);
typedef! ((vm) float = mu_float);
funcsig! ((vm) sig = (double) -> (float));
funcdecl! ((vm) <sig> double_to_float);
funcdef! ((vm) <sig> double_to_float VERSION double_to_float_v1);
// blk entry
block! ((vm, double_to_float_v1) blk_entry);
ssa! ((vm, double_to_float_v1) <double> d);
ssa! ((vm, double_to_float_v1) <float> f);
inst! ((vm, double_to_float_v1) blk_entry_fptrunc:
f = CONVOP (ConvOp::FPTRUNC) <double float> d
);
inst! ((vm, double_to_float_v1) blk_entry_ret:
RET (f)
);
define_block!((vm, double_to_float_v1) blk_entry(d) {
blk_entry_fptrunc, blk_entry_ret
});
define_func_ver!((vm) double_to_float_v1 (entry: blk_entry) {
blk_entry
});
vm
}
#[test]
fn test_float_to_double() {
let lib = linkutils::aot::compile_fnc("float_to_double", &float_to_double);
unsafe {
let float_to_double: libloading::Symbol<unsafe extern "C" fn(f32) -> f64> =
lib.get(b"float_to_double").unwrap();
let res = float_to_double(0f32);
println!("float_to_double(0) = {}", 0);
assert!(res == 0f64);
let res = float_to_double(1f32);
println!("float_to_double(1) = {}", 0);
assert!(res == 1f64);
}
}
fn float_to_double() -> VM {
let vm = VM::new();
typedef! ((vm) double = mu_double);
typedef! ((vm) float = mu_float);
funcsig! ((vm) sig = (float) -> (double));
funcdecl! ((vm) <sig> float_to_double);
funcdef! ((vm) <sig> float_to_double VERSION float_to_double_v1);
// blk entry
block! ((vm, float_to_double_v1) blk_entry);
ssa! ((vm, float_to_double_v1) <double> d);
ssa! ((vm, float_to_double_v1) <float> f);
inst! ((vm, float_to_double_v1) blk_entry_fpext:
d = CONVOP (ConvOp::FPEXT) <float double> f
);
inst! ((vm, float_to_double_v1) blk_entry_ret:
RET (d)
);
define_block!((vm, float_to_double_v1) blk_entry(f) {
blk_entry_fpext, blk_entry_ret
});
define_func_ver!((vm) float_to_double_v1 (entry: blk_entry) {
blk_entry
});
vm
}
......@@ -24,12 +24,6 @@ macro_rules! assert_type (
)
);
macro_rules! println_type (
($test:expr) => (
println!("{}", $test)
)
);
/// create one of each MuType
fn create_types() -> Vec<P<MuType>> {
let mut types = vec![];
......
......@@ -141,9 +141,8 @@ def fncptr_from_py_script(py_fnc, heapinit_fnc, name, argtypes=[], restype=ctype
# load libmu before rffi so that it is loaded with RTLD_GLOBAL
libmu = preload_libmu()
loglvl = os.environ.get('MU_LOG_LEVEL', 'none')
emit_dir = kwargs.get('muemitdir', os.environ.get('MU_EMIT_DIR', 'emit'))
mu = rmu.MuVM("--log-level=%(loglvl)s --aot-emit-dir=%(emit_dir)s" % locals())
mu = rmu.MuVM("--aot-emit-dir=%(emit_dir)s" % locals())
ctx = mu.new_context()
bldr = ctx.new_ir_builder()
......
......@@ -123,3 +123,24 @@ def test_stack_pass_and_return():
}
""", "test_stack_pass_and_return");
assert(execute("test_stack_pass_and_return") == 44);
def test_double_inline():
lib = load_bundle(
"""
.funcsig new_sig = ()->(ref<void>)
.funcdef new_void <new_sig>
{
entry():
//res = NEW <ref<void>>
res = CCALL #DEFAULT <ufuncptr<new_sig> new_sig> <ufuncptr<new_sig>>EXTERN "malloc"()
RET res
}
.funcdef double_inline <()->(ref<void> ref<void>)>
{
entry():
a = CALL <()->(ref<void>)> new_void()
b = CALL <()->(ref<void>)> new_void()
RET (a b)
}
""", "test_double_inline");