...
 
#![allow(unused_imports)]
use ast::ir::*;
use ast::inst::Instruction_::*;
use vm::VM;
use std::any::Any;
use compiler::CompilerPass;
......@@ -23,4 +27,8 @@ impl CompilerPass for InstructionSelection {
fn start_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
debug!("{}", self.name());
}
fn as_any(&self) -> &Any {
self
}
}
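// Hedged sketch (not part of the diff): `as_any` lets code holding a `&CompilerPass`
// trait object recover the concrete pass via `Any::downcast_ref`. The caller shown
// here is illustrative only.
#[allow(dead_code)]
fn downcast_pass_example(pass: &CompilerPass) {
    if let Some(_inst_sel) = pass.as_any().downcast_ref::<InstructionSelection>() {
        // the pass is the instruction selector; arch-specific handling could go here
    }
}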
pub mod inst_sel;
pub const GPR_COUNT : usize = 16;
pub const FPR_COUNT : usize = 16;
......@@ -11,6 +11,7 @@ use compiler::backend::{Reg, Mem};
use compiler::backend::x86_64::check_op_len;
use compiler::machine_code::MachineCode;
use vm::VM;
use vm::target::*;
use runtime::ValueLocation;
use utils::vec_utils;
......@@ -2805,26 +2806,17 @@ impl CodeGenerator for ASMCodeGen {
self.add_asm_branch2(asm, dest_name);
}
#[cfg(target_os = "macos")]
fn emit_call_near_rel32(&mut self, callsite: String, func: MuName, pe: Option<MuName>) -> ValueLocation {
fn emit_call_near_rel32(&mut self, callsite: String, mut func: MuName, pe: Option<MuName>) -> ValueLocation {
trace!("emit: call {}", func);
let asm = format!("call {}", symbol(func));
self.add_asm_call(asm, pe);
let callsite_symbol = symbol(callsite.clone());
self.add_asm_symbolic(directive_globl(callsite_symbol.clone()));
self.add_asm_symbolic(format!("{}:", callsite_symbol.clone()));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
}
#[cfg(target_os = "linux")]
// generating Position-Independent Code using PLT
fn emit_call_near_rel32(&mut self, callsite: String, func: MuName, pe: Option<MuName>) -> ValueLocation {
trace!("emit: call {}", func);
let func = func + "@PLT";
if is_target_os(TargetOS::MacOS) {
// nothing special
} else if is_target_os(TargetOS::Linux) {
// generating Position-Independent Code using PLT
func = func + "@PLT";
} else {
unimplemented!()
}
let asm = format!("call {}", symbol(func));
self.add_asm_call(asm, pe);
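// Illustrative sketch of the resulting assembly per target (assuming `symbol()` behaves
// as redefined later in this diff: plain names on Linux, a leading underscore on macOS).
// The function name is made up for the example.
#[allow(dead_code)]
fn expected_call_text(func: &str, target_is_linux: bool) -> String {
    if target_is_linux {
        format!("call {}@PLT", func) // PIC call through the PLT, e.g. "call foo@PLT"
    } else {
        format!("call _{}", func)    // macOS symbol mangling, e.g. "call _foo"
    }
}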
......@@ -3231,23 +3223,25 @@ fn write_const_min_align(f: &mut File) {
write_align(f, MIN_ALIGN);
}
#[cfg(target_os = "linux")]
fn write_align(f: &mut File, align: ByteSize) {
use std::io::Write;
f.write_fmt(format_args!("\t.align {}\n", check_min_align(align))).unwrap();
}
#[cfg(target_os = "macos")]
fn write_align(f: &mut File, align: ByteSize) {
use std::io::Write;
use vm::target::*;
let align = check_min_align(align);
let mut n = 0;
while 2usize.pow(n) < align {
n += 1;
}
assert!(2usize.pow(n) == align, "alignment needs to be power of 2, alignment is {}", align);
if is_target_os(TargetOS::Linux) {
f.write_fmt(format_args!("\t.align {}\n", check_min_align(align))).unwrap();
} else if is_target_os(TargetOS::MacOS) {
use utils::math;
f.write_fmt(format_args!("\t.align {}\n", n)).unwrap();
let align = check_min_align(align);
let n = match math::is_power_of_two(align) {
Some(n) => n,
None => panic!("alignment needs to be power of 2, alignment is {}", align)
};
f.write_fmt(format_args!("\t.align {}\n", n)).unwrap();
} else {
unimplemented!()
}
}
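// Hedged sketch: the macOS branch above assumes `utils::math::is_power_of_two(align)`
// returns `Some(n)` with `align == 2^n` (macOS `.align` takes the exponent, Linux takes
// the byte count) and `None` otherwise. A stand-alone helper with that contract could
// look like this; it mirrors the assumed signature, not necessarily the real one.
#[allow(dead_code)]
fn is_power_of_two_sketch(x: usize) -> Option<u32> {
    if x != 0 && x & (x - 1) == 0 {
        Some(x.trailing_zeros()) // e.g. 16 -> Some(4), emitted as ".align 4" on macOS
    } else {
        None                     // e.g. 24 -> None: not a valid alignment
    }
}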
fn write_const(f: &mut File, constant: P<Value>, loc: P<Value>) {
......@@ -3494,24 +3488,29 @@ fn directive_comm(name: String, size: ByteSize, align: ByteSize) -> String {
format!(".comm {},{},{}", name, size, align)
}
#[cfg(target_os = "linux")]
pub fn symbol(name: String) -> String {
name
}
#[cfg(target_os = "macos")]
pub fn symbol(name: String) -> String {
format!("_{}", name)
}
use vm::target::*;
#[allow(dead_code)]
#[cfg(target_os = "linux")]
pub fn pic_symbol(name: String) -> String {
format!("{}@GOTPCREL", name)
if is_target_os(TargetOS::Linux) {
name
} else if is_target_os(TargetOS::MacOS) {
format!("_{}", name)
} else {
unimplemented!()
}
}
#[allow(dead_code)]
#[cfg(target_os = "macos")]
pub fn pic_symbol(name: String) -> String {
symbol(name)
use vm::target::*;
if is_target_os(TargetOS::Linux) {
format!("{}@GOTPCREL", name)
} else if is_target_os(TargetOS::MacOS) {
symbol(name)
} else {
unimplemented!()
}
}
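// Illustrative only: what the two runtime-checked helpers above are expected to produce
// once the target globals are set via `set_arch_config`. The helper below just spells
// out both cases side by side; it is not part of the patch.
#[allow(dead_code)]
fn symbol_mangling_cases(name: &str, target_is_linux: bool) -> (String, String) {
    if target_is_linux {
        // (symbol, pic_symbol) on Linux: raw name, GOT-relative name for PIC
        (name.to_string(), format!("{}@GOTPCREL", name))
    } else {
        // (symbol, pic_symbol) on macOS: both just carry the leading underscore
        (format!("_{}", name), format!("_{}", name))
    }
}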
use compiler::machine_code::CompiledFunction;
......
......@@ -22,6 +22,8 @@ use compiler::backend::RegGroup;
use utils::LinkedHashMap;
use std::collections::HashMap;
pub const STACK_ALIGNMENT : usize = 16;
macro_rules! GPR_ALIAS {
($alias: ident: ($id64: expr, $r64: ident) -> $r32: ident, $r16: ident, $r8l: ident, $r8h: ident) => {
lazy_static!{
......
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::inst_sel::*;
// include this module when building for AOT, or for JIT on an x86_64 host
#[cfg(any(feature = "aot", all(feature = "jit", target_arch = "x86_64")))]
pub use compiler::backend::x86_64::inst_sel as x86_64;
#[cfg(any(feature = "aot", all(feature = "jit", target_arch = "arm")))]
pub use compiler::backend::arm::inst_sel as arm;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::inst_sel::x86_64 as default;
#[cfg(target_arch = "arm")]
pub use compiler::backend::arm::inst_sel::*;
pub use compiler::backend::inst_sel::arm as default;
\ No newline at end of file
......@@ -9,9 +9,6 @@ use utils::math::align_up;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
pub type Word = usize;
pub const WORD_SIZE : ByteSize = 8;
pub const AOT_EMIT_CONTEXT_FILE : &'static str = "context.s";
// this is not the full name; the prologue/epilogue block names are generated from it
......@@ -21,46 +18,35 @@ pub const EPILOGUE_BLOCK_NAME: &'static str = "epilogue";
pub type Reg<'a> = &'a P<Value>;
pub type Mem<'a> = &'a P<Value>;
// X86_64
#[cfg(target_arch = "x86_64")]
#[path = "arch/x86_64/mod.rs"]
pub mod x86_64;
#[path = "arch/arm/mod.rs"]
pub mod arm;
// X86_64
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_aliased;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_color_for_precolored;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_regs_in_group;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_all_regs;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_regs;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_usable_regs;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::pick_group_for_reg;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_callee_saved;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_code;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context_with_reloc;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::spill_rewrite;
pub use compiler::backend::x86_64 as default;
// ARM
#[cfg(target_arch = "arm")]
#[path = "arch/arm/mod.rs"]
mod arm;
pub use compiler::backend::arm as default;
// expected interface
pub use compiler::backend::default::estimate_insts_for_ir;
pub use compiler::backend::default::init_machine_regs_for_func;
pub use compiler::backend::default::is_aliased;
pub use compiler::backend::default::get_color_for_precolored;
pub use compiler::backend::default::number_of_regs_in_group;
pub use compiler::backend::default::number_of_all_regs;
pub use compiler::backend::default::all_regs;
pub use compiler::backend::default::all_usable_regs;
pub use compiler::backend::default::pick_group_for_reg;
pub use compiler::backend::default::is_callee_saved;
pub use compiler::backend::default::emit_code;
pub use compiler::backend::default::emit_context;
pub use compiler::backend::default::emit_context_with_reloc;
pub use compiler::backend::default::spill_rewrite;
// common data structure with target specific info
......
......@@ -13,6 +13,8 @@ use compiler::backend::reg_alloc::validate::exact_liveness::*;
const VERIFY_SPILLING : bool = false;
#[allow(unreachable_code)]
#[allow(unused_variables)]
pub fn validate_regalloc(cf: &CompiledFunction,
reg_assigned: LinkedHashMap<MuID, MuID>,
spill_scratch_regs: LinkedHashMap<MuID, MuID>)
......@@ -372,19 +374,24 @@ fn validate_spill_load(scratch_temp: MuID, source_temp: MuID, spill_loc: P<Value
}
}
#[cfg(target_arch = "x86_64")]
fn add_machine_specific_regs_at_func_start(alive: &mut AliveEntries) {
use compiler::backend::x86_64;
// RIP, RSP, RBP always have valid values
alive.new_alive_reg(x86_64::RIP.id());
alive.new_alive_reg(x86_64::RSP.id());
alive.new_alive_reg(x86_64::RBP.id());
// callee saved regs are alive
alive.new_alive_reg(x86_64::RBX.id());
alive.new_alive_reg(x86_64::R12.id());
alive.new_alive_reg(x86_64::R13.id());
alive.new_alive_reg(x86_64::R14.id());
alive.new_alive_reg(x86_64::R15.id());
use vm::target::*;
if is_target_arch(TargetArch::X86_64) {
use compiler::backend::x86_64;
// RIP, RSP, RBP always have valid values
alive.new_alive_reg(x86_64::RIP.id());
alive.new_alive_reg(x86_64::RSP.id());
alive.new_alive_reg(x86_64::RBP.id());
// callee saved regs are alive
alive.new_alive_reg(x86_64::RBX.id());
alive.new_alive_reg(x86_64::R12.id());
alive.new_alive_reg(x86_64::R13.id());
alive.new_alive_reg(x86_64::R14.id());
alive.new_alive_reg(x86_64::R15.id());
} else {
unimplemented!()
}
}
\ No newline at end of file
......@@ -6,6 +6,9 @@ use runtime::ValueLocation;
use std::fmt;
use std::collections::HashMap;
use vm::VM;
use vm::target::*;
use compiler::backend;
// | previous frame ...
// |---------------
......@@ -59,15 +62,25 @@ impl Frame {
}
}
#[cfg(target_arch = "x86_64")]
#[cfg(feature = "aot")]
pub fn cur_size(&self) -> usize {
// frame size is a multiple of 16 bytes
let size = self.cur_offset.abs() as usize;
if is_target_arch(TargetArch::X86_64) {
self.aligned_size(backend::x86_64::STACK_ALIGNMENT)
} else {
unimplemented!()
}
}
// align size to a multiple of 16 bytes
let size = (size + 16 - 1) & !(16 - 1);
#[cfg(feature = "jit")]
pub fn cur_size(&self) -> usize {
self.aligned_size(backend::default::STACK_ALIGNMENT)
}
fn aligned_size(&self, align: usize) -> usize {
let size = self.cur_offset.abs() as usize;
let size = (size + align - 1) & !(align - 1);
debug_assert!(size % 16 == 0);
debug_assert!(size % align == 0);
size
}
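// Worked example of the rounding above (illustrative): with x86_64's STACK_ALIGNMENT of
// 16, a raw frame size of 40 bytes becomes (40 + 15) & !15 == 48. The mask trick only
// works for power-of-two alignments.
#[allow(dead_code)]
fn align_up_sketch(size: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}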
......@@ -103,17 +116,12 @@ impl Frame {
self.exception_callsites.push((callsite, dest));
}
#[cfg(target_arch = "x86_64")]
fn alloc_slot(&mut self, val: &P<Value>, vm: &VM) -> &FrameSlot {
// RBP is 16 bytes aligned, we are offsetting from RBP
// every value should be properly aligned
let backendty = vm.get_backend_type_info(val.ty.id());
if backendty.alignment > 16 {
unimplemented!()
}
self.cur_offset -= backendty.size as isize;
{
......@@ -151,15 +159,38 @@ impl fmt::Display for FrameSlot {
}
impl FrameSlot {
#[cfg(feature = "aot")]
pub fn make_memory_op(&self, ty: P<MuType>, vm: &VM) -> P<Value> {
if is_target_arch(TargetArch::X86_64) {
use compiler::backend::x86_64;
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(),
v: Value_::Memory(
MemoryLocation::Address {
base: x86_64::RBP.clone(),
offset: Some(Value::make_int_const(vm.next_id(), self.offset as u64)),
index: None,
scale: None
}
)
})
} else {
unimplemented!()
}
}
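// Illustrative sketch (offset made up): the MemoryLocation built above addresses a frame
// slot as base RBP plus a byte offset, with no index or scale. Assuming GNU-assembler
// (AT&T) syntax, such an operand would be rendered roughly as "-24(%rbp)".
#[allow(dead_code)]
fn frame_slot_operand_text(offset: isize) -> String {
    format!("{}(%rbp)", offset)
}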
#[cfg(feature = "jit")]
#[cfg(target_arch = "x86_64")]
pub fn make_memory_op(&self, ty: P<MuType>, vm: &VM) -> P<Value> {
use compiler::backend::x86_64;
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty.clone(),
v: Value_::Memory(
MemoryLocation::Address {
base: x86_64::RBP.clone(),
offset: Some(Value::make_int_const(vm.next_id(), self.offset as u64)),
index: None,
......
......@@ -65,29 +65,6 @@ impl CompilerPolicy {
}
}
impl Default for CompilerPolicy {
fn default() -> Self {
let mut passes : Vec<Box<CompilerPass>> = vec![];
passes.push(Box::new(passes::Inlining::new()));
// ir level passes
passes.push(Box::new(passes::DefUse::new()));
passes.push(Box::new(passes::TreeGen::new()));
passes.push(Box::new(passes::GenMovPhi::new()));
passes.push(Box::new(passes::ControlFlowAnalysis::new()));
passes.push(Box::new(passes::TraceGen::new()));
// compilation
passes.push(Box::new(backend::inst_sel::InstructionSelection::new()));
passes.push(Box::new(backend::reg_alloc::RegisterAllocation::new()));
// machine code level passes
passes.push(Box::new(backend::peephole_opt::PeepholeOptimization::new()));
passes.push(Box::new(backend::code_emission::CodeEmission::new()));
CompilerPolicy{passes: passes}
}
}
// rewrite parts of the hprof crates to print via log (instead of print!())
use self::hprof::ProfileNode;
use std::rc::Rc;
......
use utils;
use ast::ir::*;
use vm::VM;
use vm::target::*;
use compiler::backend::RegGroup;
use utils::Address;
use utils::Word;
use std::fmt;
use std::os::raw::c_int;
......@@ -159,15 +160,20 @@ impl ValueLocation {
}
#[allow(unused_variables)]
pub fn from_constant_le(c: Constant) -> ValueLocation {
match c {
Constant::Int(int_val) => ValueLocation::Constant(RegGroup::GPR, utils::mem::u64_to_raw_le(int_val)),
Constant::Float(f32_val) => ValueLocation::Constant(RegGroup::FPR, utils::mem::f32_to_raw_le(f32_val)),
Constant::Double(f64_val) => ValueLocation::Constant(RegGroup::FPR, utils::mem::f64_to_raw_le(f64_val)),
_ => unimplemented!()
}
}
#[allow(unused_variables)]
pub fn from_constant_be(c: Constant) -> ValueLocation {
unimplemented!()
}
pub fn to_address(&self) -> Address {
match self {
......@@ -211,18 +217,23 @@ pub extern fn mu_main(serialized_vm : *const c_char, argc: c_int, argv: *const *
// create mu stack
let stack = vm.new_stack(primordial.func_id);
let from_constant = match vm.vm_options.flag_target_arch {
TargetArch::X86_64 => ValueLocation::from_constant_le,
_ => unimplemented!()
};
// if the primordial named some const arguments, we use the const args
// otherwise we push 'argc' and 'argv' to new stack
let args : Vec<ValueLocation> = if primordial.has_const_args {
primordial.args.iter().map(|arg| ValueLocation::from_constant(arg.clone())).collect()
primordial.args.iter().map(|arg| from_constant(arg.clone())).collect()
} else {
let mut args = vec![];
// 1st arg: argc
args.push(ValueLocation::from_constant(Constant::Int(argc as u64)));
args.push(from_constant(Constant::Int(argc as u64)));
// 2nd arg: argv
args.push(ValueLocation::from_constant(Constant::Int(argv as u64)));
args.push(from_constant(Constant::Int(argv as u64)));
args
};
......
......@@ -20,9 +20,4 @@ void set_thread_local(void* thread) {
void* muentry_get_thread_local() {
// printf("Thread%p: getting mu_tls as %p\n", (void*) pthread_self(), mu_tls);
return mu_tls;
}
void* resolve_symbol(const char* sym) {
// printf("%s\n", sym);
return dlsym(RTLD_DEFAULT, sym);
}
}
\ No newline at end of file
......@@ -4,9 +4,11 @@ use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use vm::VM;
use vm::target::*;
use runtime;
use runtime::ValueLocation;
use runtime::mm;
use vm::target;
use utils::ByteSize;
use utils::Address;
......@@ -22,9 +24,6 @@ use std::fmt;
pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb
#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE : ByteSize = (4 << 10); // 4kb
impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);
......@@ -58,24 +57,25 @@ pub struct MuStack {
impl MuStack {
pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
let page_size = target::page_size();
let total_size = page_size * 2 + STACK_SIZE;
let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
Ok(m) => m,
Err(_) => panic!("failed to mmap for a stack"),
};
let mmap_start = Address::from_ptr(anon_mmap.ptr());
debug_assert!(mmap_start.is_aligned_to(page_size));
let overflow_guard = mmap_start;
let lower_bound = mmap_start.plus(page_size);
let upper_bound = lower_bound.plus(STACK_SIZE);
let underflow_guard = upper_bound;
unsafe {
memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(), page_size, memsec::Prot::NoAccess);
memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), page_size, memsec::Prot::NoAccess);
}
debug!("creating stack {} with entry func {:?}", id, func);
......@@ -103,65 +103,66 @@ impl MuStack {
mmap: Some(anon_mmap)
}
}
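// Layout sketch for the region mapped above (illustrative): one inaccessible guard page
// below the stack, STACK_SIZE usable bytes, one inaccessible guard page above, all using
// the runtime page size from vm::target.
#[allow(dead_code)]
fn stack_region_bounds(mmap_start: usize, page_size: usize, stack_size: usize)
                       -> (usize, usize, usize, usize) {
    let overflow_guard = mmap_start;          // protected: faults on stack overflow
    let lower_bound = mmap_start + page_size; // lowest usable stack address
    let upper_bound = lower_bound + stack_size;
    let underflow_guard = upper_bound;        // protected: faults on stack underflow
    (overflow_guard, lower_bound, upper_bound, underflow_guard)
}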
#[cfg(target_arch = "x86_64")]
pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
use compiler::backend::Word;
use compiler::backend::WORD_SIZE;
use compiler::backend::RegGroup;
use compiler::backend::x86_64;
let mut gpr_used = vec![];
let mut fpr_used = vec![];
for i in 0..vals.len() {
let ref val = vals[i];
let (reg_group, word) = val.load_value();
match reg_group {
RegGroup::GPR => gpr_used.push(word),
RegGroup::FPR => fpr_used.push(word),
}
}
let mut stack_ptr = self.sp;
for i in 0..x86_64::ARGUMENT_FPRs.len() {
stack_ptr = stack_ptr.sub(WORD_SIZE);
let val = {
if i < fpr_used.len() {
fpr_used[i]
} else {
0 as Word
}
};
debug!("store {} to {}", val, stack_ptr);
unsafe {stack_ptr.store(val);}
}
for i in 0..x86_64::ARGUMENT_GPRs.len() {
stack_ptr = stack_ptr.sub(WORD_SIZE);
let val = {
if i < gpr_used.len() {
gpr_used[i]
} else {
0 as Word
if is_target_arch(TargetArch::X86_64) {
use utils::{Word, WORD_SIZE};
use compiler::backend::RegGroup;
use compiler::backend::x86_64;
let mut gpr_used = vec![];
let mut fpr_used = vec![];
for i in 0..vals.len() {
let ref val = vals[i];
let (reg_group, word) = val.load_value();
match reg_group {
RegGroup::GPR => gpr_used.push(word),
RegGroup::FPR => fpr_used.push(word),
}
};
debug!("store {} to {}", val, stack_ptr);
unsafe {stack_ptr.store(val);}
}
}
// save it back
self.sp = stack_ptr;
self.print_stack(Some(20));
let mut stack_ptr = self.sp;
for i in 0..x86_64::ARGUMENT_FPRs.len() {
stack_ptr = stack_ptr.sub(WORD_SIZE);
let val = {
if i < fpr_used.len() {
fpr_used[i]
} else {
0 as Word
}
};
debug!("store {} to {}", val, stack_ptr);
unsafe { stack_ptr.store(val); }
}
for i in 0..x86_64::ARGUMENT_GPRs.len() {
stack_ptr = stack_ptr.sub(WORD_SIZE);
let val = {
if i < gpr_used.len() {
gpr_used[i]
} else {
0 as Word
}
};
debug!("store {} to {}", val, stack_ptr);
unsafe { stack_ptr.store(val); }
}
// save it back
self.sp = stack_ptr;
self.print_stack(Some(20));
} else {
unimplemented!()
}
}
pub fn print_stack(&self, n_entries: Option<usize>) {
use utils::{Word, WORD_SIZE};
let mut cursor = self.upper_bound.sub(WORD_SIZE);
let mut count = 0;
......@@ -234,16 +235,12 @@ impl fmt::Display for MuThread {
}
}
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
extern "C" {
pub fn set_thread_local(thread: *mut MuThread);
pub fn muentry_get_thread_local() -> Address;
}
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
......
......@@ -137,6 +137,8 @@ pub fn link_primordial (funcs: Vec<MuName>, out: &str, vm: &VM) -> PathBuf {
// include the primordial C main
ret.push(dest);
// build mu static lib
// include mu static lib
let libmu_path = if cfg!(debug_assertions) {
"target/debug/libmu.a"
......
......@@ -63,21 +63,23 @@ pub fn get_path_under_mu(str: &'static str) -> PathBuf {
}
}
#[cfg(target_os = "macos")]
pub fn get_dylib_name(name: &'static str) -> String {
format!("lib{}.dylib", name)
}
#[cfg(target_os = "linux")]
pub fn get_dylib_name(name: &'static str) -> String {
format!("lib{}.so", name)
use vm::target::*;
if is_target_os(TargetOS::MacOS) {
format!("lib{}.dylib", name)
} else if is_target_os(TargetOS::Linux) {
format!("lib{}.so", name)
} else {
unimplemented!()
}
}
pub fn compile_fnc<'a>(fnc_name: &'static str, build_fnc: &'a Fn() -> VM) -> ll::Library {
VM::start_logging_trace();
let vm = Arc::new(build_fnc());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of(fnc_name);
{
let funcs = vm.funcs().read().unwrap();
......@@ -108,7 +110,7 @@ pub fn compile_fncs<'a>(entry: &'static str, fnc_names: Vec<&'static str>, build
VM::start_logging_trace();
let vm = Arc::new(build_fnc());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
for func in fnc_names.iter() {
let func_id = vm.id_of(func);
......
......@@ -4,13 +4,12 @@ extern crate rustc_serialize;
pub type BitSize = usize;
pub type ByteOffset = isize;
pub type ByteSize = usize;
// assumption: we only care about 64-bit machines
pub type Word = u64;
pub const LOG_POINTER_SIZE : usize = 3;
pub const POINTER_SIZE : ByteSize = 1 << LOG_POINTER_SIZE;
pub const WORD_SIZE : ByteSize = POINTER_SIZE;
pub mod mem;
......
......@@ -5,30 +5,26 @@ use Word;
#[allow(unused_imports)]
use byteorder::{LittleEndian, BigEndian, ReadBytesExt, WriteBytesExt, ByteOrder};
#[cfg(target_arch = "x86_64")]
pub fn u64_to_raw(val: u64) -> Word {
pub fn u64_to_raw_le(val: u64) -> Word {
let mut ret = vec![];
ret.write_u64::<LittleEndian>(val).unwrap();
as_word(ret)
as_word_le(ret)
}
#[cfg(target_arch = "x86_64")]
pub fn f32_to_raw(val: f32) -> Word {
pub fn f32_to_raw_le(val: f32) -> Word {
let mut ret = vec![];
ret.write_f32::<LittleEndian>(val).unwrap();
as_word(ret)
as_word_le(ret)
}
#[cfg(target_arch = "x86_64")]
pub fn f64_to_raw(val: f64) -> Word {
pub fn f64_to_raw_le(val: f64) -> Word {
let mut ret = vec![];
ret.write_f64::<LittleEndian>(val).unwrap();
as_word(ret)
as_word_le(ret)
}
#[cfg(target_arch = "x86_64")]
pub fn as_word(mut u8_array: Vec<u8>) -> Word {
fn as_word_le(mut u8_array: Vec<u8>) -> Word {
LittleEndian::read_uint(&mut u8_array, 8) as Word
}
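// Worked examples (illustrative, not part of the patch): on a little-endian target the
// integer keeps its numeric value through the raw round trip, and a float becomes its
// IEEE-754 bit pattern (equivalent to the standard library's to_bits).
#[allow(dead_code)]
fn le_conversion_examples() {
    assert_eq!(u64_to_raw_le(0x0102), 0x0102);
    assert_eq!(f64_to_raw_le(1.0), 1.0f64.to_bits()); // 0x3FF0_0000_0000_0000
}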
......
......@@ -80,7 +80,7 @@ impl MuVM {
use compiler::*;
use testutil::aot;
let compiler = Compiler::new(CompilerPolicy::default(), &self.vm);
let compiler = Compiler::new(self.vm.default_compiler_policy(), &self.vm);
let funcs = self.vm.funcs().read().unwrap();
let mut func_names = vec![];
// NOTE: this fails because load() API call is not properly implemented yet.
......
mod vm;
mod vm_options;
pub mod target;
pub mod api;
pub mod handle;
......
pub use vm::vm_options::TargetArch;
pub use vm::vm_options::TargetOS;
use vm::VMOptions;
use utils::ByteSize;
pub fn set_arch_config(opt: &VMOptions) {
TARGET_ARCH.store(opt.flag_target_arch as usize, Ordering::Relaxed);
TARGET_OS.store(opt.flag_target_os as usize, Ordering::Relaxed);
}
// provide a way to get the target arch/os without holding a reference to the VM
use std::sync::atomic::Ordering;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
static TARGET_ARCH : AtomicUsize = ATOMIC_USIZE_INIT;
static TARGET_OS : AtomicUsize = ATOMIC_USIZE_INIT;
pub fn is_target_arch(expect_arch: TargetArch) -> bool {
let arch = TARGET_ARCH.load(Ordering::Relaxed);
arch == expect_arch as usize
}
pub fn is_target_os(expect_os: TargetOS) -> bool {
let os = TARGET_OS.load(Ordering::Relaxed);
os == expect_os as usize
}
// page size
const X86_64_PAGE_SIZE : ByteSize = (4 << 10); // 4kb
#[cfg(feature = "aot")]
pub fn page_size() -> ByteSize {
if is_target_arch(TargetArch::X86_64) {
X86_64_PAGE_SIZE
} else {
unimplemented!()
}
}
#[cfg(all(feature = "jit", target_arch = "x86_64"))]
#[inline(always)]
pub fn page_size() -> ByteSize {
X86_64_PAGE_SIZE
}
\ No newline at end of file
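// Hedged usage sketch: the VM stores the selected target into these globals once (via
// set_arch_config, called from VMOptions::init and VM initialisation in this diff), and
// backend code can then query the target without holding a VM reference.
#[allow(dead_code)]
fn target_query_example(opts: &VMOptions) {
    set_arch_config(opts);
    if is_target_arch(TargetArch::X86_64) && is_target_os(TargetOS::Linux) {
        // e.g. take the ELF/SysV-specific code paths
    }
}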
......@@ -18,6 +18,7 @@ use runtime::mm as gc;
use vm::handle::*;
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;
use vm::target::*;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use log::LogLevel;
......@@ -483,6 +484,11 @@ impl <'a> VM {
let ref options = self.vm_options;
gc::gc_init(options.flag_gc_immixspace_size, options.flag_gc_lospace_size, options.flag_gc_nthreads, !options.flag_gc_disable_collection);
}
// init target arch/os
{
set_arch_config(&self.vm_options);
}
}
fn start_logging(level: MuLogLevel) {
......@@ -557,6 +563,44 @@ impl <'a> VM {
vm
}
pub fn default_compiler_policy(&self) -> CompilerPolicy {
use compiler::*;
let mut passes : Vec<Box<CompilerPass>> = vec![];
passes.push(Box::new(passes::Inlining::new()));
// ir level passes
passes.push(Box::new(passes::DefUse::new()));
passes.push(Box::new(passes::TreeGen::new()));
passes.push(Box::new(passes::GenMovPhi::new()));
passes.push(Box::new(passes::ControlFlowAnalysis::new()));
passes.push(Box::new(passes::TraceGen::new()));
// compilation
if cfg!(feature = "aot") {
use vm::vm_options::*;
match self.vm_options.flag_target_arch {
TargetArch::X86_64 => {
passes.push(Box::new(backend::inst_sel::x86_64::InstructionSelection::new()));
}
TargetArch::ARM => {
passes.push(Box::new(backend::inst_sel::arm::InstructionSelection::new()));
}
TargetArch::Default => {
passes.push(Box::new(backend::inst_sel::default::InstructionSelection::new()));
}
}
} else {
passes.push(Box::new(backend::inst_sel::default::InstructionSelection::new()));
}
passes.push(Box::new(backend::reg_alloc::RegisterAllocation::new()));
// machine code level passes
passes.push(Box::new(backend::peephole_opt::PeepholeOptimization::new()));
passes.push(Box::new(backend::code_emission::CodeEmission::new()));
CompilerPolicy{passes: passes}
}
pub fn next_id(&self) -> MuID {
// This only needs to be atomic, and does not need to be a synchronisation operation. The
......@@ -988,7 +1032,7 @@ impl <'a> VM {
trace!("Making boot image...");
let whitelist_funcs = {
let compiler = Compiler::new(CompilerPolicy::default(), self);
let compiler = Compiler::new(self.default_compiler_policy(), self);
let funcs = self.funcs().read().unwrap();
let func_vers = self.func_vers().read().unwrap();
......
......@@ -2,9 +2,10 @@ extern crate rustc_serialize;
extern crate docopt;
use self::docopt::Docopt;
use std::default::Default;
use vm::target::set_arch_config;
const USAGE: &'static str = "
zebu (mu implementation). Pass arguments as a string to init it.
......@@ -25,6 +26,9 @@ AOT Compiler:
--bootimage-external-lib=<lib> ... library that will be linked against when making bootimage [default: ]
--bootimage-external-libpath=<path> ... path for the libraries during bootimage generation [default: ]
--target-arch=<arch> target architecture (x86_64, arm) for cross compilation [default: default]
--target-os=<os> target os (linux, macos) for cross compilation [default: default]
Garbage Collection:
--gc-disable-collection disable collection
--gc-immixspace-size=<kb> immix space size (default 65536kb = 64mb) [default: 67108864]
......@@ -47,6 +51,9 @@ pub struct VMOptions {
pub flag_bootimage_external_lib: Vec<String>,
pub flag_bootimage_external_libpath: Vec<String>,
pub flag_target_arch: TargetArch,
pub flag_target_os : TargetOS,
// GC
pub flag_gc_disable_collection: bool,
pub flag_gc_immixspace_size: usize,
......@@ -59,6 +66,21 @@ pub enum MuLogLevel {
None, Error, Warn, Info, Debug, Trace
}
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, Copy, RustcDecodable, RustcEncodable, PartialEq, Eq)]
pub enum TargetArch {
Default = 0, // same as host arch
X86_64 = 1,
ARM = 2
}
#[derive(Debug, Clone, Copy, RustcDecodable, RustcEncodable, PartialEq, Eq)]
pub enum TargetOS {
Default = 0, // same as host os
Linux = 1,
MacOS = 2
}
impl VMOptions {
pub fn init(str: &str) -> VMOptions {
info!("init vm options with: {:?}", str);
......@@ -76,8 +98,36 @@ impl VMOptions {
// always disable register validation
ret.flag_disable_regalloc_validate = true;
// target-arch/os should only be used with 'aot' build option
if !cfg!(feature = "aot") {
assert!(ret.flag_target_arch == TargetArch::Default, "--target-arch option is only supported with build option aot");
assert!(ret.flag_target_os == TargetOS::Default, "--target-os option is only supported with build option aot");
}
// if target-arch/os is default, set them depending on running platform
if ret.flag_target_arch == TargetArch::Default {
if cfg!(target_arch = "x86_64") {
ret.flag_target_arch = TargetArch::X86_64;
} else if cfg!(target_arch = "arm") {
ret.flag_target_arch = TargetArch::ARM;
}
}
if ret.flag_target_os == TargetOS::Default {
if cfg!(target_os = "linux") {
ret.flag_target_os = TargetOS::Linux;
} else if cfg!(target_os = "macos") {
ret.flag_target_os = TargetOS::MacOS;
}
}
// set globals for arch/os
set_arch_config(&ret);
ret
}
}
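// Hedged examples of the new options (the init-string format follows the existing
// VMOptions::init interface; the cross-compile string matches the test added in this
// diff).
#[allow(dead_code)]
fn target_option_examples() -> (VMOptions, VMOptions) {
    let cross = VMOptions::init("init_mu --target-arch=x86_64 --target-os=linux");
    let host = VMOptions::init("init_mu"); // target-arch/os default to the host platform
    (cross, host)
}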
impl Default for VMOptions {
......
pub mod deps {
pub use mu::ast::ptr::*;
pub use mu::ast::ir::*;
pub use mu::ast::inst::*;
pub use mu::ast::types;
pub use mu::ast::types::*;
pub use mu::vm::*;
pub use std::sync::Arc;
pub use std::sync::RwLock;
pub use mu::utils::LinkedHashMap;
}
macro_rules! typedef {
// int, floating point
(($vm: expr) $name: ident = mu_int($len: expr)) => {
......
......@@ -13,4 +13,7 @@ mod test_controlflow;
mod test_call;
mod test_mem_inst;
mod test_inline;
mod test_convop;
#[cfg(feature = "aot")]
mod test_cross_compile;
\ No newline at end of file
......@@ -22,7 +22,7 @@ fn test_allocation_fastpath() {
let vm = Arc::new(allocation_fastpath());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("allocation_fastpath");
{
......@@ -96,7 +96,7 @@ fn test_instruction_new() {
let vm = Arc::new(alloc_new());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("alloc_new");
{
......@@ -126,7 +126,7 @@ fn test_instruction_new_on_cur_thread() {
// compile
let vm = Arc::new(alloc_new());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("alloc_new");
{
let funcs = vm.funcs().read().unwrap();
......
......@@ -20,7 +20,7 @@ fn test_ccall_exit() {
let vm = Arc::new(ccall_exit());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("ccall_exit");
{
......@@ -103,7 +103,7 @@ fn test_pass_1arg_by_stack() {
VM::start_logging_trace();
let vm = Arc::new(pass_1arg_by_stack());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo7");
let func_main = vm.id_of("pass_1arg_by_stack");
......@@ -239,7 +239,7 @@ fn test_pass_2args_by_stack() {
VM::start_logging_trace();
let vm = Arc::new(pass_2args_by_stack());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo8");
let func_main = vm.id_of("pass_2args_by_stack");
......@@ -379,7 +379,7 @@ fn test_pass_2_int8_args_by_stack() {
VM::start_logging_trace();
let vm = Arc::new(pass_2_int8_args_by_stack());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo8");
let func_main = vm.id_of("pass_2_int8_args_by_stack");
......@@ -528,7 +528,7 @@ fn test_pass_mixed_args_by_stack() {
VM::start_logging_trace();
let vm = Arc::new(pass_mixed_args_by_stack());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo8");
let func_main = vm.id_of("pass_mixed_args_by_stack");
......
use ir_macros::deps::*;
#[test]
#[cfg(feature = "aot")]
#[cfg(all(target_os = "macos", target_arch="x86_64"))]
fn test_cross_compile_mac_to_linux() {
VM::start_logging_trace();
let vm = Arc::new(VM::new_with_opts("init_mu --target-arch=x86_64 --target-os=linux"));
build_dummy_main(&vm);
let dummy_main_id = vm.id_of("dummy_main");
let dummy_main_handle = vm.handle_from_func(dummy_main_id);
vm.make_boot_image(
vec![dummy_main_id], // white list
Some(&dummy_main_handle), // primordial_func
None, // primordial_stack
None, // primordial_threadlocal
vec![], vec![], // sym fields/strings
vec![], vec![], // reloc fields/strings
String::from("dummy_main-x86_64-linux")
);
}
fn build_dummy_main(vm: &VM) {
funcsig! ((vm) sig = () -> ());
funcdecl! ((vm) <sig> dummy_main);
funcdef! ((vm) <sig> dummy_main VERSION dummy_main_v1);
block! ((vm, dummy_main_v1) blk_entry);
inst! ((vm, dummy_main_v1) blk_entry_threadexit:
THREADEXIT
);
define_block!((vm, dummy_main_v1) blk_entry() {
blk_entry_threadexit
});
define_func_ver!((vm) dummy_main_v1 (entry: blk_entry) {
blk_entry
});
}
\ No newline at end of file
......@@ -21,7 +21,7 @@ fn test_exception_throw_catch_simple() {
VM::start_logging_trace();
let vm = Arc::new(throw_catch_simple());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_throw = vm.id_of("throw_exception");
let func_catch = vm.id_of("catch_exception");
......@@ -183,7 +183,7 @@ fn test_exception_throw_catch_dont_use_exception_arg() {
VM::start_logging_trace();
let vm = Arc::new(throw_catch_dont_use_exception_arg());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_throw = vm.id_of("throw_exception");
let func_catch = vm.id_of("catch_exception");
......@@ -228,7 +228,7 @@ fn test_exception_throw_catch_and_add() {
VM::start_logging_trace();
let vm = Arc::new(throw_catch_and_add());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_throw = vm.id_of("throw_exception");
let func_catch = vm.id_of("catch_and_add");
......@@ -441,7 +441,7 @@ fn test_exception_throw_catch_twice() {
VM::start_logging_trace();
let vm = Arc::new(throw_catch_twice());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_throw = vm.id_of("throw_exception");
let func_catch = vm.id_of("catch_twice");
......
......@@ -30,7 +30,7 @@ fn test_global_access() {
}
global_access(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
{
let func_id = vm.id_of("global_access");
......@@ -54,7 +54,7 @@ fn test_set_global_by_api() {
}
set_global_by_api(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("set_global_by_api");
{
......@@ -136,7 +136,7 @@ fn test_get_global_in_dylib() {
}
get_global_in_dylib(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("get_global_in_dylib");
{
......@@ -218,7 +218,7 @@ fn test_persist_linked_list() {
}
persist_linked_list(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("persist_linked_list");
{
......@@ -429,7 +429,7 @@ fn test_persist_hybrid() {
}
persist_hybrid(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("persist_hybrid");
{
......@@ -649,7 +649,7 @@ fn test_persist_funcref() {
}
persist_funcref(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_ret42_id = vm.id_of("ret42");
{
......
......@@ -17,7 +17,7 @@ fn test_instsel_fac() {
Box::new(passes::TreeGen::new()),
Box::new(passes::ControlFlowAnalysis::new()),
Box::new(passes::TraceGen::new()),
Box::new(backend::inst_sel::InstructionSelection::new())
Box::new(backend::inst_sel::default::InstructionSelection::new())
]), &vm);
let func_id = vm.id_of("fac");
......
......@@ -275,7 +275,7 @@ fn test_struct() {
let vm = Arc::new(struct_insts_macro());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("struct_insts");
{
......@@ -530,7 +530,7 @@ fn test_hybrid_fix_part() {
let vm = Arc::new(hybrid_fix_part_insts());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("hybrid_fix_part_insts");
{
......@@ -687,7 +687,7 @@ fn test_hybrid_var_part() {
let vm = Arc::new(hybrid_var_part_insts());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("hybrid_var_part_insts");
{
......
......@@ -44,7 +44,7 @@ fn test_ir_liveness_fac() {
Box::new(passes::TreeGen::new()),
Box::new(passes::ControlFlowAnalysis::new()),
Box::new(passes::TraceGen::new()),
Box::new(backend::inst_sel::InstructionSelection::new()),
Box::new(backend::inst_sel::default::InstructionSelection::new()),
]), &vm);
let func_id = vm.id_of("fac");
......@@ -97,7 +97,7 @@ fn test_spill1() {
let vm = Arc::new(create_spill1());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("spill1");
{
......@@ -211,7 +211,7 @@ fn test_simple_spill() {
let vm = Arc::new(create_simple_spill());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("simple_spill");
{
......@@ -392,7 +392,7 @@ fn test_coalesce_branch_moves() {
let vm = Arc::new(coalesce_branch_moves());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("coalesce_branch_moves");
{
......@@ -460,7 +460,7 @@ fn test_coalesce_args() {
let vm = Arc::new(coalesce_args());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("coalesce_args");
{
......@@ -520,7 +520,7 @@ fn test_coalesce_branch2_moves() {
let vm = Arc::new(coalesce_branch2_moves());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("coalesce_branch2_moves");
{
......@@ -654,7 +654,7 @@ fn test_preserve_caller_saved_simple() {
VM::start_logging_trace();
let vm = Arc::new(preserve_caller_saved_simple());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo");
let func_preserve_caller_saved_simple = vm.id_of("preserve_caller_saved_simple");
......@@ -862,7 +862,7 @@ fn test_preserve_caller_saved_call_args() {
VM::start_logging_trace();
let vm = Arc::new(preserve_caller_saved_call_args());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_foo = vm.id_of("foo6");
let func_preserve_caller_saved_simple = vm.id_of("preserve_caller_saved_call_args");
......@@ -1075,7 +1075,7 @@ fn test_spill_int8() {
let vm = Arc::new(spill_int8());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("spill_int8");
{
......
......@@ -20,7 +20,7 @@ fn test_thread_create() {
let vm = Arc::new(primordial_main());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let compiler = Compiler::new(vm.default_compiler_policy(), &vm);
let func_id = vm.id_of("primordial_main");
{
......
RPySOM @ 9f8c67fc
Subproject commit 9f8c67fc040c92597016f296910cb1d97824aa04