
Commit a5745df4 authored by qinsoon

runtime module

parent 9fd01dde
Pipeline #643 failed with stages in 11 minutes and 45 seconds
@@ -17,30 +17,19 @@ extern crate gcc;
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[cfg(target_arch = "x86_64")]
fn main() {
gcc::compile_library("libruntime.a", &["src/runtime/runtime_x64_sysv.c"]);
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_x64_sysv.c"]);
gcc::Config::new().flag("-O3").flag("-c")
.file("src/runtime/swap_stack_x64_sysv.S")
.compile("libswap_stack.a");
.file("src/runtime/runtime_asm_x64_sysv.S")
.compile("libruntime_asm.a");
}
#[cfg(target_os = "linux")]
#[cfg(target_arch = "aarch64")]
fn main() {
gcc::compile_library("libruntime.a", &["src/runtime/runtime_aarch64_sysv.c"]);
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_aarch64_sysv.c"]);
gcc::Config::new().flag("-O3").flag("-c")
.file("src/runtime/swap_stack_aarch64_sysv.S")
.compile("libswap_stack.a");
}
// This is here to enable cross compiling from windows/x86_64 to linux/aarch64
#[cfg(target_os = "windows")]
#[cfg(target_arch = "x86_64")]
fn main() {
gcc::compile_library("libruntime.a", &["src/runtime/runtime_aarch64_sysv.c"]);
gcc::Config::new().flag("-O3").flag("-c")
.file("src/runtime/swap_stack_aarch64_sysv.S")
.compile("libswap_stack.a");
.file("src/runtime/runtime_asm_aarch64_sysv.S")
.compile("libruntime_asm.a");
}
\ No newline at end of file
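For context, the gcc crate used in this build script was later renamed to cc on crates.io. A roughly equivalent build script under the newer API might look like the sketch below; this is an illustration assuming the same source layout, not this project's actual build.rs.

// build.rs sketch using the `cc` crate (the renamed successor of `gcc`);
// assumes the x86_64 SysV source layout shown above
fn main() {
    // C portion of the runtime
    cc::Build::new()
        .file("src/runtime/runtime_c_x64_sysv.c")
        .compile("runtime_c");
    // assembly portion, with the same -O3 flag as above
    cc::Build::new()
        .flag("-O3")
        .file("src/runtime/runtime_asm_x64_sysv.S")
        .compile("runtime_asm");
}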
@@ -50,74 +50,72 @@ pub mod x86_64;
/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_aliased;
/// gets color for a machine register (e.g. AH, AX, EAX all have color of RAX)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_usable_regs;
/// returns RegGroup for a machine register
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_callee_saved;
/// emits code for a function version (the function needs to be compiled first)
/// number of callee saved registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::CALLEE_SAVED_COUNT;
/// gets offset for callee saved registers (used for exception table)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_callee_saved_offset;
/// gets frame pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_frame_pointer;
/// gets return address for current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_return_address;
/// sets frame pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_previous_frame_pointer;
/// sets return address for current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_return_address;
/// gets stack pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_stack_pointer;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_code;
/// emits context (persisted VM/heap/etc), should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::spill_rewrite;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;
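To make the "color" terminology above concrete: sub-registers alias one physical register, so a register allocator treats AH, AX, EAX and RAX as a single interference node. Below is a toy, self-contained illustration of that mapping in plain std Rust; the backend's real tables and function signatures differ.

use std::collections::HashMap;

fn main() {
    // toy model: every alias of a physical register maps to one "color"
    let mut color: HashMap<&str, &str> = HashMap::new();
    for alias in ["AH", "AL", "AX", "EAX", "RAX"] {
        color.insert(alias, "RAX");
    }
    // two registers are aliased iff they share a color
    let is_aliased = |a: &str, b: &str| color.get(a) == color.get(b);
    assert!(is_aliased("AH", "EAX"));
    println!("AH and EAX share color {}", color["AH"]);
}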
/// --- aarch64 backend ---
#[cfg(target_arch = "aarch64")]
@@ -127,44 +125,33 @@ pub mod aarch64;
/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_aliased;
/// gets color for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_usable_regs;
/// returns RegGroup for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_callee_saved;
/// number of callee saved registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::CALLEE_SAVED_COUNT;
#[cfg(target_arch = "aarch64")]
@@ -179,22 +166,24 @@ pub use compiler::backend::aarch64::get_previous_stack_pointer;
pub use compiler::backend::aarch64::set_previous_frame_pointer;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_return_address;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_code;
/// emits context (persisted VM/heap/etc), should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::spill_rewrite;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_FPRS;
use vm::VM;
use ast::types::*;
@@ -55,7 +55,7 @@ lazy_static! {
jit: RwLock::new(None)
};
// impl: swap_stack_x64_macos.s
// impl: runtime_asm_ARCH_OS.s
// decl: thread.rs
pub static ref SWAP_BACK_TO_NATIVE_STACK : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
@@ -17,22 +17,24 @@ use utils::Address;
use utils::POINTER_SIZE;
use runtime::*;
// muentry_throw_exception should call this function,
// With the first argument being the address of the exception object,
// And the second argument should point to the base of the call frame of muentry_throw_exception,
// which saves every callee saved register (note this frame will be modified by this function).
// e.g. on aarch64 (where the values are the values of the registers immediately before the first instruction in muentry_throw_exception is executed):
// Return Address (value of X30)
// frame_cursor --> Frame Pointer (value of X29)
// First Callee Saved Register (value of X19)
// .........
// Last Callee Saved Register (value of D15)
// The actual offsets of the callee saved registers are determined by get_callee_saved_offset (relative to frame_cursor)
// The location of Frame Pointer and Return address is architecture dependent
// (and are accessed by get/set_return_address and get/set_previous_frame and may be passed real frame pointers or the frame cursor)
/// runtime function to deal with exception (unwind stack, find catch block, and restore)
/// This function is called by muentry_throw_exception() which gets emitted for THROW instruction
/// With the first argument being the address of the exception object,
/// And the second argument should point to the base of the call frame of muentry_throw_exception,
/// which saves every callee saved register (note this frame will be modified by this function).
/// e.g. on aarch64 (where the values are the values of the registers immediately before the first
/// instruction in muentry_throw_exception is executed):
/// Return Address (value of X30)
/// frame_cursor --> Frame Pointer (value of X29)
/// First Callee Saved Register (value of X19)
/// .........
/// Last Callee Saved Register (value of D15)
/// The actual offsets of the callee saved registers are determined by get_callee_saved_offset
/// (relative to frame_cursor)
/// The locations of the Frame Pointer and Return Address are architecture dependent
/// (and are accessed by get/set_return_address and get/set_previous_frame_pointer, and may be
/// passed real frame pointers or the frame cursor)
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Address) -> ! {
trace!("throwing exception: {}", exception_obj);
@@ -42,13 +44,16 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
}
let ref mut cur_thread = thread::MuThread::current_mut();
// set exception object
// set exception object (the catch block will have a landing pad to fetch this object)
cur_thread.exception_obj = exception_obj;
let ref vm = cur_thread.vm;
let mut current_frame_pointer = frame_cursor; // this will be 16 bytes below the bottom of the previous frame
let ref vm = cur_thread.vm;
// this will be 16 bytes below the bottom of the previous frame
let mut current_frame_pointer = frame_cursor;
let mut callsite = get_return_address(current_frame_pointer);
let mut previous_frame_pointer = get_previous_frame_pointer(current_frame_pointer); // thrower::fp, the starting point of the previous frame
// thrower's fp, the starting point of the previous frame
let mut previous_frame_pointer = get_previous_frame_pointer(current_frame_pointer);
// acquire lock for exception table
let compiled_exception_table = vm.compiled_exception_table.read().unwrap();
@@ -63,9 +68,13 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
let table_entry = compiled_exception_table.get(&callsite);
if table_entry.is_none() {
error!("Cannot find Mu callsite (i.e. we have reached a native frame), either there isn't a catch block to catch the exception or your catch block is above a native function call");
// we are not dealing with native frames for unwinding stack
// See Issue #42
error!("Cannot find Mu callsite (i.e. we have reached a native frame), \
either there isn't a catch block to catch the exception or \
your catch block is above a native function call");
print_backtrace(frame_cursor);
unreachable!(); // The above function will not return
// The above function will not return
}
table_entry.unwrap()
@@ -109,21 +118,22 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
}
}
fn print_frame(base: Address) {
/// prints current frame cursor
fn print_frame(cursor: Address) {
let top = 2;
let bottom = -(CALLEE_SAVED_COUNT as isize);
for i in (bottom .. top).rev() {
unsafe {
let addr = base.offset(i * POINTER_SIZE as isize);
let addr = cursor.offset(i * POINTER_SIZE as isize);
let val = addr.load::<Word>();
trace!("\taddr: 0x{:x} | val: 0x{:x} {}", addr, val, {if addr == base {"<- base"} else {""}});
trace!("\taddr: 0x{:x} | val: 0x{:x} {}", addr, val, {if addr == cursor {"<- cursor"} else {""}});
}
}
}
// This function may segfault or panic when it reaches the bottom of the stack
// (TODO: Determine where the bottom is without segfaulting)
/// This function may segfault or panic when it reaches the bottom of the stack
// TODO: Determine where the bottom is without segfaulting
fn print_backtrace(base: Address) -> ! {
error!("BACKTRACE: ");
@@ -24,6 +24,5 @@ extern uint32_t mu_retval;
int main(int argc, char** argv) {
char* serialize_vm = (char*) &vm;
mu_main(serialize_vm, argc, argv);
return (int) mu_retval;
}
@@ -12,19 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Some mathematical functions used at runtime
/// remainder for float type
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_frem32(a: f32, b: f32) -> f32 {
use std::ops::Rem;
a.rem(b)
}
/// remainder for double type
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_frem64(a: f64, b: f64) -> f64 {
use std::ops::Rem;
a.rem(b)
}
@@ -34,40 +34,49 @@ use extprim::i128::i128;
use runtime::math::num_traits::ToPrimitive;
use runtime::math::num_traits::FromPrimitive;
/// unsigned division for int128
#[no_mangle]
pub extern fn muentry_udiv_u128(a: u128, b: u128) -> u128 {
a.wrapping_div(b)
}
/// signed division for int128
#[no_mangle]
pub extern fn muentry_sdiv_i128(a: i128, b: i128) -> i128 {
a.wrapping_div(b)
}
/// unsigned remainder for int128
#[no_mangle]
pub extern fn muentry_urem_u128(a: u128, b: u128) -> u128 {
a.wrapping_rem(b)
}
/// signed remainder for int128
#[no_mangle]
pub extern fn muentry_srem_i128(a: i128, b: i128) -> i128 {
a.wrapping_rem(b)
}
/// double to unsigned int128
#[no_mangle]
pub extern fn muentry_fptoui_double_u128(a: f64) -> u128 { u128::from_f64(a).unwrap() }
/// double to signed int128
#[no_mangle]
pub extern fn muentry_fptosi_double_i128(a: f64) -> i128 { i128::from_f64(a).unwrap() }
/// unsigned int128 to double
#[no_mangle]
pub extern fn muentry_uitofp_u128_double(a: u128) -> f64 { a.to_f64().unwrap() }
/// signed int128 to double
#[no_mangle]
pub extern fn muentry_sitofp_i128_double(a: i128) -> f64 { a.to_f64().unwrap() }
/// float to unsigned int128
#[no_mangle]
pub extern fn muentry_fptoui_float_u128(a: f32) -> u128 { u128::from_f32(a).unwrap() }
/// float to signed int128
#[no_mangle]
pub extern fn muentry_fptosi_float_i128(a: f32) -> i128 { i128::from_f32(a).unwrap() }
/// unsigned int128 to float
#[no_mangle]
pub extern fn muentry_uitofp_u128_float(a: u128) -> f32 { a.to_f32().unwrap() }
/// signed int128 to float
#[no_mangle]
pub extern fn muentry_sitofp_i128_float(a: i128) -> f32 { a.to_f32().unwrap() }
\ No newline at end of file
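Two semantics worth noting in the math entrypoints above: float remainder via Rem matches C's fmod (remainder after truncated division), and the 128-bit operations use wrapping semantics, so the single overflowing case i128::MIN / -1 wraps rather than panics (division by zero still panics). A quick standalone check with native 128-bit integers follows; this crate predates native i128 support in Rust and uses extprim instead.

fn main() {
    // float remainder behaves like C's fmod: 5.0 % 3.0 == 2.0
    assert_eq!(5.0f32 % 3.0f32, 2.0f32);
    // the one overflowing case of signed division wraps instead of panicking
    assert_eq!(i128::MIN.wrapping_div(-1), i128::MIN);
    // ordinary cases match plain division/remainder (truncated toward zero)
    assert_eq!((-7i128).wrapping_rem(3), -1);
    println!("ok");
}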
@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// the garbage collection crate, as in src/gc
/// We design the GC crate to be separate from other parts of the VM, and to be as self-contained
/// as possible. We only expose a limited interface (functions, data structures, constants)
/// from the GC crate, and those get re-exported in this module.
extern crate gc;
pub use self::gc::*;
use utils::ByteSize;
@@ -27,6 +30,11 @@ use compiler::backend::BackendType;
use runtime::ValueLocation;
use runtime::thread::MuThread;
// the following functions are used by the VM to allocate for the client (through the API)
/// finds an allocator and allocates memory:
/// if the current thread has an allocator, uses it. Otherwise creates a new allocator,
/// allocates the object, and drops the allocator afterwards
fn check_allocator(size: ByteSize, align: ByteSize, encode: u64, hybrid_len: Option<u64>) -> ObjectReference {
if MuThread::has_current() {
// we have an allocator
@@ -34,23 +42,24 @@ fn check_allocator(size: ByteSize, align: ByteSize, encode: u64, hybrid_len: Opt
allocate(allocator, size, align, encode, hybrid_len)
} else {
let mut allocator = new_mutator();
let ret = allocate(&mut allocator as *mut Mutator, size, align, encode, hybrid_len);
drop_mutator(&mut allocator as *mut Mutator);
ret
}
}
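The shape of check_allocator — reuse the current thread's allocator when one exists, otherwise spin up a short-lived one just for this allocation — can be sketched with a thread-local in plain Rust. The Mutator below is a toy stand-in for the gc crate's real mutator type; nothing here is this crate's actual code.

use std::cell::RefCell;

// toy stand-in for the GC's per-thread mutator/allocator
struct Mutator { bump: usize }
impl Mutator {
    fn alloc(&mut self, size: usize) -> usize {
        let addr = self.bump;
        self.bump += size;
        addr
    }
}

thread_local! {
    // the current thread's allocator, if it has one (a real mutator thread would install one)
    static CURRENT: RefCell<Option<Mutator>> = RefCell::new(None);
}

fn check_allocator(size: usize) -> usize {
    CURRENT.with(|cur| match *cur.borrow_mut() {
        // the thread already has an allocator: use it
        Some(ref mut m) => m.alloc(size),
        // otherwise create a temporary allocator, allocate, and drop it
        None => Mutator { bump: 0x1000 }.alloc(size),
    })
}

fn main() {
    println!("allocated at {:#x}", check_allocator(16));
}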
/// allocates and initializes an object (hybrid or other types, large or small)
#[inline(always)]
fn allocate(allocator: *mut Mutator, size: ByteSize, align: ByteSize, encode: u64, hybrid_len: Option<u64>) -> ObjectReference {
// allocate
let ret = if size > LARGE_OBJECT_THRESHOLD {
muentry_alloc_large(allocator, size, align)
} else {
alloc(allocator, size, align)
};
// initialize
if hybrid_len.is_none() {
muentry_init_object(allocator, ret, encode);
} else {
@@ -60,6 +69,7 @@ fn allocate(allocator: *mut Mutator, size: ByteSize, align: ByteSize, encode: u6
ret
}
/// allocates an object of a fixed type
pub fn allocate_fixed(ty: P<MuType>, backendtype: Box<BackendType>) -> Address {
let gctype = backendtype.gc_type.clone();
let encode = get_gc_type_encode(gctype.id);
@@ -71,6 +81,7 @@ pub fn allocate_fixed(ty: P<MuType>, backendtype: Box<BackendType>) -> Address {
check_allocator(gctype.size(), gctype.alignment, encode, None).to_address()
}
/// allocates an object of a hybrid type
pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendType>) -> Address {
let gctype = backendtype.gc_type.clone();
let encode = get_gc_type_encode(gctype.id);
@@ -82,6 +93,7 @@ pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendType>) -
check_allocator(gctype.size_hybrid(len as u32), gctype.alignment, encode, Some(len)).to_address()
}
/// allocates a global cell
pub fn allocate_global(iref_global: P<Value>, backendtype: Box<BackendType>) -> ValueLocation {
let referenced_type = match iref_global.ty.get_referent_ty() {
Some(ty) => ty,
@@ -27,13 +27,20 @@ use std::ffi::CString;
use std::ffi::CStr;
use std::sync::Arc;
/// memory management: allocation, reclamation
/// (the actual code is in src/gc, which gets re-exported in the mm module)
pub mod mm;
/// thread management: stack, thread
pub mod thread;
/// mathematics functions
pub mod math;
/// a list of all entrypoints used by the compiler to generate calls into the runtime
/// (where generated code enters the runtime)
pub mod entrypoints;
/// exception handling
pub mod exception;
/// Dl_info used by dynamic loading functions
// consider using the libloading crate instead of the raw C functions for dynamic libraries
// however I am not sure if libloading can load symbols from the current process (not from an actual dylib)
// so here I use dlopen/dlsym from C
@@ -53,8 +60,9 @@ extern "C" {
fn dlerror() -> *const c_char;
}
// TODO: this actually returns the name and address of the nearest symbol (of any type)
// that starts before function_addr (instead we want the nearest function symbol)
/// returns name for a function address
// FIXME: this actually returns the name and address of the nearest symbol (of any type)
// that starts before function_addr (instead we want the nearest function symbol)
pub fn get_function_info(function_addr: Address) -> (CName, Address) {
use std::ptr;
@@ -84,8 +92,8 @@ pub fn get_function_info(function_addr: Address) -> (CName, Address) {
}
pub fn resolve_symbol(symbol: String) -> Address {
/// returns address for a given symbol, e.g. function name
pub fn resolve_symbol(symbol: MuName) -> Address {
use std::ptr;
let symbol = name_check(symbol);
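For reference, the dlsym-on-the-current-process mechanism that resolve_symbol relies on can be demonstrated standalone. The sketch below targets Linux/glibc, where RTLD_DEFAULT is a null handle; older toolchains may additionally need to link against libdl. It is an illustration of the mechanism, not this crate's code.

use std::os::raw::{c_char, c_void};

extern "C" {
    fn dlsym(handle: *mut c_void, symbol: *const c_char) -> *mut c_void;
}

fn main() {
    // a null handle is RTLD_DEFAULT on glibc: search the whole process
    let name = b"printf\0";
    let addr = unsafe { dlsym(std::ptr::null_mut(), name.as_ptr() as *const c_char) };
    println!("printf resolved at {:p}", addr);
}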
@@ -107,14 +115,22 @@ pub fn resolve_symbol(symbol: String) -> Address {
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
/// ValueLocation represents the runtime location for a value.
/// The purpose of this data structure is to refer to a location in a unified way
/// for both compile time (usually talking about symbols) and run time (talking about addresses)
/// A ValueLocation could be:
/// * a register (the register holds the value)
/// * a Constant (the value itself)
/// * a relocatable symbol (a relocatable symbol emitted by the AOT compiler, where the value resides)
/// * a direct memory address (the address contains the value)
/// * an indirect memory address (the address contains a pointer to the value)
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
pub enum ValueLocation {
Register(RegGroup, MuID), // 0
Constant(RegGroup, Word), // 1
Relocatable(RegGroup, MuName),// 2
Direct(RegGroup, Address), // 3
Indirect(RegGroup, Address), // 4
Register (RegGroup, MuID), // 0
Constant (RegGroup, Word), // 1
Relocatable (RegGroup, MuName), // 2
Direct (RegGroup, Address), // 3
Indirect (RegGroup, Address), // 4
}
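The practical difference between the Direct and Indirect variants is one extra pointer hop on load. A self-contained illustration with raw pointers follows (plain Rust, with raw pointers standing in for the crate's Address type):

fn main() {
    let value: u64 = 0xdead_beef;
    let direct: *const u64 = &value;            // Direct: the address holds the value
    let indirect: *const *const u64 = &direct;  // Indirect: the address holds a pointer
    unsafe {
        assert_eq!(*direct, 0xdead_beef);    // one load
        assert_eq!(**indirect, 0xdead_beef); // two loads
    }
    println!("direct and indirect agree");
}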
impl fmt::Display for ValueLocation {
@@ -198,40 +214,51 @@ impl Decodable for ValueLocation {
}
impl ValueLocation {
/// loads value from a ValueLocation
pub fn load_value(&self) -> (RegGroup, Word) {
match self {
&ValueLocation::Register(_, _)
| &ValueLocation::Direct(_, _)
| &ValueLocation::Indirect(_, _) => unimplemented!(),
&ValueLocation::Register(_, _) => unimplemented!(),
&ValueLocation::Direct(group, addr) => {
(group, unsafe {addr.load::<Word>()})
}
&ValueLocation::Indirect(group, addr) => {
unsafe {
let ptr = addr.load::<Address>();
(group, ptr.load::<Word>())
}
}
&ValueLocation::Constant(group, word) => {
(group, word)
}
&ValueLocation::Relocatable(_, _) => panic!("expect a runtime value")
&ValueLocation::Relocatable(group, ref symbol) => {
let addr = resolve_symbol(symbol.clone());
(group, unsafe {addr.load::<Word>()})
}
}
}
#[allow(unused_variables)]
/// creates a ValueLocation from a constant, panics if impossible
pub fn from_constant(c: Constant) -> ValueLocation {
match c {
Constant::Int(int_val) => ValueLocation::Constant(RegGroup::GPR, utils::mem::u64_to_raw(int_val)),
Constant::Float(f32_val) => ValueLocation::Constant(RegGroup::FPR, utils::mem::f32_to_raw(f32_val)),
Constant::Int(int_val) => ValueLocation::Constant(RegGroup::GPR, utils::mem::u64_to_raw(int_val)),
Constant::Float(f32_val) => ValueLocation::Constant(RegGroup::FPR, utils::mem::f32_to_raw(f32_val)),
Constant::Double(f64_val) => ValueLocation::Constant(RegGroup::FPR, utils::mem::f64_to_raw(f64_val)),
_ => unimplemented!()
}
}
/// returns the address that contains the value
pub fn to_address(&self) -> Address {
match self {
&ValueLocation::Register(_, _)
| &ValueLocation::Constant(_, _) => panic!("a register/constant cannot be turned into address"),
&ValueLocation::Direct(_, addr) => addr,
&ValueLocation::Direct(_, addr) => addr,
&ValueLocation::Indirect(_, addr) => unsafe {addr.load::<Address>()},
&ValueLocation::Relocatable(_, ref symbol) => resolve_symbol(symbol.clone())
&ValueLocation::Relocatable(_, ref symbol) => resolve_symbol(symbol.clone()),
&ValueLocation::Register(_, _)
| &ValueLocation::Constant(_, _) => panic!("a register/constant cannot be turned into address")
}
}
/// returns a relocatable symbol that contains the value, panics if impossible
pub fn to_relocatable(&self) -> MuName {
match self {
&ValueLocation::Relocatable(_, ref name) => name.clone(),
@@ -240,21 +267,29 @@ impl ValueLocation {
}
}
/// a C wrapper as the main function for executable boot images
/// The C wrapper does:
/// 1. loads the persisted VM
/// 2. invokes mu_main() to hand control over to Rust code
/// 3. returns the return value set by SetRetval
pub const PRIMORDIAL_ENTRY : &'static str = "src/runtime/main.c";
/// starts trace level logging, this function will be called from C
#[no_mangle]
pub extern fn mu_trace_level_log() {
VM::start_logging_trace();
}
/// the main function for executable boot image, this function will be called from C
#[no_mangle]
pub extern fn mu_main(serialized_vm : *const c_char, argc: c_int, argv: *const *const c_char) {
debug!("mu_main() started...");
// load and resume the VM
let str_vm = unsafe{CStr::from_ptr(serialized_vm)}.to_str().unwrap();
let vm : Arc<VM> = Arc::new(VM::resume_vm(str_vm));
// find the primordial function as an entry
let primordial = vm.primordial.read().unwrap();
if primordial.is_none() {
panic!("no primordial thread/stack/function. Client should provide an entry point");
@@ -280,16 +315,15 @@ pub extern fn mu_main(serialized_vm : *const c_char, argc: c_int, argv: *const *
args
};
// FIXME: currently assumes no user defined thread local
// will need to fix this after we can serialize heap object
// FIXME: currently assumes no user defined thread local - See Issue #48
let thread = thread::MuThread::new_thread_normal(stack, unsafe{Address::zero()}, args, vm.clone());
thread.join().unwrap();
}
}
/// runtime function to print a hex value (for PRINTHEX instruction for debugging use)
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_print_hex(x: u64) {
println!("PRINTHEX: 0x{:x}", x);
}
\ No newline at end of file
@@ -85,12 +85,12 @@ begin_func muentry_swap_back_to_native_stack
RET
end_func muentry_swap_back_to_native_stack
# _get_current_frame_rbp() -> Address
# _get_current_frame_bp() -> Address
# X0
begin_func get_current_frame_rbp
begin_func get_current_frame_bp
MOV X0, FP
RET
end_func get_current_frame_rbp
end_func get_current_frame_bp
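On the Rust side, the renamed routine would be reached through an extern declaration roughly like the sketch below; usize stands in for the crate's Address type, and this only links when the assembly object built by the build script is compiled in.

extern "C" {
    // implemented in runtime_asm_*_sysv.S: returns the current frame pointer
    fn get_current_frame_bp() -> usize;
}

fn main() {
    let bp = unsafe { get_current_frame_bp() };
    println!("current frame pointer: {:#x}", bp);
}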
# muentry_throw_exception(obj: Address)
# X0
@@ -116,24 +116,4 @@ begin_func exception_restore
pop_pair FP, LR, X1
MOV SP, X2
BR X0
end_func exception_restore
# fake_swap_mu_thread(old_sp_loc: Address)
# X0
# (we do not actually swap stack, but we make the stack the same
# as if they are native stack that have been swapped out, so that
# when THREADEXIT (swap_back_to_native_stack) is called, we won't panic
# this function is untested!!!
begin_func fake_swap_mu_thread
enter_frame
push_callee_saved
# save old sp to thread field
MOV X9, SP
STR X9, [X0]
# return to caller, but preserve those pushed values (since THREADEXIT will pick them up)
RET
end_func fake_swap_mu_thread
end_func exception_restore
\ No newline at end of file
@@ -80,11 +80,11 @@ begin_func muentry_swap_back_to_native_stack
ret
end_func muentry_swap_back_to_native_stack
# _get_current_frame_rbp() -> Address
begin_func get_current_frame_rbp
# _get_current_frame_bp() -> Address
begin_func get_current_frame_bp
movq %rbp, %rax
ret
end_func get_current_frame_rbp
end_func get_current_frame_bp
# muentry_throw_exception(obj: Address)
# %rdi
@@ -123,31 +123,4 @@ begin_func exception_restore
movq %rdx, %rsp
jmpq *%rdi
end_func exception_restore
# fake_swap_mu_thread(old_sp_loc: Address)
# %rdi
# (we do not actually swap stack, but we make the stack the same
# as if they are native stack that have been swapped out, so that
# when THREADEXIT (swap_back_to_native_stack) is called, we won't panic