going to check bench performance

parent d0e1ea48
......@@ -1601,6 +1601,7 @@ pub fn is_valid_c_identifier(name: &MuName) -> bool {
/// changes name to mangled name
/// This will always return a valid C identifier
pub fn mangle_name(name: MuName) -> String {
// log::trace!("mangle_name({})", name);
let name = name.replace('@', "");
if name.starts_with("__mu_") {
// TODO: Get rid of this, since it will be triggered if a client
......@@ -1621,7 +1622,10 @@ pub fn mangle_name(name: MuName) -> String {
.replace('-', "Zh")
.replace(':', "Zc")
.replace('#', "Za");
"__mu_".to_string() + name.as_str()
let res = "__mu_".to_string() + name.as_str();
// log::trace!("mangle_name.final({})", res);
res
}
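For reference, a hedged sketch of what the escaping above produces (assuming MuName is Arc&lt;String&gt;; any replacements hidden in the collapsed context are ignored here):

use std::sync::Arc;

fn mangle_examples() {
    // '@' stripped, ':' -> "Zc", '#' -> "Za", then the "__mu_" prefix.
    assert_eq!(mangle_name(Arc::new("@foo".to_string())), "__mu_foo");
    assert_eq!(
        mangle_name(Arc::new("@foo:bar#1".to_string())),
        "__mu_fooZcbarZa1"
    );
}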
/// demangles a Mu name
......
......@@ -2079,6 +2079,12 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_GetThreadLocal => {
trace!("instsel on GETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
// get thread local
let tl = self.emit_get_threadlocal(
Some(node),
......@@ -2098,6 +2104,11 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_SetThreadLocal(op) => {
trace!("instsel on SETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
let ref ops = inst.ops;
let ref op = ops[op];
......
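For context, the offsets traced above are the thread-struct field displacements that instruction selection bakes into its loads and stores. A hypothetical helper mirroring that address arithmetic (names assumed, not Zebu's API):

// Hypothetical sketch only: read the user TLS slot at thread_base + offset.
unsafe fn read_user_tls(thread_base: *const u8, user_tls_offset: isize) -> u64 {
    *(thread_base.offset(user_tls_offset) as *const u64)
}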
......@@ -144,11 +144,14 @@ pub trait CompilerPass {
}
pub fn tune_file_name(fname: Arc<String>) -> Arc<String> {
trace!("tune_file_name.input {}", fname);
let ln = fname.len();
if ln < 200 {
return fname;
}
// Keep the first and last 100 bytes of over-long names. Note: byte-indexed
// `get` assumes ASCII names; it returns None (and the unwrap panics) if a
// cut lands inside a multi-byte UTF-8 character.
let res = String::from(fname.get(0..100).unwrap()) + fname.get((ln - 100)..ln).unwrap();
trace!("tune_file_name.output {}", res);
return Arc::from(res);
}
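A quick sketch of the rule above: names under 200 bytes pass through untouched, longer ones keep only the first and last 100 bytes:

use std::sync::Arc;

fn tune_examples() {
    let long = Arc::new("a".repeat(150) + &"b".repeat(150)); // 300 bytes
    assert_eq!(tune_file_name(long).len(), 200);
    let short = Arc::new("f.mu".to_string());
    assert_eq!(*tune_file_name(short), "f.mu");
}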
......@@ -20,7 +20,7 @@ use std::*;
use utils::Address;
use utils::ByteSize;
const TRACE_ALLOC: bool = true;
const TRACE_ALLOC: bool = false;
#[repr(C)]
pub struct ImmixAllocator {
......@@ -68,7 +68,7 @@ impl Allocator for ImmixAllocator {
#[inline(always)]
fn alloc(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::alloc({}, {}, {});", &self, size, align);
// trace!("immix_mutator::alloc({}, {}, {});", &self, size, align);
// this part of code will slow down allocation
let align = objectmodel::check_alignment(align);
// end
......@@ -129,7 +129,7 @@ impl ImmixAllocator {
#[inline(never)]
pub fn alloc_slow(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::alloc_slow({}, {}, {});", &self, size, align);
// trace!("immix_mutator::alloc_slow({}, {}, {});", &self, size, align);
if size > BYTES_IN_LINE {
trace_if!(TRACE_ALLOC, "Mutator: overflow alloc()");
self.overflow_alloc(size, align)
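The #[inline(always)]/#[inline(never)] pairing above is the usual bump-pointer shape: a tiny inlined fast path, with the refill forced out of line so it does not bloat call sites. A generic sketch with hypothetical fields, not Zebu's actual allocator:

#[inline(always)]
fn try_bump(cursor: &mut usize, limit: usize, size: usize) -> Option<usize> {
    let start = *cursor;
    if start + size <= limit {
        *cursor = start + size; // hot path: just advance the cursor
        Some(start)
    } else {
        None // caller falls back to the #[inline(never)] refill path
    }
}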
......@@ -152,7 +152,7 @@ impl ImmixAllocator {
}
pub fn overflow_alloc(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::overflow_alloc(self, {}, {});", size, align);
// trace!("immix_mutator::overflow_alloc(self, {}, {});", size, align);
let start = self.large_cursor.align_up(align);
let end = start + size;
......@@ -173,7 +173,7 @@ impl ImmixAllocator {
#[inline(always)]
pub fn init_object<T>(&mut self, addr: Address, encode: T) {
trace!("init_object({}, _)", addr);
// trace!("init_object({}, _)", addr);
let map_slot = ImmixSpace::get_type_byte_slot_static(addr);
unsafe {
map_slot.store(encode);
......@@ -185,12 +185,12 @@ impl ImmixAllocator {
size: usize,
align: usize
) -> Address {
trace!(
"immix_mutator::try_alloc_from_local({}, {}, {});",
&self,
size,
align
);
// trace!(
// "immix_mutator::try_alloc_from_local({}, {}, {});",
// &self,
// size,
// align
// );
if self.line < LINES_IN_BLOCK {
let opt_next_available_line = {
let cur_line = self.line;
......@@ -239,14 +239,14 @@ impl ImmixAllocator {
align: usize,
request_large: bool
) -> Address {
trace!(
"immix_mutator::alloc_from_global({}, {}, {}, {});",
&self,
size,
align,
request_large
);
trace!("Mutator: slowpath: alloc_from_global()");
// trace!(
// "immix_mutator::alloc_from_global({}, {}, {}, {});",
// &self,
// size,
// align,
// request_large
// );
// trace!("Mutator: slowpath: alloc_from_global()");
self.return_block(request_large);
loop {
......@@ -267,10 +267,10 @@ impl ImmixAllocator {
self.large_limit = b.mem_start() + BYTES_IN_BLOCK;
self.large_block = Some(b);
trace!(
"Mutator: slowpath: new large_block starting from 0x{:x}",
self.large_cursor
);
// trace!(
// "Mutator: slowpath: new large_block starting from 0x{:x}",
// self.large_cursor
// );
return self.alloc(size, align);
} else {
......@@ -279,10 +279,10 @@ impl ImmixAllocator {
self.line = 0;
self.block = Some(b);
trace!(
"Mutator: slowpath: new block starting from 0x{:x}",
self.cursor
);
// trace!(
// "Mutator: slowpath: new block starting from 0x{:x}",
// self.cursor
// );
return self.alloc(size, align);
}
......@@ -295,7 +295,7 @@ impl ImmixAllocator {
}
fn return_block(&mut self, request_large: bool) {
trace!("immix_mutator::return_block(self, {});", request_large);
// trace!("immix_mutator::return_block(self, {});", request_large);
if request_large {
if self.large_block.is_some() {
trace!(
......
......@@ -301,7 +301,7 @@ pub extern "C" fn remove_root(obj: ObjectReference) {
/// pins an object so that it will not be moved or reclaimed
#[no_mangle]
pub extern "C" fn muentry_pin_object(obj: ObjectReference) -> Address {
trace!("gc::src::lib::muentry_pin_object");
// trace!("gc::src::lib::muentry_pin_object");
add_to_root(obj);
obj.to_address()
}
......@@ -309,7 +309,7 @@ pub extern "C" fn muentry_pin_object(obj: ObjectReference) -> Address {
/// unpins an object so that it can be freely moved/reclaimed as normal objects
#[no_mangle]
pub extern "C" fn muentry_unpin_object(obj: Address) {
trace!("gc::src::lib::muentry_unpin_object");
// trace!("gc::src::lib::muentry_unpin_object");
remove_root(unsafe { obj.to_object_reference() });
}
......@@ -341,7 +341,7 @@ pub extern "C" fn muentry_alloc_tiny(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_tiny({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_tiny({}, {})", size, align);
unsafe { m.tiny.alloc(size, align).to_object_reference() }
}
......@@ -352,7 +352,7 @@ pub extern "C" fn muentry_alloc_normal(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_normal({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_normal({}, {})", size, align);
let res = m.normal.alloc(size, align);
m.normal.post_alloc(res, size);
unsafe { res.to_object_reference() }
......@@ -367,7 +367,7 @@ pub extern "C" fn muentry_alloc_tiny_slow(
align: usize
) -> Address {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_tiny_slow({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_tiny_slow({}, {})", size, align);
m.tiny.alloc_slow(size, align)
}
......@@ -380,7 +380,7 @@ pub extern "C" fn muentry_alloc_normal_slow(
align: usize
) -> Address {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_normal_slow({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_normal_slow({}, {})", size, align);
let res = m.normal.alloc_slow(size, align);
m.normal.post_alloc(res, size);
res
......@@ -395,7 +395,7 @@ pub extern "C" fn muentry_alloc_large(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_large({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_large({}, {})", size, align);
let res = m.lo.alloc(size, align);
unsafe { res.to_object_reference() }
}
......@@ -407,7 +407,7 @@ pub extern "C" fn muentry_init_tiny_object(
obj: ObjectReference,
encode: TinyObjectEncode
) {
trace!("gc::src::lib::muentry_init_tiny_object");
// trace!("gc::src::lib::muentry_init_tiny_object");
unsafe { &mut *mutator }
.tiny
.init_object(obj.to_address(), encode);
......@@ -420,7 +420,7 @@ pub extern "C" fn muentry_init_small_object(
obj: ObjectReference,
encode: SmallObjectEncode
) {
trace!("gc::src::lib::muentry_init_small_object");
// trace!("gc::src::lib::muentry_init_small_object");
unsafe { &mut *mutator }
.normal
.init_object(obj.to_address(), encode);
......@@ -433,7 +433,7 @@ pub extern "C" fn muentry_init_medium_object(
obj: ObjectReference,
encode: MediumObjectEncode
) {
trace!("gc::src::lib::muentry_init_medium_object");
// trace!("gc::src::lib::muentry_init_medium_object");
unsafe { &mut *mutator }
.normal
.init_object(obj.to_address(), encode);
......@@ -445,7 +445,7 @@ pub extern "C" fn muentry_init_large_object(
obj: ObjectReference,
encode: LargeObjectEncode
) {
trace!("gc::src::lib::muentry_init_large_object");
// trace!("gc::src::lib::muentry_init_large_object");
unsafe { &mut *mutator }
.lo
.init_object(obj.to_address(), encode);
......
//#![feature(alloc, heap_api)]
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
......@@ -45,3 +46,5 @@ pub mod compiler;
pub mod linkutils;
pub mod runtime;
pub mod vm;
mod log_settings;
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A place to define constants to enable/disable logging functions
pub const RTRPY_HELPERS_TRACE: bool = false;
pub const RTRPY_HELPERS_INFO: bool = true;
pub const MM_RTMU_TRACE: bool = true;
pub const MM_RTMU_INFO: bool = true;
pub const MM_MU_TRACE: bool = false;
pub const MM_MU_DEBUG: bool = false;
pub const MM_MU_INFO: bool = false;
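The trace_if!/debug_if! calls gated on these constants (see the later hunks) presumably expand to a plain conditional, so a `const false` flag makes the whole call dead code. A minimal sketch of such a macro, assuming it simply wraps log's trace!:

macro_rules! trace_if {
    ($cond:expr, $($arg:tt)*) => {
        if $cond {
            trace!($($arg)*);
        }
    };
}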
......@@ -43,51 +43,86 @@ impl sys_rtmu_futex {
}
pub fn lock(&mut self, timeout_ns: u64) {
let res = sys_futex_lock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
timeout_ns,
);
let tid = os_gettid() as u32;
let res = match self.mem.compare_exchange(
0 as u32,
tid,
Ordering::SeqCst,
Ordering::SeqCst
) {
Ok(_) => {
trace!("FUTEX.lock.fast_path.success");
0 as u32
}
Err(_) => sys_futex_lock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
timeout_ns
) as u32
};
// let res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as *mut
// u32, timeout_ns
// );
// let mut res: SysResult = -1;
//
// while res == -1 {
// if self.mem.fetch_add(1, Ordering::SeqCst) == 0 {
// trace!("FUTEX.lock({}) fastpath success",
// Address::from_mut_ptr(self as *const
// sys_rtmu_futex as *mut sys_rtmu_futex));
// break; } else {
// let fval = self.mem.load(Ordering::SeqCst);
// trace!(
// "FUTEX.lock({}) fastpath failed (res={}),
// \ncurrent fval: #{}\nerror#{}",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut
// sys_rtmu_futex), res, fval, unsafe {
// (*libc::__errno_location()) } );
// res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as
// *mut u32, timeout_ns,
// );
// }
// }
trace!(
"FUTEX.lock({}) returning({}), error_code=({})",
Address::from_mut_ptr(
self as *const sys_rtmu_futex as *mut sys_rtmu_futex
),
res,
unsafe { (*libc::__errno_location()) }
);
// let mut res: SysResult = -1;
//
// while res == -1 {
// if self.mem.fetch_add(1, Ordering::SeqCst) == 0 {
// trace!("FUTEX.lock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex));
// break;
// } else {
// let fval = self.mem.load(Ordering::SeqCst);
// trace!(
// "FUTEX.lock({}) fastpath failed (res={}), \ncurrent fval: #{}\nerror#{}",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
// res, fval, unsafe { (*libc::__errno_location()) }
// );
// res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
// timeout_ns,
// );
// }
// }
trace!("FUTEX.lock({}) returning({}), error_code=({})",
Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
res,
unsafe { (*libc::__errno_location())
});
}
pub fn unlock(&mut self, count: u64) {
let fval = self.mem.load(Ordering::SeqCst) as i32;
trace!("FUTEX.unlock({}).fval = {}",
Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
fval);
// if self.mem.fetch_sub(1, Ordering::SeqCst) <= 1 {
// trace!("FUTEX.unlock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex));
// } else {
sys_futex_unlock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
count,
);
// let fval = self.mem.load(Ordering::SeqCst) as i32;
trace!(
"FUTEX.unlock({}).fval = {}",
Address::from_mut_ptr(
self as *const sys_rtmu_futex as *mut sys_rtmu_futex
),
self.mem.load(Ordering::SeqCst) as i32
);
// }
// if self.mem.fetch_sub(1, Ordering::SeqCst) <= 1 {
// trace!("FUTEX.unlock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex
// as *mut sys_rtmu_futex)); } else {
let tid = os_gettid() as u32;
match self.mem.compare_exchange(
tid,
0 as u32,
Ordering::SeqCst,
Ordering::SeqCst
) {
Ok(_) => trace!("FUTEX.unlock.fast_path.success"),
Err(_) => {
sys_futex_unlock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
count
);
}
};
// }
}
}
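One caveat with the 0<->tid fast path above: if unlock's compare_exchange succeeds while another thread is parked in the slow path, no FUTEX_WAKE is ever issued. The classic remedy (Drepper, "Futexes Are Tricky") adds a third "contended" state so the unlocker knows when a wake is needed. A minimal Linux sketch, not Zebu code:

use std::sync::atomic::{AtomicU32, Ordering};

// 0 = free, 1 = locked, 2 = locked with (possible) waiters.
fn futex_wait(word: &AtomicU32, expected: u32) {
    unsafe {
        libc::syscall(
            libc::SYS_futex,
            word as *const AtomicU32,
            libc::FUTEX_WAIT,
            expected,
            std::ptr::null::<libc::timespec>()
        );
    }
}

fn futex_wake(word: &AtomicU32, n: i32) {
    unsafe {
        libc::syscall(libc::SYS_futex, word as *const AtomicU32, libc::FUTEX_WAKE, n);
    }
}

fn lock(word: &AtomicU32) {
    if word
        .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
    {
        return; // uncontended fast path
    }
    // Mark contended and sleep until the word is observed free again.
    while word.swap(2, Ordering::Acquire) != 0 {
        futex_wait(word, 2);
    }
}

fn unlock(word: &AtomicU32) {
    if word.swap(0, Ordering::Release) == 2 {
        futex_wake(word, 1); // a waiter may be parked; wake one
    }
}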
......@@ -108,7 +143,7 @@ fn sys_futex_lock_slowpath(futex_ptr: *mut u32, timeout_ns: u64) -> SysResult {
0,
null_ts,
null_cl,
0,
0
)
},
_ => unsafe {
......@@ -119,19 +154,19 @@ fn sys_futex_lock_slowpath(futex_ptr: *mut u32, timeout_ns: u64) -> SysResult {
0,
&ns_to_time(timeout_ns) as *const libc::timespec,
null_cl,
0,
0
)
}
};
res as SysResult
// assert_eq!(
// res,
// 0,
// "FUTEX.lock slowpath failed with error code #{}",
// unsafe { (*libc::__errno_location()) }
// );
// trace!("FUTEX.lock slowpath aquired #{:#?}", futex_ptr);
// assert_eq!(
// res,
// 0,
// "FUTEX.lock slowpath failed with error code #{}",
// unsafe { (*libc::__errno_location()) }
// );
// trace!("FUTEX.lock slowpath aquired #{:#?}", futex_ptr);
}
fn sys_futex_unlock_slowpath(futex_ptr: *mut u32, count: u64) {
......@@ -146,20 +181,26 @@ fn sys_futex_unlock_slowpath(futex_ptr: *mut u32, count: u64) {
count,
null_ts,
null_cl,
0,
0
)
};
assert_ne!(
res, -1,
res,
-1,
"FUTEX.unlock({}) slowpath failed with res={} and error code #{}",
Address::from_mut_ptr(futex_ptr), res, unsafe { (*libc::__errno_location()) }
Address::from_mut_ptr(futex_ptr),
res,
unsafe { (*libc::__errno_location()) }
);
trace!(
"FUTEX.unlock({}) slowpath released",
Address::from_mut_ptr(futex_ptr)
);
trace!("FUTEX.unlock({}) slowpath released", Address::from_mut_ptr(futex_ptr));
}
fn ns_to_time(ns: u64) -> libc::timespec {
libc::timespec {
tv_sec: (ns / 1_000_000_000) as i64,
tv_nsec: (ns % 1_000_000_000) as i64,
tv_nsec: (ns % 1_000_000_000) as i64
}
}
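Worked example of the split above: 1_500_000_000 ns becomes one second plus half a second of nanoseconds:

let t = ns_to_time(1_500_000_000);
assert_eq!(t.tv_sec, 1);
assert_eq!(t.tv_nsec, 500_000_000);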
......@@ -48,11 +48,18 @@ pub fn gen_object_encode(
}
};
debug!(
debug_if!(
log_settings::MM_MU_DEBUG,
"ENCODE: gen_object_encode: {:?}, size: {}",
backend_ty, size
backend_ty,
size
);
debug_if!(
log_settings::MM_MU_DEBUG,
"ENCODE: gc_ty: {}, full_gc_ty: {}",
gc_tyid,
full_tyid
);
debug!("ENCODE: gc_ty: {}, full_gc_ty: {}", gc_tyid, full_tyid);
gen_object_encode_internal(is_hybrid, gc_tyid, full_tyid, size, vm)
}
......@@ -212,7 +219,7 @@ pub fn allocate_fixed(
) -> Address {
let encode = gen_object_encode(&backendtype, backendtype.size, vm);
trace!("API: allocate fixed ty: {}", ty);
trace_if!(log_settings::MM_MU_TRACE, "API: allocate fixed ty: {}", ty);
check_allocator(backendtype.size, backendtype.alignment, encode)
.to_address()
}
......@@ -229,7 +236,7 @@ pub fn allocate_hybrid(
);
let encode = gen_object_encode(&backendtype, size, vm);
trace!("API: allocate hybrd ty: {}", ty);
trace_if!(log_settings::MM_MU_TRACE, "API: allocate hybrd ty: {}", ty);
check_allocator(size, backendtype.alignment, encode).to_address()
}
......@@ -252,7 +259,11 @@ pub fn allocate_global(
"global cell cannot be hybrid type"
);
trace!("API: allocate hybrd ty: {}", referenced_type);
trace_if!(
log_settings::MM_MU_TRACE,
"API: allocate global ty: {}",
referenced_type
);
let addr = allocate_fixed(referenced_type, backendtype, vm);
ValueLocation::Direct(RegGroup::GPR, addr)
......
......@@ -46,21 +46,26 @@ pub fn sys_get_aligned_size(size: usize, align: usize) -> usize {
}
fn sys_allocate_by_size(size: usize) -> SysMemBackStore {
let mem_layout =
Box::new(match Layout::from_size_align(size, POINTER_SIZE) {
Ok(lo) => lo,
Err(err) => panic!("Allocating region failed with err: {}", err)
});
unsafe { alloc(*mem_layout) }
// let mem_layout =
// Box::new(match Layout::from_size_align(size, POINTER_SIZE) {
// Ok(lo) => lo,
// Err(err) => panic!("Allocating region failed with err: {}", err)
// });
// unsafe { alloc(*mem_layout) }
unsafe { libc::malloc(size as libc::size_t) as *mut u8 }
}
fn sys_delete_mem(backstore: SysMemBackStore, size: usize) {
let bbbox = unsafe { Box::from_raw(backstore) };
let mem_layout =
Box::new(match Layout::from_size_align(size, POINTER_SIZE) {
Ok(lo) => lo,
Err(err) => panic!("Allocating region failed with err: {}", err)
});
unsafe { dealloc(backstore, *mem_layout) };
let _backstore = Box::into_raw(bbbox);
// let backstore = backstore as *mut Vec<u8>;
// let boxed_vec: Box<Vec<u8>> = unsafe { Box::from_raw(backstore) };
// let mem_layout =
// Box::new(match Layout::from_size_align(size, POINTER_SIZE) {
// Ok(lo) => lo,
// Err(err) => panic!("Allocating region failed with err: {}", err)
// });
// unsafe { dealloc(backstore, *mem_layout) };
// let _backstore = Box::into_raw(bbbox);
unsafe { libc::free(backstore as *mut libc::c_void) };
}
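The switch to malloc/free sidesteps a real constraint of std::alloc: dealloc must be handed the same Layout that alloc was called with, so the size has to be threaded to every free site, while libc::free needs only the pointer. A sketch of that contract (standard-library API, not Zebu code):

use std::alloc::{alloc, dealloc, Layout};

unsafe fn round_trip(size: usize) {
    assert!(size > 0); // alloc with a zero-size layout is undefined
    let layout = Layout::from_size_align(size, 8).unwrap();
    let p = alloc(layout);
    assert!(!p.is_null());
    dealloc(p, layout); // must be the identical layout, or UB
}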
......@@ -14,6 +14,7 @@
// expose the mu interface (GC)
use super::super::log_settings;
use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
......
......@@ -14,6 +14,7 @@
use ast::ir::*;
use compiler::backend::RegGroup;
use log_settings;
use utils;
use utils::Address;
use utils::Word;
......@@ -38,7 +39,8 @@ pub mod exception;
pub mod math;
// /// platform dependent code for runtime goes here
//mod _platform;
pub mod itc; // inter-thread communication
pub mod itc;
// inter-thread communication
/// memory management: allocation, reclamation
/// (the actual code is in src/gc, which gets re-exported in mm module)
//#[cfg(feature = "realtime")]
......@@ -51,6 +53,12 @@ pub mod thread;
#[cfg(feature = "realtime")]
pub mod mu_time;
#[cfg(feature = "realtime")]
mod rtrpy_helpers;
#[cfg(feature = "realtime")]
pub use self::rtrpy_helpers::*;
lazy_static! {
static ref UNKNOWN_FUNCTION_NAME: CName = Arc::new("UNKNOWN".to_string());
}
......@@ -102,7 +110,11 @@ pub fn get_function_info(function_addr: Address) -> (CName, Address) {
pub fn resolve_symbol(symbol: MuName) -> Address {
use std::ptr;
let c_symbol = CString::new(mangle_name(symbol.clone())).unwrap();
// trace!("resolve_symbol({})", symbol);
let mangled_name = mangle_name(symbol.clone());
let c_symbol =
unsafe { CString::from_vec_unchecked(mangled_name.into_bytes()) };
let rtld_default = unsafe { dlopen(ptr::null(), 0) };
let ret = unsafe { dlsym(rtld_default, c_symbol.as_ptr()) };
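Presumably the point of the change: CString::new scans every byte for interior NULs on each symbol lookup, while from_vec_unchecked skips the scan, which is sound only because mangled names are known to be NUL-free. Sketch:

use std::ffi::CString;

fn to_c_symbol(mangled: String) -> CString {
    debug_assert!(!mangled.as_bytes().contains(&0)); // caller's invariant
    unsafe { CString::from_vec_unchecked(mangled.into_bytes()) }
}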
......@@ -152,11 +164,13 @@ pub fn resolve_symbol(symbol: MuName) -> Address {
pub enum ValueLocation {
Register(RegGroup, MuID),
Constant(RegGroup, Word),
Relocatable(RegGroup, MuName), /* TODO: This only works for mu entities
* (add a flag to indicate */
Relocatable(RegGroup, MuName),
/* TODO: This only works for mu entities
* (add a flag to indicate */
// if it's native or have a different variant?)
Direct(RegGroup, Address), // Not dumped
Indirect(RegGroup, Address) // Not dumped
Direct(RegGroup, Address),
// Not dumped
Indirect(RegGroup, Address) // Not dumped
}
rodal_enum!(ValueLocation{(Register: group, id), (Constant: group, word),
......
......@@ -25,9 +25,9 @@ pub fn sys_get_time_ns() -> i64 {
let res = (timestamp.tv_nsec as i64)
+ (1_000_000_000 * (timestamp.tv_sec as i64));
if cfg!(debug_assertions) {
debug!("sys_get_time_ns: {}", res);
}
// if cfg!(debug_assertions) {
// debug!("sys_get_time_ns: {}", res);
// }
res
}
// Copyright 2019 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate timer;
extern crate time;
use super::*;
// use self::timer::Guard;
type TimerHandler = extern "C" fn(Address) -> ();
// extern "C" {
// fn call_mu_function(func_addr: Address, arg_addr: Address);
// }
pub struct SysTimer {
timerref: Box<timer::Timer>,
guard: timer::Guard,
}
impl SysTimer {