Commit 584ad012 authored by Javad Ebrahimian Amiri

going to check bench performance

parent d0e1ea48
@@ -1601,6 +1601,7 @@ pub fn is_valid_c_identifier(name: &MuName) -> bool {
/// changes name to mangled name
/// This will always return a valid C identifier
pub fn mangle_name(name: MuName) -> String {
// log::trace!("mangle_name({})", name);
let name = name.replace('@', "");
if name.starts_with("__mu_") {
// TODO: Get rid of this, since it will be triggered if a client
@@ -1621,7 +1622,10 @@ pub fn mangle_name(name: MuName) -> String {
.replace('-', "Zh")
.replace(':', "Zc")
.replace('#', "Za");
"__mu_".to_string() + name.as_str()
let res = "__mu_".to_string() + name.as_str();
// log::trace!("mangle_name.final({})", res);
res
}
/// demangles a Mu name
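The mangling scheme visible in this hunk strips the leading '@' and rewrites characters that are illegal in C identifiers into fixed Z-codes before prefixing "__mu_". A minimal standalone sketch of just the substitutions shown above (the real mangle_name performs further replacements that are collapsed out of this diff):

// Sketch only: limited to the substitutions visible in the hunk above.
fn mangle_name_sketch(name: &str) -> String {
    let cleaned = name
        .replace('@', "")
        .replace('-', "Zh")
        .replace(':', "Zc")
        .replace('#', "Za");
    "__mu_".to_string() + cleaned.as_str()
}

fn main() {
    // "@foo-bar:1#2" becomes a C-safe identifier.
    assert_eq!(mangle_name_sketch("@foo-bar:1#2"), "__mu_fooZhbarZc1Za2");
}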
@@ -2079,6 +2079,12 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_GetThreadLocal => {
trace!("instsel on GETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
// get thread local
let tl = self.emit_get_threadlocal(
Some(node),
@@ -2098,6 +2104,11 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_SetThreadLocal(op) => {
trace!("instsel on SETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
let ref ops = inst.ops;
let ref op = ops[op];
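The five new trace! calls in each of the two hunks above print the thread-structure field offsets (cast to i32) at instruction-selection time, presumably so the offsets baked into the emitted GET/SETTHREADLOCAL code can be cross-checked against the runtime's actual layout. Purely as an illustration of what such offset constants describe (the struct and field names below are placeholders, not Zebu's real thread layout, and offset_of! is just one way to derive them):

use std::mem::offset_of; // illustration only; stable since Rust 1.77

#[repr(C)]
struct ThreadSketch {
    allocator: [u8; 64], // stand-in for the embedded allocator/mutator state
    native_sp_loc: usize,
    user_tls: usize,
    stack: usize,
    exception_obj: usize,
}

// Values of this kind are what the new trace! lines report.
const USER_TLS_OFFSET_SKETCH: usize = offset_of!(ThreadSketch, user_tls);
const STACK_OFFSET_SKETCH: usize = offset_of!(ThreadSketch, stack);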
@@ -144,11 +144,14 @@ pub trait CompilerPass {
}
pub fn tune_file_name(fname: Arc<String>) -> Arc<String> {
trace!("tune_file_name.input {}", fname);
let ln = fname.len();
if ln < 200 {
return fname
}
let res = String::from(fname.get(0..100).unwrap()) + fname.get((ln-100)..ln).unwrap();
trace!("tune_file_name.output {}", res);
return Arc::from(res)
}
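A usage sketch for the new helper (assuming it is called from the same module): names shorter than 200 bytes pass through unchanged, longer ones are spliced down to the first and last 100 bytes. Note that get(0..100) indexes by byte, so a non-ASCII character straddling byte 100 would make the unwrap() panic; the inputs below are plain ASCII and the file name is hypothetical.

use std::sync::Arc;

fn tune_file_name_example() {
    // Short names are returned as-is.
    let short = Arc::new(String::from("add_u128.mu")); // hypothetical file name
    assert_eq!(tune_file_name(short).as_str(), "add_u128.mu");

    // A 250-byte name is trimmed to 100 leading + 100 trailing bytes.
    let long = Arc::new("a".repeat(250));
    assert_eq!(tune_file_name(long).len(), 200);
}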
@@ -20,7 +20,7 @@ use std::*;
use utils::Address;
use utils::ByteSize;
const TRACE_ALLOC: bool = true;
const TRACE_ALLOC: bool = false;
#[repr(C)]
pub struct ImmixAllocator {
@@ -68,7 +68,7 @@ impl Allocator for ImmixAllocator {
#[inline(always)]
fn alloc(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::alloc({}, {}, {});", &self, size, align);
// trace!("immix_mutator::alloc({}, {}, {});", &self, size, align);
// this part of code will slow down allocation
let align = objectmodel::check_alignment(align);
// end
@@ -129,7 +129,7 @@ impl ImmixAllocator {
#[inline(never)]
pub fn alloc_slow(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::alloc_slow({}, {}, {});", &self, size, align);
// trace!("immix_mutator::alloc_slow({}, {}, {});", &self, size, align);
if size > BYTES_IN_LINE {
trace_if!(TRACE_ALLOC, "Mutator: overflow alloc()");
self.overflow_alloc(size, align)
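The pattern throughout this file is to comment out the unconditional per-call trace! lines and keep only trace_if!(TRACE_ALLOC, ...), with TRACE_ALLOC flipped to false above, so the allocation fast path carries no logging cost while benchmarking. A sketch of the assumed trace_if! semantics (the real macro is defined elsewhere in the codebase; log::trace! is an assumption here):

// Assumed behaviour: the flag is tested before any formatting happens, so a
// `const TRACE_ALLOC: bool = false` makes the whole branch statically dead.
macro_rules! trace_if {
    ($cond:expr, $($arg:tt)*) => {
        if $cond {
            log::trace!($($arg)*);
        }
    };
}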
@@ -152,7 +152,7 @@ impl ImmixAllocator {
}
pub fn overflow_alloc(&mut self, size: usize, align: usize) -> Address {
trace!("immix_mutator::overflow_alloc(self, {}, {});", size, align);
// trace!("immix_mutator::overflow_alloc(self, {}, {});", size, align);
let start = self.large_cursor.align_up(align);
let end = start + size;
@@ -173,7 +173,7 @@ impl ImmixAllocator {
#[inline(always)]
pub fn init_object<T>(&mut self, addr: Address, encode: T) {
trace!("init_object({}, _)", addr);
// trace!("init_object({}, _)", addr);
let map_slot = ImmixSpace::get_type_byte_slot_static(addr);
unsafe {
map_slot.store(encode);
@@ -185,12 +185,12 @@ impl ImmixAllocator {
size: usize,
align: usize
) -> Address {
trace!(
"immix_mutator::try_alloc_from_local({}, {}, {});",
&self,
size,
align
);
// trace!(
// "immix_mutator::try_alloc_from_local({}, {}, {});",
// &self,
// size,
// align
// );
if self.line < LINES_IN_BLOCK {
let opt_next_available_line = {
let cur_line = self.line;
@@ -239,14 +239,14 @@ impl ImmixAllocator {
align: usize,
request_large: bool
) -> Address {
trace!(
"immix_mutator::alloc_from_global({}, {}, {}, {});",
&self,
size,
align,
request_large
);
trace!("Mutator: slowpath: alloc_from_global()");
// trace!(
// "immix_mutator::alloc_from_global({}, {}, {}, {});",
// &self,
// size,
// align,
// request_large
// );
// trace!("Mutator: slowpath: alloc_from_global()");
self.return_block(request_large);
loop {
@@ -267,10 +267,10 @@ impl ImmixAllocator {
self.large_limit = b.mem_start() + BYTES_IN_BLOCK;
self.large_block = Some(b);
trace!(
"Mutator: slowpath: new large_block starting from 0x{:x}",
self.large_cursor
);
// trace!(
// "Mutator: slowpath: new large_block starting from 0x{:x}",
// self.large_cursor
// );
return self.alloc(size, align);
} else {
@@ -279,10 +279,10 @@ impl ImmixAllocator {
self.line = 0;
self.block = Some(b);
trace!(
"Mutator: slowpath: new block starting from 0x{:x}",
self.cursor
);
// trace!(
// "Mutator: slowpath: new block starting from 0x{:x}",
// self.cursor
// );
return self.alloc(size, align);
}
@@ -295,7 +295,7 @@ impl ImmixAllocator {
}
fn return_block(&mut self, request_large: bool) {
trace!("immix_mutator::return_block(self, {});", request_large);
// trace!("immix_mutator::return_block(self, {});", request_large);
if request_large {
if self.large_block.is_some() {
trace!(
@@ -301,7 +301,7 @@ pub extern "C" fn remove_root(obj: ObjectReference) {
/// pins an object so that it will not be moved or reclaimed
#[no_mangle]
pub extern "C" fn muentry_pin_object(obj: ObjectReference) -> Address {
trace!("gc::src::lib::muentry_pin_object");
// trace!("gc::src::lib::muentry_pin_object");
add_to_root(obj);
obj.to_address()
}
@@ -309,7 +309,7 @@ pub extern "C" fn muentry_pin_object(obj: ObjectReference) -> Address {
/// unpins an object so that it can be freely moved/reclaimed as normal objects
#[no_mangle]
pub extern "C" fn muentry_unpin_object(obj: Address) {
trace!("gc::src::lib::muentry_unpin_object");
// trace!("gc::src::lib::muentry_unpin_object");
remove_root(unsafe { obj.to_object_reference() });
}
@@ -341,7 +341,7 @@ pub extern "C" fn muentry_alloc_tiny(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_tiny({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_tiny({}, {})", size, align);
unsafe { m.tiny.alloc(size, align).to_object_reference() }
}
@@ -352,7 +352,7 @@ pub extern "C" fn muentry_alloc_normal(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_normal({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_normal({}, {})", size, align);
let res = m.normal.alloc(size, align);
m.normal.post_alloc(res, size);
unsafe { res.to_object_reference() }
@@ -367,7 +367,7 @@ pub extern "C" fn muentry_alloc_tiny_slow(
align: usize
) -> Address {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_tiny_slow({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_tiny_slow({}, {})", size, align);
m.tiny.alloc_slow(size, align)
}
@@ -380,7 +380,7 @@ pub extern "C" fn muentry_alloc_normal_slow(
align: usize
) -> Address {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_normal_slow({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_normal_slow({}, {})", size, align);
let res = m.normal.alloc_slow(size, align);
m.normal.post_alloc(res, size);
res
@@ -395,7 +395,7 @@ pub extern "C" fn muentry_alloc_large(
align: usize
) -> ObjectReference {
let m = mutator_ref(mutator);
trace!("gc::src::lib::muentry_alloc_large({}, {})", size, align);
// trace!("gc::src::lib::muentry_alloc_large({}, {})", size, align);
let res = m.lo.alloc(size, align);
unsafe { res.to_object_reference() }
}
@@ -407,7 +407,7 @@ pub extern "C" fn muentry_init_tiny_object(
obj: ObjectReference,
encode: TinyObjectEncode
) {
trace!("gc::src::lib::muentry_init_tiny_object");
// trace!("gc::src::lib::muentry_init_tiny_object");
unsafe { &mut *mutator }
.tiny
.init_object(obj.to_address(), encode);
@@ -420,7 +420,7 @@ pub extern "C" fn muentry_init_small_object(
obj: ObjectReference,
encode: SmallObjectEncode
) {
trace!("gc::src::lib::muentry_init_small_object");
// trace!("gc::src::lib::muentry_init_small_object");
unsafe { &mut *mutator }
.normal
.init_object(obj.to_address(), encode);
@@ -433,7 +433,7 @@ pub extern "C" fn muentry_init_medium_object(
obj: ObjectReference,
encode: MediumObjectEncode
) {
trace!("gc::src::lib::muentry_init_medium_object");
// trace!("gc::src::lib::muentry_init_medium_object");
unsafe { &mut *mutator }
.normal
.init_object(obj.to_address(), encode);
@@ -445,7 +445,7 @@ pub extern "C" fn muentry_init_large_object(
obj: ObjectReference,
encode: LargeObjectEncode
) {
trace!("gc::src::lib::muentry_init_large_object");
// trace!("gc::src::lib::muentry_init_large_object");
unsafe { &mut *mutator }
.lo
.init_object(obj.to_address(), encode);
//#![feature(alloc, heap_api)]
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -45,3 +46,5 @@ pub mod compiler;
pub mod linkutils;
pub mod runtime;
pub mod vm;
mod log_settings;
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A place to define constants to enable/disable logging functions
pub const RTRPY_HELPERS_TRACE: bool = false;
pub const RTRPY_HELPERS_INFO: bool = true;
pub const MM_RTMU_TRACE: bool = true;
pub const MM_RTMU_INFO: bool = true;
pub const MM_MU_TRACE: bool = false;
pub const MM_MU_DEBUG: bool = false;
pub const MM_MU_INFO: bool = false;
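These switches are consumed by the trace_if!/debug_if! call sites introduced later in this commit, one flag per subsystem. Two such call sites, quoted from the hunks further down, show the intended pattern:

trace_if!(
    log_settings::MM_RTMU_TRACE,
    "=== ADD {:?} TO_EMM_ROOTS ===",
    backstore_addr
);
debug_if!(
    log_settings::MM_MU_DEBUG,
    "ENCODE: gc_ty: {}, full_gc_ty: {}",
    gc_tyid,
    full_tyid
);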
@@ -43,51 +43,86 @@ impl sys_rtmu_futex {
}
pub fn lock(&mut self, timeout_ns: u64) {
let res = sys_futex_lock_slowpath(
let tid = os_gettid() as u32;
let res = match self.mem.compare_exchange(
0 as u32,
tid,
Ordering::SeqCst,
Ordering::SeqCst
) {
Ok(tid) => {
trace!("FUTEX.lock.fast_path.success");
0 as u32
}
Err(err_val) => sys_futex_lock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
timeout_ns,
);
// let mut res: SysResult = -1;
//
// while res == -1 {
// if self.mem.fetch_add(1, Ordering::SeqCst) == 0 {
// trace!("FUTEX.lock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex));
// break;
// } else {
// let fval = self.mem.load(Ordering::SeqCst);
// trace!(
// "FUTEX.lock({}) fastpath failed (res={}), \ncurrent fval: #{}\nerror#{}",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
// res, fval, unsafe { (*libc::__errno_location()) }
// );
// res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
// timeout_ns,
// );
// }
// }
trace!("FUTEX.lock({}) returning({}), error_code=({})",
Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
timeout_ns
) as u32
};
// let res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as *mut
// u32, timeout_ns
// );
// let mut res: SysResult = -1;
//
// while res == -1 {
// if self.mem.fetch_add(1, Ordering::SeqCst) == 0 {
// trace!("FUTEX.lock({}) fastpath success",
// Address::from_mut_ptr(self as *const
// sys_rtmu_futex as *mut sys_rtmu_futex));
// break; } else {
// let fval = self.mem.load(Ordering::SeqCst);
// trace!(
// "FUTEX.lock({}) fastpath failed (res={}),
// \ncurrent fval: #{}\nerror#{}",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut
// sys_rtmu_futex), res, fval, unsafe {
// (*libc::__errno_location()) } );
// res = sys_futex_lock_slowpath(
// &mut self.mem as *const AtomicU32 as *const u32 as
// *mut u32, timeout_ns,
// );
// }
// }
trace!(
"FUTEX.lock({}) returning({}), error_code=({})",
Address::from_mut_ptr(
self as *const sys_rtmu_futex as *mut sys_rtmu_futex
),
res,
unsafe { (*libc::__errno_location())
});
unsafe { (*libc::__errno_location()) }
);
}
pub fn unlock(&mut self, count: u64) {
let fval = self.mem.load(Ordering::SeqCst) as i32;
trace!("FUTEX.unlock({}).fval = {}",
Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex),
fval);
// if self.mem.fetch_sub(1, Ordering::SeqCst) <= 1 {
// trace!("FUTEX.unlock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex as *mut sys_rtmu_futex));
// } else {
// let fval = self.mem.load(Ordering::SeqCst) as i32;
trace!(
"FUTEX.unlock({}).fval = {}",
Address::from_mut_ptr(
self as *const sys_rtmu_futex as *mut sys_rtmu_futex
),
self.mem.load(Ordering::SeqCst) as i32
);
// if self.mem.fetch_sub(1, Ordering::SeqCst) <= 1 {
// trace!("FUTEX.unlock({}) fastpath success",
// Address::from_mut_ptr(self as *const sys_rtmu_futex
// as *mut sys_rtmu_futex)); } else {
let tid = os_gettid() as u32;
match self.mem.compare_exchange(
tid,
0 as u32,
Ordering::SeqCst,
Ordering::SeqCst
) {
Ok(tid) => trace!("FUTEX.unlock.fast_path.success"),
Err(err_val) => {
sys_futex_unlock_slowpath(
&mut self.mem as *const AtomicU32 as *const u32 as *mut u32,
count,
count
);
// }
}
};
// }
}
}
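The rewritten lock() now tries an uncontended fast path first, a compare_exchange of 0 -> caller tid on the futex word, and only falls back to sys_futex_lock_slowpath when the word is already held; unlock() mirrors it with a tid -> 0 exchange before waking waiters. A self-contained sketch of that fast-path/slow-path shape (simplified: no timeout, and the slow path spins instead of issuing a FUTEX_WAIT syscall):

use std::sync::atomic::{AtomicU32, Ordering};

struct FutexSketch {
    word: AtomicU32, // 0 = free, otherwise the owning thread id
}

impl FutexSketch {
    fn lock(&self, tid: u32) {
        // Fast path: claim the free lock by installing our tid.
        if self.word
            .compare_exchange(0, tid, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            return;
        }
        // Placeholder slow path; the real code calls sys_futex_lock_slowpath.
        while self.word
            .compare_exchange(0, tid, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            std::hint::spin_loop();
        }
    }

    fn unlock(&self, tid: u32) {
        // Fast path: release only if we still own the word.
        if self.word
            .compare_exchange(tid, 0, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            // The real code calls sys_futex_unlock_slowpath (FUTEX_WAKE) here.
            self.word.store(0, Ordering::SeqCst);
        }
    }
}

fn main() {
    let f = FutexSketch { word: AtomicU32::new(0) };
    f.lock(42);
    f.unlock(42);
}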
@@ -108,7 +143,7 @@ fn sys_futex_lock_slowpath(futex_ptr: *mut u32, timeout_ns: u64) -> SysResult {
0,
null_ts,
null_cl,
0,
0
)
},
_ => unsafe {
@@ -119,19 +154,19 @@ fn sys_futex_lock_slowpath(futex_ptr: *mut u32, timeout_ns: u64) -> SysResult {
0,
&ns_to_time(timeout_ns) as *const libc::timespec,
null_cl,
0,
0
)
}
};
res as SysResult
// assert_eq!(
// res,
// 0,
// "FUTEX.lock slowpath failed with error code #{}",
// unsafe { (*libc::__errno_location()) }
// );
// trace!("FUTEX.lock slowpath aquired #{:#?}", futex_ptr);
// assert_eq!(
// res,
// 0,
// "FUTEX.lock slowpath failed with error code #{}",
// unsafe { (*libc::__errno_location()) }
// );
// trace!("FUTEX.lock slowpath aquired #{:#?}", futex_ptr);
}
fn sys_futex_unlock_slowpath(futex_ptr: *mut u32, count: u64) {
@@ -146,20 +181,26 @@ fn sys_futex_unlock_slowpath(futex_ptr: *mut u32, count: u64) {
count,
null_ts,
null_cl,
0,
0
)
};
assert_ne!(
res, -1,
res,
-1,
"FUTEX.unlock({}) slowpath failed with res={} and error code #{}",
Address::from_mut_ptr(futex_ptr), res, unsafe { (*libc::__errno_location()) }
Address::from_mut_ptr(futex_ptr),
res,
unsafe { (*libc::__errno_location()) }
);
trace!(
"FUTEX.unlock({}) slowpath released",
Address::from_mut_ptr(futex_ptr)
);
trace!("FUTEX.unlock({}) slowpath released", Address::from_mut_ptr(futex_ptr));
}
fn ns_to_time(ns: u64) -> libc::timespec {
libc::timespec {
tv_sec: (ns / 1_000_000_000) as i64,
tv_nsec: (ns % 1_000_000_000) as i64,
tv_nsec: (ns % 1_000_000_000) as i64
}
}
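ns_to_time just splits a nanosecond timeout into the seconds/nanoseconds pair libc::timespec expects. A small usage sketch (assuming it is called from the same module, since the function is private):

fn ns_to_time_example() {
    let ts = ns_to_time(1_500_000_000); // 1.5 s
    assert_eq!(ts.tv_sec, 1);            // 1_500_000_000 / 1_000_000_000
    assert_eq!(ts.tv_nsec, 500_000_000); // 1_500_000_000 % 1_000_000_000
}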
@@ -48,11 +48,18 @@ pub fn gen_object_encode(
}
};
debug!(
debug_if!(
log_settings::MM_MU_DEBUG,
"ENCODE: gen_object_encode: {:?}, size: {}",
backend_ty, size
backend_ty,
size
);
debug_if!(
log_settings::MM_MU_DEBUG,
"ENCODE: gc_ty: {}, full_gc_ty: {}",
gc_tyid,
full_tyid
);
debug!("ENCODE: gc_ty: {}, full_gc_ty: {}", gc_tyid, full_tyid);
gen_object_encode_internal(is_hybrid, gc_tyid, full_tyid, size, vm)
}
@@ -212,7 +219,7 @@ pub fn allocate_fixed(
) -> Address {
let encode = gen_object_encode(&backendtype, backendtype.size, vm);
trace!("API: allocate fixed ty: {}", ty);
trace_if!(log_settings::MM_MU_TRACE, "API: allocate fixed ty: {}", ty);
check_allocator(backendtype.size, backendtype.alignment, encode)
.to_address()
}
@@ -229,7 +236,7 @@ pub fn allocate_hybrid(
);
let encode = gen_object_encode(&backendtype, size, vm);
trace!("API: allocate hybrd ty: {}", ty);
trace_if!(log_settings::MM_MU_TRACE, "API: allocate hybrd ty: {}", ty);
check_allocator(size, backendtype.alignment, encode).to_address()
}
@@ -252,7 +259,7 @@ pub fn allocate_global(
"global cell cannot be hybrid type"
);
trace!("API: allocate hybrd ty: {}", referenced_type);
trace_if!(
log_settings::MM_MU_TRACE,
"API: allocate hybrd ty: {}",
referenced_type
);
let addr = allocate_fixed(referenced_type, backendtype, vm);
ValueLocation::Direct(RegGroup::GPR, addr)
@@ -16,6 +16,7 @@ use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::RwLock;
//use super::super::super::log_settings;
use super::mm_rtmu_std::*;
use super::*;
@@ -40,23 +41,39 @@ impl fmt::Debug for EMM_ROOTS {
}
pub fn add_to_emm_roots(backstore_addr: Address) {
trace!("=== ADD {:?} TO_EMM_ROOTS ===", backstore_addr);
trace_if!(
log_settings::MM_RTMU_TRACE,
"=== ADD {:?} TO_EMM_ROOTS ===",
backstore_addr
);
{
let mut emm_roots = EMM_ROOTS.write().unwrap();
let raw_addr = backstore_addr.as_usize();
emm_roots.insert(raw_addr);
}
trace!("new EMM_ROOTS: {:#?}", EMM_ROOTS);
trace_if!(
log_settings::MM_RTMU_TRACE,
"new EMM_ROOTS: {:#?}",
EMM_ROOTS
);
}
pub fn delete_from_emm_roots(backstore_addr: Address) {
trace!("=== REMOVE {:?} FROM_EMM_ROOTS ===", backstore_addr);
trace_if!(
log_settings::MM_RTMU_TRACE,
"=== REMOVE {:?} FROM_EMM_ROOTS ===",
backstore_addr
);
{
let mut emm_roots = EMM_ROOTS.write().unwrap();
let raw_addr = backstore_addr.as_usize();
emm_roots.remove(&raw_addr);
}
trace!("new EMM_ROOTS: {:#?}", EMM_ROOTS);
trace_if!(
log_settings::MM_RTMU_TRACE,
"new EMM_ROOTS: {:#?}",
EMM_ROOTS
);
}
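add_to_emm_roots and delete_from_emm_roots above treat EMM_ROOTS as a process-wide registry of raw backing-store addresses behind an RwLock; its declaration is not part of this diff. A plausible sketch of that declaration (the lazy_static! form and the HashSet<usize> element type are inferred from the insert/remove calls and the Debug impl above, not confirmed by the diff):

// Assumed shape of the EMM_ROOTS global; not shown in this commit.
use lazy_static::lazy_static;
use std::collections::HashSet;
use std::sync::RwLock;

lazy_static! {
    pub static ref EMM_ROOTS: RwLock<HashSet<usize>> =
        RwLock::new(HashSet::new());
}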
#[repr(C)]
@@ -122,9 +139,9 @@ impl fmt::Debug for EMMBackStore {
impl EMMBackStore {
pub fn new(_size: usize, ty_id: Option<MuID>) -> EMMBackStore {
trace!("=== EMMBACKSTORE.NEW ===");
trace!("- size: {}", _size);
trace!("- type_id: {:?}", ty_id);
trace_if!(log_settings::MM_RTMU_TRACE, "=== EMMBACKSTORE.NEW ===");
trace_if!(log_settings::MM_RTMU_TRACE, "- size: {}", _size);
trace_if!(log_settings::MM_RTMU_TRACE, "- type_id: {:?}", ty_id);