Commit 044bb53f authored by qinsoon

[wip] one gc test fails

parent 953861eb
......@@ -6,7 +6,6 @@ use heap::immix::ImmixLineMarkTable;
use heap::freelist::FreeListSpace;
use objectmodel;
use common::AddressMap;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
......@@ -342,13 +341,16 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
if cfg!(debug_assertions) {
// check if this object is within the heap, i.e. whether it is a valid object
if !is_valid_object(obj.to_address(), immix_start, immix_end, alloc_map) {
use std::process;
println!("trying to trace an object that is not valid");
println!("address: 0x{:x}", obj);
println!("---");
println!("immix space: 0x{:x} - 0x{:x}", immix_start, immix_end);
println!("lo space: {}", *lo_space.read().unwrap());
panic!("invalid object during tracing");
println!("invalid object during tracing");
process::exit(101);
}
}
......@@ -401,17 +403,19 @@ pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<Obj
let edge = unsafe{field_addr.load::<ObjectReference>()};
if cfg!(debug_assertions) {
use std::process;
// check if this object is within the heap, i.e. whether it is a valid object
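// a zero edge is a null reference and is skipped rather than reported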
if !is_valid_object(edge.to_address(), immix_start, immix_end, alloc_map) {
if !edge.to_address().is_zero() && !is_valid_object(edge.to_address(), immix_start, immix_end, alloc_map) {
println!("trying to follow an edge that is not a valid object");
println!("edge address: 0x{:x}", edge);
println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
println!("base address: 0x{:x}", base);
println!("---");
objectmodel::print_object(base, immix_start, trace_map, alloc_map);
println!("---");
println!("immix space: 0x{:x} - 0x{:x}", immix_start, immix_end);
panic!("invalid object during tracing");
println!("invalid object during tracing");
process::exit(101);
}
}
......
......@@ -50,6 +50,14 @@ pub extern fn gc_stats() {
println!("{:?}", MY_GC.read().unwrap().as_ref().unwrap());
}
#[no_mangle]
pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<RwLock<FreeListSpace>>) {
let space_lock = MY_GC.read().unwrap();
let space = space_lock.as_ref().unwrap();
(space.immix_space.clone(), space.lo_space.clone())
}
#[no_mangle]
pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// set this line to turn on a certain level of debugging info
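For context, a minimal sketch of how a test can consume the new get_spaces() entry point once gc_init has run (names follow the tests further down in this commit; the roots vector is assumed to have been collected by the test):

    mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
    let mut mutator = mm::new_mutator();
    // fetch the spaces that gc_init created instead of building them by hand
    let (shared_space, lo_space) = mm::get_spaces();
    // ... allocate objects and gather `roots: Vec<ObjectReference>` ...
    heap::gc::start_trace(&mut roots, shared_space, lo_space);
    mm::drop_mutator(mutator);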
......@@ -86,7 +94,9 @@ pub extern fn new_mutator() -> Box<ImmixMutatorLocal> {
#[no_mangle]
#[allow(unused_variables)]
pub extern fn drop_mutator(mutator: Box<ImmixMutatorLocal>) {
pub extern fn drop_mutator(mut mutator: Box<ImmixMutatorLocal>) {
mutator.destroy();
// Rust will reclaim the boxed mutator once it goes out of scope
}
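The reworked drop_mutator now takes ownership of the boxed mutator and tears it down explicitly; a minimal sketch of the calling pattern used by the tests below (constants are the tests' own):

    let mut mutator = mm::new_mutator();
    let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
    mutator.init_object(res, 0b1100_0011);
    // destroy() runs first, then the Box frees the mutator
    mm::drop_mutator(mutator);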
......
......@@ -18,6 +18,7 @@ pub fn flip_mark_state() {
}
}
#[allow(unused_variables)]
pub fn print_object(obj: Address, space_start: Address, trace_map: *mut u8, alloc_map: *mut u8) {
let mut cursor = obj;
println!("OBJECT 0x{:x}", obj);
......@@ -29,31 +30,30 @@ pub fn print_object(obj: Address, space_start: Address, trace_map: *mut u8, allo
);
println!("0x{:x} | val: 0x{:x} | {}, hdr: {:b}",
println!("0x{:x} | val: 0x{:15x} | {}, hdr: {:b}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 0), hdr);
cursor = cursor.plus(POINTER_SIZE);
println!("0x{:x} | val: 0x{:x} | {}",
println!("0x{:x} | val: 0x{:15x} | {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 1));
cursor = cursor.plus(POINTER_SIZE);
println!("0x{:x} | val: 0x{:x} | {}",
println!("0x{:x} | val: 0x{:15x} | {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 2));
cursor = cursor.plus(POINTER_SIZE);
println!("0x{:x} | val: 0x{:x} | {}",
println!("0x{:x} | val: 0x{:15x} | {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 3));
cursor = cursor.plus(POINTER_SIZE);
println!("0x{:x} | val: 0x{:x} | {}",
println!("0x{:x} | val: 0x{:15x} | {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 4));
cursor = cursor.plus(POINTER_SIZE);
println!("0x{:x} | val: 0x{:x} | {} {}",
println!("0x{:x} | val: 0x{:15x} | {} {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 5),
{
if !short_encode {
"MORE"
"MORE..."
} else {
""
}
......
......@@ -259,11 +259,6 @@ impl MuThread {
unsafe {set_thread_local(muthread)};
let addr = unsafe {get_thread_local()};
unsafe {get_thread_local()};
unsafe {get_thread_local()};
unsafe {get_thread_local()};
unsafe {get_thread_local()};
unsafe {get_thread_local()};
let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
debug!("new sp: 0x{:x}", new_sp);
......
......@@ -9,24 +9,12 @@ use self::mu::vm::*;
use std::sync::RwLock;
use std::collections::HashMap;
#[test]
#[allow(unused_variables)]
fn test_factorial() {
let vm = factorial();
}
#[test]
#[allow(unused_variables)]
fn test_sum() {
let vm = sum();
}
#[test]
#[allow(unused_variables)]
fn test_global_access() {
let vm = global_access();
}
pub fn sum() -> VM {
let vm = VM::new();
......@@ -183,6 +171,12 @@ pub fn sum() -> VM {
vm
}
#[test]
#[allow(unused_variables)]
fn test_factorial() {
let vm = factorial();
}
#[allow(unused_variables)]
pub fn factorial() -> VM {
let vm = VM::new();
......@@ -365,6 +359,12 @@ pub fn factorial() -> VM {
vm
}
#[test]
#[allow(unused_variables)]
fn test_global_access() {
let vm = global_access();
}
#[allow(unused_variables)]
pub fn global_access() -> VM {
let vm = VM::new();
......@@ -388,7 +388,7 @@ pub fn global_access() -> VM {
vm.set_name(global_a.as_entity(), "a".to_string());
// .funcsig @global_access_sig = () -> ()
let func_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_int64.clone()], vec![]);
let func_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(func_sig.as_entity(), "global_access_sig".to_string());
// .funcdecl @global_access <@global_access_sig>
......@@ -457,3 +457,117 @@ pub fn global_access() -> VM {
vm
}
#[test]
#[allow(unused_variables)]
fn test_alloc_new() {
let vm = alloc_new();
}
#[allow(unused_variables)]
pub fn alloc_new() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
// .typedef @iref_int64 = iref<int<64>>
let type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(type_def_int64.as_entity(), "int64".to_string());
let type_def_iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(type_def_int64.clone()));
vm.set_name(type_def_iref_int64.as_entity(), "iref_int64".to_string());
let type_def_ref_int64 = vm.declare_type(vm.next_id(), MuType_::muref(type_def_int64.clone()));
vm.set_name(type_def_ref_int64.as_entity(), "ref_int64".to_string());
// .const @int_64_0 <@int_64> = 0
// .const @int_64_1 <@int_64> = 1
let const_def_int64_0 = vm.declare_const(vm.next_id(), type_def_int64.clone(), Constant::Int(0));
vm.set_name(const_def_int64_0.as_entity(), "int64_0".to_string());
let const_def_int64_1 = vm.declare_const(vm.next_id(), type_def_int64.clone(), Constant::Int(1));
vm.set_name(const_def_int64_1.as_entity(), "int64_1".to_string());
// .funcsig @alloc_new_sig = () -> ()
let func_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(func_sig.as_entity(), "alloc_new_sig".to_string());
// .funcdecl @alloc_new <@alloc_new_sig>
let func = MuFunction::new(vm.next_id(), func_sig.clone());
vm.set_name(func.as_entity(), "alloc_new".to_string());
let func_id = func.id();
vm.declare_func(func);
// .funcdef @alloc VERSION @v1 <@alloc_new_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, func_sig.clone());
// %blk_0():
let mut blk_0 = Block::new(vm.next_id());
vm.set_name(blk_0.as_entity(), "blk_0".to_string());
// %a = NEW <@int64_t>
let blk_0_a = func_ver.new_ssa(vm.next_id(), type_def_ref_int64.clone());
vm.set_name(blk_0_a.as_entity(), "blk_0_a".to_string());
let blk_0_inst0 = func_ver.new_inst(vm.next_id(), Instruction{
value: Some(vec![blk_0_a.clone_value()]),
ops: RwLock::new(vec![]),
v: Instruction_::New(type_def_int64.clone())
});
// STORE <@int_64> @a @int_64_1
let blk_0_const_int64_1 = func_ver.new_constant(vm.next_id(), const_def_int64_1.clone());
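// note: the binding below rebinds `blk_0_inst0`, shadowing the NEW instruction above,
// so the NEW never reaches the block body assembled further down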
let blk_0_inst0 = func_ver.new_inst(vm.next_id(), Instruction{
value: None,
ops: RwLock::new(vec![blk_0_a.clone(), blk_0_const_int64_1.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
});
// %a_iref = GETIREF <@int_64> @a
let blk_0_a_iref = func_ver.new_ssa(vm.next_id(), type_def_iref_int64.clone());
vm.set_name(blk_0_a_iref.as_entity(), "blk_0_a_iref".to_string());
let blk_0_inst1 = func_ver.new_inst(vm.next_id(), Instruction{
value: Some(vec![blk_0_a_iref.clone_value()]),
ops: RwLock::new(vec![blk_0_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %x = LOAD <@int_64> @a_iref
let blk_0_x = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_0_x.as_entity(), "blk_0_x".to_string());
let blk_0_inst2 = func_ver.new_inst(vm.next_id(), Instruction{
value: Some(vec![blk_0_x.clone_value()]),
ops: RwLock::new(vec![blk_0_a_iref.clone()]),
v: Instruction_::Load{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0
}
});
let blk_0_term = func_ver.new_inst(vm.next_id(), Instruction{
value: None,
ops: RwLock::new(vec![blk_0_x.clone()]),
v: Instruction_::Return(vec![0])
});
let blk_0_content = BlockContent {
args: vec![],
body: vec![blk_0_inst0, blk_0_inst1, blk_0_inst2, blk_0_term],
keepalives: None
};
blk_0.content = Some(blk_0_content);
func_ver.define(FunctionContent{
entry: blk_0.id(),
blocks: {
let mut ret = HashMap::new();
ret.insert(blk_0.id(), blk_0);
ret
}
});
vm.define_func_version(func_ver);
vm
}
\ No newline at end of file
use mu::runtime::mm;
use mu::runtime::mm::heap;
use mu::runtime::mm::heap::immix::ImmixMutatorLocal;
use mu::runtime::mm::heap::immix::ImmixSpace;
use mu::runtime::mm::heap::freelist::FreeListSpace;
use mu::runtime::mm::objectmodel;
use mu::utils::Address;
use std::sync::RwLock;
use std::sync::Arc;
use std::sync::atomic::Ordering;
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 500000;
const WORK_LOAD : usize = 250000;
const IMMIX_SPACE_SIZE : usize = 500 << 20;
const LO_SPACE_SIZE : usize = 500 << 20;
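// 500 << 20 = 500 * 2^20 bytes, i.e. 500 MiB per space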
......@@ -34,11 +28,14 @@ fn test_exhaust_alloc() {
let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
}
mm::drop_mutator(mutator);
}
const LARGE_OBJECT_SIZE : usize = 256;
#[test]
#[allow(unused_variables)]
fn test_exhaust_alloc_large() {
mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = mm::new_mutator();
......@@ -49,25 +46,14 @@ fn test_exhaust_alloc_large() {
let res = mm::alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
}
mm::drop_mutator(mutator);
}
#[test]
fn test_alloc_mark() {
heap::IMMIX_SPACE_SIZE.store(IMMIX_SPACE_SIZE, Ordering::SeqCst);
heap::LO_SPACE_SIZE.store(LO_SPACE_SIZE, Ordering::SeqCst);
let shared_space : Arc<ImmixSpace> = {
let space : ImmixSpace = ImmixSpace::new(heap::IMMIX_SPACE_SIZE.load(Ordering::SeqCst));
Arc::new(space)
};
let lo_space : Arc<RwLock<FreeListSpace>> = {
let space : FreeListSpace = FreeListSpace::new(heap::LO_SPACE_SIZE.load(Ordering::SeqCst));
Arc::new(RwLock::new(space))
};
heap::gc::init(shared_space.clone(), lo_space.clone());
let mut mutator = ImmixMutatorLocal::new(shared_space.clone());
mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = mm::new_mutator();
println!("Trying to allocate 1 object of (size {}, align {}). ", OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
......@@ -82,6 +68,8 @@ fn test_alloc_mark() {
objs.push(unsafe {res.to_object_reference()});
}
let (shared_space, _) = mm::get_spaces();
println!("Start marking");
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
......@@ -100,7 +88,9 @@ fn test_alloc_mark() {
if obj.to_address() >= space_start && obj.to_address() < space_end {
line_mark_table.mark_line_live2(space_start, obj.to_address());
}
}
}
mm::drop_mutator(mutator);
}
#[allow(dead_code)]
......@@ -114,21 +104,9 @@ struct Node<'a> {
#[test]
fn test_alloc_trace() {
heap::IMMIX_SPACE_SIZE.store(IMMIX_SPACE_SIZE, Ordering::SeqCst);
heap::LO_SPACE_SIZE.store(LO_SPACE_SIZE, Ordering::SeqCst);
let shared_space : Arc<ImmixSpace> = {
let space : ImmixSpace = ImmixSpace::new(heap::IMMIX_SPACE_SIZE.load(Ordering::SeqCst));
Arc::new(space)
};
let lo_space : Arc<RwLock<FreeListSpace>> = {
let space : FreeListSpace = FreeListSpace::new(heap::LO_SPACE_SIZE.load(Ordering::SeqCst));
Arc::new(RwLock::new(space))
};
heap::gc::init(shared_space.clone(), lo_space.clone());
let mut mutator = ImmixMutatorLocal::new(shared_space.clone());
mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = mm::new_mutator();
let (shared_space, lo_space) = mm::get_spaces();
println!("Trying to allocate 1 object of (size {}, align {}). ", OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
......@@ -153,4 +131,6 @@ fn test_alloc_trace() {
let mut roots = vec![unsafe {root.to_object_reference()}];
heap::gc::start_trace(&mut roots, shared_space, lo_space);
mm::drop_mutator(mutator);
}
\ No newline at end of file
......@@ -5,7 +5,7 @@ use std::sync::atomic::Ordering;
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 50000000;
const WORK_LOAD : usize = 10000000;
const IMMIX_SPACE_SIZE : usize = 40 << 20;
const LO_SPACE_SIZE : usize = 40 << 20;
......@@ -28,4 +28,6 @@ fn test_gc_no_alive() {
let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
}
mm::drop_mutator(mutator);
}
\ No newline at end of file
......@@ -114,8 +114,7 @@ fn alloc(mutator: &mut ImmixMutatorLocal) -> *mut Node {
}
#[test]
fn start() {
simple_logger::init_with_level(log::LogLevel::Trace).ok();
fn start() {
unsafe {heap::gc::set_low_water_mark();}
mm::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
......@@ -165,5 +164,7 @@ fn start() {
PrintDiagnostics();
println!("Completed in {} msec", tElapsed);
println!("Finished with {} collections", heap::gc::GC_COUNT.load(Ordering::SeqCst));
println!("Finished with {} collections", heap::gc::GC_COUNT.load(Ordering::SeqCst));
mm::drop_mutator(mutator);
}
\ No newline at end of file