
Commit 8ce5c1ce authored by qinsoon

test alloc and trace hybrid into immix space

parent 18b6addf
@@ -291,7 +291,7 @@ fn gc() {
pub const PUSH_BACK_THRESHOLD: usize = 50;
pub static GC_THREADS: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
-const TRACE_GC: bool = false;
+const TRACE_GC: bool = true;
#[allow(unused_variables)]
#[inline(never)]
@@ -399,27 +399,45 @@ pub fn steal_trace_object(
let (type_encode, type_size): (&TypeEncode, ByteOffset) = {
let type_slot = ImmixSpace::get_type_byte_slot_static(obj.to_address());
let encode = unsafe { type_slot.load::<MediumObjectEncode>() };
-let (type_id, type_size) = if encode.is_medium() {
-(encode.type_id(), encode.size())
-} else {
-let small_encode: &SmallObjectEncode = unsafe { transmute(&encode) };
-(small_encode.type_id(), encode.size())
-};
+let small_encode: &SmallObjectEncode = unsafe { transmute(&encode) };
+let (type_id, type_size) = if small_encode.is_small() {
+trace_if!(TRACE_GC, " trace small obj: {} ({:?})", obj, small_encode);
+trace_if!(
+TRACE_GC,
+" id {}, size {}",
+small_encode.type_id(),
+small_encode.size()
+);
+(small_encode.type_id(), small_encode.size())
+} else {
+trace_if!(TRACE_GC, " trace medium obj: {} ({:?})", obj, encode);
+trace_if!(
+TRACE_GC,
+" id {}, size {}",
+encode.type_id(),
+encode.size()
+);
+(encode.type_id(), encode.size())
+};
(&GlobalTypeTable::table()[type_id], type_size as ByteOffset)
};
let mut offset: ByteOffset = 0;
trace_if!(TRACE_GC, " -fix part-");
for i in 0..type_encode.fix_len() {
trace_word(type_encode.fix_ty(i), obj, offset, local_queue, job_sender);
offset += POINTER_SIZE as ByteOffset;
}
// for variable part
trace_if!(TRACE_GC, " -var part-");
while offset < type_size {
for i in 0..type_encode.var_len() {
trace_word(type_encode.var_ty(i), obj, offset, local_queue, job_sender);
offset += POINTER_SIZE as ByteOffset;
}
}
trace_if!(TRACE_GC, " -done-");
}
SpaceDescriptor::Freelist => unimplemented!()
}
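Note on the scan above: the tracer walks the fixed part one word at a time, then repeats the variable-part encoding until it reaches the object's total size. A standalone sketch of that walk, not from the commit, assuming 8-byte words; the sizes in main are chosen to match the hybrid test added below:

const POINTER_SIZE: usize = 8;

// Returns the byte offsets the tracer would visit for an object with
// `fix_len` fixed-part words, `var_len` words per var-part element,
// and `type_size` total bytes.
fn visited_offsets(fix_len: usize, var_len: usize, type_size: usize) -> Vec<usize> {
    let mut offsets = vec![];
    let mut offset = 0;
    // fixed part: one encoded word type per word
    for _ in 0..fix_len {
        offsets.push(offset);
        offset += POINTER_SIZE;
    }
    // variable part: the var encoding repeats until the object ends
    while offset < type_size {
        for _ in 0..var_len {
            offsets.push(offset);
            offset += POINTER_SIZE;
        }
    }
    offsets
}

fn main() {
    // e.g. the hybrid in the test below: 32-byte fix part (4 words),
    // 16-byte var element (2 words), 96 bytes total -> 4 + 8 = 12 words
    assert_eq!(visited_offsets(4, 2, 96).len(), 12);
}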
@@ -16,6 +16,7 @@ use common::ptr::*;
use heap::*;
use heap::immix::*;
use heap::gc;
+use utils::*;
use utils::mem::memmap;
use utils::mem::memsec;
@@ -232,6 +233,8 @@ impl ImmixSpace {
self.cur_growth_rate = n_blocks;
}
+pub fn cleanup(&self) {}
#[inline(always)]
pub fn get(addr: Address) -> Raw<ImmixSpace> {
unsafe { Raw::from_addr(addr.mask(SPACE_LOWBITS_MASK)) }
@@ -360,11 +363,18 @@ impl ImmixSpace {
unsafe {
memsec::memzero(&mut self.line_mark_table[0] as *mut LineMark, lines);
}
+// erase gc bytes
+let words = self.cur_size >> LOG_POINTER_SIZE;
+unsafe {
+memsec::memzero(&mut self.gc_byte_table[0] as *mut u8, words);
+}
}
#[allow(unused_variables)]
#[allow(unused_assignments)]
pub fn sweep(&mut self) {
debug!("=== {:?} Sweep ===", self.desc);
debug_assert_eq!(
self.n_used_blocks() + self.n_usable_blocks(),
self.cur_blocks
@@ -420,7 +430,6 @@ impl ImmixSpace {
}
if cfg!(debug_assertions) {
debug!("=== {:?} GC ===", self.desc);
debug!(
"free lines = {} of {} total ({} blocks)",
free_lines,
@@ -449,6 +458,8 @@ impl ImmixSpace {
self.n_used_blocks() + self.n_usable_blocks(),
self.cur_blocks
);
trace!("=======================");
}
fn trace_details(&self) {
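The new gc-byte erase above sizes its memzero at one byte per heap word. A quick standalone check, assuming 8-byte words and a hypothetical 1 MiB space:

fn main() {
    const LOG_POINTER_SIZE: usize = 3; // 8-byte words
    let cur_size: usize = 1 << 20; // hypothetical 1 MiB space
    let words = cur_size >> LOG_POINTER_SIZE;
    assert_eq!(words, 131_072); // gc_byte_table entries zeroed
}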
@@ -202,7 +202,14 @@ pub extern "C" fn gc_init(config: GCConfig) {
/// destroys current GC instance
#[no_mangle]
pub extern "C" fn gc_destroy() {
-*MY_GC.write().unwrap() = None;
+objectmodel::cleanup();
+let mut gc_lock = MY_GC.write().unwrap();
+{
+let gc = gc_lock.as_ref().unwrap();
+gc.immix_tiny.cleanup();
+gc.immix_normal.cleanup();
+}
+*gc_lock = None;
}
/// creates a mutator
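The rewritten gc_destroy takes the write lock once and scopes the shared borrow so both spaces can be cleaned up before the instance is dropped. A self-contained sketch of that pattern with stand-in types (only the shape matches the real code):

use std::sync::RwLock;

struct Space;
impl Space {
    fn cleanup(&self) { /* unmap memory, etc. */ }
}
struct GC {
    immix_tiny: Space,
    immix_normal: Space
}

static MY_GC: RwLock<Option<GC>> = RwLock::new(None);

fn gc_destroy() {
    let mut gc_lock = MY_GC.write().unwrap();
    {
        // the shared borrow of the GC lives only as long as cleanup needs it
        let gc = gc_lock.as_ref().unwrap();
        gc.immix_tiny.cleanup();
        gc.immix_normal.cleanup();
    }
    // the borrow has ended; now the instance itself can be dropped
    *gc_lock = None;
}

fn main() {
    *MY_GC.write().unwrap() = Some(GC {
        immix_tiny: Space,
        immix_normal: Space
    });
    gc_destroy();
}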
@@ -25,7 +25,17 @@ mod header;
pub static INIT_MARK_STATE: usize = 1;
static MARK_STATE: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub fn init() {}
#[cfg(feature = "use-sidemap")]
pub fn init() {
use objectmodel::sidemap::*;
GlobalTypeTable::init();
}
#[cfg(feature = "use-sidemap")]
pub fn cleanup() {
use objectmodel::sidemap::*;
GlobalTypeTable::cleanup();
}
pub fn flip_mark_state() {
let mark_state = MARK_STATE.load(atomic::Ordering::SeqCst);
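The new init and cleanup are compiled only with the use-sidemap feature; presumably the empty init above is gated on the inverse. A minimal standalone sketch of the feature-gating pattern, with hypothetical bodies:

#[cfg(not(feature = "use-sidemap"))]
pub fn init() {}
#[cfg(not(feature = "use-sidemap"))]
pub fn cleanup() {}

#[cfg(feature = "use-sidemap")]
pub fn init() {
    // set up the global side-map type table
}
#[cfg(feature = "use-sidemap")]
pub fn cleanup() {
    // tear it down again
}

fn main() {
    // exactly one definition of each is compiled per build
    init();
    cleanup();
}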
@@ -96,6 +96,15 @@ impl GlobalTypeTable {
trace!("Global Type Table initialization done");
}
+pub fn cleanup() {
+{
+let mut mmap_lock = GTT_MMAP.lock().unwrap();
+*mmap_lock = None;
+}
+GLOBAL_TYPE_TABLE_PTR.store(0, Ordering::Relaxed);
+GLOBAL_TYPE_TABLE_META.store(0, Ordering::Relaxed);
+}
#[inline(always)]
fn table_meta() -> &'static mut GlobalTypeTable {
unsafe { mem::transmute(GLOBAL_TYPE_TABLE_META.load(Ordering::Relaxed)) }
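GLOBAL_TYPE_TABLE_PTR and GLOBAL_TYPE_TABLE_META hold raw addresses in atomics, so cleanup drops the backing mmap and stores 0. A standalone sketch of the pointer-in-an-atomic pattern, with a Box standing in for the mmap:

use std::sync::atomic::{AtomicUsize, Ordering};

static TABLE_PTR: AtomicUsize = AtomicUsize::new(0);

fn main() {
    // init: publish the table's address
    let table = Box::new([0u64; 4]); // stand-in for the mmap'd table
    TABLE_PTR.store(Box::into_raw(table) as usize, Ordering::Relaxed);

    // ... table is in use ...

    // cleanup: retake ownership so the allocation is freed, then clear the slot
    let p = TABLE_PTR.swap(0, Ordering::Relaxed) as *mut [u64; 4];
    unsafe {
        drop(Box::from_raw(p));
    }
}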
@@ -17,6 +17,7 @@ use objectmodel::sidemap::type_encode::WordType;
use utils::*;
use std::mem::transmute;
+use std::fmt;
pub const MAX_TINY_OBJECT: ByteSize = 32;
pub const MAX_SMALL_OBJECT: ByteSize = 64;
@@ -30,7 +31,7 @@ pub const MAX_MEDIUM_OBJECT: ByteSize = 2048;
/// u, 1 bit - unused
/// ri, 2 bits - ref encode for ith word
#[repr(C, packed)]
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
pub struct TinyObjectEncode {
b: u8
}
@@ -51,11 +52,17 @@ impl TinyObjectEncode {
}
#[inline(always)]
pub fn field(self, i: usize) -> WordType {
-let f = self.b & (0b11u8 << (i << 1));
+let f = (self.b >> (i << 1)) & 0b11u8;
unsafe { transmute(f) }
}
}
+impl fmt::Debug for TinyObjectEncode {
+fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+write!(f, "TinyObjectEncode ({:08b})", self.b)
+}
+}
#[cfg(test)]
mod tiny_object_encoding {
use super::*;
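The field() fix above matters for any i > 0: the old expression masked the two bits in place but never shifted them down, so the transmuted WordType was wrong for every field except the first. A standalone check of both expressions with a made-up encoding byte:

fn main() {
    let b: u8 = 0b11_10_01_00; // four 2-bit fields; field i sits at bits 2i..2i+2
    for i in 0..4 {
        let old = b & (0b11u8 << (i << 1)); // leaves the field in place
        let new = (b >> (i << 1)) & 0b11u8; // shifts down first, then masks
        assert_eq!(new as usize, i); // fields were encoded as 0, 1, 2, 3
        if i > 0 {
            assert_ne!(old, new); // the unshifted value is out of the 0..=3 range
        }
    }
}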
@@ -94,7 +101,7 @@ mod tiny_object_encoding {
/// sz, 2 bits - size encode (00: 32, 01:40, 10: 48, 11: 56)
/// type_id, 13 bits - type id
#[repr(C, packed)]
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
pub struct SmallObjectEncode {
w: u16
}
@@ -102,6 +109,10 @@ pub struct SmallObjectEncode {
pub const SMALL_ID_WIDTH: usize = 13;
impl SmallObjectEncode {
+#[inline(always)]
+pub fn new(w: u16) -> SmallObjectEncode {
+SmallObjectEncode { w }
+}
#[inline(always)]
pub fn is_small(self) -> bool {
(self.w >> 15) == 1
@@ -115,7 +126,13 @@ impl SmallObjectEncode {
#[inline(always)]
pub fn type_id(self) -> TypeID {
debug_assert!(self.is_small());
-(self.w & (1u16 << (SMALL_ID_WIDTH + 1) - 1)) as usize
+(self.w & ((1u16 << SMALL_ID_WIDTH) - 1)) as usize
}
}
+impl fmt::Debug for SmallObjectEncode {
+fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+write!(f, "SmallObjectEncode ({:016b})", self.w)
+}
+}
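The type_id() fix above is an operator-precedence repair: 1u16 << (SMALL_ID_WIDTH + 1) - 1 parses as 1 << 13, a single bit, rather than a mask of the 13 low bits. A standalone check with an arbitrary encoding:

fn main() {
    const SMALL_ID_WIDTH: usize = 13;
    let w: u16 = 0b1010_1010_1010_1010;
    let old = w & (1u16 << (SMALL_ID_WIDTH + 1) - 1); // == w & (1 << 13)
    let new = w & ((1u16 << SMALL_ID_WIDTH) - 1); // == w & 0x1fff
    assert_eq!(old, 0b0010_0000_0000_0000); // just bit 13, not a type id
    assert_eq!(new, 0b0000_1010_1010_1010); // the 13 low bits
}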
@@ -169,12 +186,16 @@ mod small_object_encoding {
/// type_id, 23 bits - type id
/// size , 8 bits - size encode (sz -> 64 + sz * 8)
#[repr(C, packed)]
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
pub struct MediumObjectEncode {
d: u32
}
impl MediumObjectEncode {
+#[inline(always)]
+pub fn new(d: u32) -> MediumObjectEncode {
+MediumObjectEncode { d }
+}
#[inline(always)]
pub fn is_medium(self) -> bool {
(self.d >> 31) == 0
@@ -192,6 +213,12 @@ impl MediumObjectEncode {
}
}
+impl fmt::Debug for MediumObjectEncode {
+fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+write!(f, "MediumObjectEncode ({:032b})", self.d)
+}
+}
#[cfg(test)]
mod medium_object_encoding {
use super::*;
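Putting the MediumObjectEncode layout together, per the doc comment above: bit 31 clear means is_medium, bits 8..31 carry the type id, and the low byte sz encodes a size of 64 + sz * 8 bytes. A standalone round trip with made-up values:

fn main() {
    let id: u32 = 42; // hypothetical 23-bit type id
    let sz: u32 = 4; // size encode: 64 + 4 * 8 = 96 bytes
    let d = (id << 8) | sz;
    assert_eq!(d >> 31, 0); // is_medium
    assert_eq!((d >> 8) & ((1 << 23) - 1), id);
    assert_eq!(64 + (d & 0xff) * 8, 96);
}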
@@ -16,5 +16,6 @@
extern crate log;
mod test_immix_tiny;
+mod test_immix_normal;
//mod test_gcbench;
//mod test_gc_linked_list;
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate mu_gc;
extern crate mu_utils;
extern crate log;
use self::mu_gc::*;
use self::mu_gc::heap;
use self::mu_gc::heap::*;
use self::mu_gc::heap::immix::*;
use self::mu_gc::heap::gc::*;
use self::mu_gc::objectmodel::sidemap::*;
use self::mu_utils::*;
use std::sync::atomic::Ordering;
#[allow(dead_code)]
pub const SPACIOUS_SPACE_SIZE: usize = 500 << 20; // 500mb
#[allow(dead_code)]
pub const LIMITED_SPACE_SIZE: usize = 20 << 20; // 20mb
#[allow(dead_code)]
pub const SMALL_SPACE_SIZE: usize = 1 << 19; // 512kb
#[test]
pub fn test_normal_immix_linkedlist() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const OBJECT_SIZE: usize = 32; // small object
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = 4;
start_logging_trace();
gc_init(GCConfig {
immix_tiny_size: 0,
immix_normal_size: IMMIX_SPACE_SIZE,
lo_size: 0,
n_gcthreads: 1,
enable_gc: true
});
// insert type (id:0, 32 bytes, 1st field is a reference)
let small_header = {
let fix_ty = {
let mut ret = [0u8; 63];
ret[0] = 0b00000001u8;
ret
};
let id = GlobalTypeTable::insert_small_entry(TypeEncode::new(4, fix_ty, 0, [0; 63]));
println!("type id = {}", id);
let raw_encode = 0b1000_0000_0000_0000u16 | ((id & 0b0001_1111_1111_1111usize) as u16);
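// (bit 15 = is_small flag; bits 13..14 = size encode, 0b00 -> 32 bytes = OBJECT_SIZE; low 13 bits = type id)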
SmallObjectEncode::new(raw_encode)
};
println!("Small Header: {:?}", small_header);
let (_, normal_space) = get_spaces();
let mutator = new_mutator();
let mut last_obj: Address = unsafe { Address::zero() };
for _ in 0..WORK_LOAD {
yieldpoint(mutator);
let res = muentry_alloc_normal(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_small_object(mutator, res, small_header);
// the first field of this object points to the last object
unsafe {
res.to_address().store(last_obj);
}
last_obj = res.to_address();
}
// keep the linked list alive
let last_obj = unsafe { last_obj.to_object_reference() };
add_to_root(last_obj);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 1);
assert_eq!(normal_space.last_gc_used_lines, 2);
// another gc
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 2);
assert_eq!(normal_space.last_gc_used_lines, 2);
// set the linked list free, and do gc
remove_root(last_obj);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 3);
assert_eq!(normal_space.last_gc_used_lines, 0);
drop_mutator(mutator);
gc_destroy();
}
#[test]
pub fn test_normal_immix_hybrid() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const OBJECT_SIZE: usize = 16; // small object
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = 4;
const HYBRID_LEN: usize = 4;
const HYBRID_FIX_SIZE: usize = 32;
const HYBRID_VAR_SIZE: usize = OBJECT_SIZE;
const HYBRID_SIZE: usize = HYBRID_FIX_SIZE + HYBRID_LEN * HYBRID_VAR_SIZE;
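// hybrid layout: 32-byte fix part + 4 var elements * 16 bytes = 96 bytes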
start_logging_trace();
gc_init(GCConfig {
immix_tiny_size: IMMIX_SPACE_SIZE,
immix_normal_size: IMMIX_SPACE_SIZE,
lo_size: 0,
n_gcthreads: 1,
enable_gc: true
});
let tiny_header = TinyObjectEncode::new(0);
let hybrid_header = {
let var_ty = {
let mut ret = [0u8; 63];
ret[0] = 0b01u8;
ret
};
let encode = TypeEncode::new(
(HYBRID_FIX_SIZE >> LOG_POINTER_SIZE) as u8,
[0; 63],
(HYBRID_VAR_SIZE >> LOG_POINTER_SIZE) as u8,
var_ty
);
let id = GlobalTypeTable::insert_large_entry(encode);
println!("hybrid type id = {}", id);
let raw_encode = ((id << 8) | 0b100usize) as u32;
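// (bit 31 clear = is_medium; bits 8..31 = type id; low byte 0b100 -> 64 + 4 * 8 = 96 bytes = HYBRID_SIZE)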
MediumObjectEncode::new(raw_encode)
};
println!("Tiny header: {:?}", tiny_header);
println!("Hybrid header: {:?}", hybrid_header);
let (tiny_space, normal_space) = get_spaces();
let mutator = new_mutator();
// allocate 4 tiny objects
let mut tiny_objects = vec![];
for _ in 0..WORK_LOAD {
yieldpoint(mutator);
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res, tiny_header);
tiny_objects.push(res);
}
// alloc a hybrid
let hyb = muentry_alloc_normal(mutator, HYBRID_SIZE, OBJECT_ALIGN);
muentry_init_medium_object(mutator, hyb, hybrid_header);
// store references to the tiny objects in the var part of the hybrid
let hyb_base = hyb.to_address();
for i in 0..WORK_LOAD {
let offset: ByteOffset = (HYBRID_FIX_SIZE + (i * HYBRID_VAR_SIZE)) as isize;
unsafe { (hyb_base + offset).store(tiny_objects[i]) }
}
add_to_root(hyb);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 1);
assert_eq!(tiny_space.last_gc_used_lines, 2);
assert_eq!(normal_space.last_gc_used_lines, 2);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 2);
assert_eq!(tiny_space.last_gc_used_lines, 2);
assert_eq!(normal_space.last_gc_used_lines, 2);
remove_root(hyb);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 3);
assert_eq!(tiny_space.last_gc_used_lines, 0);
assert_eq!(normal_space.last_gc_used_lines, 0);
drop_mutator(mutator);
gc_destroy();
}
@@ -26,23 +26,18 @@ use self::mu_utils::*;
use std::sync::atomic::Ordering;
#[allow(dead_code)]
-const SPACIOUS_SPACE_SIZE: usize = 500 << 20; // 500mb
+pub const SPACIOUS_SPACE_SIZE: usize = 500 << 20; // 500mb
#[allow(dead_code)]
-const LIMITED_SPACE_SIZE: usize = 20 << 20; // 20mb
+pub const LIMITED_SPACE_SIZE: usize = 20 << 20; // 20mb
#[allow(dead_code)]
-const SMALL_SPACE_SIZE: usize = 1 << 19; // 512kb
-#[allow(dead_code)]
-const IMMIX_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
-#[allow(dead_code)]
-const LO_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
+pub const SMALL_SPACE_SIZE: usize = 1 << 19; // 512kb
#[test]
pub fn test_tiny_immix_alloc() {
+const IMMIX_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = BYTES_IN_BLOCK / OBJECT_SIZE;
// we should see the slow paths get invoked exactly twice
start_logging_trace();
gc_init(GCConfig {
@@ -69,6 +64,7 @@ pub fn test_tiny_immix_alloc() {
#[test]
pub fn test_tiny_immix_gc() {
+const IMMIX_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = BYTES_IN_BLOCK / OBJECT_SIZE;
@@ -172,7 +168,9 @@ pub fn test_tiny_immix_linkedlist() {
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res, header);
// the first field of this object points to the last object
-unsafe { res.to_address().store(last_obj); }
+unsafe {
+res.to_address().store(last_obj);
+}
last_obj = res.to_address();
}
@@ -197,247 +195,3 @@ pub fn test_tiny_immix_linkedlist() {
drop_mutator(mutator);
gc_destroy();
}
//
//const LARGE_OBJECT_SIZE: usize = 256;
//
//#[test]
//#[allow(unused_variables)]
//fn test_exhaust_alloc_large() {
// gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
// let mut mutator = gc::new_mutator();
//
// start_logging_trace();
//
// for _ in 0..WORK_LOAD {
// mutator.yieldpoint();
//
// let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
// gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
// }
//
// mutator.destroy();
//}
//
//#[test]
//#[allow(unused_variables)]
//fn test_alloc_large_lo_trigger_gc() {
// const KEEP_N_ROOTS: usize = 1;
// let mut roots: usize = 0;
//
// gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8, true);
// let mut mutator = gc::new_mutator();
//
// start_logging_trace();
//
// for _ in 0..WORK_LOAD {
// mutator.yieldpoint();
//
// let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
// gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
//
// if roots < KEEP_N_ROOTS {
// gc::add_to_root(res);
// roots += 1;
// }
// }
//
// mutator.destroy();
//}
//
//#[test]
//#[allow(unused_variables)]
//fn test_alloc_large_both_trigger_gc() {
// gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8, true);
// let mut mutator = gc::new_mutator();
//
// start_logging_trace();
//
// // this will exhaust the lo space
// for _ in 0..10 {
// mutator.yieldpoint();
//
// let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
// gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
// }
//
// // this will trigger a gc, and allocate it in the collected space
// let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
// gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
//
// // this will trigger gcs for immix space
// for _ in 0..100000 {
// mutator.yieldpoint();
//
// let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
// mutator.init_object(res, FIXSIZE_REFx2_ENCODE);
// }
//
// mutator.destroy();
//}
//
//#[test]
//#[cfg(feature = "use-sidemap")]
//fn test_alloc_mark() {
// gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
// let mut mutator = gc::new_mutator();
//
// println!(
// "Trying to allocate 1 object of (size {}, align {}). ",
// OBJECT_SIZE,
// OBJECT_ALIGN
// );
// const ACTUAL_OBJECT_SIZE: usize = OBJECT_SIZE;
// println!(
// "Considering header size of {}, an object should be {}. ",
// 0,
// ACTUAL_OBJECT_SIZE
// );
//
// println!(
// "Trying to allocate {} objects, which will take roughly {} bytes",
// WORK_LOAD,
// WORK_LOAD * ACTUAL_OBJECT_SIZE
// );
// let mut objs = vec![];
// for _ in 0..WORK_LOAD {
// let res = mutator.alloc(ACTUAL_OBJECT_SIZE, OBJECT_ALIGN);
// mutator.init_object(res, FIXSIZE_REFx2_ENCODE);
//