Commit 18b6addf authored by qinsoon

debug immix tiny

parent 10495f88
@@ -42,8 +42,8 @@ const NO_CONTROLLER: isize = -1;
pub fn init(n_gcthreads: usize) {
CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
GC_THREADS.store(n_gcthreads, Ordering::SeqCst);
GC_COUNT.store(0, Ordering::SeqCst);
}
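// GC_COUNT is reset above and asserted on in the tests at the bottom of this
// diff. A minimal sketch of the counter pattern, assuming GC_COUNT is an
// AtomicUsize bumped once per collection (the increment site is in an elided
// part of gc(); note_gc_started is a hypothetical name for illustration):
fn note_gc_started() {
    // one increment per collection; tests read it back with load(SeqCst)
    GC_COUNT.fetch_add(1, atomic::Ordering::SeqCst);
}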
pub fn trigger_gc() {
@@ -167,19 +167,9 @@ pub fn sync_barrier(mutator: &mut Mutator) {
// init roots
{
let mut roots = ROOTS.write().unwrap();
// clear existing roots (roots from last gc)
roots.clear();
// add explicit roots
let gc = MY_GC.read().unwrap();
for objref in gc.as_ref().unwrap().roots.iter() {
roots.push(*objref);
}
// scan its stack
let mut thread_roots = stack_scan();
roots.append(&mut thread_roots);
ROOTS.write().unwrap().append(&mut thread_roots);
}
// wait for all mutators to be blocked
@@ -262,18 +252,19 @@ fn gc() {
trace!("GC starts");
// creates root deque
let mut roots: &mut Vec<ObjectReference> = &mut ROOTS.write().unwrap();
trace!("total roots: {}", roots.len());
// mark & trace
{
// creates root deque
let mut roots: &mut Vec<ObjectReference> = &mut ROOTS.write().unwrap();
let gccontext_guard = MY_GC.read().unwrap();
let gccontext = gccontext_guard.as_ref().unwrap();
for obj in gccontext.roots.iter() {
roots.push(*obj);
}
trace!("total roots: {}", roots.len());
start_trace(&mut roots);
}
@@ -290,12 +281,18 @@ fn gc() {
}
objectmodel::flip_mark_state();
// clear existing roots (roots from last gc)
ROOTS.write().unwrap().clear();
trace!("GC finishes");
}
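// Roots lifecycle in this version, condensed as comments (names from this
// file; controller selection itself happens in the elided sync_barrier code):
//   1. each mutator at the barrier appends its stack roots:
//          ROOTS.write().unwrap().append(&mut thread_roots);
//   2. the controller thread runs gc(), which pushes the explicit roots from
//      MY_GC and calls start_trace(&mut roots);
//   3. gc() clears ROOTS at its end, so the set starts empty next cycle.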
pub const PUSH_BACK_THRESHOLD: usize = 50;
pub static GC_THREADS: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
const TRACE_GC: bool = false;
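// TRACE_GC gates the trace_if! calls added below. The real macro is defined
// elsewhere in this crate; trace_if_sketch is a hypothetical equivalent shown
// only to illustrate why a const-false condition compiles the logging away:
macro_rules! trace_if_sketch {
    ($cond:expr, $($arg:tt)*) => {
        if $cond {
            trace!($($arg)*);
        }
    };
}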
#[allow(unused_variables)]
#[inline(never)]
pub fn start_trace(work_stack: &mut Vec<ObjectReference>) {
@@ -310,7 +307,9 @@ pub fn start_trace(work_stack: &mut Vec<ObjectReference>) {
let (sender, receiver) = channel::<ObjectReference>();
let mut gc_threads = vec![];
for _ in 0..GC_THREADS.load(atomic::Ordering::SeqCst) {
let n_gcthreads = GC_THREADS.load(atomic::Ordering::SeqCst);
trace!("launching {} gc threads...", n_gcthreads);
for _ in 0..n_gcthreads {
let new_stealer = stealer.clone();
let new_sender = sender.clone();
let t = thread::spawn(move || { start_steal_trace(new_stealer, new_sender); });
@@ -345,14 +344,18 @@ fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender: mpsc::Sender
loop {
let work = {
if !local_queue.is_empty() {
local_queue.pop().unwrap()
let ret = local_queue.pop().unwrap();
trace_if!(TRACE_GC, "got object {} from local queue", ret);
ret
} else {
let work = stealer.steal();
match work {
let ret = match work {
Steal::Empty => return,
Steal::Abort => continue,
Steal::Data(obj) => obj
}
};
trace_if!(TRACE_GC, "got object {} from global queue", ret);
ret
}
};
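// The block above prefers thread-local work and falls back to stealing. The
// same control flow, condensed into a standalone helper (a sketch; next_work
// is an illustrative name, and Vec stands in for the local queue as in this
// file):
fn next_work<T>(local_queue: &mut Vec<T>, stealer: &Stealer<T>) -> Option<T> {
    if let Some(obj) = local_queue.pop() {
        return Some(obj); // cheap path: thread-local work first
    }
    loop {
        match stealer.steal() {
            Steal::Empty => return None,          // no work anywhere: worker exits
            Steal::Abort => continue,             // lost a race with another stealer; retry
            Steal::Data(obj) => return Some(obj), // stolen from the global queue
        }
    }
}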
@@ -368,24 +371,6 @@ pub fn steal_trace_object(
job_sender: &mpsc::Sender<ObjectReference>,
mark_state: u8
) {
// if cfg!(debug_assertions) {
// // check if this object in within the heap, if it is an object
// if !immix_space.is_valid_object(obj.to_address()) &&
// !lo_space.is_valid_object(obj.to_address())
// {
// use std::process;
//
// println!("trying to trace an object that is not valid");
// println!("address: 0x{:x}", obj);
// println!("---");
// println!("immix space: {}", immix_space);
// println!("lo space: {}", lo_space);
//
// println!("invalid object during tracing");
// process::exit(101);
// }
// }
match SpaceDescriptor::get(obj) {
SpaceDescriptor::ImmixTiny => {
// mark current object traced
@@ -395,6 +380,7 @@
ImmixSpace::get_type_byte_slot_static(obj.to_address()).load::<TinyObjectEncode>()
};
trace_if!(TRACE_GC, " trace tiny obj: {} ({:?})", obj, encode);
for i in 0..encode.n_fields() {
trace_word(
encode.field(i),
@@ -448,12 +434,23 @@ fn trace_word(
local_queue: &mut Vec<ObjectReference>,
job_sender: &mpsc::Sender<ObjectReference>
) {
trace_if!(
TRACE_GC,
" follow field (offset: {}) of {} with type {:?}",
offset,
obj,
word_ty
);
match word_ty {
WordType::NonRef => {}
WordType::Ref => {
let field_addr = obj.to_address() + offset;
let edge = unsafe { field_addr.load::<ObjectReference>() };
if edge.to_address().is_zero() {
return;
}
match SpaceDescriptor::get(edge) {
SpaceDescriptor::ImmixTiny | SpaceDescriptor::ImmixNormal => {
if !immix::is_object_traced(edge) {
......
@@ -152,7 +152,7 @@ impl ImmixSpace {
space.cur_blocks = 0;
trace!(" initialized cur_end/size/blocks");
space.total_blocks = BLOCKS_IN_SPACE;
space.total_blocks = space_size >> LOG_BYTES_IN_BLOCK;
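// (equivalent to space_size / BYTES_IN_BLOCK; e.g. with a hypothetical
// LOG_BYTES_IN_BLOCK of 16, i.e. 64 KiB blocks, a 64 MiB space gives
// 67108864 >> 16 = 1024 blocks)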
unsafe {
// use ptr::write to avoid destruction of the old values
use std::ptr;
@@ -319,7 +319,11 @@ impl ImmixSpace {
#[allow(unreachable_code)]
pub fn get_next_usable_block(&mut self) -> Option<Raw<ImmixBlock>> {
if TRACE_ALLOC {
self.trace_details();
debug!(
"{} blocks usable, {} blocks used",
self.n_usable_blocks(),
self.n_used_blocks()
);
}
let new_block = self.usable_blocks.lock().unwrap().pop_front();
match new_block {
@@ -361,79 +365,90 @@ impl ImmixSpace {
#[allow(unused_variables)]
#[allow(unused_assignments)]
pub fn sweep(&mut self) {
debug_assert_eq!(
self.n_used_blocks() + self.n_usable_blocks(),
self.cur_blocks
);
// some statistics
let mut free_lines = 0;
let mut used_lines = 0;
let mut usable_blocks = 0;
let mut full_blocks = 0;
let mut used_blocks_lock = self.used_blocks.lock().unwrap();
let mut usable_blocks_lock = self.usable_blocks.lock().unwrap();
usable_blocks = usable_blocks_lock.len();
let mut live_blocks: LinkedList<Raw<ImmixBlock>> = LinkedList::new();
while !used_blocks_lock.is_empty() {
let block = used_blocks_lock.pop_front().unwrap();
let line_index = self.get_line_mark_index(block.mem_start());
let block_index = self.get_block_mark_index(block.mem_start());
let mut has_free_lines = false;
// find free lines in the block, and set their line mark as free
// (not zeroing the memory yet)
for i in line_index..(line_index + LINES_IN_BLOCK) {
if self.line_mark_table[i] != LineMark::Live &&
self.line_mark_table[i] != LineMark::ConservLive
{
has_free_lines = true;
self.line_mark_table[i] = LineMark::Free;
free_lines += 1;
} else {
used_lines += 1;
{
let mut used_blocks_lock = self.used_blocks.lock().unwrap();
let mut usable_blocks_lock = self.usable_blocks.lock().unwrap();
let mut all_blocks: LinkedList<Raw<ImmixBlock>> = {
let mut ret = LinkedList::new();
ret.append(&mut used_blocks_lock);
ret.append(&mut usable_blocks_lock);
ret
};
debug_assert_eq!(all_blocks.len(), self.cur_blocks);
while !all_blocks.is_empty() {
let block = all_blocks.pop_front().unwrap();
let line_index = self.get_line_mark_index(block.mem_start());
let block_index = self.get_block_mark_index(block.mem_start());
let mut has_free_lines = false;
// find free lines in the block, and set their line mark as free
// (not zeroing the memory yet)
for i in line_index..(line_index + LINES_IN_BLOCK) {
if self.line_mark_table[i] != LineMark::Live &&
self.line_mark_table[i] != LineMark::ConservLive
{
has_free_lines = true;
self.line_mark_table[i] = LineMark::Free;
free_lines += 1;
} else {
used_lines += 1;
}
}
}
if has_free_lines {
self.block_mark_table[block_index] = BlockMark::Usable;
usable_blocks += 1;
usable_blocks_lock.push_front(block);
} else {
self.block_mark_table[block_index] = BlockMark::Full;
full_blocks += 1;
live_blocks.push_front(block);
if has_free_lines {
trace!("Block {} is usable", block.addr());
self.block_mark_table[block_index] = BlockMark::Usable;
usable_blocks_lock.push_front(block);
} else {
trace!("Block {} is full", block.addr());
self.block_mark_table[block_index] = BlockMark::Full;
used_blocks_lock.push_front(block);
}
}
}
used_blocks_lock.append(&mut live_blocks);
if cfg!(debug_assertions) {
debug!("=== {:?} ===", self.desc);
debug!("=== {:?} GC ===", self.desc);
debug!(
"free lines = {} of {} total ({} blocks)",
free_lines,
self.total_blocks * LINES_IN_BLOCK,
self.total_blocks
self.cur_blocks * LINES_IN_BLOCK,
self.cur_blocks
);
debug!(
"used lines = {} of {} total ({} blocks)",
used_lines,
self.total_blocks * LINES_IN_BLOCK,
self.total_blocks
self.cur_blocks * LINES_IN_BLOCK,
self.cur_blocks
);
debug!("usable blocks = {}", usable_blocks);
debug!("full blocks = {}", full_blocks);
debug!("usable blocks = {}", self.n_usable_blocks());
debug!("full blocks = {}", self.n_used_blocks());
}
self.last_gc_free_lines = free_lines;
self.last_gc_used_lines = used_lines;
if full_blocks == self.total_blocks {
if self.n_used_blocks() == self.total_blocks && self.total_blocks != 0 {
println!("Out of memory in Immix Space");
process::exit(1);
}
debug_assert!(full_blocks + usable_blocks == self.cur_blocks);
debug_assert_eq!(
self.n_used_blocks() + self.n_usable_blocks(),
self.cur_blocks
);
}
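// The sweep above drains both block lists, re-buckets every block by whether
// it still has free lines, and re-checks used + usable == cur_blocks. A toy
// model of the re-bucketing (a sketch: plain Vecs stand in for the locked
// LinkedLists, has_free_lines for the line-mark scan):
fn rebucket_sketch<B>(
    used: &mut Vec<B>,
    usable: &mut Vec<B>,
    has_free_lines: impl Fn(&B) -> bool
) {
    let all: Vec<B> = used.drain(..).chain(usable.drain(..)).collect();
    for block in all {
        if has_free_lines(&block) {
            usable.push(block); // partially free: allocation candidate
        } else {
            used.push(block); // full: revisit at the next collection
        }
    }
}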
fn trace_details(&self) {
......
@@ -29,13 +29,6 @@ pub const IMMIX_SPACE_RATIO: f64 = 1.0 - LO_SPACE_RATIO;
pub const LO_SPACE_RATIO: f64 = 0.2;
pub const DEFAULT_HEAP_SIZE: usize = 500 << 20;
lazy_static! {
pub static ref IMMIX_SPACE_SIZE : AtomicUsize =
AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * IMMIX_SPACE_RATIO) as usize );
pub static ref LO_SPACE_SIZE : AtomicUsize =
AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
// preallocating 16 GB for space
pub const LOG_BYTES_PREALLOC_SPACE: usize = 34;
pub const BYTES_PREALLOC_SPACE: ByteSize = 1 << LOG_BYTES_PREALLOC_SPACE;
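// (1 << 34 bytes = 2^34 bytes = 16 GiB, matching the comment above)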
......
@@ -87,9 +87,7 @@ use heap::*;
use heap::immix::BYTES_IN_LINE;
use heap::immix::ImmixSpace;
use heap::immix::ImmixAllocator;
use utils::LinkedHashSet;
use utils::Address;
use utils::ObjectReference;
use utils::*;
use objectmodel::sidemap::*;
use std::sync::Arc;
@@ -133,57 +131,77 @@ pub use heap::Mutator;
//pub use heap::immix::CURSOR_OFFSET as ALLOCATOR_CURSOR_OFFSET;
/// offset to the immix allocator limit from its pointer
//pub use heap::immix::LIMIT_OFFSET as ALLOCATOR_LIMIT_OFFSET;
/// GC represents the context for the current running GC instance
struct GC {
immix_tiny: Raw<ImmixSpace>,
immix_normal: Raw<ImmixSpace>,
// lo: Arc<FreeListSpace>,
gc_types: Vec<Arc<GCType>>,
roots: LinkedHashSet<ObjectReference>
}
lazy_static! {
static ref MY_GC : RwLock<Option<GC>> = RwLock::new(None);
}
impl GC {
pub fn is_heap_object(&self, addr: Address) -> bool {
self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr)
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct GCConfig {
pub immix_tiny_size: ByteSize,
pub immix_normal_size: ByteSize,
pub lo_size: ByteSize,
pub n_gcthreads: usize,
pub enable_gc: bool
}
// the implementation of this GC will be changed dramatically in the future,
// but the exposed interface is likely to stay the same.
/// initializes the GC
#[no_mangle]
pub extern "C" fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize, enable_gc: bool) {
pub extern "C" fn gc_init(config: GCConfig) {
trace!("Initializing GC...");
// init object model - init this first, since spaces may use it
objectmodel::init();
// init space size
heap::IMMIX_SPACE_SIZE.store(immix_size, Ordering::SeqCst);
heap::LO_SPACE_SIZE.store(lo_size, Ordering::SeqCst);
// init spaces
trace!(" initializing tiny immix space...");
let immix_tiny = ImmixSpace::new(SpaceDescriptor::ImmixTiny, immix_size >> 1);
let immix_tiny = ImmixSpace::new(SpaceDescriptor::ImmixTiny, config.immix_tiny_size);
trace!(" initializing normal immix space...");
let immix_normal = ImmixSpace::new(SpaceDescriptor::ImmixNormal, immix_size >> 1);
let immix_normal = ImmixSpace::new(SpaceDescriptor::ImmixNormal, config.immix_normal_size);
// trace!(" initializing large object space...");
// let lo_space = Arc::new(FreeListSpace::new(lo_size));
heap::gc::init(n_gcthreads);
// init GC
heap::gc::init(config.n_gcthreads);
*MY_GC.write().unwrap() = Some(GC {
immix_tiny,
immix_normal,
gc_types: vec![],
roots: LinkedHashSet::new()
});
if enable_gc {
heap::gc::ENABLE_GC.store(true, Ordering::Relaxed);
} else {
heap::gc::ENABLE_GC.store(false, Ordering::Relaxed);
}
heap::gc::ENABLE_GC.store(config.enable_gc, Ordering::Relaxed);
info!(
"heap is {} bytes (immix: {} bytes, lo: {} bytes) . ",
immix_size + lo_size,
immix_size,
lo_size
"heap is {} bytes (immix_tiny: {} bytes, immix_normal: {} bytes) . ",
config.immix_tiny_size + config.immix_normal_size,
config.immix_tiny_size,
config.immix_normal_size
);
info!("{} gc threads", n_gcthreads);
if !enable_gc {
info!("{} gc threads", config.n_gcthreads);
if !config.enable_gc {
warn!("GC disabled (panic when a collection is triggered)");
}
}
/// destroys current GC instance
#[no_mangle]
pub extern "C" fn gc_destoy() {
pub extern "C" fn gc_destroy() {
*MY_GC.write().unwrap() = None;
}
@@ -432,33 +450,8 @@ pub extern "C" fn persist_heap(roots: Vec<Address>) -> objectdump::HeapDump {
objectdump::HeapDump::from_roots(roots)
}
/// GC represents the context for the current running GC instance
struct GC {
immix_tiny: Raw<ImmixSpace>,
immix_normal: Raw<ImmixSpace>,
// lo: Arc<FreeListSpace>,
gc_types: Vec<Arc<GCType>>,
roots: LinkedHashSet<ObjectReference>
}
lazy_static! {
static ref MY_GC : RwLock<Option<GC>> = RwLock::new(None);
}
impl GC {
pub fn is_heap_object(&self, addr: Address) -> bool {
self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr)
}
}
// the following API functions may get removed in the future
/// prints current GC context for debugging
#[no_mangle]
pub extern "C" fn print_gc_context() {
println!("GC CONTEXT UNKNOWN");
}
/// gets immix space and freelist space
#[no_mangle]
pub extern "C" fn get_spaces() -> (Raw<ImmixSpace>, Raw<ImmixSpace>) {
......
@@ -30,7 +30,7 @@ pub const MAX_MEDIUM_OBJECT: ByteSize = 2048;
/// u, 1 bit - unused
/// ri, 2 bits - ref encode for ith word
#[repr(C, packed)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug)]
pub struct TinyObjectEncode {
b: u8
}
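// steal_trace_object above iterates encode.n_fields() and reads
// encode.field(i). Given the documented layout (a 2-bit ref encode per word),
// a hypothetical decode looks like this; the real accessors live in the
// elided tiny_object_encoding module:
fn tiny_field_sketch(b: u8, i: usize) -> u8 {
    (b >> (2 * i)) & 0b11 // 2 bits per field, field i at bit offset 2 * i
}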
@@ -94,7 +94,7 @@ mod tiny_object_encoding {
/// sz, 2 bits - size encode (00: 32, 01:40, 10: 48, 11: 56)
/// type_id, 13 bits - type id
#[repr(C, packed)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug)]
pub struct SmallObjectEncode {
w: u16
}
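// The two-bit size encode documented above maps to a small formula. A sketch
// of the decode, assuming sz occupies the top two bits of w (the actual bit
// positions live in the elided small_object_encoding accessors):
fn small_size_sketch(w: u16) -> usize {
    let sz = ((w >> 14) & 0b11) as usize; // 0b00, 0b01, 0b10, 0b11
    32 + sz * 8 // -> 32, 40, 48, 56 bytes, as documented
}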
@@ -169,7 +169,7 @@ mod small_object_encoding {
/// type_id, 23 bits - type id
/// size , 8 bits - size encode (sz -> 64 + sz * 8)
#[repr(C, packed)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug)]
pub struct MediumObjectEncode {
d: u32
}
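// (size decode documented above: size = 64 + sz * 8, so the 8-bit sz field
// spans 64 bytes at sz = 0 up to 64 + 255 * 8 = 2104 bytes at sz = 255)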
@@ -238,7 +238,7 @@ mod medium_object_encoding {
/// Stored in a large object space - by address, we can know it is a large object
/// Header is used for it
#[repr(C, packed)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug)]
pub struct LargeObjectEncode {
size: u64,
tyid: u32,
......
@@ -15,6 +15,6 @@
#[macro_use]
extern crate log;
mod test_gc_harness;
mod test_immix_tiny;
//mod test_gcbench;
//mod test_gc_linked_list;
@@ -20,6 +20,7 @@ use self::mu_gc::*;
use self::mu_gc::heap;
use self::mu_gc::heap::*;
use self::mu_gc::heap::immix::*;
use self::mu_gc::heap::gc::*;
use self::mu_gc::objectmodel::sidemap::*;
use self::mu_utils::*;
use std::sync::atomic::Ordering;
@@ -44,7 +45,13 @@ pub fn test_tiny_immix_alloc() {
// we should see the slow paths get invoked exactly twice
start_logging_trace();
gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
gc_init(GCConfig {
immix_tiny_size: IMMIX_SPACE_SIZE,
immix_normal_size: 0,
lo_size: 0,
n_gcthreads: 8,
enable_gc: false
});
let (tiny_space, _) = get_spaces();
let mutator = new_mutator();
for _ in 0..WORK_LOAD {
@@ -55,6 +62,9 @@ pub fn test_tiny_immix_alloc() {
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
assert_eq!(tiny_space.n_used_blocks(), 1);
drop_mutator(mutator);
gc_destroy();
}
#[test]
@@ -65,7 +75,13 @@ pub fn test_tiny_immix_gc() {
// we should see the slow paths get invoked exactly twice
start_logging_trace();
gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, true);
gc_init(GCConfig {
immix_tiny_size: IMMIX_SPACE_SIZE,
immix_normal_size: 0,
lo_size: 0,
n_gcthreads: 8,
enable_gc: true
});
let (tiny_space, _) = get_spaces();
let mutator = new_mutator();
let tiny_header = TinyObjectEncode::new(0b0u8);
@@ -91,43 +107,97 @@ pub fn test_tiny_immix_gc() {
// no line should be alive
assert_eq!(tiny_space.last_gc_used_lines, 0);
drop_mutator(mutator);
gc_destroy();
}
#[test]
pub fn test_tiny_immix_exhaust() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
// to trigger GC exactly 2 times
const WORK_LOAD: usize = (IMMIX_SPACE_SIZE / OBJECT_SIZE) * 2 + 1;
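// (IMMIX_SPACE_SIZE / OBJECT_SIZE) allocations roughly fill the space once;
// doing that twice, plus one extra object, forces exactly two collections
// when nothing survives. The GC_COUNT assertion below checks this.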
start_logging_trace();
gc_init(GCConfig {
immix_tiny_size: IMMIX_SPACE_SIZE,
immix_normal_size: 0,
lo_size: 0,
n_gcthreads: 8,
enable_gc: true
});
let (tiny_space, _) = get_spaces();
let mutator = new_mutator();
let tiny_header = TinyObjectEncode::new(0b0u8);
for _ in 0..WORK_LOAD {
yieldpoint(mutator);
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res, tiny_header);
}
assert_eq!(tiny_space.n_used_blocks(), 0);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 2);
drop_mutator(mutator);
gc_destroy();
}
#[test]
pub fn test_tiny_immix_linkedlist() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize