Commit a61f8784 authored by qinsoon

[wip] can allocate and sweep dead objects

Currently a treadmill for the whole space is created up front; this takes a huge amount of time,
and even if the treadmill space is not used, it still has to be traversed in every GC. Going to
change this tomorrow.
parent 12a6f5bf
Pipeline #227 failed in 18 minutes and 41 seconds
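For orientation before the diff: the large object space here is organised as a Baker-style treadmill, a cyclic doubly-linked list of fixed-size blocks walked via 'free', 'scan' and 'bottom' pointers; allocation hands out blocks starting at 'free', and sweeping resnaps dead blocks to just after 'free' so the available region stays contiguous. The following is a minimal sketch of that idea in safe Rust, not this crate's API: it uses Vec indices instead of the raw *mut TreadmillNode links, a plain allocated flag instead of the mark-state colours and the trace map, and collects block indices up front instead of the in-place scan/bottom traversal used in the sweep below.

// Minimal treadmill sketch (illustrative names, not the crate's API).
struct Block {
    payload: usize,   // stands in for the block's start address
    allocated: bool,  // the real code encodes this as a mark-state colour (u8)
    prev: usize,
    next: usize,
}

struct Treadmill {
    blocks: Vec<Block>,
    free: usize, // next block to hand out
}

impl Treadmill {
    fn new(n: usize) -> Treadmill {
        // one cyclic doubly-linked list over all blocks
        let blocks = (0..n)
            .map(|i| Block {
                payload: i,
                allocated: false,
                prev: (i + n - 1) % n,
                next: (i + 1) % n,
            })
            .collect();
        Treadmill { blocks, free: 0 }
    }

    /// Two-pass allocation, as in the diff: first make sure `n` blocks
    /// starting at `free` are available, then mark them and advance `free`.
    fn alloc_blocks(&mut self, n: usize) -> Option<usize> {
        let mut cur = self.free;
        for _ in 0..n {
            if self.blocks[cur].allocated {
                return None; // not enough contiguous available blocks
            }
            cur = self.blocks[cur].next;
        }
        let ret = self.blocks[self.free].payload;
        let mut cur2 = self.free;
        for _ in 0..n {
            self.blocks[cur2].allocated = true;
            cur2 = self.blocks[cur2].next;
        }
        self.free = cur2;
        Some(ret)
    }

    /// Sweep: dead blocks are unlinked and re-inserted right after `free`,
    /// so the available blocks stay contiguous in the cycle.  (The real code
    /// scans in place and uses a `bottom` pointer to know when to stop;
    /// collecting the indices first keeps this sketch simple.)
    fn sweep(&mut self, live: &[usize]) {
        let allocated: Vec<usize> =
            (0..self.blocks.len()).filter(|&i| self.blocks[i].allocated).collect();
        for i in allocated {
            if !live.contains(&self.blocks[i].payload) {
                // dead: make it available and resnap it right after `free`
                self.blocks[i].allocated = false;
                if i != self.free {
                    self.unlink(i);
                    self.link_after(self.free, i);
                }
            }
        }
    }

    fn unlink(&mut self, i: usize) {
        let (p, n) = (self.blocks[i].prev, self.blocks[i].next);
        self.blocks[p].next = n;
        self.blocks[n].prev = p;
    }

    fn link_after(&mut self, anchor: usize, i: usize) {
        let n = self.blocks[anchor].next;
        self.blocks[i].prev = anchor;
        self.blocks[i].next = n;
        self.blocks[anchor].next = i;
        self.blocks[n].prev = i;
    }
}

fn main() {
    let mut tm = Treadmill::new(4);
    assert_eq!(tm.alloc_blocks(2), Some(0)); // takes blocks 0 and 1
    assert_eq!(tm.alloc_blocks(1), Some(2)); // takes block 2
    tm.sweep(&[2]);                          // only block 2 survives
    assert_eq!(tm.alloc_blocks(2), Some(3)); // block 3 plus the resnapped block 1
    println!("treadmill sketch ok");
}

With that picture, the changes below (the two-pass alloc_blocks, the scan/bottom sweep loop, and the new init_insert_after/insert_after/remove node operations) are the raw-pointer version of the same list surgery.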
......@@ -2,8 +2,11 @@
use utils::Address;
use utils::mem::memmap;
use utils::LOG_POINTER_SIZE;
use common::AddressMap;
use objectmodel;
use std::ptr;
use std::sync::Arc;
use std::fmt;
......@@ -64,17 +67,97 @@ impl FreeListSpace {
size / BLOCK_SIZE + 1
};
trace!("before allocation, space: {}", self);
trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
let mut treadmill = self.treadmill.lock().unwrap();
let res = treadmill.alloc_blocks(blocks_needed);
let res = {
let mut treadmill = self.treadmill.lock().unwrap();
treadmill.alloc_blocks(blocks_needed)
};
trace!("after allocation, space: {}", self);
res
}
pub fn init_object(&self, addr: Address, encode: u8) {
unsafe {
*self.alloc_map().offset((addr.diff(self.start) >> LOG_POINTER_SIZE) as isize) = encode;
objectmodel::mark_as_untraced(self.trace_map(), self.start, addr, objectmodel::load_mark_state());
}
}
pub fn sweep(&self) {
trace!("going to sweep treadmill space");
trace!("{}", self);
let mut treadmill = self.treadmill.lock().unwrap();
let trace_map = self.trace_map();
let mark_state = objectmodel::load_mark_state();
let mut resnapped_any = false;
unimplemented!()
loop {
trace!("scanning {}", unsafe{&*treadmill.scan});
let addr = unsafe{&*treadmill.scan}.payload;
if objectmodel::is_traced(trace_map, self.start, unsafe { addr.to_object_reference() }, mark_state) {
// the object is alive, no need to 'move' its node
// but since it stays alive, we set it to the opposite mark color
// (meaning it is still unavailable after the mark state flips)
unsafe{&mut *treadmill.scan}.color = objectmodel::flip(mark_state);
trace!("is alive, set color to {}", objectmodel::flip(mark_state));
// advance scan backwards
treadmill.scan = unsafe{&*treadmill.scan}.prev();
} else {
// this object is dead
// we do not need to set its color
// we resnap it to right after the current 'free' pointer
if treadmill.scan != treadmill.free {
// since we are going to move current node (scan), we get its prev first
let prev = unsafe{&*treadmill.scan}.prev();
trace!("get scan's prev before resnapping it: {}", unsafe{&*prev});
let dead_node = unsafe { &mut *treadmill.scan }.remove();
trace!("is dead, take it out of treadmill");
trace!("treadmill: {}", &treadmill as &Treadmill);
// insert the dead node after 'free'
unsafe{&mut *treadmill.free}.insert_after(dead_node);
trace!("insert after free");
trace!("treadmill: {}", &treadmill as &Treadmill);
// if this is the first object inserted, it is the 'bottom'
// then 1) all resnapped objects will be between 'free' and 'bottom'
// 2) the traversal can stop when scan meets bottom
if !resnapped_any {
treadmill.b = treadmill.scan;
resnapped_any = true;
}
treadmill.scan = prev;
} else {
trace!("is dead and it is free pointer, do not move it");
treadmill.scan = unsafe{&*treadmill.scan}.prev();
}
}
// check if we can stop
if resnapped_any && treadmill.scan == treadmill.b {
return;
}
if !resnapped_any && treadmill.scan == treadmill.free {
// we never set bottom (meaning everything is alive)
println!("didnt free up any memory in treadmill space");
panic!("we ran out of memory in large object space")
}
}
}
}
......@@ -116,11 +199,8 @@ impl fmt::Display for FreeListSpace {
}
struct Treadmill{
available_color: TreadmillNodeColor,
free: *mut TreadmillNode,
scan: *mut TreadmillNode,
t : *mut TreadmillNode,
b : *mut TreadmillNode
}
......@@ -134,24 +214,25 @@ impl Treadmill {
let mut tail = free;
while addr < end {
tail = unsafe {(&mut *tail)}.insert_after(addr);
tail = unsafe {(&mut *tail)}.init_insert_after(addr);
addr = addr.plus(BLOCK_SIZE);
}
Treadmill {
available_color: TreadmillNodeColor::Ecru,
free: free,
scan: free,
t: free,
b: free
}
}
fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
let unavailable_color = objectmodel::load_mark_state();
// check if we have n_blocks available
let mut cur = self.free;
for _ in 0..n_blocks {
if unsafe{&*cur}.color != self.available_color {
if unsafe{&*cur}.color == unavailable_color {
trace!("next block color is {}, no available blocks, return zero", unavailable_color);
return unsafe {Address::zero()};
}
......@@ -161,13 +242,17 @@ impl Treadmill {
// we made sure that n_blocks are available, now mark them as unavailable
let mut cur2 = self.free;
for _ in 0..n_blocks {
unsafe{&mut *cur2}.color = TreadmillNodeColor::Black;
unsafe{&mut *cur2}.color = unavailable_color;
cur2 = unsafe {&*cur2}.next
}
debug_assert!(cur == cur2);
let ret = self.free;
self.free = cur;
trace!("set free to {}", unsafe {&*cur});
unsafe{&*ret}.payload
}
}
......@@ -176,6 +261,7 @@ impl fmt::Display for Treadmill {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut cursor = self.free;
write!(f, "\n").unwrap();
loop {
write!(f, "{}", unsafe{&*cursor}).unwrap();
......@@ -188,14 +274,11 @@ impl fmt::Display for Treadmill {
if cursor == self.b {
write!(f, "(bottom)").unwrap();
}
if cursor == self.t {
write!(f, "(top)").unwrap();
}
if unsafe{&*cursor}.next() == self.free {
break;
} else {
write!(f, "->").unwrap();
write!(f, "\n->").unwrap();
cursor = unsafe{&*cursor}.next();
}
}
......@@ -204,17 +287,9 @@ impl fmt::Display for Treadmill {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum TreadmillNodeColor {
Ecru,
White,
Black,
Grey
}
struct TreadmillNode {
payload: Address,
color: TreadmillNodeColor,
color: u8,
prev: *mut TreadmillNode,
next: *mut TreadmillNode
......@@ -230,7 +305,8 @@ impl TreadmillNode {
fn singleton(addr: Address) -> *mut TreadmillNode {
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
// starts as the flipped mark state, i.e. untraced/available (mark_state itself means allocated/alive)
color: objectmodel::flip(objectmodel::load_mark_state()),
prev: ptr::null_mut(),
next: ptr::null_mut(),
}));
......@@ -245,12 +321,12 @@ impl TreadmillNode {
}
/// returns the inserted node
fn insert_after(&mut self, addr: Address) -> *mut TreadmillNode {
fn init_insert_after(&mut self, addr: Address) -> *mut TreadmillNode {
unsafe {
// node <- ptr -> node.next
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
color: objectmodel::flip(objectmodel::load_mark_state()),
// inserted between node and node.next
prev: self as *mut TreadmillNode,
next: self.next
......@@ -266,6 +342,45 @@ impl TreadmillNode {
}
}
fn insert_after(&mut self, node: *mut TreadmillNode) {
unsafe {
// self <- node -> self.next
(&mut *node).next = self.next;
(&mut *node).prev = self as *mut TreadmillNode;
// self.next -> node
self.next = node;
// node <- node.next.prev
(&mut *(&mut *node).next).prev = node;
}
}
/// removes the current node from the treadmill and returns it
fn remove(&mut self) -> *mut TreadmillNode {
if self.next == self as *mut TreadmillNode && self.prev == self as *mut TreadmillNode {
// if this is the only node, return itself
self as *mut TreadmillNode
} else {
// we need to take it out from the list
unsafe {
use std::ptr;
// its prev node's next will be its next node
(&mut *self.prev).next = self.next as *mut TreadmillNode;
// its next node's prev will be its prev node
(&mut *self.next).prev = self.prev as *mut TreadmillNode;
// clear current node prev and next
self.prev = ptr::null_mut();
self.next = ptr::null_mut();
}
// then return it
self as *mut TreadmillNode
}
}
fn next(&self) -> *mut TreadmillNode {
self.next
}
......
......@@ -141,7 +141,7 @@ pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
block_current_thread(mutator);
// reset current mutator
mutator.reset();
mutator.reset_after_gc();
} else {
// this thread is controller
// other threads should block
......@@ -174,7 +174,7 @@ pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
}
}
// every mutator thread will reset itself, so only reset the current mutator here
mutator.reset();
mutator.reset_after_gc();
// resume
{
......@@ -289,7 +289,7 @@ fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<
use objectmodel;
let mut local_queue = vec![];
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
let mark_state = objectmodel::load_mark_state();
loop {
let work = {
......@@ -340,6 +340,7 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
} else if lo_space.addr_in_space(addr) {
// mark object
objectmodel::mark_as_traced(lo_space.trace_map(), lo_space.start(), obj, mark_state);
trace!("mark object @ {} to {}", obj, mark_state);
(lo_space.alloc_map(), lo_space.start())
} else {
......@@ -355,6 +356,9 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
let value = objectmodel::get_ref_byte(alloc_map, space_start, obj);
let (ref_bits, short_encode) = (bit_utils::lower_bits(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit(value, objectmodel::SHORT_ENCODE_BIT));
match ref_bits {
0b0000_0000 => {
},
0b0000_0001 => {
steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
},
......
......@@ -2,6 +2,7 @@ use heap::immix;
use heap::immix::ImmixSpace;
use heap::immix::immix_space::ImmixBlock;
use heap::gc;
use objectmodel;
use utils::LOG_POINTER_SIZE;
use utils::Address;
......@@ -32,6 +33,7 @@ pub struct ImmixMutatorLocal {
// use raw pointer here instead of AddressMapTable
// to avoid indirection in fast path
alloc_map : *mut u8,
trace_map : *mut u8,
space_start: Address,
// cursor might be invalid, but Option<Address> is expensive here
......@@ -46,6 +48,8 @@ pub struct ImmixMutatorLocal {
space : Arc<ImmixSpace>,
block : Option<Box<ImmixBlock>>,
mark_state: u8
}
lazy_static! {
......@@ -73,6 +77,11 @@ impl ImmixMutatorLocal {
self.block = None;
}
pub fn reset_after_gc(&mut self) {
self.reset();
self.mark_state ^= 1;
}
pub fn new(space : Arc<ImmixSpace>) -> ImmixMutatorLocal {
let global = Arc::new(ImmixMutatorGlobal::new());
......@@ -89,9 +98,11 @@ impl ImmixMutatorLocal {
cursor: unsafe {Address::zero()}, limit: unsafe {Address::zero()}, line: immix::LINES_IN_BLOCK,
block: None,
alloc_map: space.alloc_map.ptr,
trace_map: space.trace_map.ptr,
space_start: space.start(),
global: global,
space: space,
space: space,
mark_state: objectmodel::INIT_MARK_STATE as u8
};
*id_lock += 1;
......@@ -164,6 +175,7 @@ impl ImmixMutatorLocal {
pub fn init_object(&mut self, addr: Address, encode: u8) {
unsafe {
*self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode;
objectmodel::mark_as_untraced(self.trace_map, self.space_start, addr, self.mark_state);
}
}
......
......@@ -66,6 +66,9 @@ pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<FreeListSpace>) {
pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// uncomment this line to turn on a certain level of debug logging
// simple_logger::init_with_level(log::LogLevel::Trace).ok();
// init object model - init this first, since spaces may use it
objectmodel::init();
// init space size
heap::IMMIX_SPACE_SIZE.store(immix_size, Ordering::SeqCst);
......@@ -86,9 +89,6 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// gc threads
heap::gc::GC_THREADS.store(n_gcthreads, Ordering::SeqCst);
info!("{} gc threads", n_gcthreads);
// init object model
objectmodel::init();
}
#[no_mangle]
......@@ -125,15 +125,21 @@ pub extern fn yieldpoint_slow(mutator: *mut ImmixMutatorLocal) {
#[no_mangle]
#[inline(always)]
pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
let addr = unsafe {mutator.as_mut().unwrap()}.alloc(size, align);
let addr = unsafe {&mut *mutator}.alloc(size, align);
unsafe {addr.to_object_reference()}
}
#[no_mangle]
#[inline(always)]
pub extern fn init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = unsafe {mutator.as_mut().unwrap()}.try_alloc_from_local(size, align);
let ret = unsafe {&mut *mutator}.try_alloc_from_local(size, align);
unsafe {ret.to_object_reference()}
}
......@@ -142,4 +148,9 @@ pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize,
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
unsafe {ret.to_object_reference()}
}
#[no_mangle]
pub extern fn muentry_init_large_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
MY_GC.read().unwrap().as_ref().unwrap().lo_space.init_object(obj.to_address(), encode);
}
\ No newline at end of file
use std::sync::atomic;
pub static MARK_STATE : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
pub const OBJECT_HEADER_SIZE : usize = 0;
pub static INIT_MARK_STATE : usize = 1;
static MARK_STATE : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub fn init() {
MARK_STATE.store(1, atomic::Ordering::SeqCst);
MARK_STATE.store(INIT_MARK_STATE, atomic::Ordering::SeqCst);
}
pub fn flip_mark_state() {
let mark_state = MARK_STATE.load(atomic::Ordering::SeqCst);
if mark_state == 0 {
MARK_STATE.store(1, atomic::Ordering::SeqCst);
} else {
MARK_STATE.store(0, atomic::Ordering::SeqCst);
}
MARK_STATE.store(mark_state ^ 1, atomic::Ordering::SeqCst);
}
pub fn load_mark_state() -> u8 {
MARK_STATE.load(atomic::Ordering::SeqCst) as u8
}
pub fn flip(mark: u8) -> u8 {
mark ^ 1
}
#[allow(unused_variables)]
......@@ -83,6 +88,13 @@ pub fn mark_as_traced(trace_map: *mut u8, space_start: Address, obj: ObjectRefer
}
}
#[inline(always)]
pub fn mark_as_untraced(trace_map: *mut u8, space_start: Address, addr: Address, mark_state: u8) {
unsafe {
*trace_map.offset((addr.diff(space_start) >> LOG_POINTER_SIZE) as isize) = mark_state ^ 1;
}
}
#[inline(always)]
pub fn is_traced(trace_map: *mut u8, space_start: Address, obj: ObjectReference, mark_state: u8) -> bool {
unsafe {
......
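The new mark_as_untraced helper and the mark-state accessors above implement the standard two-state marking trick: marking writes the current mark state into the trace map, and after a collection the global state is XOR-flipped (flip_mark_state / flip), so everything marked last cycle immediately reads as unmarked and the map never has to be zeroed. A minimal sketch of that idea, with illustrative names and usize addresses rather than the crate's real types:

// One byte per word records which state an object was last marked with.
const LOG_POINTER_SIZE: usize = 3; // assuming 64-bit words

struct TraceMap {
    bytes: Vec<u8>,
    space_start: usize,
}

impl TraceMap {
    fn index(&self, addr: usize) -> usize {
        (addr - self.space_start) >> LOG_POINTER_SIZE
    }
    fn mark_as_traced(&mut self, addr: usize, mark_state: u8) {
        let i = self.index(addr);
        self.bytes[i] = mark_state;
    }
    fn mark_as_untraced(&mut self, addr: usize, mark_state: u8) {
        let i = self.index(addr);
        self.bytes[i] = mark_state ^ 1; // the flipped state means "untraced"
    }
    fn is_traced(&self, addr: usize, mark_state: u8) -> bool {
        self.bytes[self.index(addr)] == mark_state
    }
}

fn main() {
    let mut mark_state: u8 = 1; // INIT_MARK_STATE in the diff
    let mut map = TraceMap { bytes: vec![0; 16], space_start: 0x1000 };

    let obj = 0x1008;
    map.mark_as_untraced(obj, mark_state); // done at allocation (init_object)
    assert!(!map.is_traced(obj, mark_state));

    map.mark_as_traced(obj, mark_state);   // done by the tracing threads
    assert!(map.is_traced(obj, mark_state));

    mark_state ^= 1;                       // flip after the collection
    assert!(!map.is_traced(obj, mark_state)); // reads as unmarked next cycle
    println!("mark-state sketch ok");
}

This is also why ImmixMutatorLocal now carries its own mark_state and reset_after_gc() XORs it: objects initialised after a flip must be stamped with the new "untraced" value.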
......@@ -63,6 +63,7 @@ fn test_exhaust_alloc_large() {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
gc::muentry_init_large_object(&mut mutator, res, 0b1100_0000);
}
mutator.destroy();
......@@ -71,7 +72,7 @@ fn test_exhaust_alloc_large() {
#[test]
#[allow(unused_variables)]
fn test_alloc_large_trigger_gc() {
gc::gc_init(IMMIX_SPACE_SIZE, SMALL_SPACE_SIZE, 8);
gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8);
let mut mutator = gc::new_mutator();
start_logging();
......@@ -80,6 +81,7 @@ fn test_alloc_large_trigger_gc() {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
gc::muentry_init_large_object(&mut mutator, res, 0b1100_0000);
}
mutator.destroy();
......@@ -106,7 +108,7 @@ fn test_alloc_mark() {
let (shared_space, _) = gc::get_spaces();
println!("Start marking");
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
let mark_state = objectmodel::load_mark_state();
let line_mark_table = shared_space.line_mark_table();
let (space_start, space_end) = (shared_space.start(), shared_space.end());
......
......@@ -16,6 +16,17 @@ use self::gc::heap::freelist::FreeListSpace;
use std::mem::size_of;
use std::sync::atomic::Ordering;
extern crate log;
extern crate simple_logger;
use self::log::LogLevel;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {},
Err(_) => {}
}
}
const IMMIX_SPACE_SIZE : usize = 40 << 20;
const LO_SPACE_SIZE : usize = 40 << 20;
......@@ -116,6 +127,8 @@ fn alloc(mutator: &mut ImmixMutatorLocal) -> *mut Node {
fn start() {
unsafe {heap::gc::set_low_water_mark();}
start_logging();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
gc::gc_stats();
......