
Commit 12a6f5bf authored by qinsoon

[wip] keep working on treadmill allocator

make tracing able to trace the freelist space (this may have degraded
performance; GC performance will need tuning later)
parent c95de6d9
Pipeline #226 failed in 25 minutes and 30 seconds
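For context, here is a minimal, self-contained sketch of the treadmill allocation scheme this commit is building towards. It is not the commit's code: the Vec-backed ring, the two colors, and all names are illustrative assumptions. Blocks sit on a cyclic list; an allocation claims consecutive available blocks, paints them black, and advances the free pointer, and a failed claim is the caller's cue to trigger a collection.

// Illustrative sketch only; not part of the commit.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Color { Ecru, Black }

struct Treadmill {
    colors: Vec<Color>, // one entry per fixed-size block, arranged as a ring
    free: usize,        // index of the next block to hand out
}

impl Treadmill {
    fn new(n_blocks: usize) -> Treadmill {
        Treadmill { colors: vec![Color::Ecru; n_blocks], free: 0 }
    }

    // Try to claim `n` consecutive available blocks; None means the caller should trigger a GC.
    fn alloc_blocks(&mut self, n: usize) -> Option<usize> {
        let len = self.colors.len();
        if n == 0 || n > len {
            return None;
        }
        // first pass: are the n blocks starting at `free` still available (ecru)?
        if (0..n).any(|i| self.colors[(self.free + i) % len] != Color::Ecru) {
            return None;
        }
        // second pass: paint them black and advance the free pointer
        for i in 0..n {
            self.colors[(self.free + i) % len] = Color::Black;
        }
        let first = self.free;
        self.free = (self.free + n) % len;
        Some(first)
    }
}

fn main() {
    let mut tm = Treadmill::new(10);
    println!("{:?}", tm.alloc_blocks(2)); // Some(0)
    println!("{:?}", tm.alloc_blocks(3)); // Some(2)
}

The Treadmill introduced in treadmill.rs below follows the same two-pass shape, but links raw-pointer nodes over an mmap'd region and returns a zero Address instead of None when it cannot satisfy the request.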
@@ -4,10 +4,6 @@ version = "0.0.1"
authors = ["qinsoon <qinsoon@gmail.com>"]
build = "build.rs"
[features]
default = ["parallel-gc"]
parallel-gc = []
[lib]
crate-type = ["rlib"]
@@ -17,6 +13,7 @@ gcc = "0.3"
[dependencies]
utils = {path = "../utils"}
time = "0.1.34"
lazy_static = "0.1.15"
log = "0.3.5"
simple_logger = "0.4.0"
......
@@ -31,9 +31,9 @@ impl FreeListSpace {
}
pub fn alloc(&mut self, size: usize, align: usize) -> Option<Address> {
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
if self.used_bytes + size > self.size {
None
unsafe {Address::zero()}
} else {
let ret = aligned_alloc::aligned_alloc(size, align);
@@ -43,7 +43,7 @@ impl FreeListSpace {
self.node_id += 1;
self.used_bytes += size;
Some(addr)
addr
}
}
@@ -106,27 +106,6 @@ pub enum NodeMark {
}
unsafe impl Sync for NodeMark {}
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<RwLock<FreeListSpace>>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = {
let mut lo_space_lock = space.write().unwrap();
lo_space_lock.alloc(size, align)
};
match ret_addr {
Some(addr) => {
return addr;
},
None => {
gc::trigger_gc();
}
}
}
}
use std::fmt;
impl fmt::Display for FreeListSpace {
......
mod malloc_list;
mod treadmill;
pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::malloc_list::alloc_large;
\ No newline at end of file
//pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::treadmill::FreeListSpace;
use std::sync::Arc;
use std::sync::RwLock;
use heap::gc;
use utils::{Address, ObjectReference};
use heap::immix;
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<FreeListSpace>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = space.alloc(size, align);
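// a zero address signals that the space is exhausted: trigger a GC and retry on the next iteration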
if ret_addr.is_zero() {
gc::trigger_gc();
} else {
return ret_addr;
}
}
}
\ No newline at end of file
@@ -2,9 +2,12 @@
use utils::Address;
use utils::mem::memmap;
use common::AddressMap;
use std::ptr;
use std::sync::Arc;
use common::AddressMap;
use std::fmt;
use std::sync::Mutex;
const SPACE_ALIGN : usize = 1 << 19;
const BLOCK_SIZE : usize = 1 << 12; // 4kb
@@ -20,7 +23,7 @@ pub struct FreeListSpace {
#[allow(dead_code)]
mmap : memmap::Mmap,
treadmill: TreadMill
treadmill: Mutex<Treadmill>
}
impl FreeListSpace {
@@ -32,44 +35,202 @@ impl FreeListSpace {
let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
let end : Address = start.plus(space_size);
let trace_map = AddressMap::new(start, end);
let alloc_map = AddressMap::new(start, end);
if cfg!(debug_assertions) {
trace_map.init_all(0);
alloc_map.init_all(0);
}
let treadmill = Treadmill::new(start, end);
FreeListSpace {
start: start,
end: end,
alloc_map: Arc::new(alloc_map),
trace_map: Arc::new(trace_map),
mmap: anon_mmap,
treadmill: Mutex::new(treadmill)
}
}
pub fn alloc(&self, size: usize, align: usize) -> Address {
// every block is 'BLOCK_SIZE' aligned, usually we do not need to align
assert!(BLOCK_SIZE % align == 0);
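// round the request up to a whole number of blocks (ceiling division)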
let blocks_needed = if size % BLOCK_SIZE == 0 {
size / BLOCK_SIZE
} else {
size / BLOCK_SIZE + 1
};
trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
let mut treadmill = self.treadmill.lock().unwrap();
let res = treadmill.alloc_blocks(blocks_needed);
res
}
pub fn sweep(&self) {
let mut treadmill = self.treadmill.lock().unwrap();
unimplemented!()
}
}
struct TreadMill{
free: *mut TreadMillNode,
scan: *mut TreadMillNode,
t : *mut TreadMillNode,
b : *mut TreadMillNode
use heap::Space;
impl Space for FreeListSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.end
}
#[inline(always)]
fn alloc_map(&self) -> *mut u8 {
self.alloc_map.ptr
}
#[inline(always)]
fn trace_map(&self) -> *mut u8 {
self.trace_map.ptr
}
}
unsafe impl Sync for FreeListSpace {}
unsafe impl Send for FreeListSpace {}
impl fmt::Display for FreeListSpace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FreeListSpace\n").unwrap();
write!(f, "range={:#X} ~ {:#X}\n", self.start, self.end).unwrap();
let treadmill : &Treadmill = &self.treadmill.lock().unwrap();
write!(f, "treadmill: {}", treadmill)
}
}
impl TreadMill {
fn new(start: Address, end: Address) -> TreadMill {
struct Treadmill{
available_color: TreadmillNodeColor,
free: *mut TreadmillNode,
scan: *mut TreadmillNode,
t : *mut TreadmillNode,
b : *mut TreadmillNode
}
impl Treadmill {
fn new(start: Address, end: Address) -> Treadmill {
let mut addr = start;
let free = TreadMillNode::singleton(addr);
let free = TreadmillNode::singleton(addr);
addr = addr.plus(BLOCK_SIZE);
let mut tail = free;
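// append one node per remaining BLOCK_SIZE chunk, linking each after the current tail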
while addr < end {
tail = TreadMillNode::insert_after(tail, addr);
tail = unsafe {(&mut *tail)}.insert_after(addr);
addr = addr.plus(BLOCK_SIZE);
}
unimplemented!()
Treadmill {
available_color: TreadmillNodeColor::Ecru,
free: free,
scan: free,
t: free,
b: free
}
}
fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
// check if we have n_blocks available
let mut cur = self.free;
for _ in 0..n_blocks {
if unsafe{&*cur}.color != self.available_color {
return unsafe {Address::zero()};
}
cur = unsafe {&*cur}.next;
}
// n_blocks are confirmed available; mark them as black
let mut cur2 = self.free;
for _ in 0..n_blocks {
unsafe{&mut *cur2}.color = TreadmillNodeColor::Black;
cur2 = unsafe {&*cur2}.next
}
let ret = self.free;
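// advance the free pointer past the newly blackened blocks and return the first block's payload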
self.free = cur;
unsafe{&*ret}.payload
}
}
impl fmt::Display for Treadmill {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut cursor = self.free;
struct TreadMillNode {
loop {
write!(f, "{}", unsafe{&*cursor}).unwrap();
if cursor == self.free {
write!(f, "(free)").unwrap();
}
if cursor == self.scan {
write!(f, "(scan)").unwrap();
}
if cursor == self.b {
write!(f, "(bottom)").unwrap();
}
if cursor == self.t {
write!(f, "(top)").unwrap();
}
if unsafe{&*cursor}.next() == self.free {
break;
} else {
write!(f, "->").unwrap();
cursor = unsafe{&*cursor}.next();
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum TreadmillNodeColor {
Ecru,
White,
Black,
Grey
}
struct TreadmillNode {
payload: Address,
color: TreadmillNodeColor,
prev: *mut TreadMillNode,
next: *mut TreadMillNode
prev: *mut TreadmillNode,
next: *mut TreadmillNode
}
impl TreadMillNode {
fn singleton(addr: Address) -> *mut TreadMillNode {
let mut ptr = Box::into_raw(Box::new(TreadMillNode {
impl fmt::Display for TreadmillNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}-{:?}]", self.payload, self.color)
}
}
impl TreadmillNode {
fn singleton(addr: Address) -> *mut TreadmillNode {
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
prev: ptr::null_mut(),
next: ptr::null_mut(),
}));
@@ -84,23 +245,75 @@ impl TreadMillNode {
}
/// returns the inserted node
fn insert_after(node: *mut TreadMillNode, addr: Address) -> *mut TreadMillNode {
fn insert_after(&mut self, addr: Address) -> *mut TreadmillNode {
unsafe {
// node <- ptr -> node.next
let mut ptr = Box::into_raw(Box::new(TreadMillNode {
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
// inserted between node and node.next
prev: node,
next: (*node).next
prev: self as *mut TreadmillNode,
next: self.next
}));
// ptr <- node.next
(*(*node).next).prev = ptr;
unsafe{(&mut *self.next)}.prev = ptr;
// node -> ptr
(*node).next = ptr;
self.next = ptr;
ptr
}
}
fn next(&self) -> *mut TreadmillNode {
self.next
}
fn prev(&self) -> *mut TreadmillNode {
self.prev
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::BLOCK_SIZE;
#[test]
fn test_new_treadmill_space() {
let space = FreeListSpace::new(BLOCK_SIZE * 10);
println!("{}", space);
}
#[test]
fn test_treadmill_alloc() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..10 {
let ret = space.alloc(BLOCK_SIZE, 8);
println!("Allocation{}: {}", i, ret);
}
}
#[test]
fn test_treadmill_alloc_spanblock() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..5 {
let ret = space.alloc(BLOCK_SIZE * 2, 8);
println!("Allocation{}: {}", i, ret);
}
}
#[test]
fn test_treadmill_alloc_exhaust() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..20 {
let ret = space.alloc(BLOCK_SIZE, 8);
println!("Allocation{}: {}", i, ret);
}
}
}
\ No newline at end of file
@@ -5,6 +5,7 @@ use heap::immix::ImmixSpace;
use heap::immix::ImmixLineMarkTable;
use heap::freelist::FreeListSpace;
use objectmodel;
use heap::Space;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
@@ -13,13 +14,9 @@ use utils::bit_utils;
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::{Arc, Mutex, Condvar, RwLock};
#[cfg(feature = "parallel-gc")]
use crossbeam::sync::chase_lev::*;
#[cfg(feature = "parallel-gc")]
use std::sync::mpsc;
#[cfg(feature = "parallel-gc")]
use std::sync::mpsc::channel;
#[cfg(feature = "parallel-gc")]
use std::thread;
use std::sync::atomic;
@@ -28,9 +25,7 @@ lazy_static! {
static ref STW_COND : Arc<(Mutex<usize>, Condvar)> = {
Arc::new((Mutex::new(0), Condvar::new()))
};
static ref GET_ROOTS : RwLock<Box<Fn()->Vec<ObjectReference> + Sync + Send>> = RwLock::new(Box::new(get_roots));
static ref GC_CONTEXT : RwLock<GCContext> = RwLock::new(GCContext{immix_space: None, lo_space: None});
static ref ROOTS : RwLock<Vec<ObjectReference>> = RwLock::new(vec![]);
@@ -41,24 +36,16 @@ const NO_CONTROLLER : isize = -1;
pub struct GCContext {
immix_space : Option<Arc<ImmixSpace>>,
lo_space : Option<Arc<RwLock<FreeListSpace>>>
}
fn get_roots() -> Vec<ObjectReference> {
vec![]
lo_space : Option<Arc<FreeListSpace>>
}
pub fn init(immix_space: Arc<ImmixSpace>, lo_space: Arc<RwLock<FreeListSpace>>) {
pub fn init(immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
let mut gccontext = GC_CONTEXT.write().unwrap();
gccontext.immix_space = Some(immix_space);
gccontext.lo_space = Some(lo_space);
}
pub fn init_get_roots(get_roots: Box<Fn()->Vec<ObjectReference> + Sync + Send>) {
*GET_ROOTS.write().unwrap() = get_roots;
}
pub fn trigger_gc() {
trace!("Triggering GC...");
@@ -81,25 +68,6 @@ extern "C" {
fn get_registers_count() -> i32;
}
#[inline(always)]
pub fn is_valid_object(addr: Address, start: Address, end: Address, live_map: *mut u8) -> bool {
if addr >= end || addr < start {
return false;
}
let index = (addr.diff(start) >> LOG_POINTER_SIZE) as isize;
if !bit_utils::test_nth_bit(unsafe {*live_map.offset(index)}, objectmodel::OBJ_START_BIT) {
return false;
}
if !addr.is_aligned_to(POINTER_SIZE) {
return false;
}
true
}
pub fn stack_scan() -> Vec<ObjectReference> {
trace!("stack scanning...");
let stack_ptr : Address = unsafe {immmix_get_stack_ptr()};
@@ -118,12 +86,14 @@ pub fn stack_scan() -> Vec<ObjectReference> {
let mut ret = vec![];
let gccontext = GC_CONTEXT.read().unwrap();
let immix_space = gccontext.immix_space.as_ref().unwrap();
let lo_space = gccontext.lo_space.as_ref().unwrap();
while cursor < low_water_mark {
let value : Address = unsafe {cursor.load::<Address>()};
if is_valid_object(value, immix_space.start(), immix_space.end(), immix_space.alloc_map.ptr) {
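// conservative stack scan: treat the word as a root if either space recognises it as a valid object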
if immix_space.is_valid_object(value) || lo_space.is_valid_object(value) {
ret.push(unsafe {value.to_object_reference()});
}
@@ -138,7 +108,7 @@ pub fn stack_scan() -> Vec<ObjectReference> {
for i in 0..registers_count {
let value = unsafe {*registers.offset(i as isize)};
if is_valid_object(value, immix_space.start(), immix_space.end(), immix_space.alloc_map.ptr) {
if immix_space.is_valid_object(value) || lo_space.is_valid_object(value){
ret.push(unsafe {value.to_object_reference()});
}
}
@@ -253,10 +223,13 @@ fn gc() {
// sweep
{
let mut gccontext = GC_CONTEXT.write().unwrap();
let immix_space = gccontext.immix_space.as_mut().unwrap();
let gccontext = GC_CONTEXT.read().unwrap();
let immix_space = gccontext.immix_space.as_ref().unwrap();
immix_space.sweep();
let lo_space = gccontext.lo_space.as_ref().unwrap();
lo_space.sweep();
}
objectmodel::flip_mark_state();
@@ -270,8 +243,7 @@ pub static GC_THREADS : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
#[allow(unused_variables)]
#[inline(never)]
#[cfg(feature = "parallel-gc")]
pub fn start_trace(work_stack: &mut Vec<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<RwLock<FreeListSpace>>) {
pub fn start_trace(work_stack: &mut Vec<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
// creates root deque
let (mut worker, stealer) = deque();
@@ -313,26 +285,10 @@ pub fn start_trace(work_stack: &mut Vec<ObjectReference>, immix_space: Arc<Immix
}
#[allow(unused_variables)]
#[inline(never)]
#[cfg(not(feature = "parallel-gc"))]
pub fn start_trace(local_queue: &mut Vec<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<RwLock<FreeListSpace>>) {
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
while !local_queue.is_empty() {
trace_object(local_queue.pop().unwrap(), local_queue, immix_space.alloc_map.ptr, immix_space.trace_map.ptr, &immix_space.line_mark_table, immix_space.start(), immix_space.end(), mark_state);
}
}
#[allow(unused_variables)]
#[cfg(feature = "parallel-gc")]
fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<RwLock<FreeListSpace>>) {
fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
use objectmodel;
let mut local_queue = vec![];
let line_mark_table = &immix_space.line_mark_table;
let (alloc_map, trace_map) = (immix_space.alloc_map.ptr, immix_space.trace_map.ptr);
let (space_start, space_end) = (immix_space.start(), immix_space.end());
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
loop {
@@ -349,63 +305,75 @@ fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<
}
};
steal_trace_object(work, &mut local_queue, &job_sender, alloc_map, trace_map, line_mark_table, space_start, space_end, mark_state, &lo_space);
steal_trace_object(work, &mut local_queue, &job_sender, mark_state, &immix_space, &lo_space);
}
}
#[inline(always)]
#[cfg(feature = "parallel-gc")]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, alloc_map: *mut u8, trace_map: *mut u8, line_mark_table: &ImmixLineMarkTable, immix_start: Address, immix_end: Address, mark_state: u8, lo_space: &Arc<RwLock<FreeListSpace>>) {
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
if cfg!(debug_assertions) {
// check whether this address is within the heap and refers to a valid object
if !is_valid_object(obj.to_address(), immix_start, immix_end, alloc_map) {
if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()){
use std::process;
println!("trying to trace an object that is not valid");
println!("address: 0x{:x}", obj);
println!("---");
println!("immix space: 0x{:x} - 0x{:x}", immix_start, immix_end);
println!("lo space: {}", *lo_space.read().unwrap());
println!("immix space: {}", immix_space);
println!("lo space: {}", lo_space);
println!("invalid object during tracing");
process::exit(101);
}
}
objectmodel::mark_as_traced(trace_map, immix_start, obj, mark_state);
let addr = obj.to_address();
if addr >= immix_start && addr < immix_end {
line_mark_table.mark_line_live(addr);
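// dispatch on the space that owns this address; each space keeps its own alloc and trace maps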
let (alloc_map, space_start) = if immix_space.addr_in_space(addr) {
// mark object
objectmodel::mark_as_traced(immix_space.trace_map(), immix_space.start(), obj, mark_state);
// mark line
immix_space.line_mark_table.mark_line_live(addr);
(immix_space.alloc_map(), immix_space.start())
} else if lo_space.addr_in_space(addr) {
// mark object
objectmodel::mark_as_traced(lo_space.trace_map(), lo_space.start(), obj, mark_state);
(lo_space.alloc_map(), lo_space.start())
} else {
// freelist mark
}
println!("unexpected address: {}", addr);
println!("immix space: {}", immix_space);
println!("lo space : {}", lo_space);
panic!("error during tracing object")
};
let mut base = addr;
loop {
let value = objectmodel::get_ref_byte(alloc_map, immix_start, obj);
let value = objectmodel::get_ref_byte(alloc_map, space_start, obj);
let (ref_bits, short_encode) = (bit_utils::lower_bits(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit(value, objectmodel::SHORT_ENCODE_BIT));
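// each set bit in ref_bits marks a reference field at the corresponding word (8-byte) offset from base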
match ref_bits {
0b0000_0001 => {
steal_process_edge(base, 0, local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
},
0b0000_0011 => {
steal_process_edge(base, 0, local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 8, local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
},
0b0000_1111 => {
steal_process_edge(base, 0, local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 8, local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 16,local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 24,local_queue, alloc_map, trace_map, immix_start, immix_end, job_sender, mark_state);
steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(base, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(base, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
},
_ => {
panic!("unexpcted ref_bits patterns: {:b}", ref_bits);
error!("unexpected ref_bits patterns: {:b}", ref_bits);
unimplemented!()
}
}
assert!(short_encode);
if short_encode {
return;
} else {
@@ -415,87 +383,48 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
}
#[inline(always)]
#[cfg(feature = "parallel-gc")]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, alloc_map: *mut u8, trace_map: *mut u8, immix_start: Address, immix_end: Address, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8) {
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
let field_addr = base.plus(offset);
let edge = unsafe{field_addr.load::<ObjectReference>()};
if cfg!(debug_assertions) {
use std::process;
// check whether this address is within the heap and refers to a valid object
if !edge.to_address().is_zero() && !is_valid_object(edge.to_address(), immix_start, immix_end, alloc_map) {
if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {