Commit 12a6f5bf authored by qinsoon

[wip] keep working on treadmill allocator

make tracing able to trace the freelist space (this may have degraded
performance; GC performance will need tuning later)
parent c95de6d9
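Both the immix space and the new treadmill-backed freelist space now implement a common Space trait (see the heap module changes below), which is what lets the tracer validate addresses in either space. A rough sketch of how a tracer might use the trait; the trace_edge helper is hypothetical and not part of this commit:

use heap::Space;
use utils::Address;

// hypothetical helper, not part of this commit
fn trace_edge<S: Space>(space: &S, addr: Address) {
    // only trace addresses that fall inside this space and look like valid object starts
    if space.addr_in_space(addr) && space.is_valid_object(addr) {
        // ... mark the object and push it onto the trace queue ...
    }
}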
@@ -4,10 +4,6 @@ version = "0.0.1"
authors = ["qinsoon <qinsoon@gmail.com>"]
build = "build.rs"
[features]
default = ["parallel-gc"]
parallel-gc = []
[lib]
crate-type = ["rlib"]
@@ -17,6 +13,7 @@ gcc = "0.3"
[dependencies]
utils = {path = "../utils"}
time = "0.1.34"
lazy_static = "0.1.15"
log = "0.3.5"
simple_logger = "0.4.0"
......
@@ -31,9 +31,9 @@ impl FreeListSpace {
}
pub fn alloc(&mut self, size: usize, align: usize) -> Option<Address> {
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
if self.used_bytes + size > self.size {
None
unsafe {Address::zero()}
} else {
let ret = aligned_alloc::aligned_alloc(size, align);
@@ -43,7 +43,7 @@ impl FreeListSpace {
self.node_id += 1;
self.used_bytes += size;
Some(addr)
addr
}
}
@@ -106,27 +106,6 @@ pub enum NodeMark {
}
unsafe impl Sync for NodeMark {}
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<RwLock<FreeListSpace>>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = {
let mut lo_space_lock = space.write().unwrap();
lo_space_lock.alloc(size, align)
};
match ret_addr {
Some(addr) => {
return addr;
},
None => {
gc::trigger_gc();
}
}
}
}
use std::fmt;
impl fmt::Display for FreeListSpace {
......
mod malloc_list;
mod treadmill;
pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::malloc_list::alloc_large;
\ No newline at end of file
//pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::treadmill::FreeListSpace;
use std::sync::Arc;
use std::sync::RwLock;
use heap::gc;
use utils::{Address, ObjectReference};
use heap::immix;
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<FreeListSpace>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = space.alloc(size, align);
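        // a zero address signals that the freelist space could not satisfy the request;
        // trigger a GC and retry on the next loop iteration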
if ret_addr.is_zero() {
gc::trigger_gc();
} else {
return ret_addr;
}
}
}
\ No newline at end of file
@@ -2,9 +2,12 @@
use utils::Address;
use utils::mem::memmap;
use common::AddressMap;
use std::ptr;
use std::sync::Arc;
use common::AddressMap;
use std::fmt;
use std::sync::Mutex;
const SPACE_ALIGN : usize = 1 << 19;
const BLOCK_SIZE : usize = 1 << 12; // 4kb
@@ -20,7 +23,7 @@ pub struct FreeListSpace {
#[allow(dead_code)]
mmap : memmap::Mmap,
treadmill: TreadMill
treadmill: Mutex<Treadmill>
}
impl FreeListSpace {
@@ -32,44 +35,202 @@ impl FreeListSpace {
let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
let end : Address = start.plus(space_size);
let trace_map = AddressMap::new(start, end);
let alloc_map = AddressMap::new(start, end);
if cfg!(debug_assertions) {
trace_map.init_all(0);
alloc_map.init_all(0);
}
let treadmill = Treadmill::new(start, end);
FreeListSpace {
start: start,
end: end,
alloc_map: Arc::new(alloc_map),
trace_map: Arc::new(trace_map),
mmap: anon_mmap,
treadmill: Mutex::new(treadmill)
}
}
pub fn alloc(&self, size: usize, align: usize) -> Address {
// every block is BLOCK_SIZE-aligned, so usually no extra alignment work is needed
assert!(BLOCK_SIZE % align == 0);
let blocks_needed = if size % BLOCK_SIZE == 0 {
size / BLOCK_SIZE
} else {
size / BLOCK_SIZE + 1
};
trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
let mut treadmill = self.treadmill.lock().unwrap();
let res = treadmill.alloc_blocks(blocks_needed);
res
}
pub fn sweep(&self) {
let mut treadmill = self.treadmill.lock().unwrap();
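        // sweeping the treadmill is not implemented yet in this WIP commit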
unimplemented!()
}
}
struct TreadMill{
free: *mut TreadMillNode,
scan: *mut TreadMillNode,
t : *mut TreadMillNode,
b : *mut TreadMillNode
use heap::Space;
impl Space for FreeListSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.end
}
#[inline(always)]
fn alloc_map(&self) -> *mut u8 {
self.alloc_map.ptr
}
#[inline(always)]
fn trace_map(&self) -> *mut u8 {
self.trace_map.ptr
}
}
unsafe impl Sync for FreeListSpace {}
unsafe impl Send for FreeListSpace {}
impl fmt::Display for FreeListSpace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FreeListSpace\n").unwrap();
write!(f, "range={:#X} ~ {:#X}\n", self.start, self.end).unwrap();
let treadmill : &Treadmill = &self.treadmill.lock().unwrap();
write!(f, "treadmill: {}", treadmill)
}
}
impl TreadMill {
fn new(start: Address, end: Address) -> TreadMill {
struct Treadmill{
    // blocks of this color can be handed out by alloc_blocks()
    available_color: TreadmillNodeColor,
    // next block to try to allocate from; advanced as blocks are handed out
    free: *mut TreadmillNode,
    // remaining treadmill pointers (scan, top, bottom); not used yet in this WIP commit
    scan: *mut TreadmillNode,
    t : *mut TreadmillNode,
    b : *mut TreadmillNode
}
impl Treadmill {
fn new(start: Address, end: Address) -> Treadmill {
let mut addr = start;
let free = TreadMillNode::singleton(addr);
let free = TreadmillNode::singleton(addr);
addr = addr.plus(BLOCK_SIZE);
let mut tail = free;
while addr < end {
tail = TreadMillNode::insert_after(tail, addr);
tail = unsafe {(&mut *tail)}.insert_after(addr);
addr = addr.plus(BLOCK_SIZE);
}
unimplemented!()
Treadmill {
available_color: TreadmillNodeColor::Ecru,
free: free,
scan: free,
t: free,
b: free
}
}
fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
// check if we have n_blocks available
let mut cur = self.free;
for _ in 0..n_blocks {
if unsafe{&*cur}.color != self.available_color {
return unsafe {Address::zero()};
}
cur = unsafe {&*cur}.next;
}
// we make sure that n_blocks are available, mark them as black
let mut cur2 = self.free;
for _ in 0..n_blocks {
unsafe{&mut *cur2}.color = TreadmillNodeColor::Black;
cur2 = unsafe {&*cur2}.next
}
let ret = self.free;
self.free = cur;
unsafe{&*ret}.payload
}
}
impl fmt::Display for Treadmill {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut cursor = self.free;
struct TreadMillNode {
loop {
write!(f, "{}", unsafe{&*cursor}).unwrap();
if cursor == self.free {
write!(f, "(free)").unwrap();
}
if cursor == self.scan {
write!(f, "(scan)").unwrap();
}
if cursor == self.b {
write!(f, "(bottom)").unwrap();
}
if cursor == self.t {
write!(f, "(top)").unwrap();
}
if unsafe{&*cursor}.next() == self.free {
break;
} else {
write!(f, "->").unwrap();
cursor = unsafe{&*cursor}.next();
}
}
Ok(())
}
}
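// only Ecru (available for allocation) and Black (just allocated) are used so far;
// White and Grey are not used yet in this commit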
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum TreadmillNodeColor {
Ecru,
White,
Black,
Grey
}
struct TreadmillNode {
payload: Address,
color: TreadmillNodeColor,
prev: *mut TreadMillNode,
next: *mut TreadMillNode
prev: *mut TreadmillNode,
next: *mut TreadmillNode
}
impl TreadMillNode {
fn singleton(addr: Address) -> *mut TreadMillNode {
let mut ptr = Box::into_raw(Box::new(TreadMillNode {
impl fmt::Display for TreadmillNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}-{:?}]", self.payload, self.color)
}
}
impl TreadmillNode {
fn singleton(addr: Address) -> *mut TreadmillNode {
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
prev: ptr::null_mut(),
next: ptr::null_mut(),
}));
@@ -84,23 +245,75 @@ impl TreadMillNode {
}
/// returns the inserted node
fn insert_after(node: *mut TreadMillNode, addr: Address) -> *mut TreadMillNode {
fn insert_after(&mut self, addr: Address) -> *mut TreadmillNode {
unsafe {
// node <- ptr -> node.next
let mut ptr = Box::into_raw(Box::new(TreadMillNode {
let mut ptr = Box::into_raw(Box::new(TreadmillNode {
payload: addr,
color: TreadmillNodeColor::Ecru,
// inserted between node and node.next
prev: node,
next: (*node).next
prev: self as *mut TreadmillNode,
next: self.next
}));
// ptr <- node.next
(*(*node).next).prev = ptr;
(*self.next).prev = ptr;
// node -> ptr
(*node).next = ptr;
self.next = ptr;
ptr
}
}
fn next(&self) -> *mut TreadmillNode {
self.next
}
fn prev(&self) -> *mut TreadmillNode {
self.prev
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::BLOCK_SIZE;
#[test]
fn test_new_treadmill_space() {
let space = FreeListSpace::new(BLOCK_SIZE * 10);
println!("{}", space);
}
#[test]
fn test_treadmill_alloc() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..10 {
let ret = space.alloc(BLOCK_SIZE, 8);
println!("Allocation{}: {}", i, ret);
}
}
#[test]
fn test_treadmill_alloc_spanblock() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..5 {
let ret = space.alloc(BLOCK_SIZE * 2, 8);
println!("Allocation{}: {}", i, ret);
}
}
#[test]
fn test_treadmill_alloc_exhaust() {
let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
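        // with only 10 blocks in the space, allocations after the first 10 should
        // come back as a zero address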
for i in 0..20 {
let ret = space.alloc(BLOCK_SIZE, 8);
println!("Allocation{}: {}", i, ret);
}
}
}
\ No newline at end of file
@@ -73,16 +73,16 @@ impl LineMarkTable {
self.set(line_table_index + 1, immix::LineMark::ConservLive);
}
}
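// marks the line containing addr (and conservatively the next line) as live,
// taking the space start address as an explicit parameter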
#[inline(always)]
pub fn mark_line_live2(&self, space_start: Address, addr: Address) {
let line_table_index = addr.diff(space_start) >> immix::LOG_BYTES_IN_LINE;
self.set(line_table_index, immix::LineMark::Live);
if line_table_index < self.len - 1 {
self.set(line_table_index + 1, immix::LineMark::ConservLive);
}
}
}
}
@@ -231,7 +231,9 @@ impl ImmixSpace {
let mut full_blocks = 0;
let mut used_blocks_lock = self.used_blocks.lock().unwrap();
let mut usable_blocks_lock = self.usable_blocks.lock().unwrap();
usable_blocks = usable_blocks_lock.len();
let mut live_blocks : LinkedList<Box<ImmixBlock>> = LinkedList::new();
@@ -299,6 +301,29 @@ impl ImmixSpace {
}
}
use heap::Space;
impl Space for ImmixSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.end
}
#[inline(always)]
fn alloc_map(&self) -> *mut u8 {
self.alloc_map.ptr
}
#[inline(always)]
fn trace_map(&self) -> *mut u8 {
self.trace_map.ptr
}
}
impl ImmixBlock {
pub fn get_next_available_line(&self, cur_line : usize) -> Option<usize> {
let mut i = cur_line;
......
use utils::Address;
use utils::bit_utils;
use utils::POINTER_SIZE;
use utils::LOG_POINTER_SIZE;
use std::sync::atomic::AtomicUsize;
use objectmodel;
pub mod immix;
pub mod freelist;
pub mod gc;
@@ -16,6 +21,45 @@ lazy_static! {
pub static ref LO_SPACE_SIZE : AtomicUsize = AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
pub trait Space {
#[inline(always)]
fn start(&self) -> Address;
#[inline(always)]
fn end(&self) -> Address;
#[inline(always)]
fn alloc_map(&self) -> *mut u8;
#[inline(always)]
fn trace_map(&self) -> *mut u8;
#[inline(always)]
fn is_valid_object(&self, addr: Address) -> bool {
let start = self.start();
let end = self.end();
if addr >= end || addr < start {
return false;
}
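        // the alloc map has one byte per pointer-sized word; OBJ_START_BIT is set
        // for words where an object starts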
let index = (addr.diff(start) >> LOG_POINTER_SIZE) as isize;
if !bit_utils::test_nth_bit(unsafe {*self.alloc_map().offset(index)}, objectmodel::OBJ_START_BIT) {
return false;
}
if !addr.is_aligned_to(POINTER_SIZE) {
return false;
}
true
}
#[inline(always)]
fn addr_in_space(&self, addr: Address) -> bool {
addr >= self.start() && addr < self.end()
}
}
#[inline(always)]
pub fn fill_alignment_gap(start : Address, end : Address) -> () {
debug_assert!(end >= start);
......
@@ -33,16 +33,15 @@ pub use heap::immix::LIMIT_OFFSET as ALLOCATOR_LIMIT_OFFSET;
#[repr(C)]
pub struct GC {
immix_space: Arc<ImmixSpace>,
lo_space : Arc<RwLock<FreeListSpace>>
lo_space : Arc<FreeListSpace>
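    // no RwLock around the space any more: FreeListSpace synchronises internally (Mutex<Treadmill>)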
}
impl fmt::Debug for GC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "GC\n").unwrap();
write!(f, "{}", self.immix_space).unwrap();
let lo_lock = self.lo_space.read().unwrap();
write!(f, "{}", *lo_lock)
write!(f, "{}", self.lo_space)
}
}
@@ -56,7 +55,7 @@ pub extern fn gc_stats() {
}
#[no_mangle]
pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<RwLock<FreeListSpace>>) {
pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<FreeListSpace>) {
let space_lock = MY_GC.read().unwrap();
let space = space_lock.as_ref().unwrap();
@@ -74,7 +73,7 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
let (immix_space, lo_space) = {
let immix_space = Arc::new(ImmixSpace::new(immix_size));
let lo_space = Arc::new(RwLock::new(FreeListSpace::new(lo_size)));
let lo_space = Arc::new(FreeListSpace::new(lo_size));
heap::gc::init(immix_space.clone(), lo_space.clone());
......
mod test_gc_harness;
mod test_gcbench;
\ No newline at end of file
extern crate gc;
extern crate utils;
extern crate simple_logger;
extern crate log;
use self::log::LogLevel;
use self::gc::heap;
use self::gc::objectmodel;
use self::utils::Address;
use std::sync::atomic::Ordering;
pub fn start_logging() {
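    // a failure here just means a logger was already installed (e.g. by another test
    // in the same process), so it is ignored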
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {},
Err(_) => {}
}
}
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 10000;
const SPACIOUS_SPACE_SIZE : usize = 500 << 20; // 500mb
const LIMITED_SPACE_SIZE : usize = 20 << 20; // 20mb
const SMALL_SPACE_SIZE : usize = 1 << 19; // 512kb
const IMMIX_SPACE_SIZE : usize = SPACIOUS_SPACE_SIZE;
const LO_SPACE_SIZE : usize = SPACIOUS_SPACE_SIZE;
#[test]
fn test_exhaust_alloc() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
println!("Trying to allocate {} objects of (size {}, align {}). ", WORK_LOAD, OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
println!("Considering header size of {}, an object should be {}. ", 0, ACTUAL_OBJECT_SIZE);
println!("This would take {} bytes of {} bytes heap", WORK_LOAD * ACTUAL_OBJECT_SIZE, heap::IMMIX_SPACE_SIZE.load(Ordering::SeqCst));
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
}
mutator.destroy();
}
const LARGE_OBJECT_SIZE : usize = 256;
#[test]
#[allow(unused_variables)]
fn test_exhaust_alloc_large() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
start_logging();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
}
mutator.destroy();
}
#[test]
#[allow(unused_variables)]
fn test_alloc_large_trigger_gc() {
gc::gc_init(IMMIX_SPACE_SIZE, SMALL_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
start_logging();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
}
mutator.destroy();
}
#[test]
fn test_alloc_mark() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
println!("Trying to allocate 1 object of (size {}, align {}). ", OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
println!("Considering header size of {}, an object should be {}. ", 0, ACTUAL_OBJECT_SIZE);
println!("Trying to allocate {} objects, which will take roughly {} bytes", WORK_LOAD, WORK_LOAD * ACTUAL_OBJECT_SIZE);
let mut objs = vec![];
for _ in 0..WORK_LOAD {
let res = mutator.alloc(ACTUAL_OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
objs.push(unsafe {res.to_object_reference()});
}
let (shared_space, _) = gc::get_spaces();
println!("Start marking");
let mark_state = objectmodel::MARK_STATE.load(Ordering::SeqCst) as u8;
let line_mark_table = shared_space.line_mark_table();
let (space_start, space_end) = (shared_space.start(), shared_space.end());
let trace_map = shared_space.trace_map.ptr;
for i in 0..objs.len() {
let obj = unsafe {*objs.get_unchecked(i)};
// mark the object as traced
objectmodel::mark_as_traced(trace_map, space_start, obj, mark_state);
// mark meta-data
if obj.to_address() >= space_start && obj.to_address() < space_end {
line_mark_table.mark_line_live2(space_start, obj.to_address());
}
}
mutator.destroy();
}
#[allow(dead_code)]
struct Node<'a> {
hdr : u64,