Commit 27013485 authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents e8ffe9ba a61f8784
......@@ -4,10 +4,6 @@ version = "0.0.1"
authors = ["qinsoon <qinsoon@gmail.com>"]
build = "build.rs"
[features]
default = ["parallel-gc"]
parallel-gc = []
[lib]
crate-type = ["rlib"]
......@@ -17,6 +13,7 @@ gcc = "0.3"
[dependencies]
utils = {path = "../utils"}
time = "0.1.34"
lazy_static = "0.1.15"
log = "0.3.5"
simple_logger = "0.4.0"
......
......@@ -31,9 +31,9 @@ impl FreeListSpace {
}
pub fn alloc(&mut self, size: usize, align: usize) -> Option<Address> {
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
if self.used_bytes + size > self.size {
None
unsafe {Address::zero()}
} else {
let ret = aligned_alloc::aligned_alloc(size, align);
......@@ -43,7 +43,7 @@ impl FreeListSpace {
self.node_id += 1;
self.used_bytes += size;
Some(addr)
addr
}
}
......@@ -106,27 +106,6 @@ pub enum NodeMark {
}
unsafe impl Sync for NodeMark {}
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<RwLock<FreeListSpace>>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = {
let mut lo_space_lock = space.write().unwrap();
lo_space_lock.alloc(size, align)
};
match ret_addr {
Some(addr) => {
return addr;
},
None => {
gc::trigger_gc();
}
}
}
}
use std::fmt;
impl fmt::Display for FreeListSpace {
......
mod malloc_list;
mod treadmill;
pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::malloc_list::alloc_large;
\ No newline at end of file
//pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::treadmill::FreeListSpace;
use std::sync::Arc;
use std::sync::RwLock;
use heap::gc;
use utils::{Address, ObjectReference};
use heap::immix;
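// allocate from the large object space: a zero return address means the allocation
// failed, so trigger a GC and retry after the yieldpoint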
#[inline(never)]
pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<FreeListSpace>) -> Address {
loop {
mutator.yieldpoint();
let ret_addr = space.alloc(size, align);
if ret_addr.is_zero() {
gc::trigger_gc();
} else {
return ret_addr;
}
}
}
\ No newline at end of file
......@@ -2,6 +2,7 @@ use heap::immix;
use heap::immix::ImmixSpace;
use heap::immix::immix_space::ImmixBlock;
use heap::gc;
use objectmodel;
use utils::LOG_POINTER_SIZE;
use utils::Address;
......@@ -32,6 +33,7 @@ pub struct ImmixMutatorLocal {
// use raw pointer here instead of AddressMapTable
// to avoid indirection in fast path
alloc_map : *mut u8,
trace_map : *mut u8,
space_start: Address,
// cursor might be invalid, but Option<Address> is expensive here
......@@ -46,6 +48,8 @@ pub struct ImmixMutatorLocal {
space : Arc<ImmixSpace>,
block : Option<Box<ImmixBlock>>,
mark_state: u8
}
lazy_static! {
......@@ -73,6 +77,11 @@ impl ImmixMutatorLocal {
self.block = None;
}
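// after a collection: reset the allocation cursor/block and flip the local mark state
// so it stays in sync with the global mark state used for tracing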
pub fn reset_after_gc(&mut self) {
self.reset();
self.mark_state ^= 1;
}
pub fn new(space : Arc<ImmixSpace>) -> ImmixMutatorLocal {
let global = Arc::new(ImmixMutatorGlobal::new());
......@@ -89,9 +98,11 @@ impl ImmixMutatorLocal {
cursor: unsafe {Address::zero()}, limit: unsafe {Address::zero()}, line: immix::LINES_IN_BLOCK,
block: None,
alloc_map: space.alloc_map.ptr,
trace_map: space.trace_map.ptr,
space_start: space.start(),
global: global,
space: space,
space: space,
mark_state: objectmodel::INIT_MARK_STATE as u8
};
*id_lock += 1;
......@@ -164,6 +175,7 @@ impl ImmixMutatorLocal {
pub fn init_object(&mut self, addr: Address, encode: u8) {
unsafe {
*self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode;
objectmodel::mark_as_untraced(self.trace_map, self.space_start, addr, self.mark_state);
}
}
......
......@@ -73,16 +73,16 @@ impl LineMarkTable {
self.set(line_table_index + 1, immix::LineMark::ConservLive);
}
}
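// same as mark_line_live, but takes the space start address explicitly; marks the
// line containing addr as Live and conservatively marks the following line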
#[inline(always)]
pub fn mark_line_live2(&self, space_start: Address, addr: Address) {
let line_table_index = addr.diff(space_start) >> immix::LOG_BYTES_IN_LINE;
self.set(line_table_index, immix::LineMark::Live);
if line_table_index < self.len - 1 {
self.set(line_table_index + 1, immix::LineMark::ConservLive);
}
}
}
}
......@@ -231,7 +231,9 @@ impl ImmixSpace {
let mut full_blocks = 0;
let mut used_blocks_lock = self.used_blocks.lock().unwrap();
let mut usable_blocks_lock = self.usable_blocks.lock().unwrap();
usable_blocks = usable_blocks_lock.len();
let mut live_blocks : LinkedList<Box<ImmixBlock>> = LinkedList::new();
......@@ -299,6 +301,29 @@ impl ImmixSpace {
}
}
use heap::Space;
impl Space for ImmixSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.end
}
#[inline(always)]
fn alloc_map(&self) -> *mut u8 {
self.alloc_map.ptr
}
#[inline(always)]
fn trace_map(&self) -> *mut u8 {
self.trace_map.ptr
}
}
impl ImmixBlock {
pub fn get_next_available_line(&self, cur_line : usize) -> Option<usize> {
let mut i = cur_line;
......
use utils::Address;
use utils::bit_utils;
use utils::POINTER_SIZE;
use utils::LOG_POINTER_SIZE;
use std::sync::atomic::AtomicUsize;
use objectmodel;
pub mod immix;
pub mod freelist;
pub mod gc;
......@@ -16,6 +21,45 @@ lazy_static! {
pub static ref LO_SPACE_SIZE : AtomicUsize = AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
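// common interface over collected spaces: address bounds plus raw pointers to the
// alloc and trace maps, with default helpers for object validity and containment checks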
pub trait Space {
#[inline(always)]
fn start(&self) -> Address;
#[inline(always)]
fn end(&self) -> Address;
#[inline(always)]
fn alloc_map(&self) -> *mut u8;
#[inline(always)]
fn trace_map(&self) -> *mut u8;
#[inline(always)]
fn is_valid_object(&self, addr: Address) -> bool {
let start = self.start();
let end = self.end();
if addr >= end || addr < start {
return false;
}
let index = (addr.diff(start) >> LOG_POINTER_SIZE) as isize;
if !bit_utils::test_nth_bit(unsafe {*self.alloc_map().offset(index)}, objectmodel::OBJ_START_BIT) {
return false;
}
if !addr.is_aligned_to(POINTER_SIZE) {
return false;
}
true
}
#[inline(always)]
fn addr_in_space(&self, addr: Address) -> bool {
addr >= self.start() && addr < self.end()
}
}
#[inline(always)]
pub fn fill_alignment_gap(start : Address, end : Address) -> () {
debug_assert!(end >= start);
......
......@@ -33,16 +33,15 @@ pub use heap::immix::LIMIT_OFFSET as ALLOCATOR_LIMIT_OFFSET;
#[repr(C)]
pub struct GC {
immix_space: Arc<ImmixSpace>,
lo_space : Arc<RwLock<FreeListSpace>>
lo_space : Arc<FreeListSpace>
}
impl fmt::Debug for GC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "GC\n").unwrap();
write!(f, "{}", self.immix_space).unwrap();
let lo_lock = self.lo_space.read().unwrap();
write!(f, "{}", *lo_lock)
write!(f, "{}", self.lo_space)
}
}
......@@ -56,7 +55,7 @@ pub extern fn gc_stats() {
}
#[no_mangle]
pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<RwLock<FreeListSpace>>) {
pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<FreeListSpace>) {
let space_lock = MY_GC.read().unwrap();
let space = space_lock.as_ref().unwrap();
......@@ -67,6 +66,9 @@ pub extern fn get_spaces() -> (Arc<ImmixSpace>, Arc<RwLock<FreeListSpace>>) {
pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// set this line to turn on a certain level of debugging info
// simple_logger::init_with_level(log::LogLevel::Trace).ok();
// init object model - init this first, since spaces may use it
objectmodel::init();
// init space size
heap::IMMIX_SPACE_SIZE.store(immix_size, Ordering::SeqCst);
......@@ -74,7 +76,7 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
let (immix_space, lo_space) = {
let immix_space = Arc::new(ImmixSpace::new(immix_size));
let lo_space = Arc::new(RwLock::new(FreeListSpace::new(lo_size)));
let lo_space = Arc::new(FreeListSpace::new(lo_size));
heap::gc::init(immix_space.clone(), lo_space.clone());
......@@ -87,9 +89,6 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// gc threads
heap::gc::GC_THREADS.store(n_gcthreads, Ordering::SeqCst);
info!("{} gc threads", n_gcthreads);
// init object model
objectmodel::init();
}
#[no_mangle]
......@@ -126,15 +125,21 @@ pub extern fn yieldpoint_slow(mutator: *mut ImmixMutatorLocal) {
#[no_mangle]
#[inline(always)]
pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
let addr = unsafe {mutator.as_mut().unwrap()}.alloc(size, align);
let addr = unsafe {&mut *mutator}.alloc(size, align);
unsafe {addr.to_object_reference()}
}
#[no_mangle]
#[inline(always)]
pub extern fn init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = unsafe {mutator.as_mut().unwrap()}.try_alloc_from_local(size, align);
let ret = unsafe {&mut *mutator}.try_alloc_from_local(size, align);
unsafe {ret.to_object_reference()}
}
......@@ -143,4 +148,9 @@ pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize,
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
unsafe {ret.to_object_reference()}
}
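// large objects live in the freelist space, so their header metadata is initialized
// through lo_space rather than through the mutator's immix-space maps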
#[no_mangle]
pub extern fn muentry_init_large_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
MY_GC.read().unwrap().as_ref().unwrap().lo_space.init_object(obj.to_address(), encode);
}
\ No newline at end of file
use std::sync::atomic;
pub static MARK_STATE : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
pub const OBJECT_HEADER_SIZE : usize = 0;
pub static INIT_MARK_STATE : usize = 1;
static MARK_STATE : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
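// the global mark state toggles between 0 and 1 on each collection; an object whose
// trace map byte equals the current state is considered traced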
pub fn init() {
MARK_STATE.store(1, atomic::Ordering::SeqCst);
MARK_STATE.store(INIT_MARK_STATE, atomic::Ordering::SeqCst);
}
pub fn flip_mark_state() {
let mark_state = MARK_STATE.load(atomic::Ordering::SeqCst);
if mark_state == 0 {
MARK_STATE.store(1, atomic::Ordering::SeqCst);
} else {
MARK_STATE.store(0, atomic::Ordering::SeqCst);
}
MARK_STATE.store(mark_state ^ 1, atomic::Ordering::SeqCst);
}
pub fn load_mark_state() -> u8 {
MARK_STATE.load(atomic::Ordering::SeqCst) as u8
}
pub fn flip(mark: u8) -> u8 {
mark ^ 1
}
#[allow(unused_variables)]
......@@ -83,6 +88,13 @@ pub fn mark_as_traced(trace_map: *mut u8, space_start: Address, obj: ObjectRefer
}
}
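// write the flipped mark state into the object's trace map slot so it reads as untraced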
#[inline(always)]
pub fn mark_as_untraced(trace_map: *mut u8, space_start: Address, addr: Address, mark_state: u8) {
unsafe {
*trace_map.offset((addr.diff(space_start) >> LOG_POINTER_SIZE) as isize) = mark_state ^ 1;
}
}
#[inline(always)]
pub fn is_traced(trace_map: *mut u8, space_start: Address, obj: ObjectReference, mark_state: u8) -> bool {
unsafe {
......
mod test_gc_harness;
mod test_gcbench;
\ No newline at end of file
extern crate gc;
extern crate utils;
extern crate simple_logger;
extern crate log;
use self::log::LogLevel;
use self::gc::heap;
use self::gc::objectmodel;
use self::utils::Address;
use std::sync::atomic::Ordering;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {},
Err(_) => {}
}
}
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 10000;
const SPACIOUS_SPACE_SIZE : usize = 500 << 20; // 500mb
const LIMITED_SPACE_SIZE : usize = 20 << 20; // 20mb
const SMALL_SPACE_SIZE : usize = 1 << 19; // 512kb
const IMMIX_SPACE_SIZE : usize = SPACIOUS_SPACE_SIZE;
const LO_SPACE_SIZE : usize = SPACIOUS_SPACE_SIZE;
#[test]
fn test_exhaust_alloc() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
println!("Trying to allocate {} objects of (size {}, align {}). ", WORK_LOAD, OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
println!("Considering header size of {}, an object should be {}. ", 0, ACTUAL_OBJECT_SIZE);
println!("This would take {} bytes of {} bytes heap", WORK_LOAD * ACTUAL_OBJECT_SIZE, heap::IMMIX_SPACE_SIZE.load(Ordering::SeqCst));
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = mutator.alloc(OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
}
mutator.destroy();
}
const LARGE_OBJECT_SIZE : usize = 256;
#[test]
#[allow(unused_variables)]
fn test_exhaust_alloc_large() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
start_logging();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
gc::muentry_init_large_object(&mut mutator, res, 0b1100_0000);
}
mutator.destroy();
}
#[test]
#[allow(unused_variables)]
fn test_alloc_large_trigger_gc() {
gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8);
let mut mutator = gc::new_mutator();
start_logging();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
gc::muentry_init_large_object(&mut mutator, res, 0b1100_0000);
}
mutator.destroy();
}
#[test]
fn test_alloc_mark() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
println!("Trying to allocate 1 object of (size {}, align {}). ", OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
println!("Considering header size of {}, an object should be {}. ", 0, ACTUAL_OBJECT_SIZE);
println!("Trying to allocate {} objects, which will take roughly {} bytes", WORK_LOAD, WORK_LOAD * ACTUAL_OBJECT_SIZE);
let mut objs = vec![];
for _ in 0..WORK_LOAD {
let res = mutator.alloc(ACTUAL_OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0011);
objs.push(unsafe {res.to_object_reference()});
}
let (shared_space, _) = gc::get_spaces();
println!("Start marking");
let mark_state = objectmodel::load_mark_state();
let line_mark_table = shared_space.line_mark_table();
let (space_start, space_end) = (shared_space.start(), shared_space.end());
let trace_map = shared_space.trace_map.ptr;
for i in 0..objs.len() {
let obj = unsafe {*objs.get_unchecked(i)};
// mark the object as traced
objectmodel::mark_as_traced(trace_map, space_start, obj, mark_state);
// mark meta-data
if obj.to_address() >= space_start && obj.to_address() < space_end {
line_mark_table.mark_line_live2(space_start, obj.to_address());
}
}
mutator.destroy();
}
#[allow(dead_code)]
struct Node<'a> {
hdr : u64,
next : &'a Node<'a>,
unused_ptr : usize,
unused_int : i32,
unused_int2: i32
}
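// build a chain of objects where each object's first word points to the next one,
// then trace the whole chain from the head as the single root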
#[test]
fn test_alloc_trace() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8);
let mut mutator = gc::new_mutator();
let (shared_space, lo_space) = gc::get_spaces();
println!("Trying to allocate 1 object of (size {}, align {}). ", OBJECT_SIZE, OBJECT_ALIGN);
const ACTUAL_OBJECT_SIZE : usize = OBJECT_SIZE;
println!("Considering header size of {}, an object should be {}. ", 0, ACTUAL_OBJECT_SIZE);
println!("Trying to allocate {} objects, which will take roughly {} bytes", WORK_LOAD, WORK_LOAD * ACTUAL_OBJECT_SIZE);
let root = mutator.alloc(ACTUAL_OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(root, 0b1100_0001);
let mut prev = root;
for _ in 0..WORK_LOAD - 1 {
let res = mutator.alloc(ACTUAL_OBJECT_SIZE, OBJECT_ALIGN);
mutator.init_object(res, 0b1100_0001);
// set prev's 1st field (offset 0) to this object
unsafe {prev.store::<Address>(res)};
prev = res;
}
println!("Start tracing");
let mut roots = vec![unsafe {root.to_object_reference()}];
heap::gc::start_trace(&mut roots, shared_space, lo_space);
mutator.destroy();
}
\ No newline at end of file
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(dead_code)]
#![allow(unused_imports)]
extern crate gc;
extern crate time;
use self::gc::heap;
use self::gc::heap::immix::ImmixMutatorLocal;
use self::gc::heap::immix::ImmixSpace;
use self::gc::heap::freelist;
use self::gc::heap::freelist::FreeListSpace;
use std::mem::size_of;
use std::sync::atomic::Ordering;
extern crate log;
extern crate simple_logger;
use self::log::LogLevel;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {},
Err(_) => {}
}
}
const IMMIX_SPACE_SIZE : usize = 40 << 20;
const LO_SPACE_SIZE : usize = 40 << 20;
const kStretchTreeDepth : i32 = 18;
const kLongLivedTreeDepth : i32 = 16;
const kArraySize : i32 = 500000;
const kMinTreeDepth : i32 = 4;
const kMaxTreeDepth : i32 = 16;
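// GCBench workload parameters: tree depths and the array size used by the benchmark below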
struct Node {
left : *mut Node,
right : *mut Node,
i : i32,
j : i32
}
struct Array {
value : [f64; kArraySize as usize]
}
fn init_Node(me: *mut Node, l: *mut Node, r: *mut Node) {
unsafe {
(*me).left = l;
(*me).right = r;
}
}
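// a complete binary tree of depth i has 2^(i+1) - 1 nodes; NumIters scales the
// iteration count so each depth allocates roughly the same total number of nodes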
fn TreeSize(i: i32) -> i32{
(1 << (i + 1)) - 1
}
fn NumIters(i: i32) -> i32 {
2 * TreeSize(kStretchTreeDepth) / TreeSize(i)
}
fn Populate(iDepth: i32, thisNode: *mut Node, mutator: &mut ImmixMutatorLocal) {
if iDepth <= 0 {
return;
} else {
unsafe {
(*thisNode).left = alloc(mutator);
(*thisNode).right = alloc(mutator);
Populate(iDepth - 1, (*thisNode).left, mutator);
Populate(iDepth - 1, (*thisNode).right, mutator);
}
}
}
fn MakeTree(iDepth: i32, mutator: &mut ImmixMutatorLocal) -> *mut Node {
if iDepth <= 0 {
alloc(mutator)
} else {
let left = MakeTree(iDepth - 1, mutator);
let right = MakeTree(iDepth - 1, mutator);
let result = alloc(mutator);
init_Node(result, left, right);
result
}
}
fn PrintDiagnostics() {
}
fn TimeConstruction(depth: i32, mutator: &mut ImmixMutatorLocal) {
let iNumIters = NumIters(depth);
println!("creating {} trees of depth {}", iNumIters, depth);
let tStart = time::now_utc();
for _ in 0..iNumIters {
let tempTree = alloc(mutator);
Populate(depth, tempTree, mutator);
// destroy tempTree
}
let tFinish = time::now_utc();
println!("\tTop down construction took {} msec", (tFinish - tStart).num_milliseconds());
let tStart = time::now_utc();
for _ in 0..iNumIters {
let tempTree = MakeTree(depth, mutator);
}
let tFinish = time::now_utc();
println!("\tButtom up construction took {} msec", (tFinish - tStart).num_milliseconds());
}
#[inline(always)]
fn alloc(mutator: &mut ImmixMutatorLocal) -> *mut Node {
let addr = mutator.alloc(size_of::<Node>(), 8);