
Commit 27013485 authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents e8ffe9ba a61f8784
@@ -4,10 +4,6 @@ version = "0.0.1"
 authors = ["qinsoon <qinsoon@gmail.com>"]
 build = "build.rs"
-
-[features]
-default = ["parallel-gc"]
-parallel-gc = []
 [lib]
 crate-type = ["rlib"]
@@ -17,6 +13,7 @@ gcc = "0.3"
 [dependencies]
 utils = {path = "../utils"}
+time = "0.1.34"
 lazy_static = "0.1.15"
 log = "0.3.5"
 simple_logger = "0.4.0"
......
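The first hunk drops the `parallel-gc` cargo feature (and its `default` activation) from Cargo.toml; the second adds a dependency on the `time` crate. For context on what removing a feature flag implies, cargo features typically gate code through `cfg` attributes; the sketch below is hypothetical and not from this repository:

```rust
// Hypothetical sketch of how a cargo feature such as `parallel-gc` gates
// code at compile time. With the [features] section removed from Cargo.toml,
// the "parallel" configuration can no longer be selected at build time.
#[cfg(feature = "parallel-gc")]
fn start_gc_workers() {
    // would spawn several marker threads
}

#[cfg(not(feature = "parallel-gc"))]
fn start_gc_workers() {
    // single-threaded marking
}

fn main() {
    start_gc_workers();
}
```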
@@ -31,9 +31,9 @@ impl FreeListSpace {
     }
-    pub fn alloc(&mut self, size: usize, align: usize) -> Option<Address> {
+    pub fn alloc(&mut self, size: usize, align: usize) -> Address {
         if self.used_bytes + size > self.size {
-            None
+            unsafe {Address::zero()}
         } else {
             let ret = aligned_alloc::aligned_alloc(size, align);
@@ -43,7 +43,7 @@ impl FreeListSpace {
             self.node_id += 1;
             self.used_bytes += size;
-            Some(addr)
+            addr
         }
     }
@@ -106,27 +106,6 @@ pub enum NodeMark {
 }
 unsafe impl Sync for NodeMark {}
-
-#[inline(never)]
-pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<RwLock<FreeListSpace>>) -> Address {
-    loop {
-        mutator.yieldpoint();
-
-        let ret_addr = {
-            let mut lo_space_lock = space.write().unwrap();
-            lo_space_lock.alloc(size, align)
-        };
-
-        match ret_addr {
-            Some(addr) => {
-                return addr;
-            },
-            None => {
-                gc::trigger_gc();
-            }
-        }
-    }
-}
 use std::fmt;
 impl fmt::Display for FreeListSpace {
......
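This hunk changes `FreeListSpace::alloc` in the malloc-based space from returning `Option<Address>` to returning a bare `Address`, with the zero address as the out-of-memory sentinel. A minimal self-contained sketch of that pattern, where `Address` is a simplified stand-in for `utils::Address`:

```rust
// Sketch of the sentinel-address pattern this commit adopts.
// `Address` here is a simplified stand-in for utils::Address.
#[derive(Clone, Copy, PartialEq)]
struct Address(usize);

impl Address {
    unsafe fn zero() -> Address { Address(0) }
    fn is_zero(&self) -> bool { self.0 == 0 }
}

fn alloc(used: &mut usize, limit: usize, size: usize) -> Address {
    if *used + size > limit {
        // out of memory: return the zero address instead of None
        unsafe { Address::zero() }
    } else {
        *used += size;
        Address(0x1000 + *used) // pretend bump allocation
    }
}

fn main() {
    let mut used = 0;
    let a = alloc(&mut used, 4096, 1024);
    assert!(!a.is_zero());
}
```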
 mod malloc_list;
 mod treadmill;
-pub use heap::freelist::malloc_list::FreeListSpace;
-pub use heap::freelist::malloc_list::alloc_large;
\ No newline at end of file
+//pub use heap::freelist::malloc_list::FreeListSpace;
+pub use heap::freelist::treadmill::FreeListSpace;
+
+use std::sync::Arc;
+use std::sync::RwLock;
+use heap::gc;
+use utils::{Address, ObjectReference};
+use heap::immix;
+
+#[inline(never)]
+pub fn alloc_large(size: usize, align: usize, mutator: &mut immix::ImmixMutatorLocal, space: Arc<FreeListSpace>) -> Address {
+    loop {
+        mutator.yieldpoint();
+
+        let ret_addr = space.alloc(size, align);
+
+        if ret_addr.is_zero() {
+            gc::trigger_gc();
+        } else {
+            return ret_addr;
+        }
+    }
+}
\ No newline at end of file
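The `alloc_large` entry point moves to the module root and now drives the treadmill-backed `FreeListSpace` through a plain `Arc<FreeListSpace>` (the space locks internally) rather than `Arc<RwLock<FreeListSpace>>`. The control flow is an allocate-or-collect retry loop. A condensed model of that loop follows; `try_alloc` and `collect_garbage` are illustrative stand-ins for `space.alloc()` and `gc::trigger_gc()`, and the real code also calls `mutator.yieldpoint()` so the thread can be stopped while a collection runs:

```rust
// Condensed model of the allocate-or-collect retry loop.
fn alloc_with_retry(mut try_alloc: impl FnMut() -> Option<usize>,
                    mut collect_garbage: impl FnMut()) -> usize {
    loop {
        match try_alloc() {
            Some(addr) => return addr, // success
            None => collect_garbage(), // free some memory, then retry
        }
    }
}

fn main() {
    let mut attempts = 0;
    let addr = alloc_with_retry(
        || { attempts += 1; if attempts > 1 { Some(0x1000) } else { None } },
        || { /* pretend a GC cycle ran */ },
    );
    assert_eq!(addr, 0x1000);
}
```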
@@ -2,9 +2,15 @@
 use utils::Address;
 use utils::mem::memmap;
+use utils::LOG_POINTER_SIZE;
+use common::AddressMap;
+use objectmodel;
 use std::ptr;
 use std::sync::Arc;
-use common::AddressMap;
+use std::fmt;
+use std::sync::Mutex;

 const SPACE_ALIGN : usize = 1 << 19;
 const BLOCK_SIZE  : usize = 1 << 12; // 4kb
@@ -20,7 +26,7 @@ pub struct FreeListSpace {
     #[allow(dead_code)]
     mmap : memmap::Mmap,
-    treadmill: TreadMill
+    treadmill: Mutex<Treadmill>
 }
 impl FreeListSpace {
@@ -32,44 +38,275 @@ impl FreeListSpace {
         let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
         let end   : Address = start.plus(space_size);
-        unimplemented!()
+        let trace_map = AddressMap::new(start, end);
+        let alloc_map = AddressMap::new(start, end);
+        if cfg!(debug_assertions) {
+            trace_map.init_all(0);
+            alloc_map.init_all(0);
+        }
+
+        let treadmill = Treadmill::new(start, end);
+
+        FreeListSpace {
+            start: start,
+            end: end,
+            alloc_map: Arc::new(alloc_map),
+            trace_map: Arc::new(trace_map),
+            mmap: anon_mmap,
+            treadmill: Mutex::new(treadmill)
+        }
+    }
+
+    pub fn alloc(&self, size: usize, align: usize) -> Address {
+        // every block is 'BLOCK_SIZE' aligned, usually we do not need to align
+        assert!(BLOCK_SIZE % align == 0);
+
+        let blocks_needed = if size % BLOCK_SIZE == 0 {
+            size / BLOCK_SIZE
+        } else {
+            size / BLOCK_SIZE + 1
+        };
+
+        trace!("before allocation, space: {}", self);
+        trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
+
+        let res = {
+            let mut treadmill = self.treadmill.lock().unwrap();
+            treadmill.alloc_blocks(blocks_needed)
+        };
+
+        trace!("after allocation, space: {}", self);
+        res
+    }
+
+    pub fn init_object(&self, addr: Address, encode: u8) {
+        unsafe {
+            *self.alloc_map().offset((addr.diff(self.start) >> LOG_POINTER_SIZE) as isize) = encode;
+            objectmodel::mark_as_untraced(self.trace_map(), self.start, addr, objectmodel::load_mark_state());
+        }
+    }
+
+    pub fn sweep(&self) {
+        trace!("going to sweep treadmill space");
+        trace!("{}", self);
+
+        let mut treadmill = self.treadmill.lock().unwrap();
+        let trace_map = self.trace_map();
+        let mark_state = objectmodel::load_mark_state();
+
+        let mut resnapped_any = false;
+
+        loop {
+            trace!("scanning {}", unsafe{&*treadmill.scan});
+            let addr = unsafe{&*treadmill.scan}.payload;
+
+            if objectmodel::is_traced(trace_map, self.start, unsafe { addr.to_object_reference() }, mark_state) {
+                // the object is alive, no need to 'move' its node;
+                // since it stays alive, set it to the opposite mark color
+                // (meaning it is not available after the flip)
+                unsafe{&mut *treadmill.scan}.color = objectmodel::flip(mark_state);
+                trace!("is alive, set color to {}", objectmodel::flip(mark_state));
+
+                // advance scan backwards
+                treadmill.scan = unsafe{&*treadmill.scan}.prev();
+            } else {
+                // this object is dead: we do not need to set its color,
+                // we resnap it after the current 'free' pointer
+                if treadmill.scan != treadmill.free {
+                    // since we are going to move the current node (scan), get its prev first
+                    let prev = unsafe{&*treadmill.scan}.prev();
+                    trace!("get scan's prev before resnapping it: {}", unsafe{&*prev});
+
+                    let alive_node = unsafe { &mut *treadmill.scan }.remove();
+                    trace!("is dead, take it out of treadmill");
+                    trace!("treadmill: {}", &treadmill as &Treadmill);
+
+                    // insert the node after free
+                    unsafe{&mut *treadmill.free}.insert_after(alive_node);
+                    trace!("insert after free");
+                    trace!("treadmill: {}", &treadmill as &Treadmill);
+
+                    // if this is the first object inserted, it is the 'bottom';
+                    // then 1) all resnapped objects will be between 'free' and 'bottom'
+                    //      2) the traversal can stop when scan meets bottom
+                    if !resnapped_any {
+                        treadmill.b = treadmill.scan;
+                        resnapped_any = true;
+                    }
+
+                    treadmill.scan = prev;
+                } else {
+                    trace!("is dead and it is the free pointer, do not move it");
+                    treadmill.scan = unsafe{&*treadmill.scan}.prev();
+                }
+            }
+
+            // check if we can stop
+            if resnapped_any && treadmill.scan == treadmill.b {
+                return;
+            }
+            if !resnapped_any && treadmill.scan == treadmill.free {
+                // we never set bottom (meaning everything is alive)
+                println!("didnt free up any memory in treadmill space");
+                panic!("we ran out of memory in large object space")
+            }
+        }
+    }
 }
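`sweep` is the treadmill's sweep phase: `scan` walks backwards around the ring; traced (live) nodes are recolored to the flipped mark state so they read as unavailable after the global mark-state flip, while untraced (dead) nodes are unsnapped and reinserted right after `free`, where `alloc_blocks` can hand them out again. The first moved node becomes `bottom`, and the walk stops when `scan` meets `bottom` (or panics if nothing was freed). A toy model of just the color discipline, assuming (as the code suggests) that `objectmodel::flip` toggles a 0/1 mark state:

```rust
// Illustrative model (not the repo's code) of the sweep's color discipline:
// live nodes take the flipped mark state so they read as "unavailable" after
// the global mark state flips; dead nodes keep their color and are moved
// next to `free` so alloc_blocks can hand them out again.
fn flip(mark_state: u8) -> u8 { mark_state ^ 1 }

fn sweep_colors(colors: &mut [u8], traced: &[bool], mark_state: u8) {
    for (c, &alive) in colors.iter_mut().zip(traced) {
        if alive {
            *c = flip(mark_state); // stays unavailable after the flip
        }
        // dead nodes are left as-is; the real sweep resnaps their
        // treadmill nodes after the `free` pointer instead
    }
}

fn main() {
    let mut colors = vec![1, 1, 1];
    sweep_colors(&mut colors, &[true, false, true], 1);
    assert_eq!(colors, vec![0, 1, 0]);
}
```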
-struct TreadMill{
-    free: *mut TreadMillNode,
-    scan: *mut TreadMillNode,
-    t : *mut TreadMillNode,
-    b : *mut TreadMillNode
-}
+use heap::Space;
+
+impl Space for FreeListSpace {
+    #[inline(always)]
+    fn start(&self) -> Address {
+        self.start
+    }
+    #[inline(always)]
+    fn end(&self) -> Address {
+        self.end
+    }
+    #[inline(always)]
+    fn alloc_map(&self) -> *mut u8 {
+        self.alloc_map.ptr
+    }
+    #[inline(always)]
+    fn trace_map(&self) -> *mut u8 {
+        self.trace_map.ptr
+    }
+}
+
+unsafe impl Sync for FreeListSpace {}
+unsafe impl Send for FreeListSpace {}
+
+impl fmt::Display for FreeListSpace {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "FreeListSpace\n").unwrap();
+        write!(f, "range={:#X} ~ {:#X}\n", self.start, self.end).unwrap();
+        let treadmill : &Treadmill = &self.treadmill.lock().unwrap();
+        write!(f, "treadmill: {}", treadmill)
+    }
+}
+
+struct Treadmill{
+    free: *mut TreadmillNode,
+    scan: *mut TreadmillNode,
+    b   : *mut TreadmillNode
+}

-impl TreadMill {
-    fn new(start: Address, end: Address) -> TreadMill {
+impl Treadmill {
+    fn new(start: Address, end: Address) -> Treadmill {
         let mut addr = start;
-        let free = TreadMillNode::singleton(addr);
+        let free = TreadmillNode::singleton(addr);
+        addr = addr.plus(BLOCK_SIZE);

         let mut tail = free;
         while addr < end {
-            tail = TreadMillNode::insert_after(tail, addr);
+            tail = unsafe {(&mut *tail)}.init_insert_after(addr);
             addr = addr.plus(BLOCK_SIZE);
         }
-        unimplemented!()
+        Treadmill {
+            free: free,
+            scan: free,
+            b: free
+        }
+    }
+
+    fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
+        let unavailable_color = objectmodel::load_mark_state();
+
+        // check if we have n_blocks available
+        let mut cur = self.free;
+        for _ in 0..n_blocks {
+            if unsafe{&*cur}.color == unavailable_color {
+                trace!("next block color is {}, no available blocks, return zero", unavailable_color);
+                return unsafe {Address::zero()};
+            }
+            cur = unsafe {&*cur}.next;
+        }
+
+        // we made sure that n_blocks are available, mark them as black
+        let mut cur2 = self.free;
+        for _ in 0..n_blocks {
+            unsafe{&mut *cur2}.color = unavailable_color;
+            cur2 = unsafe {&*cur2}.next
+        }
+        debug_assert!(cur == cur2);
+
+        let ret = self.free;
+        self.free = cur;
+        trace!("set free to {}", unsafe {&*cur});
+
+        unsafe{&*ret}.payload
     }
 }
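`alloc_blocks` makes two passes from `free`: the first confirms that `n_blocks` consecutive nodes still carry the available color, the second marks exactly those nodes unavailable and advances `free` past them. A simplified array-based model of the two passes (the API below is illustrative, not the repo's):

```rust
// Simplified model of alloc_blocks' two passes over the ring of blocks.
fn alloc_blocks(colors: &mut Vec<u8>, free: &mut usize, n: usize, unavailable: u8) -> Option<usize> {
    let len = colors.len();
    // pass 1: availability check
    for i in 0..n {
        if colors[(*free + i) % len] == unavailable {
            return None; // treadmill.rs returns Address::zero() here
        }
    }
    // pass 2: mark the blocks and bump the free pointer
    let start = *free;
    for i in 0..n {
        colors[(start + i) % len] = unavailable;
    }
    *free = (start + n) % len;
    Some(start)
}

fn main() {
    let mut colors = vec![0u8; 4];
    let mut free = 0;
    assert_eq!(alloc_blocks(&mut colors, &mut free, 2, 1), Some(0));
    assert_eq!(alloc_blocks(&mut colors, &mut free, 2, 1), Some(2));
    assert_eq!(alloc_blocks(&mut colors, &mut free, 1, 1), None);
}
```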
+impl fmt::Display for Treadmill {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut cursor = self.free;
+        write!(f, "\n").unwrap();
+        loop {
+            write!(f, "{}", unsafe{&*cursor}).unwrap();
+
+            if cursor == self.free {
+                write!(f, "(free)").unwrap();
+            }
+            if cursor == self.scan {
+                write!(f, "(scan)").unwrap();
+            }
+            if cursor == self.b {
+                write!(f, "(bottom)").unwrap();
+            }
+
+            if unsafe{&*cursor}.next() == self.free {
+                break;
+            } else {
+                write!(f, "\n->").unwrap();
+                cursor = unsafe{&*cursor}.next();
+            }
+        }
+        Ok(())
+    }
+}

-struct TreadMillNode {
+struct TreadmillNode {
     payload: Address,
-    prev: *mut TreadMillNode,
-    next: *mut TreadMillNode
+    color: u8,
+    prev: *mut TreadmillNode,
+    next: *mut TreadmillNode
+}
+
+impl fmt::Display for TreadmillNode {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "[{}-{:?}]", self.payload, self.color)
+    }
 }

-impl TreadMillNode {
-    fn singleton(addr: Address) -> *mut TreadMillNode {
-        let mut ptr = Box::into_raw(Box::new(TreadMillNode {
+impl TreadmillNode {
+    fn singleton(addr: Address) -> *mut TreadmillNode {
+        let mut ptr = Box::into_raw(Box::new(TreadmillNode {
             payload: addr,
+            // starts as 0 (1, i.e. mark_state, means allocated/alive)
+            color: objectmodel::flip(objectmodel::load_mark_state()),
             prev: ptr::null_mut(),
             next: ptr::null_mut(),
         }));
@@ -84,23 +321,114 @@ impl TreadMillNode {
     }

     /// returns the inserted node
-    fn insert_after(node: *mut TreadMillNode, addr: Address) -> *mut TreadMillNode {
+    fn init_insert_after(&mut self, addr: Address) -> *mut TreadmillNode {
         unsafe {
             // node <- ptr -> node.next
-            let mut ptr = Box::into_raw(Box::new(TreadMillNode {
+            let mut ptr = Box::into_raw(Box::new(TreadmillNode {
                 payload: addr,
+                color: objectmodel::flip(objectmodel::load_mark_state()),
                 // inserted between node and node.next
-                prev: node,
-                next: (*node).next
+                prev: self as *mut TreadmillNode,
+                next: self.next
             }));
             // ptr <- node.next
-            (*(*node).next).prev = ptr;
+            unsafe{(&mut *self.next)}.prev = ptr;
             // node -> ptr
-            (*node).next = ptr;
+            self.next = ptr;
             ptr
         }
     }
+    fn insert_after(&mut self, node: *mut TreadmillNode) {
+        unsafe {
+            // self <- node -> self.next
+            (&mut *node).next = self.next;
+            (&mut *node).prev = self as *mut TreadmillNode;
+
+            // self -> node
+            self.next = node;
+
+            // node <- node.next.prev
+            (&mut *(&mut *node).next).prev = node;
+        }
+    }
+
+    /// removes the current node from the treadmill, and returns the node
+    fn remove(&mut self) -> *mut TreadmillNode {
+        if self.next == self as *mut TreadmillNode && self.prev == self as *mut TreadmillNode {
+            // if this is the only node, return itself
+            self as *mut TreadmillNode
+        } else {
+            // we need to take it out of the list
+            unsafe {
+                use std::ptr;
+                // its prev node's next will be its next node
+                (&mut *self.prev).next = self.next as *mut TreadmillNode;
+                // its next node's prev will be its prev node
+                (&mut *self.next).prev = self.prev as *mut TreadmillNode;
+
+                // clear the current node's prev and next
+                self.prev = ptr::null_mut();
+                self.next = ptr::null_mut();
+            }
+            // then return it
+            self as *mut TreadmillNode
+        }
+    }
+
+    fn next(&self) -> *mut TreadmillNode {
+        self.next
+    }
+
+    fn prev(&self) -> *mut TreadmillNode {
+        self.prev
+    }
+}
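`insert_after` and `remove` are the doubly-linked-ring splice operations the sweep relies on: unlink a node, then splice it back in right after another node. An index-based sketch in safe Rust (an assumption for illustration; the real code uses raw pointers and `Box::into_raw`):

```rust
// Index-based sketch of the two ring operations TreadmillNode provides.
#[derive(Debug)]
struct Node { prev: usize, next: usize }

fn remove(ring: &mut [Node], i: usize) {
    let (p, n) = (ring[i].prev, ring[i].next);
    ring[p].next = n; // prev node now skips over i
    ring[n].prev = p; // next node now points back past i
}

fn insert_after(ring: &mut [Node], anchor: usize, i: usize) {
    let n = ring[anchor].next;
    ring[i].prev = anchor;
    ring[i].next = n;
    ring[anchor].next = i; // anchor -> i
    ring[n].prev = i;      // i <- old successor
}

fn main() {
    // a 3-node ring: 0 <-> 1 <-> 2 <-> 0
    let mut ring = vec![
        Node { prev: 2, next: 1 },
        Node { prev: 0, next: 2 },
        Node { prev: 1, next: 0 },
    ];
    remove(&mut ring, 1);
    insert_after(&mut ring, 2, 1); // resnap node 1 after node 2
    assert_eq!(ring[2].next, 1);
    assert_eq!(ring[0].prev, 1);
}
```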
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use super::BLOCK_SIZE;
+
+    #[test]
+    fn test_new_treadmill_space() {
+        let space = FreeListSpace::new(BLOCK_SIZE * 10);
+        println!("{}", space);
+    }
+
+    #[test]
+    fn test_treadmill_alloc() {
+        let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
+        for i in 0..10 {
+            let ret = space.alloc(BLOCK_SIZE, 8);
+            println!("Allocation{}: {}", i, ret);
+        }
+    }
+
+    #[test]
+    fn test_treadmill_alloc_spanblock() {
+        let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
+        for i in 0..5 {
+            let ret = space.alloc(BLOCK_SIZE * 2, 8);
+            println!("Allocation{}: {}", i, ret);
+        }
+    }
+
+    #[test]
+    fn test_treadmill_alloc_exhaust() {
+        let mut space = FreeListSpace::new(BLOCK_SIZE * 10);
+        for i in 0..20 {
+            let ret = space.alloc(BLOCK_SIZE, 8);
+            println!("Allocation{}: {}", i, ret);
+        }
+    }
+}
\ No newline at end of file
......
@@ -5,6 +5,7 @@ use heap::immix::ImmixSpace;
 use heap::immix::ImmixLineMarkTable;
 use heap::freelist::FreeListSpace;