Commit ef467bc0 authored by qinsoon

immix tiny mostly works

parent 12b9e253
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::*;
mod bitmap;
mod address_bitmap;
mod address_map;
@@ -21,3 +23,7 @@ pub mod objectdump;
pub use self::address_bitmap::AddressBitmap;
pub use self::address_map::AddressMap;
pub const SIZE_1KB: ByteSize = 1 << 10;
pub const SIZE_1MB: ByteSize = 1 << 20;
pub const SIZE_1GB: ByteSize = 1 << 30;
\ No newline at end of file
@@ -38,21 +38,19 @@ pub struct FreeListSpace {
pub trace_map: Arc<AddressMap<u8>>,
#[allow(dead_code)]
mmap: memmap::Mmap,
mmap: memmap::MmapMut,
treadmill: Mutex<Treadmill>
}
impl FreeListSpace {
pub fn new(space_size: usize) -> FreeListSpace {
let anon_mmap: memmap::Mmap = match memmap::Mmap::anonymous(
space_size + SPACE_ALIGN,
memmap::Protection::ReadWrite
) {
Ok(m) => m,
Err(_) => panic!("failed to call mmap")
};
let start: Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
let mut anon_mmap: memmap::MmapMut =
match memmap::MmapMut::map_anon(space_size + SPACE_ALIGN) {
Ok(m) => m,
Err(_) => panic!("failed to call mmap")
};
let start: Address = Address::from_ptr::<u8>(anon_mmap.as_mut_ptr()).align_up(SPACE_ALIGN);
let end: Address = start + space_size;
let trace_map = AddressMap::new(start, end);
......
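The backing memory for the space is now reserved with memmap::MmapMut::map_anon rather than the older memmap::Mmap::anonymous(len, Protection::ReadWrite), and the start address comes from as_mut_ptr(). A minimal standalone sketch of the same reserve-then-align pattern, assuming memmap 0.7 and an illustrative SPACE_ALIGN value:

```rust
// Sketch only: over-allocate an anonymous mapping by SPACE_ALIGN, then round
// the raw address up so the usable region starts on a SPACE_ALIGN boundary.
use memmap::MmapMut;

const SPACE_ALIGN: usize = 1 << 19; // 512K, an illustrative value

fn reserve_aligned(space_size: usize) -> (MmapMut, usize) {
    let mut mmap = MmapMut::map_anon(space_size + SPACE_ALIGN)
        .expect("failed to call mmap");
    let raw = mmap.as_mut_ptr() as usize;
    // round up to the next SPACE_ALIGN boundary (SPACE_ALIGN is a power of two)
    let start = (raw + SPACE_ALIGN - 1) & !(SPACE_ALIGN - 1);
    (mmap, start)
}
```

The mapping itself must be kept alive (as the #[allow(dead_code)] mmap field does) for as long as the aligned region is in use.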
@@ -257,6 +257,14 @@ fn gc() {
atomic::Ordering::SeqCst
);
// each space prepares for GC
{
let mut gccontext_guard = MY_GC.write().unwrap();
let mut gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.prepare_for_gc();
gccontext.immix_normal.prepare_for_gc();
}
trace!("GC starts");
// creates root deque
@@ -278,12 +286,12 @@ fn gc() {
// sweep
{
let gccontext_guard = MY_GC.read().unwrap();
let gccontext = gccontext_guard.as_ref().unwrap();
let mut gccontext_guard = MY_GC.write().unwrap();
let mut gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.sweep();
gccontext.immix_normal.sweep();
gccontext.lo.sweep();
// gccontext.lo.sweep();
}
objectmodel::flip_mark_state();
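The sweep block above now takes MY_GC.write() instead of MY_GC.read(): sweep() mutates the spaces, and a read guard can only hand out shared references. A minimal sketch of that pattern, with a hypothetical Space type standing in for the GC context:

```rust
// Sketch only: mutating data behind an RwLock requires the write guard;
// read() would only give &Option<Space>, which cannot yield &mut Space.
use std::sync::RwLock;

struct Space {
    live_bytes: usize,
}

impl Space {
    fn sweep(&mut self) {
        // reclaiming memory needs &mut self
        self.live_bytes = 0;
    }
}

fn main() {
    let my_gc: RwLock<Option<Space>> = RwLock::new(Some(Space { live_bytes: 42 }));

    let mut guard = my_gc.write().unwrap();
    if let Some(space) = guard.as_mut() {
        space.sweep();
        println!("live bytes after sweep: {}", space.live_bytes);
    }
}
```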
@@ -389,7 +397,7 @@ pub fn steal_trace_object(
immix::mark_object_traced(obj);
let encode = unsafe {
ImmixBlock::get_type_map_slot_static(obj.to_address()).load::<TinyObjectEncode>()
ImmixSpace::get_type_byte_slot_static(obj.to_address()).load::<TinyObjectEncode>()
};
for i in 0..encode.n_fields() {
@@ -408,7 +416,7 @@ pub fn steal_trace_object(
// get type encode
let (type_encode, type_size): (&TypeEncode, ByteOffset) = {
let type_slot = ImmixBlock::get_type_map_slot_static(obj.to_address());
let type_slot = ImmixSpace::get_type_byte_slot_static(obj.to_address());
let encode = unsafe { type_slot.load::<MediumObjectEncode>() };
let (type_id, type_size) = if encode.is_medium() {
(encode.type_id(), encode.size())
......
@@ -28,7 +28,7 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::sync::atomic::{AtomicBool, Ordering};
const TRACE_ALLOC_FASTPATH: bool = true;
const TRACE_ALLOC: bool = false;
#[repr(C)]
pub struct ImmixAllocator {
@@ -37,9 +37,14 @@ pub struct ImmixAllocator {
// to Address::zero() so that alloc will branch to slow path
cursor: Address,
limit: Address,
line: u8,
space: Raw<ImmixSpace>,
line: usize,
block: Option<Raw<ImmixBlock>>,
large_cursor: Address,
large_limit: Address,
large_block: Option<Raw<ImmixBlock>>,
space: Raw<ImmixSpace>,
mutator: *mut Mutator
}
@@ -54,9 +59,12 @@ impl ImmixAllocator {
// should not use Address::zero() other than initialization
self.cursor = Address::zero();
self.limit = Address::zero();
self.large_cursor = Address::zero();
self.large_limit = Address::zero();
}
self.line = LINES_IN_BLOCK as u8;
self.line = LINES_IN_BLOCK;
self.block = None;
self.large_block = None;
}
pub fn reset_after_gc(&mut self) {
@@ -67,8 +75,11 @@ impl ImmixAllocator {
ImmixAllocator {
cursor: unsafe { Address::zero() },
limit: unsafe { Address::zero() },
line: LINES_IN_BLOCK as u8,
line: LINES_IN_BLOCK,
block: None,
large_cursor: unsafe { Address::zero() },
large_limit: unsafe { Address::zero() },
large_block: None,
space,
mutator: ptr::null_mut()
}
@@ -79,76 +90,79 @@ impl ImmixAllocator {
}
pub fn destroy(&mut self) {
self.return_block();
self.return_block(true);
self.return_block(false);
}
#[inline(always)]
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
// this part of code will slow down allocation
let align = objectmodel::check_alignment(align);
let size = size + objectmodel::OBJECT_HEADER_SIZE;
// end
if TRACE_ALLOC_FASTPATH {
trace!("Mutator: fastpath alloc: size={}, align={}", size, align);
}
trace_if!(
TRACE_ALLOC,
"Mutator: fastpath alloc: size={}, align={}",
size,
align
);
let start = self.cursor.align_up(align);
let end = start + size;
if TRACE_ALLOC_FASTPATH {
trace!(
"Mutator: fastpath alloc: start=0x{:x}, end=0x{:x}",
start,
end
);
}
trace_if!(
TRACE_ALLOC,
"Mutator: fastpath alloc: start=0x{:x}, end=0x{:x}",
start,
end
);
if end > self.limit {
let ret = self.try_alloc_from_local(size, align);
if TRACE_ALLOC_FASTPATH {
trace!(
"Mutator: fastpath alloc: try_alloc_from_local()=0x{:x}",
ret
if size > BYTES_IN_LINE {
trace_if!(TRACE_ALLOC, "Mutator: overflow alloc()");
self.overflow_alloc(size, align)
} else {
trace_if!(
TRACE_ALLOC,
"Mutator: fastpath alloc: try_alloc_from_local()"
);
self.try_alloc_from_local(size, align)
}
if cfg!(debug_assertions) {
if !ret.is_aligned_to(align) {
use std::process;
println!("wrong alignment on 0x{:x}, expected align: {}", ret, align);
process::exit(102);
}
}
// this offset should be removed as well (for performance)
ret + (-objectmodel::OBJECT_HEADER_OFFSET)
} else {
if cfg!(debug_assertions) {
if !start.is_aligned_to(align) {
use std::process;
println!(
"wrong alignment on 0x{:x}, expected align: {}",
start,
align
);
process::exit(102);
}
}
self.cursor = end;
start
}
}
#[inline(never)]
pub fn overflow_alloc(&mut self, size: usize, align: usize) -> Address {
let start = self.large_cursor.align_up(align);
let end = start + size;
start + (-objectmodel::OBJECT_HEADER_OFFSET)
trace_if!(
TRACE_ALLOC,
"Mutator: overflow alloc: start={}, end={}",
start,
end
);
if end > self.large_limit {
self.alloc_from_global(size, align, true)
} else {
self.large_cursor = end;
start
}
}
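The allocator now keeps two bump regions: small requests bump cursor/limit inside the current block, while requests larger than a line are routed through overflow_alloc() and bump large_cursor/large_limit in a separate overflow block, so one large object does not waste the rest of a partially used block. A simplified, self-contained sketch of that branching (object headers, tracing, and the real slow path are omitted; addresses are plain usize here):

```rust
// Sketch only: two-cursor bump allocation in the style of the diff above.
const BYTES_IN_LINE: usize = 256; // matches LOG_BYTES_IN_LINE = 8

struct BumpAllocator {
    cursor: usize,
    limit: usize,
    large_cursor: usize,
    large_limit: usize,
}

impl BumpAllocator {
    fn alloc(&mut self, size: usize, align: usize) -> usize {
        let start = align_up(self.cursor, align);
        let end = start + size;
        if end > self.limit {
            if size > BYTES_IN_LINE {
                // too big for the line-based fast path: use the overflow cursor
                self.overflow_alloc(size, align)
            } else {
                self.alloc_slow(size, align)
            }
        } else {
            self.cursor = end;
            start
        }
    }

    fn overflow_alloc(&mut self, size: usize, align: usize) -> usize {
        let start = align_up(self.large_cursor, align);
        let end = start + size;
        if end > self.large_limit {
            // would request a fresh overflow block from the space
            self.alloc_slow(size, align)
        } else {
            self.large_cursor = end;
            start
        }
    }

    fn alloc_slow(&mut self, _size: usize, _align: usize) -> usize {
        unimplemented!("stand-in for alloc_from_global()")
    }
}

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}
```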
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn init_object<T>(&mut self, addr: Address, encode: T) {
let map_slot = ImmixBlock::get_type_map_slot_static(addr);
let map_slot = ImmixSpace::get_type_byte_slot_static(addr);
unsafe {
map_slot.store(encode);
}
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn init_object(&mut self, addr: Address, encode: u64) {
@@ -162,6 +176,7 @@ impl ImmixAllocator {
pub fn init_hybrid<T>(&mut self, addr: Address, encode: T, len: u64) {
unimplemented!()
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn init_hybrid(&mut self, addr: Address, encode: u64, len: u64) {
@@ -174,11 +189,16 @@ impl ImmixAllocator {
#[inline(never)]
pub fn try_alloc_from_local(&mut self, size: usize, align: usize) -> Address {
if self.line < LINES_IN_BLOCK as u8 {
if self.line < LINES_IN_BLOCK {
let opt_next_available_line = {
let cur_line = self.line;
self.block().get_next_available_line(cur_line)
};
trace_if!(
TRACE_ALLOC,
"Mutator: alloc from local, next available line: {:?}",
opt_next_available_line
);
match opt_next_available_line {
Some(next_available_line) => {
@@ -196,9 +216,7 @@
}
for line in next_available_line..end_line {
self.block()
.line_mark_table_mut()
.set(line, LineMark::FreshAlloc);
self.block().set_line_mark(line, LineMark::FreshAlloc);
}
// allocate fast path
@@ -208,18 +226,17 @@
self.cursor = end;
start
}
None => self.alloc_from_global(size, align)
None => self.alloc_from_global(size, align, false)
}
} else {
// we need to alloc from global space
self.alloc_from_global(size, align)
self.alloc_from_global(size, align, false)
}
}
fn alloc_from_global(&mut self, size: usize, align: usize) -> Address {
trace!("Mutator: slowpath: alloc_from_global");
self.return_block();
fn alloc_from_global(&mut self, size: usize, align: usize, request_large: bool) -> Address {
trace!("Mutator: slowpath: alloc_from_global()");
self.return_block(request_large);
loop {
// check if yield
@@ -233,17 +250,25 @@
// we zero lines that get used in try_alloc_from_local()
// b.lazy_zeroing();
self.block = Some(b);
self.cursor = self.block().mem_start();
self.limit = self.block().mem_start();
self.line = 0;
if request_large {
self.large_cursor = b.mem_start();
self.limit = b.mem_start() + BYTES_IN_BLOCK;
self.large_block = Some(b);
return self.alloc(size, align);
} else {
self.cursor = b.mem_start();
self.limit = b.mem_start();
self.line = 0;
self.block = Some(b);
trace!(
"Mutator: slowpath: new block starting from 0x{:x}",
self.cursor
);
trace!(
"Mutator: slowpath: new block starting from 0x{:x}",
self.cursor
);
return self.try_alloc_from_local(size, align);
return self.try_alloc_from_local(size, align);
}
}
None => {
continue;
@@ -253,30 +278,28 @@
}
pub fn prepare_for_gc(&mut self) {
self.return_block();
self.return_block(true);
self.return_block(false);
}
fn return_block(&mut self) {
if self.block.is_some() {
trace!("finishing block {:?}", self.block.as_ref().unwrap());
if cfg!(debug_assertions) {
let block = self.block.as_ref().unwrap();
ImmixAllocator::sanity_check_finished_block(block);
fn return_block(&mut self, request_large: bool) {
if request_large {
if self.large_block.is_some() {
trace!(
"finishing large block {}",
self.large_block.as_ref().unwrap().addr()
);
self.space
.return_used_block(self.large_block.take().unwrap());
}
} else {
if self.block.is_some() {
trace!("finishing block {}", self.block.as_ref().unwrap().addr());
self.space.return_used_block(self.block.take().unwrap());
}
self.space.return_used_block(self.block.take().unwrap());
}
}
#[cfg(feature = "use-sidemap")]
#[allow(unused_variables)]
fn sanity_check_finished_block(block: &ImmixBlock) {}
#[cfg(not(feature = "use-sidemap"))]
#[allow(unused_variables)]
fn sanity_check_finished_block(block: &ImmixBlock) {}
fn block(&mut self) -> &mut ImmixBlock {
self.block.as_mut().unwrap()
}
@@ -302,13 +325,15 @@ impl ImmixAllocator {
impl fmt::Display for ImmixAllocator {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.cursor.is_zero() {
write!(f, "Mutator (not initialized)")
write!(f, "Mutator (not initialized)").unwrap();
} else {
write!(f, "Mutator:\n").unwrap();
write!(f, "cursor= {:#X}\n", self.cursor).unwrap();
write!(f, "limit = {:#X}\n", self.limit).unwrap();
write!(f, "line = {}\n", self.line).unwrap();
write!(f, "block = {}", self.block.as_ref().unwrap())
write!(f, "large cursor = {}\n", self.large_cursor).unwrap();
write!(f, "large limit = {}\n", self.large_limit).unwrap();
}
Ok(())
}
}
This diff is collapsed.
@@ -13,6 +13,7 @@
// limitations under the License.
use utils::*;
use heap::*;
use std::mem::size_of;
mod immix_space;
@@ -45,12 +46,7 @@ pub use self::immix_space::is_object_traced;
// | ...... |
// |__________________|
pub const IMMIX_SPACE_ALIGN: ByteSize = (1 << 34); // 16GB
pub const IMMIX_SPACE_LOWBITS_MASK: usize = !(IMMIX_SPACE_ALIGN - 1);
// preallocating 16 GB for immix space
pub const LOG_BYTES_PREALLOC_IMMIX_SPACE: usize = 34;
pub const BYTES_PREALLOC_IMMIX_SPACE: ByteSize = 1 << LOG_BYTES_PREALLOC_IMMIX_SPACE;
// 64KB Immix Block
pub const LOG_BYTES_IN_BLOCK: usize = 16;
@@ -61,16 +57,17 @@ pub const LOG_BYTES_IN_LINE: usize = 8;
pub const BYTES_IN_LINE: ByteSize = (1 << LOG_BYTES_IN_LINE);
// 256K blocks per space
pub const BLOCKS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_BYTES_IN_BLOCK);
pub const BLOCKS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_BLOCK);
// 64M lines per space
pub const LINES_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_BYTES_IN_LINE);
pub const LINES_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_LINE);
// 2G words per space
pub const WORDS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_POINTER_SIZE);
pub const WORDS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_POINTER_SIZE);
// 256 lines per block
pub const LINES_IN_BLOCK: usize = 1 << (LOG_BYTES_IN_BLOCK - LOG_BYTES_IN_LINE);
pub const LOG_LINES_IN_BLOCK: usize = LOG_BYTES_IN_BLOCK - LOG_BYTES_IN_LINE;
// 64KB space metadata (we do not need this much though, but for alignment, we use 64KB)
pub const BYTES_META_SPACE: ByteSize = BYTES_IN_BLOCk;
pub const BYTES_META_SPACE: ByteSize = BYTES_IN_BLOCK;
// 256KB block mark table (1 byte per block)
pub const BYTES_META_BLOCK_MARK_TABLE: ByteSize = BLOCKS_IN_SPACE;
// 64MB line mark table
@@ -78,7 +75,7 @@ pub const BYTES_META_LINE_MARK_TABLE: ByteSize = LINES_IN_SPACE;
// 1GB GC byte table
pub const BYTES_META_GC_TABLE: ByteSize = WORDS_IN_SPACE >> 1;
// 1GB TYPE byte table
pub const BYTES_META_TYPE_TABLE: ByteSize = WORDS_IN_SPACE >> 2;
pub const BYTES_META_TYPE_TABLE: ByteSize = WORDS_IN_SPACE >> 1;
pub const OFFSET_META_BLOCK_MARK_TABLE: ByteOffset = BYTES_META_SPACE as ByteOffset;
pub const OFFSET_META_LINE_MARK_TABLE: ByteOffset =
@@ -86,7 +83,9 @@ pub const OFFSET_META_LINE_MARK_TABLE: ByteOffset =
pub const OFFSET_META_GC_TABLE: ByteOffset =
OFFSET_META_LINE_MARK_TABLE + BYTES_META_LINE_MARK_TABLE as ByteOffset;
pub const OFFSET_META_TYPE_TABLE: ByteOffset =
OFFSET_META_META_GC_TABLE + BYTES_META_GC_TABLE as ByteOffset;
OFFSET_META_GC_TABLE + BYTES_META_GC_TABLE as ByteOffset;
pub const OFFSET_MEM_START: ByteOffset =
OFFSET_META_TYPE_TABLE + BYTES_META_TYPE_TABLE as ByteOffset;
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
@@ -101,7 +100,7 @@ pub enum LineMark {
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum BlockMark {
Uninitialized,
Uninitialized = 0,
Usable,
Full
}
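The constants above lay out a fixed metadata prefix at the start of each preallocated space: a 64KB space header, then the block mark table, line mark table, GC byte table, and type byte table, with usable memory beginning at OFFSET_MEM_START. A sketch that restates those constants to show how the offsets accumulate (the values mirror the diff; the printout is illustrative):

```rust
// Sketch only: the side-metadata prefix of one 16GB space, as defined above.
const LOG_BYTES_PREALLOC_SPACE: usize = 34; // 16GB per space
const LOG_BYTES_IN_BLOCK: usize = 16;       // 64KB block
const LOG_BYTES_IN_LINE: usize = 8;         // 256B line
const LOG_POINTER_SIZE: usize = 3;          // 8-byte words

const BYTES_IN_BLOCK: usize = 1 << LOG_BYTES_IN_BLOCK;
const BLOCKS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_BLOCK);
const LINES_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_LINE);
const WORDS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_POINTER_SIZE);

const BYTES_META_SPACE: usize = BYTES_IN_BLOCK;             // 64KB header
const BYTES_META_BLOCK_MARK_TABLE: usize = BLOCKS_IN_SPACE; // 1 byte per block
const BYTES_META_LINE_MARK_TABLE: usize = LINES_IN_SPACE;   // 1 byte per line
const BYTES_META_GC_TABLE: usize = WORDS_IN_SPACE >> 1;     // 1GB
const BYTES_META_TYPE_TABLE: usize = WORDS_IN_SPACE >> 1;   // 1GB

const OFFSET_META_BLOCK_MARK_TABLE: usize = BYTES_META_SPACE;
const OFFSET_META_LINE_MARK_TABLE: usize =
    OFFSET_META_BLOCK_MARK_TABLE + BYTES_META_BLOCK_MARK_TABLE;
const OFFSET_META_GC_TABLE: usize =
    OFFSET_META_LINE_MARK_TABLE + BYTES_META_LINE_MARK_TABLE;
const OFFSET_META_TYPE_TABLE: usize =
    OFFSET_META_GC_TABLE + BYTES_META_GC_TABLE;
const OFFSET_MEM_START: usize =
    OFFSET_META_TYPE_TABLE + BYTES_META_TYPE_TABLE;

fn main() {
    // 64KB + 256KB + 64MB + 1GB + 1GB of metadata before the heap proper
    println!("usable memory starts at offset {:#x}", OFFSET_MEM_START);
}
```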
@@ -38,11 +38,15 @@ lazy_static! {
AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
pub const SPACE_ALIGN: ByteSize = (1 << 19); // 512K
// preallocating 16 GB for space
pub const LOG_BYTES_PREALLOC_SPACE: usize = 34;
pub const BYTES_PREALLOC_SPACE: ByteSize = 1 << LOG_BYTES_PREALLOC_SPACE;
pub const SPACE_ALIGN: ByteSize = BYTES_PREALLOC_SPACE; // 16GB
pub const SPACE_LOWBITS_MASK: usize = !(SPACE_ALIGN - 1);
#[repr(u8)]
#[derive(Copy, Clone)]
#[repr(u64)]
#[derive(Copy, Clone, Debug)]
pub enum SpaceDescriptor {
ImmixTiny,
ImmixNormal,
......
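SPACE_ALIGN is raised from 512K to the full 16GB preallocation size, with SPACE_LOWBITS_MASK clearing the offset within a space. Since each space is aligned to its own size, masking any address inside the space presumably recovers the space's start, where the metadata prefix above lives; a small sketch of that masking (the actual lookup code is not part of this diff):

```rust
// Sketch only: recover a space's start address from any address inside it.
const LOG_BYTES_PREALLOC_SPACE: usize = 34;
const SPACE_ALIGN: usize = 1 << LOG_BYTES_PREALLOC_SPACE; // 16GB
const SPACE_LOWBITS_MASK: usize = !(SPACE_ALIGN - 1);

fn space_start_of(addr: usize) -> usize {
    addr & SPACE_LOWBITS_MASK
}

fn main() {
    let space_start = 0x4_0000_0000usize; // some 16GB-aligned space start
    let obj = space_start + 0x12_3456;    // an object somewhere in that space
    assert_eq!(space_start_of(obj), space_start);
}
```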
@@ -154,15 +154,14 @@ pub extern "C" fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize,
let immix_tiny = ImmixSpace::new(SpaceDescriptor::ImmixTiny, immix_size >> 1);
trace!(" initializing normal immix space...");
let immix_normal = ImmixSpace::new(SpaceDescriptor::ImmixNormal, immix_size >> 1);
trace!(" initializing large object space...");
let lo_space = Arc::new(FreeListSpace::new(lo_size));
// trace!(" initializing large object space...");
// let lo_space = Arc::new(FreeListSpace::new(lo_size));
heap::gc::init(n_gcthreads);
*MY_GC.write().unwrap() = Some(GC {
immix_tiny,
immix_normal,
lo: lo_space,
gc_types: vec![],
roots: LinkedHashSet::new()
});
@@ -281,15 +280,12 @@ pub extern "C" fn muentry_alloc_tiny(
size: usize,
align: usize
) -> ObjectReference {
let addr = unsafe { &mut *mutator }.tiny.alloc(size, align);
trace!(
"muentry_alloc_tiny(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
addr
);
unsafe { addr.to_object_reference() }
unsafe {
(&mut *mutator)
.tiny
.alloc(size, align)
.to_object_reference()
}
}
#[inline(always)]
@@ -299,15 +295,12 @@ pub extern "C" fn muentry_alloc_normal(
size: usize,
align: usize
) -> ObjectReference {
let addr = unsafe { &mut *mutator }.normal.alloc(size, align);
trace!(
"muentry_alloc_normal(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
addr
);
unsafe { addr.to_object_reference() }
unsafe {
(&mut *mutator)
.normal
.alloc(size, align)
.to_object_reference()
}
}
/// allocates an object with slowpath in the immix space
@@ -318,18 +311,7 @@ pub extern "C" fn muentry_alloc_tiny_slow(
size: usize,
align: usize
) -> Address {
let ret = unsafe { &mut *mutator }
.tiny
.try_alloc_from_local(size, align);
trace!(
"muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
ret
);
ret
unsafe { (&mut *mutator).tiny.try_alloc_from_local(size, align) }
}
/// allocates an object with slowpath in the immix space
@@ -340,18 +322,7 @@ pub extern "C" fn muentry_alloc_normal_slow(
size: usize,
align: usize
) -> Address {
let ret = unsafe { &mut *mutator }
.normal
.try_alloc_from_local(size, align);
trace!(
"muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
ret
);
ret
unsafe { (&mut *mutator).normal.try_alloc_from_local(size, align) }