Commit b4c73e34 authored by qinsoon's avatar qinsoon

able to compile gc (not working yet)

parent 7c205d27
......@@ -14,12 +14,12 @@
#![allow(dead_code)]
use std;
use std::u32;
use std::sync::Arc;
use utils::POINTER_SIZE;
use utils::ByteSize;
use utils::math::align_up;
use objectmodel;
use std::u32;
pub const GCTYPE_INIT_ID: u32 = u32::MAX;
// Id is smaller than the alignment of the other fields, so it needs to go at the end
......
......@@ -15,6 +15,7 @@
mod bitmap;
mod address_bitmap;
mod address_map;
pub mod ptr;
pub mod gctype;
pub mod objectdump;
......
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::Address;
use utils::ByteSize;
use std::ops::Deref;
use std::ops::DerefMut;
use std::fmt;
use std::mem::transmute;
#[repr(C)]
pub struct Raw<T: RawMemoryMetadata> {
inner: *mut T
}
impl<T: RawMemoryMetadata> Raw<T> {
pub unsafe fn from_ptr(ptr: *mut T) -> Raw<T> {
debug_assert!(!ptr.is_null());
Raw { inner: ptr }
}
pub unsafe fn from_addr(addr: Address) -> Raw<T> {
debug_assert!(!addr.is_zero());
Raw {
inner: addr.to_ptr_mut()
}
}
pub fn addr(&self) -> Address {
Address::from_mut_ptr(self.inner)
}
}
impl<T: RawMemoryMetadata> Clone for Raw<T> {
fn clone(&self) -> Self {
Raw { inner: self.inner }
}
}
impl<T: RawMemoryMetadata> Deref for Raw<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { transmute(self.inner) }
}
}
impl<T: RawMemoryMetadata> DerefMut for Raw<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { transmute(self.inner) }
}
}
impl<T: fmt::Debug + RawMemoryMetadata> fmt::Debug for Raw<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{:?}", &self)
}
}
impl<T: fmt::Display + RawMemoryMetadata> fmt::Display for Raw<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", &self)
}
}
unsafe impl<T: RawMemoryMetadata + Send> Send for Raw<T> {}
unsafe impl<T: RawMemoryMetadata + Sync> Sync for Raw<T> {}
pub trait RawMemoryMetadata {
/// the address of the metadata
fn addr(&self) -> Address;
/// the start address of the memory area (after the metadata)
fn mem_start(&self) -> Address;
}
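Raw<T> is nothing more than a non-null raw pointer that hands the metadata back out through Deref/DerefMut. A minimal usage sketch, assuming a hypothetical BlockMeta type (not part of this commit) that implements RawMemoryMetadata, and assuming Address supports adding a byte offset as it does elsewhere in this commit:

// Hypothetical metadata type living at the start of a raw memory region.
#[repr(C)]
struct BlockMeta {
    live_lines: usize
}

impl RawMemoryMetadata for BlockMeta {
    fn addr(&self) -> Address {
        Address::from_mut_ptr(self as *const BlockMeta as *mut BlockMeta)
    }
    // usable memory starts right after the metadata itself
    fn mem_start(&self) -> Address {
        self.addr() + ::std::mem::size_of::<BlockMeta>()
    }
}

// caller guarantees `meta_addr` points at an initialized BlockMeta
unsafe fn bump_live_lines(meta_addr: Address) -> Address {
    let mut meta: Raw<BlockMeta> = Raw::from_addr(meta_addr);
    meta.live_lines += 1;     // DerefMut reaches the underlying struct
    meta.mem_start()          // Deref lets the trait methods be called too
}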
......@@ -11,6 +11,10 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::Address;
use heap::gc;
use heap::immix;
use heap::Mutator;
mod malloc_list;
mod treadmill;
......@@ -19,15 +23,12 @@ mod treadmill;
pub use heap::freelist::treadmill::FreeListSpace;
use std::sync::Arc;
use heap::gc;
use utils::Address;
use heap::immix;
#[inline(never)]
pub fn alloc_large(
size: usize,
align: usize,
mutator: &mut immix::ImmixMutatorLocal,
mutator: &mut Mutator,
space: Arc<FreeListSpace>
) -> Address {
loop {
......
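alloc_large now takes the unified Mutator instead of the immix-specific mutator. A hedged call-site sketch; the size, alignment and the lo_space handle are illustrative:

// Hypothetical call site: a 1 KiB, 8-byte aligned object from the large
// object space; `mutator` and `lo_space` are assumed to exist already.
fn alloc_big(mutator: &mut Mutator, lo_space: Arc<FreeListSpace>) -> Address {
    let addr = alloc_large(1024, 8, mutator, lo_space);
    debug_assert!(!addr.is_zero());
    addr
}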
......@@ -112,12 +112,7 @@ impl FreeListSpace {
#[inline(always)]
#[cfg(feature = "use-sidemap")]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
objectmodel::is_traced(
self.trace_map(),
self.start,
unsafe { addr.to_object_reference() },
mark_state
)
unimplemented!()
}
#[inline(always)]
......@@ -236,13 +231,8 @@ impl Space for FreeListSpace {
}
#[inline(always)]
fn alloc_map(&self) -> *mut u8 {
self.alloc_map.ptr
}
#[inline(always)]
fn trace_map(&self) -> *mut u8 {
self.trace_map.ptr
fn is_valid_object(&self, addr: Address) -> bool {
true
}
}
......
......@@ -12,35 +12,67 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::ByteSize;
use utils::ByteOffset;
mod immix_space;
mod immix_mutator;
pub use self::immix_space::ImmixSpace;
pub use self::immix_mutator::ImmixMutatorLocal;
pub use self::immix_mutator::ImmixMutatorGlobal;
pub use self::immix_space::LineMarkTable as ImmixLineMarkTable;
pub use self::immix_mutator::MUTATORS;
pub use self::immix_mutator::N_MUTATORS;
pub use self::immix_space::ImmixBlock;
pub use self::immix_mutator::ImmixAllocator;
pub use self::immix_mutator::CURSOR_OFFSET;
pub use self::immix_mutator::LIMIT_OFFSET;
pub use self::immix_space::mark_object_traced;
pub use self::immix_space::is_object_traced;
pub const LOG_BYTES_IN_LINE: usize = 8;
pub const BYTES_IN_LINE: usize = (1 << LOG_BYTES_IN_LINE);
pub const BYTES_IN_LINE: ByteSize = (1 << LOG_BYTES_IN_LINE);
pub const LOG_BYTES_IN_BLOCK: usize = 16;
pub const BYTES_IN_BLOCK: usize = (1 << LOG_BYTES_IN_BLOCK);
pub const LINES_IN_BLOCK: usize = (1 << (LOG_BYTES_IN_BLOCK - LOG_BYTES_IN_LINE));
pub const BYTES_IN_BLOCK: ByteSize = (1 << LOG_BYTES_IN_BLOCK);
/// size of metadata for block (should be the same as size_of::<ImmixBlock>())
pub const BLOCK_META: ByteSize = 16;
/// GC map immediately follows the metadata
pub const OFFSET_GC_MAP_IN_BLOCK: ByteOffset = BLOCK_META as ByteOffset;
/// GC map byte size
pub const BYTES_GC_MAP_IN_BLOCK: ByteSize = (BYTES_IN_BLOCK - BLOCK_META) / 9 / 2;
/// type map immediately follows the GC map
pub const OFFSET_TYPE_MAP_IN_BLOCK: ByteOffset =
OFFSET_GC_MAP_IN_BLOCK + BYTES_GC_MAP_IN_BLOCK as isize;
/// type map byte size
pub const BYTES_TYPE_MAP_IN_BLOCK: ByteSize = BYTES_GC_MAP_IN_BLOCK;
/// start of the memory available for actual use
pub const OFFSET_MEMORY_START_IN_BLOCK: ByteOffset =
OFFSET_TYPE_MAP_IN_BLOCK + BYTES_TYPE_MAP_IN_BLOCK as isize;
/// size of usable memory in a block
pub const BYTES_MEM_IN_BLOCK: ByteSize =
BYTES_IN_BLOCK - BLOCK_META - BYTES_GC_MAP_IN_BLOCK - BYTES_TYPE_MAP_IN_BLOCK;
/// how many lines are in a block (227)
pub const LINES_IN_BLOCK: usize =
(BYTES_IN_BLOCK - BLOCK_META - BYTES_GC_MAP_IN_BLOCK - BYTES_TYPE_MAP_IN_BLOCK) / BYTES_IN_LINE;
pub const IMMIX_SPACE_ALIGN: ByteSize = (1 << 19); // 512K
pub const IMMIX_SPACE_LOWBITS_MASK: usize = !(IMMIX_SPACE_ALIGN - 1);
pub const IMMIX_BLOCK_ALIGN: ByteSize = BYTES_IN_BLOCK; // 64K
pub const IMMIX_BLOCK_LOWBITS_MASK: usize = !(IMMIX_BLOCK_ALIGN - 1);
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum LineMark {
Free,
Free = 0,
Live,
FreshAlloc,
ConservLive,
PrevLive
}
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum BlockMark {
Uninitialized,
Usable,
Full
}
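These constants carve each 64 KiB block into block metadata, a GC map, a type map and usable memory. A small sketch spelling out the arithmetic they imply:

// Sketch of the arithmetic implied by the constants above (64 KiB blocks,
// 256-byte lines):
//   BYTES_GC_MAP_IN_BLOCK   = (65536 - 16) / 9 / 2 = 3640
//   BYTES_TYPE_MAP_IN_BLOCK = 3640
//   BYTES_MEM_IN_BLOCK      = 65536 - 16 - 3640 - 3640 = 58240
//   LINES_IN_BLOCK          = 58240 / 256 = 227 (integer division)
fn _check_block_layout() {
    assert_eq!(BYTES_GC_MAP_IN_BLOCK, 3640);
    assert_eq!(OFFSET_TYPE_MAP_IN_BLOCK, 16 + 3640);
    assert_eq!(OFFSET_MEMORY_START_IN_BLOCK, 16 + 3640 + 3640);
    assert_eq!(BYTES_MEM_IN_BLOCK, 58240);
    assert_eq!(LINES_IN_BLOCK, 227);
}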
......@@ -12,12 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::Address;
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::*;
use utils::bit_utils;
use std::sync::atomic::AtomicUsize;
use objectmodel;
use common::ptr::*;
use heap::immix::*;
use std::sync::atomic::AtomicUsize;
use std::sync::RwLock;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
pub mod immix;
pub mod freelist;
......@@ -34,82 +38,172 @@ lazy_static! {
AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
pub const SPACE_ALIGN: ByteSize = (1 << 19); // 512K
pub const SPACE_LOWBITS_MASK: usize = !(SPACE_ALIGN - 1);
#[repr(u8)]
#[derive(Copy, Clone)]
pub enum SpaceDescriptor {
ImmixTiny,
ImmixNormal,
Freelist
}
impl SpaceDescriptor {
pub fn get(obj: ObjectReference) -> SpaceDescriptor {
unsafe {
obj.to_address()
.mask(SPACE_LOWBITS_MASK)
.load::<SpaceDescriptor>()
}
}
}
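The descriptor lookup relies on every space being aligned to SPACE_ALIGN and keeping its SpaceDescriptor at the space start, so masking the low bits of any object address lands back on that word. A sketch of both sides of the protocol; the store side is an assumption (it is not shown in this hunk), including the assumed Address::store counterpart to load:

// Assumed store side: each space writes its descriptor at its aligned start
// (assumes Address has a `store` counterpart to `load`).
unsafe fn publish_descriptor(space_start: Address, desc: SpaceDescriptor) {
    debug_assert!(space_start.is_aligned_to(SPACE_ALIGN));
    space_start.store(desc);
}

// Lookup side, equivalent to SpaceDescriptor::get(): clear the low bits of
// the object address to land on the space start, then read the descriptor.
unsafe fn descriptor_of(obj: ObjectReference) -> SpaceDescriptor {
    obj.to_address()
        .mask(SPACE_LOWBITS_MASK)
        .load::<SpaceDescriptor>()
}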
pub trait Space {
#[inline(always)]
fn start(&self) -> Address;
#[inline(always)]
fn end(&self) -> Address;
#[inline(always)]
fn alloc_map(&self) -> *mut u8;
fn is_valid_object(&self, addr: Address) -> bool;
#[inline(always)]
fn trace_map(&self) -> *mut u8;
fn addr_in_space(&self, addr: Address) -> bool {
addr >= self.start() && addr < self.end()
}
}
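The trait now exposes only start, end and is_valid_object, plus the default addr_in_space. A hypothetical implementor, sketched against just the trait surface shown in this hunk (real spaces carry allocator state as well):

// Hypothetical space used only to illustrate the trait surface shown here.
struct DummySpace {
    base: Address,
    limit: Address
}

impl Space for DummySpace {
    fn start(&self) -> Address {
        self.base
    }
    fn end(&self) -> Address {
        self.limit
    }
    fn is_valid_object(&self, _addr: Address) -> bool {
        // a real space would consult its allocation metadata here
        true
    }
    // addr_in_space() comes for free from the default method above
}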
    #[inline(always)]
    #[cfg(feature = "use-sidemap")]
    fn is_valid_object(&self, addr: Address) -> bool {
        let start = self.start();
        let end = self.end();

        if addr >= end || addr < start {
            return false;
        }

        let index = ((addr - start) >> LOG_POINTER_SIZE) as isize;

        // use side map
        if !bit_utils::test_nth_bit_u8(
            unsafe { *self.alloc_map().offset(index) },
            objectmodel::OBJ_START_BIT,
            1
        ) {
            return false;
        }

        if !addr.is_aligned_to(POINTER_SIZE) {
            return false;
        }

        true
    }

    #[inline(always)]
    #[cfg(not(feature = "use-sidemap"))]
    fn is_valid_object(&self, addr: Address) -> bool {
        let start = self.start();
        let end = self.end();

        if addr >= end || addr < start {
            return false;
        }

        // use header
        let hdr = unsafe { (addr + objectmodel::OBJECT_HEADER_OFFSET).load::<u64>() };
        if !objectmodel::header_is_object_start(hdr) {
            return false;
        }

        if !addr.is_aligned_to(POINTER_SIZE) {
            return false;
        }

        true
    }

    #[inline(always)]
    fn addr_in_space(&self, addr: Address) -> bool {
        addr >= self.start() && addr < self.end()
    }
}

#[allow(dead_code)]
pub const ALIGNMENT_VALUE: u8 = 1;

#[inline(always)]
#[allow(dead_code)]
pub fn fill_alignment_gap(start: Address, end: Address) -> () {
    debug_assert!(end >= start);
    unsafe {
        start.memset(ALIGNMENT_VALUE, end - start);
    }
}

#[allow(dead_code)]
pub const ALIGNMENT_VALUE: u8 = 1;

#[inline(always)]
#[allow(dead_code)]
pub fn fill_alignment_gap(start: Address, end: Address) -> () {
    debug_assert!(end >= start);
    unsafe {
        start.memset(ALIGNMENT_VALUE, end - start);
    }
}

const MAX_MUTATORS: usize = 1024;

lazy_static! {
    pub static ref MUTATORS : RwLock<Vec<Option<Arc<MutatorGlobal>>>> = {
        let mut ret = Vec::with_capacity(MAX_MUTATORS);
        for _ in 0..MAX_MUTATORS {
            ret.push(None);
        }
        RwLock::new(ret)
    };

    pub static ref N_MUTATORS : RwLock<usize> = RwLock::new(0);
}

#[repr(C)]
pub struct Mutator {
    id: usize,
    pub tiny: ImmixAllocator,
    pub normal: ImmixAllocator,
    global: Arc<MutatorGlobal>
}

impl Mutator {
    pub fn new(
        tiny: ImmixAllocator,
        normal: ImmixAllocator,
        global: Arc<MutatorGlobal>
    ) -> Mutator {
        let mut id_lock = N_MUTATORS.write().unwrap();
        {
            let mut mutators_lock = MUTATORS.write().unwrap();
            mutators_lock.remove(*id_lock);
            mutators_lock.insert(*id_lock, Some(global.clone()));
        }

        let ret = Mutator {
            id: *id_lock,
            tiny,
            normal,
            global
        };
        *id_lock += 1;

        ret
    }

    pub fn id(&self) -> usize {
        self.id
    }

    pub fn reset(&mut self) {
        self.tiny.reset();
        self.normal.reset();
    }

    pub fn reset_after_gc(&mut self) {
        self.reset()
    }

    pub fn prepare_for_gc(&mut self) {
        self.tiny.prepare_for_gc();
        self.normal.prepare_for_gc();
    }

    pub fn destroy(&mut self) {
        let mut mutator_count_lock = N_MUTATORS.write().unwrap();

        let mut mutators_lock = MUTATORS.write().unwrap();
        mutators_lock.push(None);
        mutators_lock.swap_remove(self.id);

        *mutator_count_lock = *mutator_count_lock - 1;

        if cfg!(debug_assertions) {
            debug!(
                "destroy mutator. Now live mutators = {}",
                *mutator_count_lock
            );
        }
    }

    pub fn yieldpoint(&mut self) {
        if self.global.take_yield() {
            self.yieldpoint_slow();
        }
    }

    #[inline(never)]
    pub fn yieldpoint_slow(&mut self) {
        trace!("Mutator{}: yieldpoint triggered, slow path", self.id);
        gc::sync_barrier(self);
    }
}

pub struct MutatorGlobal {
    take_yield: AtomicBool,
    still_blocked: AtomicBool
}

impl MutatorGlobal {
    pub fn new() -> MutatorGlobal {
        MutatorGlobal {
            take_yield: AtomicBool::new(false),
            still_blocked: AtomicBool::new(false)
        }
    }

    #[inline(always)]
    pub fn is_still_blocked(&self) -> bool {
        self.still_blocked.load(Ordering::SeqCst)
    }
    pub fn set_still_blocked(&self, b: bool) {
        self.still_blocked.store(b, Ordering::SeqCst);
    }

    pub fn set_take_yield(&self, b: bool) {
        self.take_yield.store(b, Ordering::SeqCst);
    }
    #[inline(always)]
    pub fn take_yield(&self) -> bool {
        self.take_yield.load(Ordering::SeqCst)
    }
}
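A safepoint here is a handshake: the collector sets take_yield on every registered MutatorGlobal, and each mutator polls it via yieldpoint(), falling into gc::sync_barrier() on the slow path. A sketch of that handshake; the collector-side loop is an assumption, only MUTATORS, set_take_yield and yieldpoint come from the code above:

// Assumed collector-side sketch: ask every live mutator to stop at its next
// yieldpoint.
fn request_safepoint() {
    let mutators = MUTATORS.read().unwrap();
    for m in mutators.iter() {
        if let Some(ref global) = *m {
            global.set_take_yield(true);
        }
    }
}

// Mutator side: long-running code polls yieldpoint(); once take_yield is
// observed, the slow path enters gc::sync_barrier().
fn mutator_work(m: &mut Mutator) {
    loop {
        // ... allocate / run application work ...
        m.yieldpoint();
    }
}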
......@@ -16,7 +16,7 @@ use std::sync::atomic;
use utils::ByteSize;
#[cfg(feature = "use-sidemap")]
mod sidemap;
pub mod sidemap;
#[cfg(not(feature = "use-sidemap"))]
mod header;
......@@ -25,9 +25,7 @@ mod header;
pub static INIT_MARK_STATE: usize = 1;
static MARK_STATE: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
pub fn init() {
MARK_STATE.store(INIT_MARK_STATE, atomic::Ordering::SeqCst);
}
pub fn init() {}
pub fn flip_mark_state() {
let mark_state = MARK_STATE.load(atomic::Ordering::SeqCst);
......@@ -60,37 +58,9 @@ pub use self::sidemap::MINIMAL_ALIGNMENT;
pub use self::sidemap::OBJECT_HEADER_SIZE;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJECT_HEADER_OFFSET;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::REF_BITS_LEN;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJ_START_BIT;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::SHORT_ENCODE_BIT;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::print_object;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::mark_as_traced;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::mark_as_untraced;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::is_traced;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_is_fix_size;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_has_ref_map;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_is_object_start;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_get_gctype_id;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_get_ref_map;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_get_object_size;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::header_get_hybrid_length;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::get_ref_byte;
// --- header ----
......
......@@ -44,7 +44,7 @@ use objectmodel::sidemap::object_encode::SMALL_ID_WIDTH;
/// |________________|
///
#[repr(C, packed)]
struct GlobalTypeTable {
pub struct GlobalTypeTable {
/// current index for small entries
small_entry_i: usize,
/// current index for large entries
......@@ -105,7 +105,7 @@ impl GlobalTypeTable {
}
#[inline(always)]
fn table() -> &'static mut [TypeEncode; N_TYPES] {
pub fn table() -> &'static mut [TypeEncode; N_TYPES] {
unsafe { mem::transmute(global_type_table_ptr.load(Ordering::Relaxed)) }
}
......
......@@ -81,6 +81,8 @@
use std::sync::atomic;
use common::gctype::GCType;
use heap::SpaceDescriptor;
use heap::immix::*;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
......@@ -97,49 +99,13 @@ pub const OBJECT_HEADER_OFFSET: ByteOffset = 0;
pub type TypeID = usize;
pub const N_TYPES: usize = 1 << 23;
pub mod object_encode;
pub mod type_encode;
pub mod global_type_table;
mod object_encode;
mod type_encode;
mod global_type_table;
#[inline(always)]
pub fn header_is_object_start(hdr: u64) -> bool {
unimplemented!()
}
#[inline(always)]
pub fn header_is_fix_size(hdr: u64) -> bool {
unimplemented!()
}
#[inline(always)]
pub fn header_is_traced(hdr: u64, mark_state: u8) -> bool {
unimplemented!()
}
#[inline(always)]
pub fn header_has_ref_map(hdr: u64) -> bool {
unimplemented!()
}
#[inline(always)]
pub fn header_get_ref_map(hdr: u64) -> u32 {
unimplemented!()
}
#[inline(always)]
pub fn header_get_hybrid_length(hdr: u64) -> u32 {
unimplemented!()
}
#[inline(always)]
pub fn header_get_gctype_id(hdr: u64) -> u32 {
unimplemented!()
}
#[inline(always)]
pub fn header_get_object_size(hdr: u64) -> u32 {
unimplemented!()
}
pub use objectmodel::sidemap::object_encode::*;
pub use objectmodel::sidemap::type_encode::*;
pub use objectmodel::sidemap::global_type_table::*;
pub fn gen_gctype_encode(ty: &GCType) -> u64 {
unimplemented!()
......@@ -151,128 +117,5 @@ pub fn gen_hybrid_gctype_encode(ty: &GCType, length: u32) -> u64 {
#[allow(unused_variables)]
pub fn print_object(obj: Address, space_start: Address, trace_map: *mut u8, alloc_map: *mut u8) {
let mut cursor = obj;
trace!("OBJECT 0x{:x}", obj);
loop {
let hdr = get_ref_byte(
alloc_map,
space_start,
unsafe { cursor.to_object_reference() }
);
let (ref_bits, short_encode) = (
bit_utils::lower_bits_u8(hdr, REF_BITS_LEN),
bit_utils::test_nth_bit_u8(hdr, SHORT_ENCODE_BIT, 1)
);
trace!(
"0x{:x} | val: 0x{:15x} | {}, hdr: {:b}",
cursor,
unsafe { cursor.load::<u64>() },
interpret_hdr_for_print_object(hdr, 0),
hdr
);
cursor += POINTER_SIZE;
trace!(
"0x{:x} | val: 0x{:15x} | {}",
cursor,
unsafe { cursor.load::<u64>() },
interpret_hdr_for_print_object(hdr, 1)
);
cursor += POINTER_SIZE;
trace!(
"0x{:x} | val: 0x{:15x} | {}",
cursor,
unsafe { cursor.load::<u64>() },
interpret_hdr_for_print_object(hdr, 2)
);
cursor += POINTER_SIZE;
trace!(
"0x{:x} | val: 0x{:15x} | {}",
cursor,
unsafe { cursor.load::<u64>() },
interpret_hdr_for_print_object(hdr, 3)
);
cursor += POINTER_SIZE;
trace!(
"0x{:x} | val: 0x{:15x} | {}",
cursor,
unsafe { cursor.load::<u64>() },
interpret_hdr_for_print_object(hdr, 4)
);
cursor += POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x} | {} {}",
cursor, unsafe{cursor.load::<u64>()}, interpret_hdr_for_print_object(hdr, 5),
{
if !short_encode {
"MORE..."
} else {
""
}
});
if short_encode {
return;
}
}
}
// index between 0 and 5
fn interpret_hdr_for_print_object(hdr: u8, index: usize) -> &'static str {
if bit_utils::test_nth_bit_u8(hdr, index, 1) {
"REF "
} else {
"NON-REF"
}
}
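For reference, the printer being removed decodes one side-map byte per object: the low REF_BITS_LEN bits flag which of the leading words are references, and SHORT_ENCODE_BIT says whether a single byte was enough. A decode sketch against those (now unexported) definitions:

// `hdr` is one map byte for one object; any concrete value is illustrative.
fn decode_map_byte(hdr: u8) -> (Vec<bool>, bool) {
    let refs: Vec<bool> = (0..REF_BITS_LEN)
        .map(|i| bit_utils::test_nth_bit_u8(hdr, i, 1))
        .collect();
    let short_encode = bit_utils::test_nth_bit_u8(hdr, SHORT_ENCODE_BIT, 1);
    (refs, short_encode)
}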
#[inline(always)]
pub fn mark_as_traced(
trace_map: *mut u8,
space_start: Address,
obj: ObjectReference,
mark_state: u8