Commit 0d6b3fc2 authored by qinsoon

large object space

parent 5b25bf30
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common::ptr::*;
use heap::*;
use objectmodel::sidemap::*;
use std::ptr;
#[repr(C)]
pub struct FreelistAllocator {
    space: Raw<FreelistSpace>,
    mutator: *mut Mutator
}

impl FreelistAllocator {
    pub fn new(space: Raw<FreelistSpace>) -> FreelistAllocator {
        FreelistAllocator {
            space,
            mutator: ptr::null_mut()
        }
    }

    pub fn set_mutator(&mut self, mutator: *mut Mutator) {
        self.mutator = mutator;
    }

    /// allocates `size` bytes at the given alignment; yields to the GC and
    /// retries until the space can satisfy the request
    pub fn alloc(&mut self, size: ByteSize, align: ByteSize) -> Address {
        loop {
            unsafe { &mut *self.mutator }.yieldpoint();

            let ret = self.space.alloc(size, align);
            if ret.is_zero() {
                gc::trigger_gc();
            } else {
                return ret;
            }
        }
    }

    /// stores the type encoding for a freshly allocated object into the
    /// space's side metadata
    pub fn init_object(&mut self, addr: Address, encode: LargeObjectEncode) {
        let slot = self.space.get_type_encode_slot(addr);
        unsafe {
            slot.store(encode);
        }
    }
}
\ No newline at end of file
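// Usage sketch (not part of this commit): a FreelistAllocator is created from
// a Raw<FreelistSpace> handle and wired to its owning Mutator before any
// allocation, mirroring what new_mutator() does later in this diff:
//
//     let mut lo = FreelistAllocator::new(space);
//     lo.set_mutator(mutator_ptr);
//     let addr = lo.alloc(4096, 8);    // may trigger GC and retry
//     lo.init_object(addr, encode);    // store the LargeObjectEncode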
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use utils::Address;
use aligned_alloc;
use std::collections::LinkedList;
/// A simple malloc-backed large object space: each allocation gets its own
/// aligned malloc block, tracked as a node in a linked list.
pub struct FreeListSpace {
    current_nodes: LinkedList<Box<FreeListNode>>,
    node_id: usize,
    size: usize,
    used_bytes: usize
}

impl FreeListSpace {
    pub fn new(size: usize) -> FreeListSpace {
        FreeListSpace {
            current_nodes: LinkedList::new(),
            node_id: 0,
            size,
            used_bytes: 0
        }
    }
    #[allow(unused_variables)]
    pub fn mark(&mut self, obj: Address) {}

    /// allocates `size` bytes via aligned malloc; returns a zero address when
    /// the space budget would be exceeded, so the caller can trigger a GC
    pub fn alloc(&mut self, size: usize, align: usize) -> Address {
        if self.used_bytes + size > self.size {
            unsafe { Address::zero() }
        } else {
            let ret = aligned_alloc::aligned_alloc(size, align);
            let addr = Address::from_ptr::<()>(ret);

            self.current_nodes.push_front(Box::new(FreeListNode {
                id: self.node_id,
                start: addr,
                size,
                mark: NodeMark::FreshAlloc
            }));
            self.node_id += 1;
            self.used_bytes += size;

            addr
        }
    }
    pub fn sweep(&mut self) {
        let (new_nodes, new_used_bytes) = {
            let mut ret = LinkedList::new();
            let nodes = &mut self.current_nodes;
            let mut used_bytes = 0;

            while !nodes.is_empty() {
                let mut node = nodes.pop_front().unwrap();
                match node.mark {
                    NodeMark::Live => {
                        // survived this GC: keep the node, but demote its mark
                        // so it must be re-marked to survive the next GC
                        node.set_mark(NodeMark::PrevLive);
                        used_bytes += node.size;
                        ret.push_back(node);
                    }
                    NodeMark::PrevLive | NodeMark::FreshAlloc => {
                        let ptr = node.start.to_ptr::<()>() as *mut ();
                        // free the memory
                        unsafe {
                            aligned_alloc::aligned_free(ptr);
                        }
                        // do not add this node into new linked list
                    }
                }
            }

            (ret, used_bytes)
        };

        self.current_nodes = new_nodes;
        self.used_bytes = new_used_bytes;
    }

    pub fn current_nodes(&self) -> &LinkedList<Box<FreeListNode>> {
        &self.current_nodes
    }

    pub fn current_nodes_mut(&mut self) -> &mut LinkedList<Box<FreeListNode>> {
        &mut self.current_nodes
    }
}
pub struct FreeListNode {
    id: usize,
    start: Address,
    size: usize,
    mark: NodeMark
}

impl FreeListNode {
    pub fn set_mark(&mut self, mark: NodeMark) {
        self.mark = mark;
    }
}

/// mark state of a node: freshly allocated since the last GC, live in the
/// previous GC, or marked live in the current GC
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum NodeMark {
    FreshAlloc,
    PrevLive,
    Live
}
unsafe impl Sync for NodeMark {}
use std::fmt;

impl fmt::Display for FreeListSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "FreeListSpace")?;
        writeln!(f, "{} used, {} total", self.used_bytes, self.size)?;
        writeln!(f, "nodes:")?;
        for node in self.current_nodes() {
            writeln!(f, " {}", node)?;
        }
        writeln!(f, "done")
    }
}

impl fmt::Display for FreeListNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "FreeListNode#{}(start={:#X}, size={}, state={:?})",
            self.id, self.start, self.size, self.mark
        )
    }
}
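// Lifecycle sketch: a node starts as FreshAlloc; tracing marks reachable nodes
// Live; sweep() keeps Live nodes and demotes them to PrevLive, while anything
// still FreshAlloc or PrevLive is freed. A node therefore has to be re-marked
// Live in every collection cycle to survive the next sweep.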
......@@ -11,34 +11,8 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::Address;
use heap::gc;
use heap::Mutator;
mod freelist_space;
mod freelist_mutator;
mod malloc_list;
mod treadmill;
//pub use heap::freelist::malloc_list::FreeListSpace;
pub use heap::freelist::treadmill::FreeListSpace;
use std::sync::Arc;
#[inline(never)]
pub fn alloc_large(
    size: usize,
    align: usize,
    mutator: &mut Mutator,
    space: Arc<FreeListSpace>
) -> Address {
    loop {
        mutator.yieldpoint();

        let ret_addr = space.alloc(size, align);
        if ret_addr.is_zero() {
            gc::trigger_gc();
        } else {
            return ret_addr;
        }
    }
}
pub use self::freelist_space::FreelistSpace;
pub use self::freelist_mutator::FreelistAllocator;
\ No newline at end of file
......@@ -109,7 +109,6 @@ impl FreeListSpace {
    #[inline(always)]
    #[cfg(feature = "use-sidemap")]
    #[allow(unused_variables)]
    fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
        unimplemented!()
    }
......
......@@ -13,6 +13,7 @@
// limitations under the License.
use heap::*;
use heap::freelist::*;
use objectmodel;
use objectmodel::sidemap::*;
use MY_GC;
......@@ -248,6 +249,7 @@ fn gc() {
        let mut gccontext = gccontext_guard.as_mut().unwrap();
        gccontext.immix_tiny.prepare_for_gc();
        gccontext.immix_normal.prepare_for_gc();
        gccontext.lo.prepare_for_gc();
    }

    trace!("GC starts");
......@@ -277,7 +279,7 @@ fn gc() {
        gccontext.immix_tiny.sweep();
        gccontext.immix_normal.sweep();
        // gccontext.lo.sweep();
        gccontext.lo.sweep();
    }

    objectmodel::flip_mark_state();
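// The large object space now takes part in both GC phases alongside the two
// immix spaces: prepare_for_gc() before tracing, sweep() afterwards.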
......@@ -441,7 +443,30 @@ pub fn steal_trace_object(
                }
                trace_if!(TRACE_GC, " -done-");
            }
            SpaceDescriptor::Freelist => unimplemented!()
            SpaceDescriptor::Freelist => {
                let mut space = FreelistSpace::get(obj.to_address());
                space.mark_object_traced(obj);

                let encode = space.get_type_encode(obj);
                let tyid = encode.type_id();
                let ty = GlobalTypeTable::get_full_type(tyid);

                let mut offset: ByteOffset = 0;
                // fix part
                for &word_ty in ty.fix.iter() {
                    trace_word(word_ty, obj, offset, local_queue, job_sender);
                    offset += POINTER_SIZE as ByteOffset;
                }
                if encode.hybrid_len() != 0 {
                    // for every hybrid element
                    for _ in 0..encode.hybrid_len() {
                        for &word_ty in ty.var.iter() {
                            trace_word(word_ty, obj, offset, local_queue, job_sender);
                            offset += POINTER_SIZE as ByteOffset;
                        }
                    }
                }
            }
        }
    }
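// Offset arithmetic sketch (assuming POINTER_SIZE == 8): for a type with two
// fix words and hybrid_len == 3 over a one-word var part, the scan above
// visits word offsets 0 and 8 (fix part), then 16, 24 and 32 (var elements).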
......@@ -477,11 +502,22 @@ fn trace_word(
                        steal_process_edge(edge, local_queue, job_sender);
                    }
                }
                SpaceDescriptor::Freelist => unimplemented!()
                SpaceDescriptor::Freelist => {
                    let space = FreelistSpace::get(edge.to_address());
                    if !space.is_object_traced(edge) {
                        debug!("edge {} is not traced, trace it", edge);
                        steal_process_edge(edge, local_queue, job_sender);
                    } else {
                        debug!("edge {} is traced, skip", edge);
                    }
                }
            }
        }
        WordType::WeakRef => unimplemented!(),
        WordType::TaggedRef => unimplemented!()
        WordType::WeakRef | WordType::TaggedRef => {
            use std::process;
            error!("unimplemented");
            process::exit(1);
        }
    }
}
......
......@@ -133,7 +133,7 @@ impl ImmixAllocator {
    }

    #[inline(always)]
    pub fn post_alloc(&mut self, obj: Address, size: usize, align: usize) {
    pub fn post_alloc(&mut self, obj: Address, size: usize) {
        if size > BYTES_IN_LINE {
            let index = self.space.get_word_index(obj);
            let slot = self.space.get_gc_byte_slot(index);
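// `align` is dropped from the signature: post_alloc only consults `size`,
// touching the side-metadata GC byte just for objects larger than a line.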
......
......@@ -17,7 +17,6 @@ use heap::*;
use heap::immix::*;
use heap::gc;
use objectmodel::sidemap::*;
use utils::*;
use utils::bit_utils;
use utils::mem::memmap;
use utils::mem::memsec;
......@@ -525,6 +524,7 @@ impl Space for ImmixSpace {
        self.cur_end
    }

    #[inline(always)]
    #[allow(unused_variables)]
    fn is_valid_object(&self, addr: Address) -> bool {
        // we cannot tell whether this is a valid object, so always return true
        true
......
......@@ -15,8 +15,8 @@
use utils::*;
use common::ptr::*;
use heap::immix::*;
use heap::freelist::*;
use std::sync::atomic::AtomicUsize;
use std::sync::RwLock;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
......@@ -97,6 +97,7 @@ pub struct Mutator {
    id: usize,
    pub tiny: ImmixAllocator,
    pub normal: ImmixAllocator,
    pub lo: FreelistAllocator,
    global: Arc<MutatorGlobal>
}
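// A mutator now owns three thread-local allocators: `tiny` and `normal` for
// the two immix spaces, plus `lo` for the large object (freelist) space.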
......@@ -104,6 +105,7 @@ impl Mutator {
    pub fn new(
        tiny: ImmixAllocator,
        normal: ImmixAllocator,
        lo: FreelistAllocator,
        global: Arc<MutatorGlobal>
    ) -> Mutator {
        let mut id_lock = N_MUTATORS.write().unwrap();
......@@ -117,6 +119,7 @@ impl Mutator {
            id: *id_lock,
            tiny,
            normal,
            lo,
            global
        };
        *id_lock += 1;
......
......@@ -84,9 +84,8 @@ use common::gctype::GCType;
use common::objectdump;
use common::ptr::*;
use heap::*;
use heap::immix::BYTES_IN_LINE;
use heap::immix::ImmixSpace;
use heap::immix::ImmixAllocator;
use heap::immix::*;
use heap::freelist::*;
use utils::*;
use objectmodel::sidemap::*;
......@@ -135,7 +134,7 @@ pub use heap::Mutator;
struct GC {
    immix_tiny: Raw<ImmixSpace>,
    immix_normal: Raw<ImmixSpace>,
    // lo: Arc<FreeListSpace>,
    lo: Raw<FreelistSpace>,
    gc_types: Vec<Arc<GCType>>,
    roots: LinkedHashSet<ObjectReference>
}
......@@ -146,7 +145,8 @@ lazy_static! {
impl GC {
    pub fn is_heap_object(&self, addr: Address) -> bool {
        self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr)
        self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr) ||
            self.lo.addr_in_space(addr)
    }
}
......@@ -174,24 +174,26 @@ pub extern "C" fn gc_init(config: GCConfig) {
    let immix_tiny = ImmixSpace::new(SpaceDescriptor::ImmixTiny, config.immix_tiny_size);
    trace!(" initializing normal immix space...");
    let immix_normal = ImmixSpace::new(SpaceDescriptor::ImmixNormal, config.immix_normal_size);
    // trace!(" initializing large object space...");
    // let lo_space = Arc::new(FreeListSpace::new(lo_size));
    trace!(" initializing large object space...");
    let lo = FreelistSpace::new(SpaceDescriptor::Freelist, config.lo_size);

    // init GC
    heap::gc::init(config.n_gcthreads);

    *MY_GC.write().unwrap() = Some(GC {
        immix_tiny,
        immix_normal,
        lo,
        gc_types: vec![],
        roots: LinkedHashSet::new()
    });
    heap::gc::ENABLE_GC.store(config.enable_gc, Ordering::Relaxed);

    info!(
        "heap is {} bytes (immix_tiny: {} bytes, immix_normal: {} bytes) . ",
        config.immix_tiny_size + config.immix_normal_size,
        "heap is {} bytes (immix_tiny: {} bytes, immix_normal: {} bytes, lo: {} bytes)",
        config.immix_tiny_size + config.immix_normal_size + config.lo_size,
        config.immix_tiny_size,
        config.immix_normal_size
        config.immix_normal_size,
        config.lo_size
    );
    info!("{} gc threads", config.n_gcthreads);
    if !config.enable_gc {
......@@ -222,12 +224,14 @@ pub extern "C" fn new_mutator() -> *mut Mutator {
    let m: *mut Mutator = Box::into_raw(Box::new(Mutator::new(
        ImmixAllocator::new(gc.immix_tiny.clone()),
        ImmixAllocator::new(gc.immix_normal.clone()),
        FreelistAllocator::new(gc.lo.clone()),
        global
    )));

    // allocators have a back pointer to the mutator
    unsafe { (&mut *m) }.tiny.set_mutator(m);
    unsafe { (&mut *m) }.normal.set_mutator(m);
    unsafe { (&mut *m) }.lo.set_mutator(m);
    m
}
......@@ -320,7 +324,7 @@ pub extern "C" fn muentry_alloc_normal(
) -> ObjectReference {
    let m = mutator_ref(mutator);
    let res = m.normal.alloc(size, align);
    m.normal.post_alloc(res, size, align);
    m.normal.post_alloc(res, size);
    unsafe { res.to_object_reference() }
}
......@@ -346,7 +350,7 @@ pub extern "C" fn muentry_alloc_normal_slow(
) -> Address {
    let m = mutator_ref(mutator);
    let res = m.normal.alloc_slow(size, align);
    m.normal.post_alloc(res, size, align);
    m.normal.post_alloc(res, size);
    res
}
......@@ -359,22 +363,9 @@ pub extern "C" fn muentry_alloc_large(
    size: usize,
    align: usize
) -> ObjectReference {
    // let ret = freelist::alloc_large(
    //     size,
    //     align,
    //     unsafe { mutator.as_mut().unwrap() },
    //     MY_GC.read().unwrap().as_ref().unwrap().lo.clone()
    // );
    // trace!(
    //     "muentry_alloc_large(mutator: {:?}, size: {}, align: {}) = {}",
    //     mutator,
    //     size,
    //     align,
    //     ret
    // );
    //
    // unsafe { ret.to_object_reference() }
    unimplemented!()
    let m = mutator_ref(mutator);
    let res = m.lo.alloc(size, align);
    unsafe { res.to_object_reference() }
}
#[no_mangle]
......@@ -433,6 +424,18 @@ pub extern "C" fn muentry_init_medium_object(
        .init_object(obj.to_address(), encode);
}

#[no_mangle]
#[inline(always)]
pub extern "C" fn muentry_init_large_object(
    mutator: *mut Mutator,
    obj: ObjectReference,
    encode: LargeObjectEncode
) {
    unsafe { &mut *mutator }
        .lo
        .init_object(obj.to_address(), encode);
}
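// Usage sketch (not part of this commit), matching the test at the bottom of
// this diff: allocate in the large object space, then store the type encode:
//
//     let obj = muentry_alloc_large(mutator, size, align);
//     let encode = LargeObjectEncode::new(size as u64, tyid as u32, 0);
//     muentry_init_large_object(mutator, obj, encode);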
/// initializes a hybrid type object
#[no_mangle]
#[inline(never)]
......@@ -462,16 +465,25 @@ pub extern "C" fn persist_heap(roots: Vec<Address>) -> objectdump::HeapDump {
// the following API functions may get removed in the future

/// gets immix space and freelist space
#[no_mangle]
pub extern "C" fn get_spaces() -> (Raw<ImmixSpace>, Raw<ImmixSpace>) {
pub extern "C" fn get_space_immix_tiny() -> Raw<ImmixSpace> {
    let space_lock = MY_GC.read().unwrap();
    let space = space_lock.as_ref().unwrap();
    space.immix_tiny.clone()
}

#[no_mangle]
pub extern "C" fn get_space_immix_normal() -> Raw<ImmixSpace> {
    let space_lock = MY_GC.read().unwrap();
    let space = space_lock.as_ref().unwrap();
    space.immix_normal.clone()
}
    (
        space.immix_tiny.clone(),
        space.immix_normal.clone() // space.lo.clone()
    )

#[no_mangle]
pub extern "C" fn get_space_freelist() -> Raw<FreelistSpace> {
    let space_lock = MY_GC.read().unwrap();
    let space = space_lock.as_ref().unwrap();
    space.lo.clone()
}
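// The tuple-returning get_spaces() is split into one getter per space, so the
// new freelist space can be fetched on its own:
//
//     let lo_space = get_space_freelist();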
/// informs GC of a GCType
......
......@@ -13,7 +13,9 @@
// limitations under the License.
use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::collections::HashMap;
use std::mem;
use utils::mem::memmap;
use utils::math;
......@@ -21,7 +23,7 @@ use utils::Address;
use objectmodel::sidemap::TypeID;
use objectmodel::sidemap::N_TYPES;
use objectmodel::sidemap::type_encode::TypeEncode;
use objectmodel::sidemap::type_encode::*;
use objectmodel::sidemap::object_encode::SMALL_ID_WIDTH;
/// represents a chunk of memory as global type table, which contains some metadata for the
......@@ -48,7 +50,15 @@ pub struct GlobalTypeTable {
    /// current index for small entries
    small_entry_i: usize,
    /// current index for large entries
    large_entry_i: usize
    large_entry_i: usize,
    /// full entries
    full_entries: RwLock<HashMap<usize, FullTypeEntry>>
}

#[derive(Clone)]
pub struct FullTypeEntry {
    pub fix: Vec<WordType>,
    pub var: Vec<WordType>
}
const SMALL_ENTRY_CAP: usize = 1 << SMALL_ID_WIDTH;
......@@ -90,6 +100,13 @@ impl GlobalTypeTable {
    let meta: &mut GlobalTypeTable = unsafe { meta_addr.to_ptr_mut().as_mut().unwrap() };
    meta.small_entry_i = 0;
    meta.large_entry_i = SMALL_ENTRY_CAP;
    // the table lives in raw mmapped memory, so the lock has to be constructed
    // in place with ptr::write rather than assigned (assignment would drop an
    // uninitialized value)
    unsafe {
        use std::ptr;
        ptr::write(
            &mut meta.full_entries as *mut RwLock<HashMap<usize, FullTypeEntry>>,
            RwLock::new(HashMap::new())
        )
    }

    // save mmap
    *mmap_lock = Some(mmap);
......@@ -142,6 +159,23 @@ impl GlobalTypeTable {
panic!("large type entries overflow the global type table")
}
}
pub fn insert_full_entry(entry: FullTypeEntry) -> usize {
let meta = GlobalTypeTable::table_meta();
let mut lock = meta.full_entries.write().unwrap();
let id = lock.len();
lock.insert(id, entry);
id
}
pub fn get_full_type(id: usize) -> FullTypeEntry {
let meta = GlobalTypeTable::table_meta();
let lock = meta.full_entries.read().unwrap();
debug_assert!(lock.contains_key(&id));
lock.get(&id).unwrap().clone()
}
}
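// Example sketch: registering the full layout of a type whose first word is a
// reference, then reading it back by id (as the test in this commit does):
//
//     let id = GlobalTypeTable::insert_full_entry(FullTypeEntry {
//         fix: vec![WordType::Ref, WordType::NonRef],
//         var: vec![]
//     });
//     let ty = GlobalTypeTable::get_full_type(id);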
#[cfg(test)]
......
......@@ -273,6 +273,14 @@ pub struct LargeObjectEncode {
}

impl LargeObjectEncode {
    #[inline(always)]
    pub fn new(size: u64, tyid: u32, hybrid_len: u32) -> LargeObjectEncode {
        LargeObjectEncode {
            size,
            tyid,
            hybrid_len
        }
    }

    #[inline(always)]
    pub fn size(self) -> usize {
        (self.size << 8) as usize
......
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate mu_gc;
extern crate mu_utils;
extern crate log;
use self::mu_gc::*;
use self::mu_gc::heap;
use self::mu_gc::heap::*;
use self::mu_gc::heap::immix::*;
use self::mu_gc::heap::gc::*;
use self::mu_gc::objectmodel::sidemap::*;
use self::mu_utils::*;
use std::sync::atomic::Ordering;
#[allow(dead_code)]
pub const SPACIOUS_SPACE_SIZE: usize = 500 << 20; // 500mb
#[allow(dead_code)]
pub const LIMITED_SPACE_SIZE: usize = 20 << 20; // 20mb
#[allow(dead_code)]
pub const SMALL_SPACE_SIZE: usize = 1 << 19; // 512kb
#[test]
pub fn test_freelist_linkedlist() {
    const FREELIST_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
    const OBJECT_SIZE: usize = 4096;
    const OBJECT_ALIGN: usize = 8;
    const WORK_LOAD: usize = 4;

    start_logging_trace();
    gc_init(GCConfig {
        immix_tiny_size: 0,
        immix_normal_size: 0,
        lo_size: FREELIST_SPACE_SIZE,
        n_gcthreads: 1,
        enable_gc: true
    });

    // build a large object type: 512 words (4096 bytes), the first word a ref
    let header = {
        let mut fix = vec![WordType::NonRef; 512];
        fix[0] = WordType::Ref;
        let id = GlobalTypeTable::insert_full_entry(FullTypeEntry { fix, var: vec![] });
        LargeObjectEncode::new(OBJECT_SIZE as u64, id as u32, 0)
    };
    println!("Header: {:?}", header);

    let lo_space = get_space_freelist();
    let mutator = new_mutator();