Commit 7c205d27, authored by qinsoon

global type table

parent ed3861b2
......@@ -35,7 +35,7 @@ mu_utils = {path = "../utils"}
time = "*"
lazy_static = "*"
log = "*"
simple_logger = "*"
stderrlog = "*"
aligned_alloc = "*"
crossbeam = "*"
field-offset = "*"
......
......@@ -74,7 +74,7 @@ extern crate mu_utils as utils;
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate simple_logger;
extern crate stderrlog;
extern crate aligned_alloc;
extern crate crossbeam;
#[macro_use]
......@@ -449,3 +449,15 @@ pub extern "C" fn get_gc_type_encode(id: u32) -> u64 {
objectmodel::gen_gctype_encode(gctype)
}
}
/// Initialises the global logger (via `stderrlog`) at TRACE verbosity (level 4).
///
/// Safe to call repeatedly: a second initialisation attempt returns an error,
/// which is reported through the already-installed logger and otherwise ignored.
pub fn start_logging_trace() {
    if let Err(e) = stderrlog::new().verbosity(4).init() {
        error!(
            "failed to init logger, probably already initialized: {:?}",
            e
        )
    } else {
        info!("logger initialized")
    }
}
\ No newline at end of file
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Mutex;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::mem;
use utils::mem::memmap;
use utils::math;
use utils::Address;
use objectmodel::sidemap::TypeID;
use objectmodel::sidemap::N_TYPES;
use objectmodel::sidemap::type_encode::TypeEncode;
use objectmodel::sidemap::object_encode::SMALL_ID_WIDTH;
/// represents a chunk of memory as global type table, which contains some metadata for the
/// type table and all the type encoding entries.
///
/// The memory looks like this
///
/// |----------------|
/// | metadata(this) |
/// | ... |
/// |----------------| <- global_type_table points to this
/// | | at next 128 bytes alignment (size of TypeEncoding)
/// | small entries |
/// | ... |
/// | ... |
/// |----------------| (8192 entries = 1 << 13 (SMALL_ID_WIDTH) )
/// | large entries |
/// | ... |
/// | ... |
/// |________________|
///
// this struct is the metadata placed at the very start of the mmap'ed region,
// ahead of the entries; repr(C, packed) keeps the layout exactly as declared
#[repr(C, packed)]
struct GlobalTypeTable {
    /// next free slot in the small-entry region (init() sets this to 0)
    small_entry_i: usize,
    /// next free slot in the large-entry region (init() sets this to SMALL_ENTRY_CAP)
    large_entry_i: usize
}
/// capacity of the small-entry region: one slot per ID expressible in SMALL_ID_WIDTH bits
const SMALL_ENTRY_CAP: usize = 1 << SMALL_ID_WIDTH;
/// exclusive upper bound for large-entry indices: the table holds N_TYPES entries in total
const LARGE_ENTRY_CAP: usize = N_TYPES;
/// storing a pointer (as usize) to the first type entry of the table
static global_type_table_ptr: AtomicUsize = ATOMIC_USIZE_INIT;
/// storing a pointer (as usize) to the metadata of the type table
static global_type_table_meta: AtomicUsize = ATOMIC_USIZE_INIT;
/// save Mmap to keep the memory map alive
// it is okay to use lock here, as we won't actually access this field
// after initialization — the table is reached via the raw pointers above
lazy_static!{
    static ref gtt_mmap: Mutex<Option<memmap::Mmap>> = Mutex::new(None);
}
impl GlobalTypeTable {
    /// allocates and initialises the global type table memory.
    ///
    /// Must be called exactly once before any `insert_*` call;
    /// panics (via the assert) if the backing mmap already exists,
    /// and panics if the anonymous mmap cannot be created.
    pub fn init() {
        let mut mmap_lock = gtt_mmap.lock().unwrap();
        assert!(mmap_lock.is_none());

        let entry_size = mem::size_of::<TypeEncode>();
        // round the metadata up to an entry boundary so the first entry
        // lands on a TypeEncode-sized alignment (see module diagram)
        let metadata_size = math::align_up(mem::size_of::<GlobalTypeTable>(), entry_size);
        let mmap = match memmap::Mmap::anonymous(
            metadata_size + N_TYPES * entry_size,
            memmap::Protection::ReadWrite
        ) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for global type table")
        };

        info!("Global Type Table allocated at {:?}", mmap.ptr());

        // start address of metadata
        let meta_addr = Address::from_ptr::<u8>(mmap.ptr());
        global_type_table_meta.store(meta_addr.as_usize(), Ordering::Relaxed);

        // actual table starts right after the (aligned) metadata
        let table_addr = meta_addr + metadata_size;
        global_type_table_ptr.store(table_addr.as_usize(), Ordering::Relaxed);

        // initialize meta: small IDs grow from 0, large IDs from the first
        // index beyond the small-ID range
        let meta: &mut GlobalTypeTable = unsafe { meta_addr.to_ptr_mut().as_mut().unwrap() };
        meta.small_entry_i = 0;
        meta.large_entry_i = SMALL_ENTRY_CAP;

        // save mmap so the mapping stays alive for the program's lifetime
        *mmap_lock = Some(mmap);

        trace!("Global Type Table initialization done");
    }

    /// returns a mutable reference to the table metadata.
    // SAFETY(review): transmutes the stored address into &'static mut; only
    // valid after init(), and repeated calls alias the same &mut — assumes
    // type insertion is externally serialized. TODO confirm with callers.
    #[inline(always)]
    fn table_meta() -> &'static mut GlobalTypeTable {
        unsafe { mem::transmute(global_type_table_meta.load(Ordering::Relaxed)) }
    }

    /// returns the type table as a fixed-size array of N_TYPES entries.
    // SAFETY(review): same caveats as table_meta().
    #[inline(always)]
    fn table() -> &'static mut [TypeEncode; N_TYPES] {
        unsafe { mem::transmute(global_type_table_ptr.load(Ordering::Relaxed)) }
    }

    /// inserts a type encoding into the small-entry region [0, SMALL_ENTRY_CAP)
    /// and returns its type ID. Panics when the region is full.
    pub fn insert_small_entry(entry: TypeEncode) -> TypeID {
        // bindings need no `mut`: mutation goes through the &'static mut
        // referents, the bindings themselves are never reassigned
        let meta = GlobalTypeTable::table_meta();
        let table = GlobalTypeTable::table();

        if meta.small_entry_i < SMALL_ENTRY_CAP {
            let id = meta.small_entry_i;
            table[id] = entry;
            // NOTE(review): non-atomic read-modify-write — racy if two threads
            // insert concurrently; assumes external serialization
            meta.small_entry_i += 1;
            id
        } else {
            panic!("small type entries overflow the global type table")
        }
    }

    /// inserts a type encoding into the large-entry region
    /// [SMALL_ENTRY_CAP, N_TYPES) and returns its type ID.
    /// Panics when the region is full.
    pub fn insert_large_entry(entry: TypeEncode) -> TypeID {
        let meta = GlobalTypeTable::table_meta();
        let table = GlobalTypeTable::table();

        if meta.large_entry_i < LARGE_ENTRY_CAP {
            let id = meta.large_entry_i;
            table[id] = entry;
            // NOTE(review): same non-atomic bump caveat as insert_small_entry
            meta.large_entry_i += 1;
            id
        } else {
            panic!("large type entries overflow the global type table")
        }
    }
}
#[cfg(test)]
mod global_type_table_test {
    use super::*;
    use objectmodel::sidemap::type_encode::WordType::*;
    use start_logging_trace;

    #[test]
    fn test_insert() {
        start_logging_trace();
        GlobalTypeTable::init();

        // 12 two-bit word types packed into the first three fix_ty bytes;
        // the expected sequence below spells out each decoded entry
        let ty = {
            let mut fix_ty = [0; 63];
            fix_ty[0] = 0b11100100u8;
            fix_ty[1] = 0b00011011u8;
            fix_ty[2] = 0b11100100u8;
            TypeEncode::new(12, fix_ty, 0, [0; 63])
        };

        let tyid = GlobalTypeTable::insert_small_entry(ty) as usize;
        let ref loaded_ty = GlobalTypeTable::table()[tyid];

        let expected = [
            NonRef, Ref, WeakRef, TaggedRef,
            TaggedRef, WeakRef, Ref, NonRef,
            NonRef, Ref, WeakRef, TaggedRef
        ];
        for (i, expect) in expected.iter().enumerate() {
            assert_eq!(&loaded_ty.fix_ty(i), expect);
        }
    }
}
......@@ -93,10 +93,13 @@ pub const MINIMAL_OBJECT_SIZE: ByteSize = 16;
pub const OBJECT_HEADER_SIZE: ByteSize = 0;
pub const OBJECT_HEADER_OFFSET: ByteOffset = 0;
pub type TypeID = u32;
/// Type ID (but we never use more than 23 bits of it)
pub type TypeID = usize;
pub const N_TYPES: usize = 1 << 23;
pub mod object_encode;
pub mod type_encode;
pub mod global_type_table;
#[inline(always)]
pub fn header_is_object_start(hdr: u64) -> bool {
......
......@@ -106,6 +106,8 @@ pub struct SmallObjectEncode {
w: u16
}
pub const SMALL_ID_WIDTH: usize = 13;
impl SmallObjectEncode {
#[inline(always)]
pub fn is_small(self) -> bool {
......@@ -114,13 +116,13 @@ impl SmallObjectEncode {
#[inline(always)]
// removed a stale duplicate of this computation (the pre-change line using
// the literal 13) that the diff left in place as a shadowed `let size`
pub fn size(self) -> usize {
    debug_assert!(self.is_small());
    // the two bits above the ID field select the size: 32 + (bits << 3)
    let size = ((self.w >> SMALL_ID_WIDTH) & 0b11u16) << 3;
    (32 + size) as usize
}
#[inline(always)]
pub fn type_id(self) -> TypeID {
debug_assert!(self.is_small());
(self.w & 0b0001111111111111u16) as u32
(self.w & (1u16 << (SMALL_ID_WIDTH + 1) - 1)) as usize
}
}
......@@ -193,7 +195,7 @@ impl MediumObjectEncode {
#[inline(always)]
pub fn type_id(self) -> TypeID {
debug_assert!(self.is_medium());
self.d >> 8
(self.d >> 8) as usize
}
}
......@@ -247,7 +249,7 @@ mod medium_object_encoding {
/// encoding for a large object; unlike small/medium encodings this carries
/// the full size and type ID in separate words.
// removed the stale pre-rename field `unused: u32` that the diff left in
// place above `hybrid_len` (it also lacked a trailing comma, so the struct
// would not parse with both lines present)
pub struct LargeObjectEncode {
    size: u64,
    tyid: u32,
    // presumably the var-part length for hybrid types — TODO confirm with
    // the allocation site that writes this field
    hybrid_len: u32
}
impl LargeObjectEncode {
......@@ -257,7 +259,11 @@ impl LargeObjectEncode {
}
#[inline(always)]
/// returns the stored type ID widened to the TypeID (usize) alias.
pub fn type_id(self) -> TypeID {
    // removed the stale pre-change line (`self.tyid` without the cast)
    // that the diff left directly above this one
    self.tyid as usize
}
#[inline(always)]
/// returns the stored hybrid length widened to usize.
pub fn hybrid_len(self) -> usize {
    self.hybrid_len as usize
}
}
......
......@@ -38,6 +38,14 @@ pub struct TypeEncode {
}
impl TypeEncode {
/// constructs a TypeEncode from the lengths and per-word type bytes of the
/// fixed and variable parts (63-byte arrays, two bits per word type).
pub fn new(fix_len: u8, fix_ty: [u8; 63], var_len: u8, var_ty: [u8; 63]) -> TypeEncode {
    TypeEncode {
        fix_len,
        fix_ty,
        var_len,
        var_ty
    }
}
#[inline(always)]
pub fn fix_len(&self) -> u8 {
self.fix_len
......@@ -109,4 +117,4 @@ mod type_encoding {
assert_eq!(encode.fix_ty(10), WeakRef);
assert_eq!(encode.fix_ty(11), TaggedRef);
}
}
\ No newline at end of file
}
......@@ -14,23 +14,15 @@
extern crate mu_gc as gc;
extern crate mu_utils as utils;
extern crate simple_logger;
extern crate log;
use self::gc::start_logging_trace;
use self::log::LogLevel;
use self::gc::heap;
use self::gc::objectmodel;
use self::utils::Address;
use std::sync::atomic::Ordering;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {}
Err(_) => {}
}
}
const OBJECT_SIZE: usize = 24;
const OBJECT_ALIGN: usize = 8;
......@@ -105,7 +97,7 @@ fn test_exhaust_alloc_large() {
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
let mut mutator = gc::new_mutator();
start_logging();
start_logging_trace();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
......@@ -126,7 +118,7 @@ fn test_alloc_large_lo_trigger_gc() {
gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8, true);
let mut mutator = gc::new_mutator();
start_logging();
start_logging_trace();
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
......@@ -149,7 +141,7 @@ fn test_alloc_large_both_trigger_gc() {
gc::gc_init(SMALL_SPACE_SIZE, 4096 * 10, 8, true);
let mut mutator = gc::new_mutator();
start_logging();
start_logging_trace();
// this will exhaust the lo space
for _ in 0..10 {
......
......@@ -15,20 +15,10 @@
extern crate mu_gc as gc;
extern crate mu_utils as utils;
use self::gc::start_logging_trace;
use std::ptr;
extern crate simple_logger;
extern crate log;
use self::log::LogLevel;
use std::fmt;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {}
Err(_) => {}
}
}
#[derive(Copy, Clone)]
struct Node {
next: *mut Node,
......@@ -173,7 +163,7 @@ fn create_linked_list() {
heap::gc::set_low_water_mark();
}
start_logging();
start_logging_trace();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::print_gc_context();
......@@ -203,7 +193,7 @@ fn linked_list_heap_dump() {
heap::gc::set_low_water_mark();
}
start_logging();
start_logging_trace();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::print_gc_context();
......@@ -242,7 +232,7 @@ fn linked_list_survive_gc() {
heap::gc::set_low_water_mark();
}
start_logging();
start_logging_trace();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::print_gc_context();
......
......@@ -23,6 +23,7 @@ extern crate mu_gc as gc;
extern crate mu_utils as utils;
extern crate time;
use self::gc::start_logging_trace;
use self::gc::heap;
use self::gc::heap::immix::ImmixMutatorLocal;
use self::gc::heap::immix::ImmixSpace;
......@@ -34,15 +35,6 @@ use std::mem::size_of;
use std::sync::atomic::Ordering;
extern crate log;
extern crate simple_logger;
use self::log::LogLevel;
pub fn start_logging() {
match simple_logger::init_with_level(LogLevel::Trace) {
Ok(_) => {}
Err(_) => {}
}
}
const IMMIX_SPACE_SIZE: usize = 40 << 20;
const LO_SPACE_SIZE: usize = 40 << 20;
......@@ -171,7 +163,7 @@ fn start() {
heap::gc::set_low_water_mark();
}
start_logging();
start_logging_trace();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::print_gc_context();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment