
Commit 92a7cd8c authored by qinsoon

compiler cooperates to init object header

parent 0cdbaac9
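In brief: after this change the compiler emits a call to the new runtime entry muentry_init_object immediately after every allocation, passing a 64-bit GC type encoding computed once at compile time via mm::get_gc_type_encode. A minimal sketch of the resulting allocate-then-initialise pair, mirrored from the GC tests further down in this diff (FIXSIZE_NOREF_ENCODE stands in for a precomputed encoding):

```rust
// Sketch: the runtime call pair the compiler now arranges for large objects,
// written as the direct calls the updated tests below make.
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
// Stamp the header word so the collector knows how to trace the new object.
gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
```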
@@ -14,6 +14,7 @@ use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass;
use compiler::backend;
+use compiler::backend::BackendTypeInfo;
use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
@@ -1327,7 +1328,7 @@ impl <'a> InstructionSelection {
let const_size = self.make_value_int_const(size as u64, vm);
-self.emit_alloc_sequence(const_size, ty_align, node, f_content, f_context, vm);
+self.emit_alloc_sequence(&ty_info, const_size, ty_align, node, f_content, f_context, vm);
}
Instruction_::NewHybrid(ref ty, var_len) => {
@@ -1417,7 +1418,7 @@ impl <'a> InstructionSelection {
}
};
-self.emit_alloc_sequence(actual_size, ty_align, node, f_content, f_context, vm);
+self.emit_alloc_sequence(&ty_info, actual_size, ty_align, node, f_content, f_context, vm);
}
Instruction_::Throw(op_index) => {
@@ -1481,13 +1482,13 @@
})
}
-fn emit_alloc_sequence (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
+fn emit_alloc_sequence (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
if size.is_int_const() {
// size known at compile time, we can choose to emit alloc_small or large now
if size.extract_int_const() > mm::LARGE_OBJECT_THRESHOLD as u64 {
-self.emit_alloc_sequence_large(size, align, node, f_content, f_context, vm);
+self.emit_alloc_sequence_large(ty_info, size, align, node, f_content, f_context, vm);
} else {
-self.emit_alloc_sequence_small(size, align, node, f_content, f_context, vm);
+self.emit_alloc_sequence_small(ty_info, size, align, node, f_content, f_context, vm);
}
} else {
// size is unknown at compile time
@@ -1508,7 +1509,7 @@
self.backend.emit_jg(blk_alloc_large.clone());
// alloc small here
-let tmp_res = self.emit_alloc_sequence_small(size.clone(), align, node, f_content, f_context, vm);
+let tmp_res = self.emit_alloc_sequence_small(ty_info, size.clone(), align, node, f_content, f_context, vm);
self.backend.emit_jmp(blk_alloc_large_end.clone());
@@ -1522,7 +1523,7 @@
self.backend.start_block(blk_alloc_large.clone());
self.backend.set_block_livein(blk_alloc_large.clone(), &vec![size.clone()]);
-let tmp_res = self.emit_alloc_sequence_large(size, align, node, f_content, f_context, vm);
+let tmp_res = self.emit_alloc_sequence_large(ty_info, size, align, node, f_content, f_context, vm);
self.backend.end_block(blk_alloc_large.clone());
self.backend.set_block_liveout(blk_alloc_large.clone(), &vec![tmp_res]);
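For context, emit_alloc_sequence picks between the two allocation paths: a compile-time-constant size is resolved statically against mm::LARGE_OBJECT_THRESHOLD, while an unknown size gets the runtime compare-and-branch shown in the blocks above. A standalone sketch of that decision, with a placeholder threshold value (the real constant lives in the mm crate):

```rust
// Placeholder for mm::LARGE_OBJECT_THRESHOLD; the actual value is defined in the mm crate.
const LARGE_OBJECT_THRESHOLD: u64 = 8192;

enum AllocPath { Small, Large, RuntimeBranch }

fn choose_alloc_path(const_size: Option<u64>) -> AllocPath {
    match const_size {
        // Size known at compile time: pick the path statically.
        Some(s) if s > LARGE_OBJECT_THRESHOLD => AllocPath::Large,
        Some(_) => AllocPath::Small,
        // Size unknown: emit cmp + jg to blk_alloc_large, with both paths inline.
        None => AllocPath::RuntimeBranch,
    }
}
```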
@@ -1533,7 +1534,7 @@
}
}
-fn emit_alloc_sequence_large (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+fn emit_alloc_sequence_large (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let tmp_res = self.get_result_value(node);
// ASM: %tl = get_thread_local()
@@ -1549,15 +1550,24 @@
self.emit_runtime_entry(
&entrypoints::ALLOC_LARGE,
-vec![tmp_allocator, size.clone(), const_align],
+vec![tmp_allocator.clone(), size.clone(), const_align],
Some(vec![tmp_res.clone()]),
Some(node), f_content, f_context, vm
);
+// ASM: call muentry_init_object(%allocator, %tmp_res, encode)
+let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+self.emit_runtime_entry(
+&entrypoints::INIT_OBJ,
+vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+None,
+Some(node), f_content, f_context, vm
+);
tmp_res
}
-fn emit_alloc_sequence_small (&mut self, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+fn emit_alloc_sequence_small (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// emit immix allocation fast path
// ASM: %tl = get_thread_local()
@@ -1634,7 +1644,7 @@
self.emit_runtime_entry(
&entrypoints::ALLOC_SLOW,
-vec![tmp_allocator, size.clone(), const_align],
+vec![tmp_allocator.clone(), size.clone(), const_align],
Some(vec![
tmp_res.clone()
]),
@@ -1649,6 +1659,15 @@
self.backend.start_block(allocend.clone());
self.current_block = Some(allocend.clone());
+// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
+let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+self.emit_runtime_entry(
+&entrypoints::INIT_OBJ,
+vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+None,
+Some(node), f_content, f_context, vm
+);
tmp_res
}
@@ -1888,8 +1907,10 @@
stack_args.push(arg.clone());
}
} else {
-// put the constant to memory
-unimplemented!()
+// FIXME: put the constant to memory
+let int_const = arg.extract_int_const() as i64;
+self.backend.emit_mov_r64_imm64(&arg_gpr, int_const);
+gpr_arg_count += 1;
}
} else if arg.is_mem() {
unimplemented!()
......
@@ -4,15 +4,15 @@ use std::sync::Arc;
use utils::POINTER_SIZE;
use utils::ByteSize;
-use std::usize;
-pub const GCTYPE_INIT_ID: usize = usize::MAX;
+use std::u32;
+pub const GCTYPE_INIT_ID: u32 = u32::MAX;
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct GCType {
-pub id: usize,
+pub id: u32,
pub size: ByteSize,
pub non_repeat_refs: Option<RefPattern>,
-pub repeat_refs : Option<RepeatingRefPattern>
+pub repeat_refs : Option<RepeatingRefPattern>,
}
impl GCType {
@@ -21,7 +21,7 @@ impl GCType {
id: GCTYPE_INIT_ID,
size: size,
non_repeat_refs: None,
-repeat_refs : None
+repeat_refs : None,
}
}
@@ -144,7 +144,7 @@ mod tests {
size : 16
},
count : 10
-})
+}),
};
// array(10) of array(10) of struct {ref, int64}
......
@@ -99,21 +99,6 @@ impl FreeListSpace {
}
}
-#[cfg(feature = "use-sidemap")]
-pub fn init_object(&self, addr: Address, encode: u64) {
-unsafe {
-*self.alloc_map().offset((addr.diff(self.start) >> LOG_POINTER_SIZE) as isize) = encode as u8;
-objectmodel::mark_as_untraced(self.trace_map(), self.start, addr, objectmodel::load_mark_state());
-}
-}
-#[cfg(not(feature = "use-sidemap"))]
-pub fn init_object(&self, addr: Address, encode: u64) {
-unsafe {
-addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
-}
-}
#[inline(always)]
#[cfg(feature = "use-sidemap")]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
......
@@ -176,10 +176,12 @@ impl ImmixMutatorLocal {
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn init_object(&mut self, addr: Address, encode: u64) {
-unsafe {
-*self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode as u8;
-objectmodel::mark_as_untraced(self.trace_map, self.space_start, addr, self.mark_state);
-}
+// unsafe {
+// *self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode as u8;
+// objectmodel::mark_as_untraced(self.trace_map, self.space_start, addr, self.mark_state);
+// }
+unimplemented!()
}
#[inline(always)]
......
@@ -74,7 +74,7 @@ pub extern fn add_gc_type(mut ty: GCType) -> Arc<GCType> {
let mut gc_guard = MY_GC.write().unwrap();
let mut gc = gc_guard.as_mut().unwrap();
-let index = gc.gc_types.len();
+let index = gc.gc_types.len() as u32;
ty.id = index;
let ty = Arc::new(ty);
@@ -84,6 +84,14 @@ pub extern fn add_gc_type(mut ty: GCType) -> Arc<GCType> {
ty
}
+#[no_mangle]
+pub extern fn get_gc_type_encode(id: u32) -> u64 {
+let gc_lock = MY_GC.read().unwrap();
+let ref gctype = gc_lock.as_ref().unwrap().gc_types[id as usize];
+objectmodel::gen_gctype_encode(gctype)
+}
#[no_mangle]
pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
// set this line to turn on certain level of debugging info
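Note how get_gc_type_encode above is consumed: the compiler calls it once at compile time and bakes the returned u64 into the generated code as an integer constant, so no encoding work happens on the allocation path at run time. From the instruction-selection changes earlier in this diff:

```rust
// The encoding becomes an immediate operand of the muentry_init_object call.
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
```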
@@ -174,8 +182,8 @@ pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize)
}
#[no_mangle]
-#[inline(always)]
-pub extern fn init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
+#[inline(never)]
+pub extern fn muentry_init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
@@ -196,12 +204,6 @@ pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize,
unsafe {ret.to_object_reference()}
}
-#[no_mangle]
-#[allow(unused_variables)]
-pub extern fn muentry_init_large_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
-MY_GC.read().unwrap().as_ref().unwrap().lo_space.init_object(obj.to_address(), encode);
-}
// force gc
#[no_mangle]
pub extern fn force_gc() {
......
@@ -22,12 +22,14 @@
/// | start? | trace? | fix? | hybrid length (29 bits, ~500M) | gc type ID (32 bits) |
/// bit 63                                                                      bit 0
+use common::gctype::GCType;
use utils::ByteSize;
use utils::ByteOffset;
use utils::bit_utils;
use utils::{Address, ObjectReference};
use utils::POINTER_SIZE;
use utils::LOG_POINTER_SIZE;
pub const OBJECT_HEADER_SIZE : ByteSize = 8;
pub const OBJECT_HEADER_OFFSET : ByteOffset = - (OBJECT_HEADER_SIZE as ByteOffset);
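These two constants fix the layout for the header-based object model: an 8-byte header word stored immediately before the object payload. Initialising an object is then a single store at a negative offset; a sketch of that store, matching the non-sidemap init_object removed from FreeListSpace above:

```rust
// With OBJECT_HEADER_OFFSET = -8, stamping a header is one raw store just
// below the object's start address.
unsafe {
    addr.offset(OBJECT_HEADER_OFFSET).store(encode);
}
```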
@@ -37,11 +39,46 @@ pub const BIT_IS_TRACED : usize = 62;
pub const BIT_IS_FIX_SIZE : usize = 61;
pub const BIT_HAS_REF_MAP : usize = 60;
pub const REF_MAP_LENGTH : usize = 32;
pub const MASK_REF_MAP : u64 = 0xFFFFFFFFu64;
pub const MASK_GCTYPE_ID : u64 = 0xFFFFFFFFu64;
pub const MASK_HYBRID_LENGTH: u64 = 0x1FFFFFFF00000000u64;
pub const SHR_HYBRID_LENGTH : usize = 32;
+pub fn gen_gctype_encode(ty: &GCType) -> u64 {
+let mut ret = 0u64;
+if ty.repeat_refs.is_some() {
+// var sized
+let len = ty.repeat_refs.as_ref().unwrap().count;
+// encode length
+ret = ret | (( (len as u64) << SHR_HYBRID_LENGTH) & MASK_HYBRID_LENGTH);
+// encode gc id
+ret = ret | (ty.id as u64);
+} else {
+// fix sized
+ret = ret | (1 << BIT_IS_FIX_SIZE);
+// encode ref map?
+if ty.size < REF_MAP_LENGTH * POINTER_SIZE {
+// encode ref map
+let offsets = ty.gen_ref_offsets();
+let mut ref_map = 0;
+for offset in offsets {
+ref_map = ref_map | (1 << (offset >> LOG_POINTER_SIZE));
+}
+ret = ret | (ref_map & MASK_REF_MAP);
+} else {
+ret = ret | (ty.id as u64);
+}
+}
+ret
+}
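To make the bit layout concrete, here is a sketch of decoding a header word with the masks above, plus the value gen_gctype_encode produces in the simplest fixed-size case; both mirror the accessors and the gctype_to_encode1 test below.

```rust
// Sketch: pull the hybrid length and gc type id back out of a header word
// (mirrors header_get_hybrid_length / header_get_gctype_id used in the tests).
fn decode_hybrid(hdr: u64) -> (u64, u32) {
    let length = (hdr & MASK_HYBRID_LENGTH) >> SHR_HYBRID_LENGTH;
    let gctype_id = (hdr & MASK_GCTYPE_ID) as u32;
    (length, gctype_id)
}

// Worked example: a fixed-size struct {ref, int64} has one reference word at
// offset 0, so the encoding is the fix-size bit plus a one-bit ref map:
//   encode == (1 << BIT_IS_FIX_SIZE) | 0b1
// which is exactly what gctype_to_encode1 asserts.
```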
#[allow(unused_variables)]
pub fn print_object(obj: Address) {
let mut cursor = obj;
@@ -147,6 +184,9 @@ pub fn header_get_gctype_id(hdr: u64) -> u32 {
#[cfg(test)]
mod tests {
use super::*;
+use common::gctype::*;
+use utils::POINTER_SIZE;
+use std::sync::Arc;
#[test]
fn fixsize_header_refmap() {
@@ -210,4 +250,129 @@ mod tests {
assert_eq!(header_get_hybrid_length(hdr), 128);
assert_eq!(header_get_gctype_id(hdr), 0xff);
}
+#[test]
+fn gctype_to_encode1() {
+// linked list: struct {ref, int64}
+let a = GCType{
+id: 0,
+size: 16,
+non_repeat_refs: Some(RefPattern::Map{
+offsets: vec![0],
+size: 16
+}),
+repeat_refs : None
+};
+println!("gctype: {:?}", a);
+let encode = gen_gctype_encode(&a);
+println!("encode: {:64b}", encode);
+assert!(header_is_fix_size(encode));
+assert_eq!(header_get_ref_map(encode), 0b1);
+}
+#[test]
+fn gctype_to_encode2() {
+// doubly linked list: struct {ref, ref, int64, int64}
+let a = GCType{
+id: 0,
+size: 32,
+non_repeat_refs: Some(RefPattern::Map{
+offsets: vec![0, 8],
+size: 32
+}),
+repeat_refs : None
+};
+println!("gctype: {:?}", a);
+let encode = gen_gctype_encode(&a);
+println!("encode: {:64b}", encode);
+assert!(header_is_fix_size(encode));
+assert_eq!(header_get_ref_map(encode), 0b11);
+}
+#[test]
+fn gctype_to_encode3() {
+// a struct of 64 references
+const N_REF : usize = 64;
+let a = GCType{
+id: 999,
+size: N_REF * POINTER_SIZE,
+non_repeat_refs: Some(RefPattern::Map{
+offsets: (0..N_REF).map(|x| x * POINTER_SIZE).collect(),
+size: N_REF * POINTER_SIZE
+}),
+repeat_refs : None
+};
+println!("gctype: {:?}", a);
+let encode = gen_gctype_encode(&a);
+println!("encode: {:64b}", encode);
+assert!(header_is_fix_size(encode));
+assert_eq!(header_get_gctype_id(encode), 999);
+}
+#[test]
+fn gctype_to_encode4() {
+// array of struct {ref, int64} with length 10
+let a = GCType {
+id: 1,
+size: 160,
+non_repeat_refs: None,
+repeat_refs : Some(RepeatingRefPattern {
+pattern: RefPattern::Map{
+offsets: vec![0],
+size : 16
+},
+count : 10
+}),
+};
+println!("gctype: {:?}", a);
+let encode = gen_gctype_encode(&a);
+println!("encode: {:64b}", encode);
+assert!(!header_is_fix_size(encode));
+assert_eq!(header_get_hybrid_length(encode), 10);
+assert_eq!(header_get_gctype_id(encode), 1);
+}
+#[test]
+fn gctype_to_encode5() {
+// array of struct {ref, int64} with length 10
+let b = GCType {
+id: 1,
+size: 160,
+non_repeat_refs: None,
+repeat_refs : Some(RepeatingRefPattern {
+pattern: RefPattern::Map{
+offsets: vec![0],
+size : 16
+},
+count : 10
+}),
+};
+// array(10) of array(10) of struct {ref, int64}
+let a = GCType {
+id: 2,
+size: 1600,
+non_repeat_refs: None,
+repeat_refs : Some(RepeatingRefPattern {
+pattern: RefPattern::NestedType(vec![Arc::new(b.clone()).clone()]),
+count : 10
+})
+};
+println!("gctype: {:?}", a);
+let encode = gen_gctype_encode(&a);
+println!("encode: {:64b}", encode);
+assert!(!header_is_fix_size(encode));
+assert_eq!(header_get_hybrid_length(encode), 10);
+assert_eq!(header_get_gctype_id(encode), 2);
+}
}
\ No newline at end of file
@@ -30,6 +30,9 @@ pub fn flip(mark: u8) -> u8 {
// sidemap object model
+#[cfg(feature = "use-sidemap")]
+pub use self::sidemap::gen_gctype_encode;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJECT_HEADER_SIZE;
#[cfg(feature = "use-sidemap")]
@@ -52,6 +55,9 @@ pub use self::sidemap::get_ref_byte;
// header
+#[cfg(not(feature = "use-sidemap"))]
+pub use self::header::gen_gctype_encode;
// flag bit
#[cfg(not(feature = "use-sidemap"))]
pub use self::header::BIT_HAS_REF_MAP;
......
use std::sync::atomic;
+use common::gctype::GCType;
use utils::{Address, ObjectReference};
use utils::{LOG_POINTER_SIZE, POINTER_SIZE};
use utils::bit_utils;
@@ -6,6 +7,11 @@ use utils::{ByteSize, ByteOffset};
pub const OBJECT_HEADER_SIZE : ByteSize = 0;
pub const OBJECT_HEADER_OFFSET : ByteOffset = 0;
+pub fn gen_gctype_encode(ty: &GCType) -> u64 {
+unimplemented!()
+}
#[allow(unused_variables)]
pub fn print_object(obj: Address, space_start: Address, trace_map: *mut u8, alloc_map: *mut u8) {
let mut cursor = obj;
......
@@ -84,7 +84,7 @@ fn test_exhaust_alloc_large() {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
-gc::muentry_init_large_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
+gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
}
mutator.destroy();
@@ -105,7 +105,7 @@ fn test_alloc_large_lo_trigger_gc() {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
-gc::muentry_init_large_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
+gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
if roots < KEEP_N_ROOTS {
gc::add_to_root(res);
@@ -129,12 +129,12 @@ fn test_alloc_large_both_trigger_gc() {
mutator.yieldpoint();
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
-gc::muentry_init_large_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
+gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
}
// this will trigger a gc, and allocate it in the collected space
let res = gc::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
-gc::muentry_init_large_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
+gc::muentry_init_object(&mut mutator, res, FIXSIZE_NOREF_ENCODE);
// this will trigger gcs for immix space
for _ in 0..100000 {
......
@@ -62,6 +62,17 @@ lazy_static! {
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_alloc_large")),
jit: RwLock::new(None)
};
+// impl/decl: gc/lib.rs
+pub static ref INIT_OBJ : RuntimeEntrypoint = RuntimeEntrypoint {
+sig: P(MuFuncSig {
+hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
+ret_tys: vec![],
+arg_tys: vec![ADDRESS_TYPE.clone(), ADDRESS_TYPE.clone(), UINT64_TYPE.clone()]
+}),
+aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_init_object")),
+jit: RwLock::new(None)
+};
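The argument types here (two addresses plus a u64) line up with the Rust definition the muentry_init_object symbol resolves to, shown in the gc crate changes earlier in this diff; restated for cross-reference:

```rust
#[no_mangle]
#[inline(never)]
pub extern fn muentry_init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
    unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
```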
// impl/decl: exception.rs
pub static ref THROW_EXCEPTION : RuntimeEntrypoint = RuntimeEntrypoint {
......