
Commit 5b25bf30 authored by qinsoon

straddle object for immix space

parent 8ce5c1ce
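For context: a "straddle object" is an object large enough to span more than one Immix line, so marking it live must mark every line it touches rather than the usual line-plus-one. A minimal sketch of the line arithmetic, assuming 256-byte lines (consistent with the tests below, where a 1024-byte object is noted as covering 4 lines); lines_covered is a hypothetical helper, not part of this commit:

    // How many lines a straddle object touches, assuming 256-byte lines.
    const LOG_BYTES_IN_LINE: usize = 8; // assumed: 1 << 8 == 256

    fn lines_covered(size: usize) -> usize {
        // Round up to whole lines; a 1024-byte object covers 4 lines.
        (size + (1 << LOG_BYTES_IN_LINE) - 1) >> LOG_BYTES_IN_LINE
    }

    fn main() {
        assert_eq!(lines_covered(1024), 4); // the straddle case in the tests
        assert_eq!(lines_covered(64), 1);   // a normal small object
    }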
@@ -430,11 +430,13 @@ pub fn steal_trace_object(
offset += POINTER_SIZE as ByteOffset;
}
// for variable part
trace_if!(TRACE_GC, " -var part-");
while offset < type_size {
for i in 0..type_encode.var_len() {
trace_word(type_encode.var_ty(i), obj, offset, local_queue, job_sender);
offset += POINTER_SIZE as ByteOffset;
if type_encode.var_len() != 0 {
trace_if!(TRACE_GC, " -var part-");
while offset < type_size {
for i in 0..type_encode.var_len() {
trace_word(type_encode.var_ty(i), obj, offset, local_queue, job_sender);
offset += POINTER_SIZE as ByteOffset;
}
}
}
trace_if!(TRACE_GC, " -done-");
......
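The var_len() != 0 guard added above is a correctness fix, not just a refactor: when the type has no variable part, the inner for loop body never runs, offset is never advanced, and while offset < type_size spins forever. A reduced, self-contained sketch of the hazard (loop shape from the hunk; POINTER_SIZE is assumed to be 8 for a 64-bit target):

    // Reduced shape of the variable-part tracing loop.
    fn trace_var_part(var_len: usize, type_size: isize, mut offset: isize) {
        // Without this guard, var_len == 0 would leave `offset` unchanged
        // and the while loop would never terminate.
        if var_len != 0 {
            while offset < type_size {
                for _ in 0..var_len {
                    // trace_word(...) happens here in the real code
                    offset += 8; // POINTER_SIZE as ByteOffset (assumed 8)
                }
            }
        }
    }

    fn main() {
        trace_var_part(0, 64, 0); // returns immediately thanks to the guard
        trace_var_part(2, 64, 0); // traces until offset reaches type_size
    }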
@@ -21,7 +21,7 @@ use utils::Address;
use utils::ByteSize;
use std::*;
const TRACE_ALLOC: bool = false;
const TRACE_ALLOC: bool = true;
#[repr(C)]
pub struct ImmixAllocator {
@@ -111,16 +111,7 @@ impl ImmixAllocator {
);
if end > self.limit {
if size > BYTES_IN_LINE {
trace_if!(TRACE_ALLOC, "Mutator: overflow alloc()");
self.overflow_alloc(size, align)
} else {
trace_if!(
TRACE_ALLOC,
"Mutator: fastpath alloc: try_alloc_from_local()"
);
self.try_alloc_from_local(size, align)
}
self.alloc_slow(size, align)
} else {
self.cursor = end;
start
@@ -128,6 +119,28 @@ impl ImmixAllocator {
}
#[inline(never)]
pub fn alloc_slow(&mut self, size: usize, align: usize) -> Address {
if size > BYTES_IN_LINE {
trace_if!(TRACE_ALLOC, "Mutator: overflow alloc()");
self.overflow_alloc(size, align)
} else {
trace_if!(
TRACE_ALLOC,
"Mutator: fastpath alloc: try_alloc_from_local()"
);
self.try_alloc_from_local(size, align)
}
}
#[inline(always)]
pub fn post_alloc(&mut self, obj: Address, size: usize, align: usize) {
if size > BYTES_IN_LINE {
let index = self.space.get_word_index(obj);
let slot = self.space.get_gc_byte_slot(index);
unsafe { slot.store(slot.load::<u8>() | GC_STRADDLE_BIT) }
}
}
pub fn overflow_alloc(&mut self, size: usize, align: usize) -> Address {
let start = self.large_cursor.align_up(align);
let end = start + size;
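The restructuring above splits the bump-pointer fast path from refill: alloc() now only aligns, bumps, and compares against the limit, while the size-based dispatch (overflow_alloc for requests over a line, try_alloc_from_local otherwise) moves into the outlined, #[inline(never)] alloc_slow(). post_alloc() then tags any allocation larger than a line with GC_STRADDLE_BIT so tracing can later mark its lines exactly. A self-contained toy illustrating the split (the real allocator works on raw Addresses, not indices):

    // Toy bump allocator with the same fast/slow shape as the hunk above.
    struct Bump { cursor: usize, limit: usize }

    impl Bump {
        #[inline(always)]
        fn alloc(&mut self, size: usize, align: usize) -> usize {
            let start = (self.cursor + align - 1) & !(align - 1); // align up
            let end = start + size;
            if end > self.limit {
                self.alloc_slow(size, align) // outlined refill/dispatch
            } else {
                self.cursor = end;
                start
            }
        }

        #[inline(never)]
        fn alloc_slow(&mut self, size: usize, align: usize) -> usize {
            // The real alloc_slow dispatches on size > BYTES_IN_LINE;
            // this sketch just fails instead of refilling.
            panic!("refill needed for {} bytes (align {})", size, align)
        }
    }

    fn main() {
        let mut a = Bump { cursor: 0, limit: 4096 };
        let p = a.alloc(1024, 8);
        assert_eq!(p, 0);
        assert_eq!(a.cursor, 1024);
    }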
@@ -181,7 +194,6 @@ impl ImmixAllocator {
}
}
#[inline(never)]
pub fn try_alloc_from_local(&mut self, size: usize, align: usize) -> Address {
if self.line < LINES_IN_BLOCK {
let opt_next_available_line = {
@@ -213,12 +225,7 @@ impl ImmixAllocator {
self.block().set_line_mark(line, LineMark::FreshAlloc);
}
// allocate fast path
let start = self.cursor.align_up(align);
let end = start + size;
self.cursor = end;
start
self.alloc(size, align)
}
None => self.alloc_from_global(size, align, false)
}
@@ -246,9 +253,14 @@ impl ImmixAllocator {
if request_large {
self.large_cursor = b.mem_start();
self.limit = b.mem_start() + BYTES_IN_BLOCK;
self.large_limit = b.mem_start() + BYTES_IN_BLOCK;
self.large_block = Some(b);
trace!(
"Mutator: slowpath: new large_block starting from 0x{:x}",
self.large_cursor
);
return self.alloc(size, align);
} else {
self.cursor = b.mem_start();
@@ -261,7 +273,7 @@ impl ImmixAllocator {
self.cursor
);
return self.try_alloc_from_local(size, align);
return self.alloc(size, align);
}
}
None => {
......
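Both refill sites above now return through self.alloc(size, align) instead of self.try_alloc_from_local(...) or an inlined bump: re-entering alloc() re-runs the limit check and, on failure, the size dispatch in alloc_slow(), so a fresh block is consumed through exactly one code path. Schematically, reusing the Bump toy from the earlier sketch (refill_and_retry is hypothetical):

    // After installing a fresh block, return through alloc() so the
    // normal bump path and the size dispatch both run again.
    fn refill_and_retry(a: &mut Bump, size: usize, align: usize) -> usize {
        // ... install a new block: reset a.cursor and a.limit ...
        a.alloc(size, align)
    }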
@@ -16,7 +16,9 @@ use common::ptr::*;
use heap::*;
use heap::immix::*;
use heap::gc;
use objectmodel::sidemap::*;
use utils::*;
use utils::bit_utils;
use utils::mem::memmap;
use utils::mem::memsec;
@@ -258,12 +260,11 @@ impl ImmixSpace {
}
#[inline(always)]
pub fn mark_line_alive(addr: Address) {
let mut space: Raw<ImmixSpace> = unsafe { Raw::from_addr(addr.mask(SPACE_LOWBITS_MASK)) };
let index = space.get_line_mark_index(addr);
space.set_line_mark(index, LineMark::Live);
if index < (space.cur_blocks << LOG_LINES_IN_BLOCK) - 1 {
space.set_line_mark(index + 1, LineMark::ConservLive);
pub fn mark_line_conservative(&mut self, addr: Address) {
let index = self.get_line_mark_index(addr);
self.set_line_mark(index, LineMark::Live);
if index < (self.cur_blocks << LOG_LINES_IN_BLOCK) - 1 {
self.set_line_mark(index + 1, LineMark::ConservLive);
}
}
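mark_line_alive becomes the method mark_line_conservative, which better describes what it does: mark the object's own line Live and the following line ConservLive, because a non-straddle object (at most one line in size) can still cross a single line boundary. A self-contained sketch:

    // Conservative line marking for objects no larger than a line.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum LineMark { Free, Live, ConservLive } // other variants elided

    fn mark_line_conservative(marks: &mut [LineMark], line: usize) {
        marks[line] = LineMark::Live;
        if line + 1 < marks.len() {
            marks[line + 1] = LineMark::ConservLive; // may hold the tail
        }
    }

    fn main() {
        let mut marks = [LineMark::Free; 4];
        mark_line_conservative(&mut marks, 1);
        assert_eq!(marks[1], LineMark::Live);
        assert_eq!(marks[2], LineMark::ConservLive);
    }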
@@ -366,8 +367,8 @@ impl ImmixSpace {
// erase gc bytes
let words = self.cur_size >> LOG_POINTER_SIZE;
unsafe {
memsec::memzero(&mut self.gc_byte_table[0] as *mut u8, words);
for i in 0..words {
self.gc_byte_table[i] = bit_utils::clear_bit_u8(self.gc_byte_table[i], GC_MARK_BIT);
}
}
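The change above is load-bearing for straddle objects: the old memzero wiped the whole GC byte table between collections, which would also erase GC_STRADDLE_BIT (set once at allocation time in post_alloc). The per-byte loop instead clears only GC_MARK_BIT, so the straddle flag survives across GCs. A self-contained check using the same constants and helper as this commit:

    const GC_STRADDLE_BIT: u8 = 0b1000_0000;
    const GC_MARK_BIT: u8 = 0b0000_0001;

    fn clear_bit_u8(val: u8, mask: u8) -> u8 { val & !mask }

    fn main() {
        let byte = GC_STRADDLE_BIT | GC_MARK_BIT; // marked straddle object
        let after = clear_bit_u8(byte, GC_MARK_BIT);
        assert_eq!(after, GC_STRADDLE_BIT); // straddle flag is preserved
    }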
@@ -581,13 +582,50 @@ impl ImmixBlock {
#[inline(always)]
pub fn mark_object_traced(obj: ObjectReference) {
let obj_addr = obj.to_address();
let mut space = ImmixSpace::get(obj_addr);
// mark object
let addr = ImmixSpace::get_gc_byte_slot_static(obj_addr);
unsafe { addr.store(1u8) }
let obj_index = space.get_word_index(obj_addr);
let slot = space.get_gc_byte_slot(obj_index);
let gc_byte = unsafe { slot.load::<u8>() };
unsafe {
slot.store(gc_byte | GC_MARK_BIT);
}
if is_straddle_object(gc_byte) {
// we need to know object size, and mark multiple lines
let size = {
use std::mem::transmute;
let type_slot = space.get_type_byte_slot(obj_index);
let med_encode = unsafe { type_slot.load::<MediumObjectEncode>() };
let small_encode: &SmallObjectEncode = unsafe { transmute(&med_encode) };
if small_encode.is_small() {
small_encode.size()
} else {
med_encode.size()
}
};
let start_line = space.get_line_mark_index(obj_addr);
let end_line = start_line + (size >> LOG_BYTES_IN_LINE);
for i in start_line..end_line {
space.set_line_mark(i, LineMark::Live);
}
trace!(
" marking line for straddle object (line {} - {} alive)",
start_line,
end_line
);
} else {
// mark current line, and conservatively mark the next line
space.mark_line_conservative(obj_addr);
trace!(" marking line for normal object (conservatively)");
}
}
// mark line
ImmixSpace::mark_line_alive(obj_addr);
#[inline(always)]
fn is_straddle_object(gc_byte: u8) -> bool {
(gc_byte & GC_STRADDLE_BIT) == GC_STRADDLE_BIT
}
#[inline(always)]
......
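For a straddle object, mark_object_traced recovers the object's size from its side-metadata type encoding (trying the small encoding first, falling back to the medium one) and marks every covered line Live exactly, with no trailing conservative line; normal objects keep the conservative line-plus-one marking. A self-contained sketch of the line-range step, assuming 256-byte lines and a size that is a whole number of lines:

    // Mark every line covered by a straddle object (size assumed to be
    // a multiple of the 256-byte line size, as in the hunk above).
    const LOG_BYTES_IN_LINE: usize = 8;

    fn mark_straddle_lines(live: &mut [bool], start_line: usize, size: usize) {
        let end_line = start_line + (size >> LOG_BYTES_IN_LINE);
        for line in start_line..end_line {
            live[line] = true; // LineMark::Live in the real code
        }
    }

    fn main() {
        let mut live = [false; 8];
        mark_straddle_lines(&mut live, 2, 1024); // marks lines 2..6
        assert!(live[2] && live[3] && live[4] && live[5] && !live[6]);
    }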
@@ -45,8 +45,6 @@ pub use self::immix_space::is_object_traced;
// | ...... |
// |__________________|
// 64KB Immix Block
pub const LOG_BYTES_IN_BLOCK: usize = 16;
pub const BYTES_IN_BLOCK: ByteSize = 1 << LOG_BYTES_IN_BLOCK;
@@ -86,6 +84,9 @@ pub const OFFSET_META_TYPE_TABLE: ByteOffset =
pub const OFFSET_MEM_START: ByteOffset =
OFFSET_META_TYPE_TABLE + BYTES_META_TYPE_TABLE as ByteOffset;
pub const GC_STRADDLE_BIT: u8 = 0b1000_0000u8;
pub const GC_MARK_BIT: u8 = 0b0000_0001u8;
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum LineMark {
......
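The two new constants partition the per-word GC byte: the mark bit sits in the least-significant position and the straddle flag in the most-significant, so the two can be set, tested, and cleared independently. A quick self-contained check:

    const GC_STRADDLE_BIT: u8 = 0b1000_0000;
    const GC_MARK_BIT: u8 = 0b0000_0001;

    fn main() {
        let byte = GC_STRADDLE_BIT | GC_MARK_BIT;
        assert_eq!(byte, 0b1000_0001);         // the bits do not overlap
        assert_ne!(byte & GC_STRADDLE_BIT, 0); // straddle still readable
        assert_ne!(byte & GC_MARK_BIT, 0);     // mark still readable
    }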
@@ -294,6 +294,11 @@ pub extern "C" fn yieldpoint_slow(mutator: *mut Mutator) {
unsafe { mutator.as_mut().unwrap() }.yieldpoint_slow()
}
#[inline(always)]
fn mutator_ref(m: *mut Mutator) -> &'static mut Mutator {
unsafe { &mut *m }
}
/// allocates an object in the immix space
#[inline(always)]
#[no_mangle]
@@ -302,12 +307,8 @@ pub extern "C" fn muentry_alloc_tiny(
size: usize,
align: usize
) -> ObjectReference {
unsafe {
(&mut *mutator)
.tiny
.alloc(size, align)
.to_object_reference()
}
let m = mutator_ref(mutator);
unsafe { m.tiny.alloc(size, align).to_object_reference() }
}
#[inline(always)]
@@ -317,12 +318,10 @@ pub extern "C" fn muentry_alloc_normal(
size: usize,
align: usize
) -> ObjectReference {
unsafe {
(&mut *mutator)
.normal
.alloc(size, align)
.to_object_reference()
}
let m = mutator_ref(mutator);
let res = m.normal.alloc(size, align);
m.normal.post_alloc(res, size, align);
unsafe { res.to_object_reference() }
}
/// allocates an object with slowpath in the immix space
@@ -333,7 +332,8 @@ pub extern "C" fn muentry_alloc_tiny_slow(
size: usize,
align: usize
) -> Address {
unsafe { (&mut *mutator).tiny.try_alloc_from_local(size, align) }
let m = mutator_ref(mutator);
m.tiny.alloc_slow(size, align)
}
/// allocates an object with slowpath in the immix space
@@ -344,7 +344,10 @@ pub extern "C" fn muentry_alloc_normal_slow(
size: usize,
align: usize
) -> Address {
unsafe { (&mut *mutator).normal.try_alloc_from_local(size, align) }
let m = mutator_ref(mutator);
let res = m.normal.alloc_slow(size, align);
m.normal.post_alloc(res, size, align);
res
}
/// allocates an object in the freelist space (large object space)
......
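Note that both normal-space entry points, the inlined muentry_alloc_normal and the outlined muentry_alloc_normal_slow, now call post_alloc on the result, so the straddle bit is set regardless of which path satisfied the allocation; the tiny-space entry points skip it because tiny objects can never exceed a line. A self-contained sketch of the post_alloc effect (line size assumed to be 256 bytes):

    const BYTES_IN_LINE: usize = 256; // assumed line size
    const GC_STRADDLE_BIT: u8 = 0b1000_0000;

    // Simplified stand-in for ImmixAllocator::post_alloc.
    fn post_alloc(gc_byte: &mut u8, size: usize) {
        if size > BYTES_IN_LINE {
            *gc_byte |= GC_STRADDLE_BIT;
        }
    }

    fn main() {
        let mut gc_byte = 0u8;
        post_alloc(&mut gc_byte, 1024); // a 4-line object
        assert_ne!(gc_byte & GC_STRADDLE_BIT, 0);
    }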
@@ -184,3 +184,122 @@ pub fn test_normal_immix_hybrid() {
drop_mutator(mutator);
gc_destroy();
}
#[test]
pub fn test_normal_immix_straddle() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const OBJECT_SIZE: usize = 1024; // 4 lines
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = 4;
start_logging_trace();
gc_init(GCConfig {
immix_tiny_size: 0,
immix_normal_size: IMMIX_SPACE_SIZE,
lo_size: 0,
n_gcthreads: 1,
enable_gc: true
});
let header = {
let ty_encode = TypeEncode::new(64, [0; 63], 0, [0; 63]);
let id = GlobalTypeTable::insert_large_entry(ty_encode);
let raw_encode = ((id << 8) | 0b1111000usize) as u32;
MediumObjectEncode::new(raw_encode)
};
println!("Header: {:?}", header);
let (_, normal_space) = get_spaces();
let mutator = new_mutator();
// alloc 4 objects
let mut objects = vec![];
for _ in 0..WORK_LOAD {
let res = muentry_alloc_normal(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_medium_object(mutator, res, header);
objects.push(res);
}
for obj in objects.iter() {
add_to_root(*obj);
}
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 1);
assert_eq!(normal_space.last_gc_used_lines, 16);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 2);
assert_eq!(normal_space.last_gc_used_lines, 16);
for obj in objects.iter() {
remove_root(*obj);
}
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 3);
assert_eq!(normal_space.last_gc_used_lines, 0);
}
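The expected line counts follow from exact straddle marking: each 1024-byte object covers 4 of the 256-byte lines and, being a straddle object, gets no trailing conservative line, so 4 objects pin exactly 16 lines; dropping the roots lets the third GC reclaim everything. The arithmetic, as a self-contained check:

    // Line accounting behind the asserts above (256-byte lines assumed).
    fn main() {
        let lines_per_object = 1024 / 256;    // 4 lines, marked exactly
        assert_eq!(4 * lines_per_object, 16); // last_gc_used_lines
    }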
#[test]
pub fn test_normal_immix_mix() {
const IMMIX_SPACE_SIZE: usize = SMALL_SPACE_SIZE;
const STRADDLE_OBJECT_SIZE: usize = 1024; // 4 lines
const NORMAL_OBJECT_SIZE: usize = 64;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = 4;
start_logging_trace();
gc_init(GCConfig {
immix_tiny_size: 0,
immix_normal_size: IMMIX_SPACE_SIZE,
lo_size: 0,
n_gcthreads: 1,
enable_gc: true
});
let straddle_header = {
let ty_encode = TypeEncode::new(64, [0; 63], 0, [0; 63]);
let id = GlobalTypeTable::insert_large_entry(ty_encode);
let raw_encode = ((id << 8) | 0b1111000usize) as u32;
MediumObjectEncode::new(raw_encode)
};
let normal_header = {
let ty_encode = TypeEncode::new(8, [0; 63], 0, [0; 63]);
let id = GlobalTypeTable::insert_large_entry(ty_encode);
let raw_encode = ((id << 8) | 0usize) as u32;
MediumObjectEncode::new(raw_encode)
};
println!("Straddle Header: {:?}", straddle_header);
println!("Normal Header: {:?}", normal_header);
let (_, normal_space) = get_spaces();
let mutator = new_mutator();
// alloc 4 straddle objects and 1 normal object
let mut objects = vec![];
for _ in 0..WORK_LOAD {
let res = muentry_alloc_normal(mutator, STRADDLE_OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_medium_object(mutator, res, straddle_header);
objects.push(res);
}
let res = muentry_alloc_normal(mutator, NORMAL_OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_medium_object(mutator, res, normal_header);
objects.push(res);
for obj in objects.iter() {
add_to_root(*obj);
}
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 1);
assert_eq!(normal_space.last_gc_used_lines, 18);
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 2);
assert_eq!(normal_space.last_gc_used_lines, 18);
for obj in objects.iter() {
remove_root(*obj);
}
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 3);
assert_eq!(normal_space.last_gc_used_lines, 0);
}
\ No newline at end of file
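The mixed test's expected count of 18 combines both marking modes: the four straddle objects contribute 4 x 4 = 16 exactly-marked lines, and the single 64-byte normal object is marked conservatively (its own line plus the next), adding 2 more:

    // Line accounting for the mixed test (256-byte lines assumed).
    fn main() {
        let straddle_lines = 4 * (1024 / 256); // exact marking: 16 lines
        let normal_lines = 2;                  // Live + ConservLive
        assert_eq!(straddle_lines + normal_lines, 18);
    }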
@@ -25,6 +25,21 @@ pub fn lower_bits_u8(value: u8, len: usize) -> u8 {
value & ((1 << len) - 1)
}
#[inline(always)]
pub fn set_bit_u8(val: u8, mask: u8) -> u8 {
val | mask
}
#[inline(always)]
pub fn clear_bit_u8(val: u8, mask: u8) -> u8 {
val & !mask
}
#[inline(always)]
pub fn test_bit_u8(val: u8, mask: u8) -> bool {
(val & mask) == mask
}
/// sets the nth bit (count from least significant bit) as val
/// (treat the val as boolean, either 1 or 0)
#[inline(always)]
@@ -65,10 +80,23 @@ mod tests {
let value: u8 = 0b1100_0011;
assert_eq!(test_nth_bit_u8(value, 6, 1), true);
assert_eq!(lower_bits_u8(value, 6), 0b00_0011);
}
#[test]
pub fn test_u8_bits2() {
let mut val = 0u8;
let mask = 0b0000_0001u8;
val = set_bit_u8(val, mask);
assert_eq!(val, 1);
assert!(test_bit_u8(val, mask));
val = clear_bit_u8(val, mask);
assert_eq!(val, 0);
assert!(!test_bit_u8(val, mask));
}
#[test]
pub fn test_set_bit() {
let a = 0b0000u64;
......