Commit 9cd0463b authored by qinsoon

refactored the code (moved the per-allocator and per-space GC operations behind the common Allocator and Space traits)

parent 0d6b3fc2
@@ -23,19 +23,16 @@ pub struct FreelistAllocator {
mutator: *mut Mutator
}
impl FreelistAllocator {
pub fn new(space: Raw<FreelistSpace>) -> FreelistAllocator {
FreelistAllocator {
space,
mutator: ptr::null_mut()
}
}
impl Allocator for FreelistAllocator {
fn prepare_for_gc(&mut self) {}
fn reset_after_gc(&mut self) {}
fn destroy(&mut self) {}
pub fn set_mutator(&mut self, mutator: *mut Mutator) {
fn set_mutator(&mut self, mutator: *mut Mutator) {
self.mutator = mutator;
}
pub fn alloc(&mut self, size: ByteSize, align: ByteSize) -> Address {
fn alloc(&mut self, size: ByteSize, align: ByteSize) -> Address {
loop {
unsafe { &mut *self.mutator }.yieldpoint();
@@ -48,6 +45,15 @@ impl FreelistAllocator {
}
}
}
}
impl FreelistAllocator {
pub fn new(space: Raw<FreelistSpace>) -> FreelistAllocator {
FreelistAllocator {
space,
mutator: ptr::null_mut()
}
}
pub fn init_object(&mut self, addr: Address, encode: LargeObjectEncode) {
let slot = self.space.get_type_encode_slot(addr);
......
@@ -15,7 +15,6 @@
use common::ptr::*;
use heap::*;
use objectmodel::sidemap::*;
use utils::*;
use utils::mem::memmap;
use utils::mem::memsec::memzero;
@@ -75,6 +74,99 @@ impl RawMemoryMetadata for FreelistSpace {
}
}
impl Space for FreelistSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.cur_end
}
#[inline(always)]
#[allow(unused_variables)]
fn is_valid_object(&self, addr: Address) -> bool {
true
}
fn destroy(&mut self) {}
fn prepare_for_gc(&mut self) {
// erase page mark
unsafe {
memzero(
&mut self.page_mark_table[0] as *mut PageMark,
self.cur_pages
);
}
}
fn sweep(&mut self) {
debug!("=== {:?} Sweep ===", self.desc);
debug_assert_eq!(self.n_used_pages() + self.n_usable_pages(), self.cur_pages);
let mut free_pages = 0;
let mut used_pages = 0;
{
let mut used_nodes = self.used_nodes.lock().unwrap();
let mut usable_nodes = self.usable_nodes.lock().unwrap();
let mut all_nodes: Vec<FreelistNode> = {
let mut ret = vec![];
ret.append(&mut used_nodes);
ret.append(&mut usable_nodes);
ret
};
debug_assert_eq!(all_nodes.len(), self.cur_pages);
while !all_nodes.is_empty() {
let node: FreelistNode = all_nodes.pop().unwrap();
let index = self.get_page_index(node.addr);
if self.page_mark_table[index] == PageMark::Live {
used_pages += node.size >> LOG_BYTES_IN_PAGE;
used_nodes.push(node);
} else {
free_pages += node.size >> LOG_BYTES_IN_PAGE;
usable_nodes.push(node);
}
}
}
if cfg!(debug_assertions) {
debug!("free pages = {} of {} total", free_pages, self.cur_pages);
debug!("used pages = {} of {} total", used_pages, self.cur_pages);
}
self.last_gc_free_pages = free_pages;
self.last_gc_used_pages = used_pages;
if self.n_used_pages() == self.total_pages && self.total_pages != 0 {
use std::process;
println!("Out of memory in Freelist Space");
process::exit(1);
}
debug_assert_eq!(self.n_used_pages() + self.n_usable_pages(), self.cur_pages);
trace!("=======================");
}
#[inline(always)]
fn mark_object_traced(&mut self, obj: ObjectReference) {
let index = self.get_page_index(obj.to_address());
self.page_mark_table[index] = PageMark::Live;
}
#[inline(always)]
fn is_object_traced(&self, obj: ObjectReference) -> bool {
let index = self.get_page_index(obj.to_address());
self.page_mark_table[index] == PageMark::Live
}
}
impl FreelistSpace {
pub fn new(desc: SpaceDescriptor, space_size: ByteSize) -> Raw<FreelistSpace> {
let mut anon_mmap = match memmap::MmapMut::map_anon(
@@ -137,23 +229,6 @@ impl FreelistSpace {
space
}
pub fn cleanup(&self) {}
#[inline(always)]
pub fn get(addr: Address) -> Raw<FreelistSpace> {
unsafe { Raw::from_addr(addr.mask(SPACE_LOWBITS_MASK)) }
}
pub fn prepare_for_gc(&mut self) {
// erase page mark
unsafe {
memzero(
&mut self.page_mark_table[0] as *mut PageMark,
self.cur_pages
);
}
}
#[inline(always)]
pub fn get_page_index(&self, obj: Address) -> usize {
(obj - self.mem_start()) >> LOG_BYTES_IN_PAGE
@@ -228,69 +303,6 @@ impl FreelistSpace {
ret
}
pub fn sweep(&mut self) {
debug!("=== {:?} Sweep ===", self.desc);
debug_assert_eq!(self.n_used_pages() + self.n_usable_pages(), self.cur_pages);
let mut free_pages = 0;
let mut used_pages = 0;
{
let mut used_nodes = self.used_nodes.lock().unwrap();
let mut usable_nodes = self.usable_nodes.lock().unwrap();
let mut all_nodes: Vec<FreelistNode> = {
let mut ret = vec![];
ret.append(&mut used_nodes);
ret.append(&mut usable_nodes);
ret
};
debug_assert_eq!(all_nodes.len(), self.cur_pages);
while !all_nodes.is_empty() {
let node: FreelistNode = all_nodes.pop().unwrap();
let index = self.get_page_index(node.addr);
if self.page_mark_table[index] == PageMark::Live {
used_pages += (node.size >> LOG_BYTES_IN_PAGE);
used_nodes.push(node);
} else {
free_pages += (node.size >> LOG_BYTES_IN_PAGE);
usable_nodes.push(node);
}
}
}
if cfg!(debug_assertions) {
debug!("free pages = {} of {} total", free_pages, self.cur_pages);
debug!("used pages = {} of {} total", used_pages, self.cur_pages);
}
self.last_gc_free_pages = free_pages;
self.last_gc_used_pages = used_pages;
if self.n_used_pages() == self.total_pages && self.total_pages != 0 {
use std::process;
println!("Out of memory in Freelist Space");
process::exit(1);
}
debug_assert_eq!(self.n_used_pages() + self.n_usable_pages(), self.cur_pages);
trace!("=======================");
}
#[inline(always)]
pub fn mark_object_traced(&mut self, obj: ObjectReference) {
let index = self.get_page_index(obj.to_address());
self.page_mark_table[index] = PageMark::Live;
}
#[inline(always)]
pub fn is_object_traced(&self, obj: ObjectReference) -> bool {
let index = self.get_page_index(obj.to_address());
self.page_mark_table[index] == PageMark::Live
}
pub fn get_type_encode(&self, obj: ObjectReference) -> LargeObjectEncode {
let index = self.get_page_index(obj.to_address());
self.page_encode_table[index]
@@ -335,22 +347,6 @@ impl FreelistSpace {
}
}
impl Space for FreelistSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.cur_end
}
#[inline(always)]
#[allow(unused_variables)]
fn is_valid_object(&self, addr: Address) -> bool {
true
}
}
#[repr(C)]
pub struct FreelistNode {
size: ByteSize,
@@ -359,6 +355,7 @@ pub struct FreelistNode {
#[repr(u8)]
#[derive(Copy, Clone, Debug, PartialEq)]
#[allow(dead_code)] // we do not explicitly use Free, but we zero the page marks
pub enum PageMark {
Free = 0,
Live
......
@@ -13,7 +13,6 @@
// limitations under the License.
use heap::*;
use heap::freelist::*;
use objectmodel;
use objectmodel::sidemap::*;
use MY_GC;
@@ -375,11 +374,14 @@ pub fn steal_trace_object(
) {
match SpaceDescriptor::get(obj) {
SpaceDescriptor::ImmixTiny => {
let mut space = ImmixSpace::get::<ImmixSpace>(obj.to_address());
// mark current object traced
immix::mark_object_traced(obj);
space.mark_object_traced(obj);
let encode = unsafe {
ImmixSpace::get_type_byte_slot_static(obj.to_address()).load::<TinyObjectEncode>()
space
.get_type_byte_slot(space.get_word_index(obj.to_address()))
.load::<TinyObjectEncode>()
};
trace_if!(TRACE_GC, " trace tiny obj: {} ({:?})", obj, encode);
@@ -394,12 +396,13 @@ pub fn steal_trace_object(
}
}
SpaceDescriptor::ImmixNormal => {
let mut space = ImmixSpace::get::<ImmixSpace>(obj.to_address());
//mark current object traced
immix::mark_object_traced(obj);
space.mark_object_traced(obj);
// get type encode
let (type_encode, type_size): (&TypeEncode, ByteOffset) = {
let type_slot = ImmixSpace::get_type_byte_slot_static(obj.to_address());
let type_slot = space.get_type_byte_slot(space.get_word_index(obj.to_address()));
let encode = unsafe { type_slot.load::<MediumObjectEncode>() };
let small_encode: &SmallObjectEncode = unsafe { transmute(&encode) };
@@ -444,7 +447,7 @@ pub fn steal_trace_object(
trace_if!(TRACE_GC, " -done-");
}
SpaceDescriptor::Freelist => {
let mut space = FreelistSpace::get(obj.to_address());
let mut space = FreelistSpace::get::<FreelistSpace>(obj.to_address());
space.mark_object_traced(obj);
let encode = space.get_type_encode(obj);
@@ -498,12 +501,13 @@ fn trace_word(
match SpaceDescriptor::get(edge) {
SpaceDescriptor::ImmixTiny | SpaceDescriptor::ImmixNormal => {
if !immix::is_object_traced(edge) {
let space = ImmixSpace::get::<ImmixSpace>(edge.to_address());
if !space.is_object_traced(edge) {
steal_process_edge(edge, local_queue, job_sender);
}
}
SpaceDescriptor::Freelist => {
let space = FreelistSpace::get(edge.to_address());
let space = FreelistSpace::get::<FreelistSpace>(edge.to_address());
if !space.is_object_traced(edge) {
debug!("edge {} is not traced, trace it", edge);
steal_process_edge(edge, local_queue, job_sender);
......
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use heap::Mutator;
use heap::immix::*;
use heap::*;
use heap::immix::ImmixSpace;
use heap::immix::immix_space::ImmixBlock;
use objectmodel;
@@ -46,49 +45,27 @@ lazy_static! {
pub static ref LIMIT_OFFSET : usize = offset_of!(ImmixAllocator=>limit).get_byte_offset();
}
impl ImmixAllocator {
pub fn reset(&mut self) -> () {
unsafe {
// should not use Address::zero() other than initialization
self.cursor = Address::zero();
self.limit = Address::zero();
self.large_cursor = Address::zero();
self.large_limit = Address::zero();
}
self.line = LINES_IN_BLOCK;
self.block = None;
self.large_block = None;
}
pub fn reset_after_gc(&mut self) {
impl Allocator for ImmixAllocator {
fn reset_after_gc(&mut self) {
self.reset();
}
pub fn new(space: Raw<ImmixSpace>) -> ImmixAllocator {
ImmixAllocator {
cursor: unsafe { Address::zero() },
limit: unsafe { Address::zero() },
line: LINES_IN_BLOCK,
block: None,
large_cursor: unsafe { Address::zero() },
large_limit: unsafe { Address::zero() },
large_block: None,
space,
mutator: ptr::null_mut()
}
fn prepare_for_gc(&mut self) {
self.return_block(true);
self.return_block(false);
}
pub fn set_mutator(&mut self, mutator: *mut Mutator) {
fn set_mutator(&mut self, mutator: *mut Mutator) {
self.mutator = mutator;
}
pub fn destroy(&mut self) {
fn destroy(&mut self) {
self.return_block(true);
self.return_block(false);
}
#[inline(always)]
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
fn alloc(&mut self, size: usize, align: usize) -> Address {
// this part of code will slow down allocation
let align = objectmodel::check_alignment(align);
// end
@@ -117,6 +94,35 @@ impl ImmixAllocator {
start
}
}
}
impl ImmixAllocator {
fn reset(&mut self) -> () {
unsafe {
// should not use Address::zero() other than initialization
self.cursor = Address::zero();
self.limit = Address::zero();
self.large_cursor = Address::zero();
self.large_limit = Address::zero();
}
self.line = LINES_IN_BLOCK;
self.block = None;
self.large_block = None;
}
pub fn new(space: Raw<ImmixSpace>) -> ImmixAllocator {
ImmixAllocator {
cursor: unsafe { Address::zero() },
limit: unsafe { Address::zero() },
line: LINES_IN_BLOCK,
block: None,
large_cursor: unsafe { Address::zero() },
large_limit: unsafe { Address::zero() },
large_block: None,
space,
mutator: ptr::null_mut()
}
}
#[inline(never)]
pub fn alloc_slow(&mut self, size: usize, align: usize) -> Address {
@@ -283,10 +289,7 @@ impl ImmixAllocator {
}
}
pub fn prepare_for_gc(&mut self) {
self.return_block(true);
self.return_block(false);
}
fn return_block(&mut self, request_large: bool) {
if request_large {
......
This diff is collapsed.
@@ -24,9 +24,6 @@ pub use self::immix_mutator::ImmixAllocator;
pub use self::immix_mutator::CURSOR_OFFSET;
pub use self::immix_mutator::LIMIT_OFFSET;
pub use self::immix_space::mark_object_traced;
pub use self::immix_space::is_object_traced;
// Immix space
// |------------------| <- 16GB align
// | metadata |
......
@@ -61,10 +61,21 @@ pub trait Space {
fn end(&self) -> Address;
#[inline(always)]
fn is_valid_object(&self, addr: Address) -> bool;
fn destroy(&mut self);
fn prepare_for_gc(&mut self);
fn sweep(&mut self);
#[inline(always)]
fn mark_object_traced(&mut self, obj: ObjectReference);
#[inline(always)]
fn is_object_traced(&self, obj: ObjectReference) -> bool;
#[inline(always)]
fn addr_in_space(&self, addr: Address) -> bool {
addr >= self.start() && addr < self.end()
}
#[inline(always)]
fn get<T: RawMemoryMetadata + Sized>(addr: Address) -> Raw<T> {
unsafe { Raw::from_addr(addr.mask(SPACE_LOWBITS_MASK)) }
}
}
#[allow(dead_code)]
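The Space trait above now also owns the tracing-side hooks (mark_object_traced, is_object_traced, prepare_for_gc, sweep, destroy) and gains a default get that recovers a space's metadata object by masking any interior address down to the space-aligned base with SPACE_LOWBITS_MASK, which is what lets the tracing code earlier in this diff go straight from an object to its owning space. A minimal usage sketch, assuming only the types shown in this diff; the helper name is made up and is not part of the commit:

```rust
// Sketch only (hypothetical helper, not part of this commit): resolve the
// owning space of an object and mark it, mirroring steal_trace_object above.
fn mark_traced_in_freelist(obj: ObjectReference) {
    // The default Space::get masks an interior address down to the
    // space-aligned base (SPACE_LOWBITS_MASK) to recover the metadata object.
    let mut space = FreelistSpace::get::<FreelistSpace>(obj.to_address());
    debug_assert!(space.addr_in_space(obj.to_address()));
    if !space.is_object_traced(obj) {
        space.mark_object_traced(obj);
    }
}
```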
@@ -131,18 +142,16 @@ impl Mutator {
self.id
}
pub fn reset(&mut self) {
self.tiny.reset();
self.normal.reset();
}
pub fn reset_after_gc(&mut self) {
self.reset()
self.tiny.reset_after_gc();
self.normal.reset_after_gc();
self.lo.reset_after_gc();
}
pub fn prepare_for_gc(&mut self) {
self.tiny.prepare_for_gc();
self.normal.prepare_for_gc();
self.lo.prepare_for_gc();
}
pub fn destroy(&mut self) {
@@ -176,6 +185,15 @@ impl Mutator {
}
}
pub trait Allocator {
fn reset_after_gc(&mut self);
fn prepare_for_gc(&mut self);
fn set_mutator(&mut self, mutator: *mut Mutator);
fn destroy(&mut self);
#[inline(always)]
fn alloc(&mut self, size: ByteSize, align: ByteSize) -> Address;
}
pub struct MutatorGlobal {
take_yield: AtomicBool,
still_blocked: AtomicBool
......
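The new Allocator trait above collects the lifecycle hooks that ImmixAllocator and FreelistAllocator previously exposed as inherent pub fns, which is what lets Mutator::reset_after_gc and prepare_for_gc call the same methods on its tiny, normal and lo allocators. A minimal sketch of a caller written against the traits rather than a concrete allocator/space pair; gc_pause_bracket is a hypothetical helper under that assumption, not code from this commit:

```rust
// Sketch only (hypothetical helper): a GC pause expressed against the new
// Allocator and Space traits instead of concrete allocator/space types.
fn gc_pause_bracket<A: Allocator, S: Space>(alloc: &mut A, space: &mut S) {
    alloc.prepare_for_gc(); // e.g. ImmixAllocator returns its current blocks
    space.prepare_for_gc(); // e.g. FreelistSpace zeroes its page mark table
    // ... tracing would run here ...
    space.sweep();          // reclaim whatever stayed unmarked
    alloc.reset_after_gc(); // reset cursors so allocation re-requests memory
}
```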
@@ -207,9 +207,10 @@ pub extern "C" fn gc_destroy() {
objectmodel::cleanup();
let mut gc_lock = MY_GC.write().unwrap();
{
let gc = gc_lock.as_ref().unwrap();
gc.immix_tiny.cleanup();
gc.immix_normal.cleanup();
let mut gc = gc_lock.as_mut().unwrap();
gc.immix_tiny.destroy();
gc.immix_normal.destroy();
gc.lo.destroy();
}
*gc_lock = None;
}
......
@@ -238,6 +238,9 @@ pub fn test_normal_immix_straddle() {
force_gc(mutator);
assert_eq!(GC_COUNT.load(Ordering::SeqCst), 3);
assert_eq!(normal_space.last_gc_used_lines, 0);
drop_mutator(mutator);
gc_destroy();
}
#[test]
......