GitLab will be upgraded to version 12.10.14-ce.0 on 28 Sept 2020, from 2.00pm (AEDT) to 2.30pm (AEDT). During the update, GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to us at N110 (b) CSIT building.

Commit 5145efb5 authored by qinsoon's avatar qinsoon

gc crate

parent c2243dd0
......@@ -23,7 +23,7 @@ use ast::op::*;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::mm::objectmodel::OBJECT_HEADER_SIZE;
use runtime::mm::OBJECT_HEADER_SIZE;
use runtime::ValueLocation;
use runtime::thread;
......
......@@ -20,8 +20,8 @@ use ast::op::*;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::mm::objectmodel::OBJECT_HEADER_SIZE;
use runtime::mm::objectmodel::OBJECT_HEADER_OFFSET;
use runtime::mm::OBJECT_HEADER_SIZE;
use runtime::mm::OBJECT_HEADER_OFFSET;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
......@@ -1411,9 +1411,6 @@ impl <'a> InstructionSelection {
Some(node), f_content, f_context, vm);
}
Instruction_::CommonInst_GetAddr(op) => {
use runtime::mm::objectmodel::GC_IREF_HAS_OFFSET;
debug_assert!(!GC_IREF_HAS_OFFSET);
trace!("instsel on GETADDR");
// assume it is pinned
......
......@@ -472,9 +472,6 @@ mod tests {
#[test]
fn test_treadmill_alloc_spanblock() {
use simple_logger;
simple_logger::init().unwrap();
let space = FreeListSpace::new(BLOCK_SIZE * 20);
for i in 0..5 {
......@@ -486,9 +483,6 @@ mod tests {
#[test]
fn test_treadmill_sweep() {
use simple_logger;
simple_logger::init().unwrap();
let space = FreeListSpace::new(BLOCK_SIZE * 20);
for i in 0..5 {
......
......@@ -282,8 +282,6 @@ fn gc() {
trace!("GC finishes");
}
pub const MULTI_THREAD_TRACE_THRESHOLD : usize = 10;
pub const PUSH_BACK_THRESHOLD : usize = 50;
pub static GC_THREADS : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
......
......@@ -24,13 +24,6 @@ pub use self::immix_mutator::N_MUTATORS;
pub use self::immix_mutator::CURSOR_OFFSET;
pub use self::immix_mutator::LIMIT_OFFSET;
use std::sync::Arc;
use std::sync::RwLock;
lazy_static!{
pub static ref SHARED_SPACE : Option<Arc<RwLock<ImmixSpace>>> = None;
}
pub const LOG_BYTES_IN_LINE : usize = 8;
pub const BYTES_IN_LINE : usize = (1 << LOG_BYTES_IN_LINE);
pub const LOG_BYTES_IN_BLOCK : usize = 16;
......
......@@ -22,8 +22,6 @@ pub mod immix;
pub mod freelist;
pub mod gc;
pub const ALIGNMENT_VALUE : u8 = 1;
pub const IMMIX_SPACE_RATIO : f64 = 1.0 - LO_SPACE_RATIO;
pub const LO_SPACE_RATIO : f64 = 0.2;
pub const DEFAULT_HEAP_SIZE : usize = 500 << 20;
......@@ -97,7 +95,11 @@ pub trait Space {
}
}
#[allow(dead_code)]
pub const ALIGNMENT_VALUE : u8 = 1;
#[inline(always)]
#[allow(dead_code)]
pub fn fill_alignment_gap(start : Address, end : Address) -> () {
debug_assert!(end >= start);
unsafe {start.memset(ALIGNMENT_VALUE, end - start);}
......
This diff is collapsed.
......@@ -15,8 +15,6 @@
use std::sync::atomic;
use utils::ByteSize;
pub const GC_IREF_HAS_OFFSET : bool = false;
#[cfg(feature = "use-sidemap")]
mod sidemap;
#[cfg(not(feature = "use-sidemap"))]
......@@ -40,10 +38,6 @@ pub fn load_mark_state() -> u8 {
MARK_STATE.load(atomic::Ordering::SeqCst) as u8
}
/// Toggles the low bit of `mark`, alternating between the two GC mark
/// states. NOTE(review): callers presumably pass only 0 or 1 (the value
/// from `load_mark_state`) — confirm; for other inputs this still just
/// flips the least-significant bit.
pub fn flip(mark: u8) -> u8 {
    // XOR with the low bit flips it while leaving all other bits intact.
    mark ^ 0b1
}
#[inline(always)]
pub fn check_alignment(align: ByteSize) -> ByteSize {
if align < MINIMAL_ALIGNMENT {
......
......@@ -164,7 +164,7 @@ fn create_linked_list() {
start_logging();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::gc_stats();
gc::print_gc_context();
let mut mutator = gc::new_mutator();
......@@ -192,7 +192,7 @@ fn linked_list_heap_dump() {
start_logging();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::gc_stats();
gc::print_gc_context();
let mut mutator = gc::new_mutator();
......@@ -229,7 +229,7 @@ fn linked_list_survive_gc() {
start_logging();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::gc_stats();
gc::print_gc_context();
let mut mutator = gc::new_mutator();
......
......@@ -153,7 +153,7 @@ fn alloc(mutator: &mut ImmixMutatorLocal) -> *mut Node {
if cfg!(debug_assertions) {
unsafe {
let hdr = addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>();
let hdr = (addr + objectmodel::OBJECT_HEADER_OFFSET).load::<u64>();
assert!(objectmodel::header_is_object_start(hdr));
}
}
......@@ -168,7 +168,7 @@ fn start() {
start_logging();
gc::gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 1, true);
gc::gc_stats();
gc::print_gc_context();
let mut mutator = gc::new_mutator();
......
......@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utility crate that serves Zebu includes:
//! # Utility crate that serves Zebu
//!
//! It includes:
//! * data structures
//! * double linked list
//! * linked hashmap/set
......
......@@ -1088,12 +1088,9 @@ impl <'a> VM {
/// performs GETIREF
pub fn handle_get_iref(&self, handle_ref: APIHandleArg) -> APIHandleResult {
use runtime::mm::objectmodel::GC_IREF_HAS_OFFSET;
let (ty, addr) = handle_ref.v.as_ref();
// assume iref has the same address as ref
debug_assert!(!GC_IREF_HAS_OFFSET);
let ret = self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::IRef(ty, addr)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment