GitLab will be upgraded to 12.10.14-ce.0 on 28 Sept 2020, from 2.00pm to 2.30pm (AEDT). During the upgrade, the GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to us at N110 (b), CSIT building.

mod.rs 22.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
// Copyright 2017 The Australian National University
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
//     http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

qinsoon's avatar
qinsoon committed
15 16 17 18 19 20
use heap::immix::MUTATORS;
use heap::immix::N_MUTATORS;
use heap::immix::ImmixMutatorLocal;
use heap::immix::ImmixSpace;
use heap::freelist::FreeListSpace;
use objectmodel;
21
use common::gctype::*;
22
use heap::Space;
qinsoon's avatar
qinsoon committed
23
use MY_GC;
qinsoon's avatar
qinsoon committed
24 25

use utils::{Address, ObjectReference};
qinsoon's avatar
qinsoon committed
26
use utils::POINTER_SIZE;
27

28
use std::sync::atomic::{AtomicIsize, AtomicBool, Ordering};
29 30
use std::sync::{Arc, Mutex, Condvar, RwLock};

qinsoon's avatar
qinsoon committed
31
use crossbeam::sync::chase_lev::*;
32 33 34 35 36 37 38 39 40 41 42 43 44 45
use std::sync::mpsc;
use std::sync::mpsc::channel;
use std::thread;

use std::sync::atomic;

// Stop-the-world coordination state shared by all mutators.
lazy_static! {
    // Pair of (parked-mutator counter, condvar): the controller polls the
    // counter until every other mutator has parked; parked mutators wait on
    // the condvar until the controller finishes the collection.
    static ref STW_COND : Arc<(Mutex<usize>, Condvar)> = {
        Arc::new((Mutex::new(0), Condvar::new()))
    };
    
    // Root set for the current collection; rebuilt by the controller in
    // sync_barrier() and consumed by gc().
    static ref ROOTS : RwLock<Vec<ObjectReference>> = RwLock::new(vec![]);
}

/// Whether GC is allowed; `gc()` panics when a collection is triggered while
/// this is false. Starts out false; runtime code must enable it explicitly.
pub static ENABLE_GC : AtomicBool = AtomicBool::new(false);

/// Id of the mutator currently acting as stop-the-world controller.
/// Starts at 0 (matching the old `ATOMIC_ISIZE_INIT`); `init()` resets it
/// to `NO_CONTROLLER` before any collection can happen.
static CONTROLLER : AtomicIsize = AtomicIsize::new(0);
/// Sentinel meaning "no collection in progress".
const  NO_CONTROLLER : isize    = -1;

qinsoon's avatar
qinsoon committed
51
pub fn init(n_gcthreads: usize) {
52
    CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
qinsoon's avatar
qinsoon committed
53 54

    GC_THREADS.store(n_gcthreads, Ordering::SeqCst);
55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
}

/// Requests a collection by asking every registered mutator to yield at its
/// next yield point; the actual collection starts once they reach
/// `sync_barrier()`.
pub fn trigger_gc() {
    trace!("Triggering GC...");

    let mut mutators = MUTATORS.write().unwrap();
    for slot in mutators.iter_mut() {
        if let Some(mutator) = slot.as_mut() {
            mutator.set_take_yield(true);
        }
    }
}

use std::os::raw::c_void;

// C helpers for stack/register scanning (regular x86_64 build).
// Guarded with `not(feature = "sel4-rumprun")`: the sel4-rumprun extern block
// below declares the same foreign-item names, and two active declarations of
// the same name in one module do not compile.
#[cfg(target_arch = "x86_64")]
#[cfg(not(feature = "sel4-rumprun"))]
#[link(name = "gc_clib_x64")]
extern "C" {
    pub fn malloc_zero(size: usize) -> *const c_void;
    fn immmix_get_stack_ptr() -> Address;
    pub fn set_low_water_mark();
    fn get_low_water_mark() -> Address;
    fn get_registers() -> *const Address;
    fn get_registers_count() -> i32;
}

// C helpers for stack/register scanning (aarch64 build).
#[cfg(target_arch = "aarch64")]
#[link(name = "gc_clib_aarch64")]
extern "C" {
    pub fn malloc_zero(size: usize) -> *const c_void;
    fn immmix_get_stack_ptr() -> Address;
    pub fn set_low_water_mark();
    fn get_low_water_mark() -> Address;
    fn get_registers() -> *const Address;
    fn get_registers_count() -> i32;
}

// C helpers for stack/register scanning (x86_64 build on seL4/rumprun).
#[cfg(target_arch = "x86_64")]
#[cfg(feature = "sel4-rumprun")]
#[link(name = "gc_clib_x64_sel4_rumprun")]
extern "C" {
    pub fn malloc_zero(size: usize) -> *const c_void;
    fn immmix_get_stack_ptr() -> Address;
    pub fn set_low_water_mark();
    fn get_low_water_mark() -> Address;
    fn get_registers() -> *const Address;
    fn get_registers_count() -> i32;
}

102
pub fn stack_scan() -> Vec<ObjectReference> {
103
    trace!("stack scanning...");
104
    let stack_ptr : Address = unsafe {immmix_get_stack_ptr()};
105 106 107 108 109 110 111 112 113
    
    if cfg!(debug_assertions) {
        if !stack_ptr.is_aligned_to(8) {
            use std::process;
            println!("trying to scanning stack, however the current stack pointer is 0x{:x}, which is not aligned to 8bytes", stack_ptr);
            process::exit(102);
        }
    }
    
114 115 116 117
    let low_water_mark : Address = unsafe {get_low_water_mark()};
    
    let mut cursor = stack_ptr;
    let mut ret = vec![];
118

qinsoon's avatar
qinsoon committed
119 120 121 122 123
    let gccontext_guard = MY_GC.read().unwrap();
    let gccontext = gccontext_guard.as_ref().unwrap();

    let immix_space = gccontext.immix_space.clone();
    let lo_space = gccontext.lo_space.clone();
124 125 126 127
    
    while cursor < low_water_mark {
        let value : Address = unsafe {cursor.load::<Address>()};
        
128
        if immix_space.is_valid_object(value) || lo_space.is_valid_object(value) {
129 130 131
            ret.push(unsafe {value.to_object_reference()});
        }
        
qinsoon's avatar
qinsoon committed
132
        cursor = cursor.plus(POINTER_SIZE);
133 134 135 136 137 138 139 140 141 142
    }
    
    let roots_from_stack = ret.len();
    
    let registers_count = unsafe {get_registers_count()};
    let registers = unsafe {get_registers()};
    
    for i in 0..registers_count {
        let value = unsafe {*registers.offset(i as isize)};
        
143
        if immix_space.is_valid_object(value) || lo_space.is_valid_object(value){
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
            ret.push(unsafe {value.to_object_reference()});
        }
    }
    
    let roots_from_registers = ret.len() - roots_from_stack;
    
    trace!("roots: {} from stack, {} from registers", roots_from_stack, roots_from_registers);
    
    ret
}

#[inline(never)]
pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
    let controller_id = CONTROLLER.compare_and_swap(-1, mutator.id() as isize, Ordering::SeqCst);
    
    trace!("Mutator{} saw the controller is {}", mutator.id(), controller_id);
    
    // prepare the mutator for gc - return current block (if it has)
    mutator.prepare_for_gc();
    
    // user thread call back to prepare for gc
//    USER_THREAD_PREPARE_FOR_GC.read().unwrap()();
    
    if controller_id != NO_CONTROLLER {
168 169 170 171 172 173
        // scan its stack
        {
            let mut thread_roots = stack_scan();
            ROOTS.write().unwrap().append(&mut thread_roots);
        }

174 175 176 177
        // this thread will block
        block_current_thread(mutator);
        
        // reset current mutator
178
        mutator.reset_after_gc();
179 180 181
    } else {
        // this thread is controller
        // other threads should block
182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198

        // init roots
        {
            let mut roots = ROOTS.write().unwrap();
            // clear existing roots (roots from last gc)
            roots.clear();

            // add explicity roots
            let gc = MY_GC.read().unwrap();
            for objref in gc.as_ref().unwrap().roots.iter() {
                roots.push(*objref);
            }

            // scan its stack
            let mut thread_roots = stack_scan();
            roots.append(&mut thread_roots);
        }
199 200 201 202 203 204 205 206
        
        // wait for all mutators to be blocked
        let &(ref lock, ref cvar) = &*STW_COND.clone();
        let mut count = 0;
        
        trace!("expect {} mutators to park", *N_MUTATORS.read().unwrap() - 1);
        while count < *N_MUTATORS.read().unwrap() - 1 {
            let new_count = {*lock.lock().unwrap()};
207
            if new_count != count {
208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
                count = new_count;
                trace!("count = {}", count);
            }
        }
        
        trace!("everyone stopped, gc will start");
        
        // roots->trace->sweep
        gc();
        
        // mutators will resume
        CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
        for mut t in MUTATORS.write().unwrap().iter_mut() {
            if t.is_some() {
                let t_mut = t.as_mut().unwrap();
                t_mut.set_take_yield(false);
                t_mut.set_still_blocked(false);
            }
        }
        // every mutator thread will reset themselves, so only reset current mutator here
228
        mutator.reset_after_gc();
229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257

        // resume
        {
            let mut count = lock.lock().unwrap();
            *count = 0;
            cvar.notify_all();
        }
    }
}

fn block_current_thread(mutator: &mut ImmixMutatorLocal) {
    trace!("Mutator{} blocked", mutator.id());
    
    let &(ref lock, ref cvar) = &*STW_COND.clone();
    let mut count = lock.lock().unwrap();
    *count += 1;
    
    mutator.global.set_still_blocked(true);
    
    while mutator.global.is_still_blocked() {
        count = cvar.wait(count).unwrap();
    }
    
    trace!("Mutator{} unblocked", mutator.id());
}

/// Number of collections performed since startup (incremented in `gc()`).
pub static GC_COUNT : atomic::AtomicUsize = atomic::AtomicUsize::new(0);

fn gc() {
258 259 260 261
    if ! ENABLE_GC.load(Ordering::SeqCst) {
        panic!("Triggering GC when GC is disabled");
    }

262 263 264 265 266 267
    GC_COUNT.store(GC_COUNT.load(atomic::Ordering::SeqCst) + 1, atomic::Ordering::SeqCst);
    
    trace!("GC starts");
    
    // creates root deque
    let mut roots : &mut Vec<ObjectReference> = &mut ROOTS.write().unwrap();
268
    trace!("total roots: {}", roots.len());
269 270 271
    
    // mark & trace
    {
qinsoon's avatar
qinsoon committed
272 273 274
        let gccontext_guard = MY_GC.read().unwrap();
        let gccontext = gccontext_guard.as_ref().unwrap();
        let (immix_space, lo_space) = (&gccontext.immix_space, &gccontext.lo_space);
275 276 277 278 279 280 281 282
        
        start_trace(&mut roots, immix_space.clone(), lo_space.clone());
    }
    
    trace!("trace done");
    
    // sweep
    {
qinsoon's avatar
qinsoon committed
283 284
        let gccontext_guard = MY_GC.read().unwrap();
        let gccontext = gccontext_guard.as_ref().unwrap();
285

qinsoon's avatar
qinsoon committed
286
        let ref immix_space = gccontext.immix_space;
287
        immix_space.sweep();
288

qinsoon's avatar
qinsoon committed
289
        let ref lo_space = gccontext.lo_space;
290
        lo_space.sweep();
291 292 293 294 295 296 297 298 299 300 301 302 303
    }
    
    objectmodel::flip_mark_state();
    trace!("GC finishes");
}

// Threshold related to switching tracing to multiple threads.
// NOTE(review): not referenced in this part of the file — confirm its use at
// call sites before documenting further.
pub const MULTI_THREAD_TRACE_THRESHOLD : usize = 10;

/// When a tracer's local queue grows beyond this, further edges are sent back
/// to the coordinator via the channel so other workers can pick them up
/// (see `steal_process_edge`).
pub const PUSH_BACK_THRESHOLD : usize = 50;
/// Number of GC worker threads; set by `init()`, read by `start_trace()`.
pub static GC_THREADS : atomic::AtomicUsize = atomic::AtomicUsize::new(0);

#[allow(unused_variables)]
#[inline(never)]
304
pub fn start_trace(work_stack: &mut Vec<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345
    // creates root deque
    let (mut worker, stealer) = deque();
    
    while !work_stack.is_empty() {
        worker.push(work_stack.pop().unwrap());
    }

    loop {
        let (sender, receiver) = channel::<ObjectReference>();        
        
        let mut gc_threads = vec![];
        for _ in 0..GC_THREADS.load(atomic::Ordering::SeqCst) {
            let new_immix_space = immix_space.clone();
            let new_lo_space = lo_space.clone();
            let new_stealer = stealer.clone();
            let new_sender = sender.clone();
            let t = thread::spawn(move || {
                start_steal_trace(new_stealer, new_sender, new_immix_space, new_lo_space);
            });
            gc_threads.push(t);
        }
        
        // only stealers own sender, when all stealers quit, the following loop finishes
        drop(sender);
        
        loop {
            let recv = receiver.recv();
            match recv {
                Ok(obj) => worker.push(obj),
                Err(_) => break
            }
        }
        
        match worker.try_pop() {
            Some(obj_ref) => worker.push(obj_ref),
            None => break
        }
    }
}

#[allow(unused_variables)]
346
fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
347 348 349
    use objectmodel;
    
    let mut local_queue = vec![];
350
    let mark_state = objectmodel::load_mark_state();
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365
    
    loop {
        let work = {
            if !local_queue.is_empty() {
                local_queue.pop().unwrap()
            } else {
                let work = stealer.steal();
                match work {
                    Steal::Empty => return,
                    Steal::Abort => continue,
                    Steal::Data(obj) => obj
                }
            }
        };
        
366
        steal_trace_object(work, &mut local_queue, &job_sender, mark_state, &immix_space, &lo_space);
367 368 369 370
    }
} 

/// Traces one object using the side-map object model: marks it (and its
/// immix line when it lives in the immix space), then follows its reference
/// fields as described by the ref bits stored in the owning space's alloc map.
///
/// New edges are queued via `steal_process_edge`. In debug builds, exits the
/// process (code 101) if `obj` is not a valid object in either space.
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    if cfg!(debug_assertions) {
        // check if this object in within the heap, if it is an object
        if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()){
            use std::process;
            
            println!("trying to trace an object that is not valid");
            println!("address: 0x{:x}", obj);
            println!("---");
            println!("immix space: {}", immix_space);
            println!("lo space: {}", lo_space);
            
            println!("invalid object during tracing");
            process::exit(101);
        }
    }
    
    let addr = obj.to_address();
    
    // mark the object in whichever space owns it, and remember that space's
    // alloc map and start address so the ref bits can be decoded below
    let (alloc_map, space_start) = if immix_space.addr_in_space(addr) {
        // mark object
        objectmodel::mark_as_traced(immix_space.trace_map(), immix_space.start(), obj, mark_state);

        // mark line
        immix_space.line_mark_table.mark_line_live(addr);

        (immix_space.alloc_map(), immix_space.start())
    } else if lo_space.addr_in_space(addr) {
        // mark object
        objectmodel::mark_as_traced(lo_space.trace_map(), lo_space.start(), obj, mark_state);
        trace!("mark object @ {} to {}", obj, mark_state);

        (lo_space.alloc_map(), lo_space.start())
    } else {
        println!("unexpected address: {}", addr);
        println!("immix space: {}", immix_space);
        println!("lo space   : {}", lo_space);

        panic!("error during tracing object")
    };
    
    // walk the object's ref bits; each byte describes up to REF_BITS_LEN
    // words starting at `base`, and the short-encode bit terminates the walk
    let mut base = addr;
    loop {
        // NOTE(review): the ref byte is looked up from `obj` on every
        // iteration, not from `base` — confirm get_ref_byte handles
        // multi-byte (long) encodings as intended.
        let value = objectmodel::get_ref_byte(alloc_map, space_start, obj);
        let (ref_bits, short_encode) = (bit_utils::lower_bits_u8(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit_u8(value, objectmodel::SHORT_ENCODE_BIT));
        match ref_bits {
            0b0000_0000 => {

            },
            0b0000_0001 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
            },
            0b0000_0011 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
            },
            0b0000_1111 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
            },
            _ => {
                error!("unexpected ref_bits patterns: {:b}", ref_bits);
                unimplemented!()
            }
        }

        // short encoding: this byte covered the whole object
        if short_encode {
            return;
        } else {
            base = base.plus(objectmodel::REF_BITS_LEN * POINTER_SIZE);
        }
    }
}

#[inline(always)]
449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    if cfg!(debug_assertions) {
        // check if this object in within the heap, if it is an object
        if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()){
            use std::process;

            println!("trying to trace an object that is not valid");
            println!("address: 0x{:x}", obj);
            println!("---");
            println!("immix space: {}", immix_space);
            println!("lo space: {}", lo_space);

            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    let addr = obj.to_address();

    // mark object
    objectmodel::mark_as_traced(obj, mark_state);

    if immix_space.addr_in_space(addr) {
        // mark line
        immix_space.line_mark_table.mark_line_live(addr);
    } else if lo_space.addr_in_space(addr) {
        // do nothing
    } else {
        println!("unexpected address: {}", addr);
        println!("immix space: {}", immix_space);
        println!("lo space   : {}", lo_space);

        panic!("error during tracing object")
    }

485 486 487
    // this part of code has some duplication with code in objectdump
    // FIXME: remove the duplicate code - use 'Tracer' trait

488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513
    let hdr = unsafe {addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};

    if objectmodel::header_is_fix_size(hdr) {
        // fix sized type
        if objectmodel::header_has_ref_map(hdr) {
            // has ref map
            let ref_map = objectmodel::header_get_ref_map(hdr);

            match ref_map {
                0 => {

                },
                0b0000_0001 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                }
                0b0000_0011 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                },
                0b0000_1111 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
                },
                _ => {
514 515 516 517 518 519 520 521 522 523 524 525
                    warn!("ref bits fall into slow path: {:b}", ref_map);

                    let mut i = 0;
                    while i < objectmodel::REF_MAP_LENGTH {
                        let has_ref : bool = ((ref_map >> i) & 1) == 1;

                        if has_ref {
                            steal_process_edge(addr, i * POINTER_SIZE, local_queue, job_sender, mark_state, immix_space, lo_space);
                        }

                        i += 1;
                    }
526 527 528 529
                }
            }
        } else {
            // by type ID
530 531 532 533 534 535 536 537
            let gctype_id = objectmodel::header_get_gctype_id(hdr);

            let gc_lock = MY_GC.read().unwrap();
            let gctype : Arc<GCType> = gc_lock.as_ref().unwrap().gc_types[gctype_id as usize].clone();

            for offset in gctype.gen_ref_offsets() {
                steal_process_edge(addr, offset, local_queue, job_sender, mark_state, immix_space, lo_space);
            }
538 539 540
        }
    } else {
        // hybrids
541
        let gctype_id = objectmodel::header_get_gctype_id(hdr);
qinsoon's avatar
qinsoon committed
542
        let var_length = objectmodel::header_get_hybrid_length(hdr);
543 544 545 546

        let gc_lock = MY_GC.read().unwrap();
        let gctype : Arc<GCType> = gc_lock.as_ref().unwrap().gc_types[gctype_id as usize].clone();

qinsoon's avatar
qinsoon committed
547
        for offset in gctype.gen_hybrid_ref_offsets(var_length) {
548 549
            steal_process_edge(addr, offset, local_queue, job_sender, mark_state, immix_space, lo_space);
        }
550 551 552 553 554
    }
}

/// Loads the reference stored at `base + offset` and, if it points to a
/// not-yet-traced object in either space, queues it for tracing — on
/// `local_queue` normally, or through `job_sender` once the local queue
/// exceeds `PUSH_BACK_THRESHOLD` (side-map object model variant).
///
/// Null (zero) references are ignored. In debug builds, exits the process
/// (code 101) if the edge is non-null but not a valid object.
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    let field_addr = base.plus(offset);
    let edge = unsafe{field_addr.load::<ObjectReference>()};
    
    if cfg!(debug_assertions) {
        use std::process;
        // check if this object in within the heap, if it is an object
        if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {
            println!("trying to follow an edge that is not a valid object");
            println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
            println!("base address: 0x{:x}", base);
            println!("---");
            if immix_space.addr_in_space(base) {
                objectmodel::print_object(base, immix_space.start(), immix_space.trace_map(), immix_space.alloc_map());
                println!("---");
                println!("immix space:{}", immix_space);
            } else if lo_space.addr_in_space(base) {
                objectmodel::print_object(base, lo_space.start(), lo_space.trace_map(), lo_space.alloc_map());
                println!("---");
                println!("lo space:{}", lo_space);
            } else {
                println!("not in immix/lo space")
            }
            
            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    // skip null references; skip objects already traced this cycle
    if !edge.to_address().is_zero() {
        if immix_space.addr_in_space(edge.to_address()) && !objectmodel::is_traced(immix_space.trace_map(), immix_space.start(), edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        } else if lo_space.addr_in_space(edge.to_address()) && !objectmodel::is_traced(lo_space.trace_map(), lo_space.start(), edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        }
    }
}

#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    let field_addr = base.plus(offset);
    let edge = unsafe {field_addr.load::<ObjectReference>()};

    if cfg!(debug_assertions) {
        use std::process;
        // check if this object in within the heap, if it is an object
        if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {
            println!("trying to follow an edge that is not a valid object");
            println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
            println!("base address: 0x{:x}", base);
            println!("---");
            if immix_space.addr_in_space(base) {
                objectmodel::print_object(base);
                objectmodel::print_object(edge.to_address());
                println!("---");
                println!("immix space:{}", immix_space);
            } else if lo_space.addr_in_space(base) {
                objectmodel::print_object(base);
                println!("---");
                println!("lo space:{}", lo_space);
            } else {
                println!("not in immix/lo space")
            }

            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    if !edge.to_address().is_zero() {
        if !objectmodel::is_traced(edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        }
    }
642
}