// Copyright 2017 The Australian National University
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
//     http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use heap::immix::MUTATORS;
use heap::immix::N_MUTATORS;
use heap::immix::ImmixMutatorLocal;
use heap::immix::ImmixSpace;
use heap::freelist::FreeListSpace;
use objectmodel;
use common::gctype::*;
use heap::Space;
use MY_GC;

use utils::{Address, ObjectReference};
use utils::POINTER_SIZE;

use std::sync::atomic::{AtomicIsize, AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Condvar, RwLock};

use crossbeam::sync::chase_lev::*;
use std::sync::mpsc;
use std::sync::mpsc::channel;
use std::thread;

use std::sync::atomic;

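// Shared stop-the-world state: STW_COND counts parked mutators (the
// controller polls the count, and the condvar wakes everyone once GC is
// done), while ROOTS collects the root set for the current collection.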
lazy_static! {
    static ref STW_COND : Arc<(Mutex<usize>, Condvar)> = {
        Arc::new((Mutex::new(0), Condvar::new()))
    };
    
    static ref ROOTS : RwLock<Vec<ObjectReference>> = RwLock::new(vec![]);
}

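/// Global switch: `gc()` panics if a collection is triggered while this is false.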
pub static ENABLE_GC : AtomicBool = atomic::ATOMIC_BOOL_INIT;

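// One mutator per collection wins the CAS in `sync_barrier` and becomes the
// controller; the slot holds its id, or NO_CONTROLLER between collections.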
static CONTROLLER : AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
const  NO_CONTROLLER : isize    = -1;

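/// Initialises the collector: clears the controller slot and records how
/// many worker threads `start_trace` may spawn.
///
/// A minimal setup sketch (hypothetical call site; wiring up `MY_GC` and the
/// mutators happens elsewhere):
///
/// ```ignore
/// init(8);                                  // trace with 8 GC threads
/// ENABLE_GC.store(true, Ordering::SeqCst);  // allow collections to run
/// ```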
pub fn init(n_gcthreads: usize) {
    CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);

    GC_THREADS.store(n_gcthreads, Ordering::SeqCst);
}

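/// Requests a collection by setting the yield flag on every registered
/// mutator; each mutator is expected to observe the flag and enter
/// `sync_barrier`.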
pub fn trigger_gc() {
    trace!("Triggering GC...");
    
    for mut m in MUTATORS.write().unwrap().iter_mut() {
        if m.is_some() {
            m.as_mut().unwrap().set_take_yield(true);
        }
    }
}

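// Platform-specific helpers from the C support library: the current stack
// pointer, the low water mark recorded via `set_low_water_mark` (the upper
// bound for conservative stack scanning), and a dump of the current
// register contents.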
use std::os::raw::c_void;
#[cfg(target_arch = "x86_64")]
#[link(name = "gc_clib_x64")]
extern "C" {
    pub fn malloc_zero(size: usize) -> *const c_void;
    fn immmix_get_stack_ptr() -> Address;
    pub fn set_low_water_mark();
    fn get_low_water_mark() -> Address;
    fn get_registers() -> *const Address;
    fn get_registers_count() -> i32;
}

#[cfg(target_arch = "aarch64")]
#[link(name = "gc_clib_aarch64")]
extern "C" {
    pub fn malloc_zero(size: usize) -> *const c_void;
    fn immmix_get_stack_ptr() -> Address;
    pub fn set_low_water_mark();
    fn get_low_water_mark() -> Address;
    fn get_registers() -> *const Address;
    fn get_registers_count() -> i32;
}

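/// Conservative stack and register scan for the current thread: every
/// pointer-sized word between the stack pointer and the low water mark,
/// plus each saved register, that is a valid object in the immix or
/// large-object space is returned as a root.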
pub fn stack_scan() -> Vec<ObjectReference> {
    trace!("stack scanning...");
    let stack_ptr : Address = unsafe {immmix_get_stack_ptr()};
    
    if cfg!(debug_assertions) {
        if !stack_ptr.is_aligned_to(8) {
            use std::process;
            println!("trying to scanning stack, however the current stack pointer is 0x{:x}, which is not aligned to 8bytes", stack_ptr);
            process::exit(102);
        }
    }
    
    let low_water_mark : Address = unsafe {get_low_water_mark()};
    
    let mut cursor = stack_ptr;
    let mut ret = vec![];

    let gccontext_guard = MY_GC.read().unwrap();
    let gccontext = gccontext_guard.as_ref().unwrap();

    let immix_space = gccontext.immix_space.clone();
    let lo_space = gccontext.lo_space.clone();
    
    while cursor < low_water_mark {
        let value : Address = unsafe {cursor.load::<Address>()};
        
        if immix_space.is_valid_object(value) || lo_space.is_valid_object(value) {
            ret.push(unsafe {value.to_object_reference()});
        }
        
        cursor = cursor + POINTER_SIZE;
    }
    
    let roots_from_stack = ret.len();
    
    let registers_count = unsafe {get_registers_count()};
    let registers = unsafe {get_registers()};
    
    for i in 0..registers_count {
        let value = unsafe {*registers.offset(i as isize)};
        
        if immix_space.is_valid_object(value) || lo_space.is_valid_object(value) {
            ret.push(unsafe {value.to_object_reference()});
        }
    }
    
    let roots_from_registers = ret.len() - roots_from_stack;
    
    trace!("roots: {} from stack, {} from registers", roots_from_stack, roots_from_registers);
    
    ret
}

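/// Stop-the-world rendezvous. The first mutator to arrive wins the CAS and
/// becomes the controller: it gathers roots, waits for the remaining
/// mutators to park, runs `gc()`, and then releases everyone. Every other
/// mutator scans its own stack and blocks on STW_COND until the controller
/// finishes.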
#[inline(never)]
pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
    let controller_id = CONTROLLER.compare_and_swap(NO_CONTROLLER, mutator.id() as isize, Ordering::SeqCst);
    
    trace!("Mutator{} saw the controller is {}", mutator.id(), controller_id);
    
    // prepare the mutator for gc - return the current block (if it has one)
    mutator.prepare_for_gc();
    
    // user thread call back to prepare for gc
//    USER_THREAD_PREPARE_FOR_GC.read().unwrap()();
    
    if controller_id != NO_CONTROLLER {
        // scan its stack
        {
            let mut thread_roots = stack_scan();
            ROOTS.write().unwrap().append(&mut thread_roots);
        }

        // this thread will block
        block_current_thread(mutator);
        
        // reset current mutator
        mutator.reset_after_gc();
    } else {
        // this thread is controller
        // other threads should block

        // init roots
        {
            let mut roots = ROOTS.write().unwrap();
            // clear existing roots (roots from last gc)
            roots.clear();

            // add explicit roots
            let gc = MY_GC.read().unwrap();
            for objref in gc.as_ref().unwrap().roots.iter() {
                roots.push(*objref);
            }

            // scan its stack
            let mut thread_roots = stack_scan();
            roots.append(&mut thread_roots);
        }
        
        // wait for all mutators to be blocked
        let &(ref lock, ref cvar) = &*STW_COND.clone();
        let mut count = 0;
        
        trace!("expect {} mutators to park", *N_MUTATORS.read().unwrap() - 1);
        while count < *N_MUTATORS.read().unwrap() - 1 {
            let new_count = {*lock.lock().unwrap()};
            if new_count != count {
                count = new_count;
                trace!("count = {}", count);
            }
        }
        
        trace!("everyone stopped, gc will start");
        
        // roots->trace->sweep
        gc();
        
        // mutators will resume
        CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
        for mut t in MUTATORS.write().unwrap().iter_mut() {
            if t.is_some() {
                let t_mut = t.as_mut().unwrap();
                t_mut.set_take_yield(false);
                t_mut.set_still_blocked(false);
            }
        }
        // every mutator thread will reset themselves, so only reset current mutator here
        mutator.reset_after_gc();

        // resume
        {
            let mut count = lock.lock().unwrap();
            *count = 0;
            cvar.notify_all();
        }
    }
}

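/// Parks a non-controller mutator: increments the parked count under
/// STW_COND's lock, then waits on the condvar until the controller clears
/// `still_blocked`.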
fn block_current_thread(mutator: &mut ImmixMutatorLocal) {
    trace!("Mutator{} blocked", mutator.id());
    
    let &(ref lock, ref cvar) = &*STW_COND.clone();
    let mut count = lock.lock().unwrap();
    *count += 1;
    
    mutator.global.set_still_blocked(true);
    
    while mutator.global.is_still_blocked() {
        count = cvar.wait(count).unwrap();
    }
    
    trace!("Mutator{} unblocked", mutator.id());
}

pub static GC_COUNT : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;

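/// The collection proper, run on the controller thread while all other
/// mutators are parked: trace from ROOTS, sweep the immix and large-object
/// spaces, then flip the global mark state for the next cycle.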
fn gc() {
    if !ENABLE_GC.load(Ordering::SeqCst) {
        panic!("Triggering GC when GC is disabled");
    }

    GC_COUNT.fetch_add(1, atomic::Ordering::SeqCst);
    
    trace!("GC starts");
    
    // take exclusive access to the roots collected during the pause
    let mut roots : &mut Vec<ObjectReference> = &mut ROOTS.write().unwrap();
    trace!("total roots: {}", roots.len());
    
    // mark & trace
    {
        let gccontext_guard = MY_GC.read().unwrap();
        let gccontext = gccontext_guard.as_ref().unwrap();
        let (immix_space, lo_space) = (&gccontext.immix_space, &gccontext.lo_space);
        
        start_trace(&mut roots, immix_space.clone(), lo_space.clone());
    }
    
    trace!("trace done");
    
    // sweep
    {
        let gccontext_guard = MY_GC.read().unwrap();
        let gccontext = gccontext_guard.as_ref().unwrap();

        let ref immix_space = gccontext.immix_space;
        immix_space.sweep();

        let ref lo_space = gccontext.lo_space;
        lo_space.sweep();
    }
    
    objectmodel::flip_mark_state();
    trace!("GC finishes");
}

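// Tracing tunables: PUSH_BACK_THRESHOLD caps a worker's local mark queue
// before overflow is sent back to the shared channel, and GC_THREADS is the
// worker count set in `init`. (MULTI_THREAD_TRACE_THRESHOLD is not
// consulted in this file.)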
pub const MULTI_THREAD_TRACE_THRESHOLD : usize = 10;

pub const PUSH_BACK_THRESHOLD : usize = 50;
pub static GC_THREADS : atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;

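/// Parallel trace driver: moves the roots onto a chase-lev work-stealing
/// deque, spawns GC_THREADS stealer threads, and feeds any overflow they
/// send back over the mpsc channel into the deque again, repeating until no
/// work remains.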
#[allow(unused_variables)]
#[inline(never)]
pub fn start_trace(work_stack: &mut Vec<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
    // create the work-stealing deque and seed it with the roots
    let (mut worker, stealer) = deque();
    
    while !work_stack.is_empty() {
        worker.push(work_stack.pop().unwrap());
    }

    loop {
        let (sender, receiver) = channel::<ObjectReference>();        
        
        let mut gc_threads = vec![];
        for _ in 0..GC_THREADS.load(atomic::Ordering::SeqCst) {
            let new_immix_space = immix_space.clone();
            let new_lo_space = lo_space.clone();
            let new_stealer = stealer.clone();
            let new_sender = sender.clone();
            let t = thread::spawn(move || {
                start_steal_trace(new_stealer, new_sender, new_immix_space, new_lo_space);
            });
            gc_threads.push(t);
        }
        
        // only the stealers now own senders; once they all quit, the
        // receive loop below finishes
        drop(sender);
        
        loop {
            let recv = receiver.recv();
            match recv {
                Ok(obj) => worker.push(obj),
                Err(_) => break
            }
        }
        
        match worker.try_pop() {
            Some(obj_ref) => worker.push(obj_ref),
            None => break
        }
    }
}

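/// Worker loop for one GC thread: pop from the local queue when possible,
/// otherwise steal from the shared deque; return once the deque is empty.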
#[allow(unused_variables)]
fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<ObjectReference>, immix_space: Arc<ImmixSpace>, lo_space: Arc<FreeListSpace>) {
    use objectmodel;
    
    let mut local_queue = vec![];
    let mark_state = objectmodel::load_mark_state();
    
    loop {
        let work = {
            if !local_queue.is_empty() {
                local_queue.pop().unwrap()
            } else {
                let work = stealer.steal();
                match work {
                    Steal::Empty => return,
                    Steal::Abort => continue,
                    Steal::Data(obj) => obj
                }
            }
        };
        
        steal_trace_object(work, &mut local_queue, &job_sender, mark_state, &immix_space, &lo_space);
    }
} 

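/// Traces one object under the side-map object model: marks the object (and
/// its line, if it lives in the immix space), then decodes the per-object
/// ref bits from the alloc map to follow outgoing references.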
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    if cfg!(debug_assertions) {
        // check that this object is within the heap and is a valid object
        if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()) {
            use std::process;
            
            println!("trying to trace an object that is not valid");
            println!("address: 0x{:x}", obj);
            println!("---");
            println!("immix space: {}", immix_space);
            println!("lo space: {}", lo_space);
            
            println!("invalid object during tracing");
            process::exit(101);
        }
    }
    
    let addr = obj.to_address();
    
    let (alloc_map, space_start) = if immix_space.addr_in_space(addr) {
        // mark object
        objectmodel::mark_as_traced(immix_space.trace_map(), immix_space.start(), obj, mark_state);

        // mark line
        immix_space.line_mark_table.mark_line_live(addr);

        (immix_space.alloc_map(), immix_space.start())
    } else if lo_space.addr_in_space(addr) {
        // mark object
        objectmodel::mark_as_traced(lo_space.trace_map(), lo_space.start(), obj, mark_state);
        trace!("mark object @ {} to {}", obj, mark_state);

        (lo_space.alloc_map(), lo_space.start())
    } else {
        println!("unexpected address: {}", addr);
        println!("immix space: {}", immix_space);
        println!("lo space   : {}", lo_space);

        panic!("error during tracing object")
    };
    
    let mut base = addr;
    loop {
        let value = objectmodel::get_ref_byte(alloc_map, space_start, obj);
        let (ref_bits, short_encode) = (bit_utils::lower_bits_u8(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit_u8(value, objectmodel::SHORT_ENCODE_BIT));
        match ref_bits {
            0b0000_0000 => {

            },
            0b0000_0001 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
            },            
            0b0000_0011 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
            },
            0b0000_1111 => {
                steal_process_edge(base, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
                steal_process_edge(base, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
            },            
            _ => {
                error!("unexpected ref_bits patterns: {:b}", ref_bits);
                unimplemented!()
            }
        }

        if short_encode {
            return;
        } else {
            base = base.plus(objectmodel::REF_BITS_LEN * POINTER_SIZE);
        } 
    }
}

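/// Traces one object under the in-header object model: the header word
/// either embeds a small ref map directly, or names a GCType (by id) whose
/// ref offsets are generated; hybrids additionally carry a variable-part
/// length in the header.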
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    if cfg!(debug_assertions) {
        // check that this object is within the heap and is a valid object
        if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()) {
            use std::process;

            println!("trying to trace an object that is not valid");
            println!("address: 0x{:x}", obj);
            println!("---");
            println!("immix space: {}", immix_space);
            println!("lo space: {}", lo_space);

            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    let addr = obj.to_address();

    // mark object
    objectmodel::mark_as_traced(obj, mark_state);

    if immix_space.addr_in_space(addr) {
        // mark line
        immix_space.line_mark_table.mark_line_live(addr);
    } else if lo_space.addr_in_space(addr) {
        // do nothing
    } else {
        println!("unexpected address: {}", addr);
        println!("immix space: {}", immix_space);
        println!("lo space   : {}", lo_space);

        panic!("error during tracing object")
    }

    // this part of the code duplicates logic in objectdump
    // FIXME: remove the duplicate code - use 'Tracer' trait

    let hdr = unsafe {(addr + objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};

    if objectmodel::header_is_fix_size(hdr) {
        // fix sized type
        if objectmodel::header_has_ref_map(hdr) {
            // has ref map
            let ref_map = objectmodel::header_get_ref_map(hdr);

            match ref_map {
                0 => {

                },
                0b0000_0001 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                }
                0b0000_0011 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                },
                0b0000_1111 => {
                    steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
                    steal_process_edge(addr, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
                },
                _ => {
                    warn!("ref bits fall into slow path: {:b}", ref_map);

                    let mut i = 0;
                    while i < objectmodel::REF_MAP_LENGTH {
                        let has_ref : bool = ((ref_map >> i) & 1) == 1;

                        if has_ref {
                            steal_process_edge(addr, i * POINTER_SIZE, local_queue, job_sender, mark_state, immix_space, lo_space);
                        }

                        i += 1;
                    }
                }
            }
        } else {
            // by type ID
            let gctype_id = objectmodel::header_get_gctype_id(hdr);

            let gc_lock = MY_GC.read().unwrap();
            let gctype : Arc<GCType> = gc_lock.as_ref().unwrap().gc_types[gctype_id as usize].clone();

            for offset in gctype.gen_ref_offsets() {
                steal_process_edge(addr, offset, local_queue, job_sender, mark_state, immix_space, lo_space);
            }
        }
    } else {
        // hybrids
        let gctype_id = objectmodel::header_get_gctype_id(hdr);
        let var_length = objectmodel::header_get_hybrid_length(hdr);

        let gc_lock = MY_GC.read().unwrap();
        let gctype : Arc<GCType> = gc_lock.as_ref().unwrap().gc_types[gctype_id as usize].clone();

        for offset in gctype.gen_hybrid_ref_offsets(var_length) {
            steal_process_edge(addr, offset, local_queue, job_sender, mark_state, immix_space, lo_space);
        }
    }
}

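/// Follows one edge: loads the reference at `base + offset` and, if it is
/// non-null and not yet marked with `mark_state`, queues it: locally while
/// the queue is short, otherwise back to the shared channel.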
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    let field_addr = base.plus(offset);
    let edge = unsafe{field_addr.load::<ObjectReference>()};
    
    if cfg!(debug_assertions) {
        use std::process;        
        // check that this edge points within the heap and at a valid object
        if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {
            println!("trying to follow an edge that is not a valid object");
            println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
            println!("base address: 0x{:x}", base);
            println!("---");
            if immix_space.addr_in_space(base) {
                objectmodel::print_object(base, immix_space.start(), immix_space.trace_map(), immix_space.alloc_map());
                println!("---");
                println!("immix space:{}", immix_space);
            } else if lo_space.addr_in_space(base) {
                objectmodel::print_object(base, lo_space.start(), lo_space.trace_map(), lo_space.alloc_map());
                println!("---");
                println!("lo space:{}", lo_space);
            } else {
                println!("not in immix/lo space")
            }
            
            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    if !edge.to_address().is_zero() {
        if immix_space.addr_in_space(edge.to_address()) && !objectmodel::is_traced(immix_space.trace_map(), immix_space.start(), edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        } else if lo_space.addr_in_space(edge.to_address()) && !objectmodel::is_traced(lo_space.trace_map(), lo_space.start(), edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        }
    }
}

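/// Non-side-map variant of the edge tracer: identical queueing logic, but
/// the mark bit lives in the object header, so `is_traced` needs only the
/// reference itself.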
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
    let field_addr = base + offset;
    let edge = unsafe {field_addr.load::<ObjectReference>()};

    if cfg!(debug_assertions) {
        use std::process;
        // check that this edge points within the heap and at a valid object
        if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {
            println!("trying to follow an edge that is not a valid object");
            println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
            println!("base address: 0x{:x}", base);
            println!("---");
            if immix_space.addr_in_space(base) {
                objectmodel::print_object(base);
                objectmodel::print_object(edge.to_address());
                println!("---");
                println!("immix space:{}", immix_space);
            } else if lo_space.addr_in_space(base) {
                objectmodel::print_object(base);
                println!("---");
                println!("lo space:{}", lo_space);
            } else {
                println!("not in immix/lo space")
            }

            println!("invalid object during tracing");
            process::exit(101);
        }
    }

    if !edge.to_address().is_zero() {
        if !objectmodel::is_traced(edge, mark_state) {
            if local_queue.len() >= PUSH_BACK_THRESHOLD {
                job_sender.send(edge).unwrap();
            } else {
                local_queue.push(edge);
            }
        }
    }
}