Commit 15a77cb1 authored by qinsoon's avatar qinsoon

add an implementation using header, gcbench not working

parent 2c140a07
Pipeline #237 failed in 24 minutes and 25 seconds
@@ -7,6 +7,10 @@ build = "build.rs"
[lib]
crate-type = ["rlib"]
[features]
default = []
use-sidemap = []
[build-dependencies]
gcc = "0.3"
......
@@ -66,6 +66,8 @@ impl FreeListSpace {
// every block is BLOCK_SIZE-aligned, so we usually do not need extra alignment
assert!(BLOCK_SIZE % align == 0);
let size = size + objectmodel::OBJECT_HEADER_SIZE;
let blocks_needed = if size % BLOCK_SIZE == 0 {
size / BLOCK_SIZE
} else {
@@ -90,16 +92,40 @@ impl FreeListSpace {
trace!("after allocation, space: {}", self);
}
res
if res.is_zero() {
res
} else {
res.offset(-objectmodel::OBJECT_HEADER_OFFSET)
}
}
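A toy sketch of the arithmetic this hunk introduces (constants and helper names are ours, not the repo's): grow the request by the header size, round up to whole blocks, and hand back a reference just past the header.

```rust
const BLOCK_SIZE: usize = 4096; // assumption for illustration
const HEADER: usize = 8;        // models objectmodel::OBJECT_HEADER_SIZE

// Ceiling division, equivalent to the if/else in the diff above.
fn blocks_needed(user_size: usize) -> usize {
    let size = user_size + HEADER;
    (size + BLOCK_SIZE - 1) / BLOCK_SIZE
}

// The allocator returns `res + HEADER`: the header sits at the raw start,
// the object reference points just past it (OBJECT_HEADER_OFFSET = -8).
fn object_ref(raw_alloc: usize) -> usize {
    raw_alloc + HEADER
}

fn main() {
    assert_eq!(blocks_needed(100), 1);
    assert_eq!(blocks_needed(BLOCK_SIZE), 2); // the header pushes it over
    assert_eq!(blocks_needed(BLOCK_SIZE - HEADER), 1);
    assert_eq!(object_ref(0x1000), 0x1008);
}
```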
pub fn init_object(&self, addr: Address, encode: u8) {
#[cfg(feature = "use-sidemap")]
pub fn init_object(&self, addr: Address, encode: u64) {
unsafe {
*self.alloc_map().offset((addr.diff(self.start) >> LOG_POINTER_SIZE) as isize) = encode;
*self.alloc_map().offset((addr.diff(self.start) >> LOG_POINTER_SIZE) as isize) = encode as u8;
objectmodel::mark_as_untraced(self.trace_map(), self.start, addr, objectmodel::load_mark_state());
}
}
#[cfg(not(feature = "use-sidemap"))]
pub fn init_object(&self, addr: Address, encode: u64) {
unsafe {
addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
#[inline(always)]
#[cfg(feature = "use-sidemap")]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
objectmodel::is_traced(self.trace_map(), self.start, unsafe { addr.to_object_reference() }, mark_state)
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
objectmodel::is_traced(unsafe{addr.to_object_reference()}, mark_state)
}
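The pattern above recurs throughout this commit: one signature gets two bodies, selected at compile time by the `use-sidemap` feature declared in the Cargo.toml hunk. A minimal standalone illustration:

```rust
// Two compile-time variants of one function; only one is ever compiled in.
#[cfg(feature = "use-sidemap")]
fn metadata_location() -> &'static str {
    "side map (bitmaps kept beside the heap)"
}

#[cfg(not(feature = "use-sidemap"))]
fn metadata_location() -> &'static str {
    "in-object header (one word before each object)"
}

fn main() {
    // build with `cargo run --features use-sidemap` to flip the variant
    println!("object metadata lives in: {}", metadata_location());
}
```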
pub fn sweep(&self) {
trace!("going to sweep treadmill space");
if TRACE_TREADMILL {
@@ -127,7 +153,9 @@ impl FreeListSpace {
nodes_scanned += 1;
if objectmodel::is_traced(trace_map, self.start, unsafe { addr.to_object_reference() }, mark_state) {
let traced = self.is_traced(addr, mark_state);
if traced {
// this object is alive
alive_nodes_scanned += 1;
@@ -246,6 +274,16 @@ impl Treadmill {
fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
let ref from_space = self.spaces[self.from];
if self.from_space_next + n_blocks <= from_space.len() {
// zero blocks
for i in 0..n_blocks {
let block_i = self.from_space_next + i;
let block_start = from_space[block_i].payload;
Treadmill::zeroing_block(block_start);
}
// return first block
// FIXME: the blocks may not be contiguous, so we cannot hand out multiple blocks as one allocation
let ret = from_space[self.from_space_next].payload;
self.from_space_next += n_blocks;
@@ -254,6 +292,14 @@ impl Treadmill {
unsafe {Address::zero()}
}
}
fn zeroing_block(start: Address) {
use utils::mem::memsec;
unsafe {
memsec::memzero(start.to_ptr_mut::<u8>(), BLOCK_SIZE);
}
}
}
impl fmt::Display for Treadmill {
@@ -308,7 +354,7 @@ mod tests {
let space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..10 {
let ret = space.alloc(BLOCK_SIZE, 8);
let ret = space.alloc(BLOCK_SIZE / 2, 8);
println!("Allocation{}: {}", i, ret);
}
}
@@ -329,7 +375,7 @@ mod tests {
let space = FreeListSpace::new(BLOCK_SIZE * 10);
for i in 0..20 {
let ret = space.alloc(BLOCK_SIZE, 8);
let ret = space.alloc(BLOCK_SIZE / 2, 8);
println!("Allocation{}: {}", i, ret);
}
}
......
@@ -122,14 +122,16 @@ pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
// prepare the mutator for gc - return the current block (if it has one)
mutator.prepare_for_gc();
// scan its stack
let mut thread_roots = stack_scan();
ROOTS.write().unwrap().append(&mut thread_roots);
// user-thread callback to prepare for gc
// USER_THREAD_PREPARE_FOR_GC.read().unwrap()();
if controller_id != NO_CONTROLLER {
// scan its stack
{
let mut thread_roots = stack_scan();
ROOTS.write().unwrap().append(&mut thread_roots);
}
// this thread will block
block_current_thread(mutator);
@@ -138,6 +140,23 @@ pub fn sync_barrier(mutator: &mut ImmixMutatorLocal) {
} else {
// this thread is controller
// other threads should block
// init roots
{
let mut roots = ROOTS.write().unwrap();
// clear existing roots (roots from last gc)
roots.clear();
// add explicit roots
let gc = MY_GC.read().unwrap();
for objref in gc.as_ref().unwrap().roots.iter() {
roots.push(*objref);
}
// scan the controller's own stack
let mut thread_roots = stack_scan();
roots.append(&mut thread_roots);
}
// wait for all mutators to be blocked
let &(ref lock, ref cvar) = &*STW_COND.clone();
@@ -203,6 +222,7 @@ fn gc() {
// create the root deque
let mut roots : &mut Vec<ObjectReference> = &mut ROOTS.write().unwrap();
trace!("total roots: {}", roots.len());
// mark & trace
{
@@ -305,6 +325,7 @@ fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender:mpsc::Sender<
}
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
if cfg!(debug_assertions) {
// check that this object is within the heap and is a valid object
@@ -349,7 +370,7 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
let mut base = addr;
loop {
let value = objectmodel::get_ref_byte(alloc_map, space_start, obj);
let (ref_bits, short_encode) = (bit_utils::lower_bits(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit(value, objectmodel::SHORT_ENCODE_BIT));
let (ref_bits, short_encode) = (bit_utils::lower_bits_u8(value, objectmodel::REF_BITS_LEN), bit_utils::test_nth_bit_u8(value, objectmodel::SHORT_ENCODE_BIT));
match ref_bits {
0b0000_0000 => {
@@ -382,6 +403,84 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
if cfg!(debug_assertions) {
// check that this object is within the heap and is a valid object
if !immix_space.is_valid_object(obj.to_address()) && !lo_space.is_valid_object(obj.to_address()) {
use std::process;
println!("trying to trace an object that is not valid");
println!("address: 0x{:x}", obj);
println!("---");
println!("immix space: {}", immix_space);
println!("lo space: {}", lo_space);
println!("invalid object during tracing");
process::exit(101);
}
}
let addr = obj.to_address();
// mark object
objectmodel::mark_as_traced(obj, mark_state);
if immix_space.addr_in_space(addr) {
// mark line
immix_space.line_mark_table.mark_line_live(addr);
} else if lo_space.addr_in_space(addr) {
// do nothing
} else {
println!("unexpected address: {}", addr);
println!("immix space: {}", immix_space);
println!("lo space : {}", lo_space);
panic!("error during tracing object")
}
let hdr = unsafe {addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
if objectmodel::header_is_fix_size(hdr) {
// fix-sized type
if objectmodel::header_has_ref_map(hdr) {
// has ref map
let ref_map = objectmodel::header_get_ref_map(hdr);
match ref_map {
0 => {
},
0b0000_0001 => {
steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
}
0b0000_0011 => {
steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
},
0b0000_1111 => {
steal_process_edge(addr, 0, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(addr, 8, local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(addr, 16,local_queue, job_sender, mark_state, immix_space, lo_space);
steal_process_edge(addr, 24,local_queue, job_sender, mark_state, immix_space, lo_space);
},
_ => {
error!("unexpected ref map pattern: {:b}", ref_map);
unimplemented!()
}
}
} else {
// by type ID
unimplemented!()
}
} else {
// hybrids
unimplemented!()
}
}
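The match above only handles the ref maps 0b1, 0b11 and 0b1111 and bails out on anything else. A sketch of how it could generalize (a bit-scanning loop of our own devising, not code from this repo): visit the field at each set bit's word offset.

```rust
const POINTER_SIZE: usize = 8;

// Visit every reference field recorded in a 32-bit ref map: bit i set means
// the word at byte offset i * POINTER_SIZE holds a reference.
fn trace_ref_map(base: usize, mut ref_map: u32, mut visit: impl FnMut(usize)) {
    while ref_map != 0 {
        let i = ref_map.trailing_zeros() as usize; // next reference field
        visit(base + i * POINTER_SIZE);            // stands in for steal_process_edge
        ref_map &= ref_map - 1;                    // clear the lowest set bit
    }
}

fn main() {
    // ref map 0b1011: fields at byte offsets 0, 8 and 24 hold references
    let mut offsets = Vec::new();
    trace_ref_map(0x1000, 0b1011, |addr| offsets.push(addr - 0x1000));
    assert_eq!(offsets, vec![0, 8, 24]);
}
```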
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
let field_addr = base.plus(offset);
let edge = unsafe{field_addr.load::<ObjectReference>()};
@@ -426,4 +525,47 @@ pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<Obj
}
}
}
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
let field_addr = base.plus(offset);
let edge = unsafe {field_addr.load::<ObjectReference>()};
if cfg!(debug_assertions) {
use std::process;
// check that this object is within the heap and is a valid object
if !edge.to_address().is_zero() && !immix_space.is_valid_object(edge.to_address()) && !lo_space.is_valid_object(edge.to_address()) {
println!("trying to follow an edge that is not a valid object");
println!("edge address: 0x{:x} from 0x{:x}", edge, field_addr);
println!("base address: 0x{:x}", base);
println!("---");
if immix_space.addr_in_space(base) {
objectmodel::print_object(base);
objectmodel::print_object(edge.to_address());
println!("---");
println!("immix space:{}", immix_space);
} else if lo_space.addr_in_space(base) {
objectmodel::print_object(base);
println!("---");
println!("lo space:{}", lo_space);
} else {
println!("not in immix/lo space")
}
println!("invalid object during tracing");
process::exit(101);
}
}
if !edge.to_address().is_zero() {
if !objectmodel::is_traced(edge, mark_state) {
if local_queue.len() >= PUSH_BACK_THRESHOLD {
job_sender.send(edge).unwrap();
} else {
local_queue.push(edge);
}
}
}
}
\ No newline at end of file
@@ -144,7 +144,9 @@ impl ImmixMutatorLocal {
pub fn alloc(&mut self, size: usize, align: usize) -> Address {
// reserve space for the object header before computing the allocation end,
// otherwise the cursor bump undersizes the allocation by the header word
let size = size + objectmodel::OBJECT_HEADER_SIZE;
let start = self.cursor.align_up(align);
let end = start.plus(size);
if end > self.limit {
let ret = self.try_alloc_from_local(size, align);
@@ -156,7 +158,7 @@ impl ImmixMutatorLocal {
}
}
ret
ret.offset(-objectmodel::OBJECT_HEADER_OFFSET)
} else {
if cfg!(debug_assertions) {
if !start.is_aligned_to(align) {
@@ -167,21 +169,25 @@ impl ImmixMutatorLocal {
}
self.cursor = end;
start
start.offset(-objectmodel::OBJECT_HEADER_OFFSET)
}
}
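A self-contained model (types and numbers are ours) of the fast path above: align the cursor, reserve the user size plus the header word, bump the cursor, and return a reference past the header; exceeding the limit falls through to the slow path.

```rust
struct Bump {
    cursor: usize,
    limit: usize,
}

impl Bump {
    fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
        const HEADER: usize = 8; // models objectmodel::OBJECT_HEADER_SIZE
        let start = (self.cursor + align - 1) & !(align - 1); // align_up
        let end = start + size + HEADER;
        if end > self.limit {
            None // slow path: fetch a fresh block
        } else {
            self.cursor = end;
            Some(start + HEADER) // object reference points past the header
        }
    }
}

fn main() {
    let mut b = Bump { cursor: 0x1000, limit: 0x2000 };
    assert_eq!(b.alloc(16, 8), Some(0x1008));
    assert_eq!(b.cursor, 0x1018);
}
```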
#[inline(always)]
pub fn init_object(&mut self, addr: Address, encode: u8) {
#[cfg(feature = "use-sidemap")]
pub fn init_object(&mut self, addr: Address, encode: u64) {
unsafe {
*self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode;
*self.alloc_map.offset((addr.diff(self.space_start) >> LOG_POINTER_SIZE) as isize) = encode as u8;
objectmodel::mark_as_untraced(self.trace_map, self.space_start, addr, self.mark_state);
}
}
#[inline(never)]
pub fn init_object_no_inline(&mut self, addr: Address, encode: u8) {
self.init_object(addr, encode);
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn init_object(&mut self, addr: Address, encode: u64) {
unsafe {
addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
#[inline(never)]
@@ -231,7 +237,10 @@ impl ImmixMutatorLocal {
let new_block : Option<Box<ImmixBlock>> = self.space.get_next_usable_block();
match new_block {
Some(b) => {
Some(mut b) => {
// zero the block
b.lazy_zeroing();
self.block = Some(b);
self.cursor = self.block().start();
self.limit = self.block().start();
......
@@ -5,6 +5,7 @@ use common::AddressMap;
use heap::gc::malloc_zero;
use utils::mem::memmap;
use utils::mem::memsec;
use std::*;
use std::collections::LinkedList;
@@ -62,6 +63,10 @@ impl LineMarkTable {
debug_assert!(index <= self.len);
unsafe {*self.ptr.offset(index as isize) = value};
}
pub fn index_to_address(&self, index: usize) -> Address {
self.space_start.plus(index << immix::LOG_BYTES_IN_LINE)
}
#[inline(always)]
pub fn mark_line_live(&self, addr: Address) {
@@ -348,6 +353,20 @@ impl ImmixBlock {
}
i
}
pub fn lazy_zeroing(&mut self) {
let line_mark_table = self.line_mark_table();
for i in 0..line_mark_table.len {
if line_mark_table.get(i) == immix::LineMark::Free {
let line_start : Address = self.start.plus(i << immix::LOG_BYTES_IN_LINE);
// zero the line
unsafe {
memsec::memzero(line_start.to_ptr_mut::<u8>(), immix::BYTES_IN_LINE);
}
}
}
}
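A sketch of the idea behind lazy_zeroing (toy types, not the repo's): when a recycled block is handed to a mutator, only the lines the last mark phase left Free are zeroed; live lines keep their contents.

```rust
#[derive(Clone, Copy, PartialEq)]
enum LineMark {
    Free,
    Live,
}

// Zero only the Free lines of a block; bytes_per_line models BYTES_IN_LINE.
fn lazy_zero(block: &mut [u8], marks: &[LineMark], bytes_per_line: usize) {
    for (i, mark) in marks.iter().enumerate() {
        if *mark == LineMark::Free {
            let start = i * bytes_per_line;
            block[start..start + bytes_per_line].fill(0);
        }
    }
}

fn main() {
    let mut block = [0xffu8; 8];
    lazy_zero(&mut block, &[LineMark::Free, LineMark::Live], 4);
    assert_eq!(&block[..4], &[0, 0, 0, 0]);
    assert_eq!(&block[4..], &[0xff; 4]);
}
```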
pub fn id(&self) -> usize {
self.id
......
@@ -33,6 +33,7 @@ pub trait Space {
fn trace_map(&self) -> *mut u8;
#[inline(always)]
#[cfg(feature = "use-sidemap")]
fn is_valid_object(&self, addr: Address) -> bool {
let start = self.start();
let end = self.end();
@@ -43,7 +44,33 @@
let index = (addr.diff(start) >> LOG_POINTER_SIZE) as isize;
if !bit_utils::test_nth_bit(unsafe {*self.alloc_map().offset(index)}, objectmodel::OBJ_START_BIT) {
// use side map
if !bit_utils::test_nth_bit_u8(unsafe { *self.alloc_map().offset(index) }, objectmodel::OBJ_START_BIT) {
return false;
}
if !addr.is_aligned_to(POINTER_SIZE) {
return false;
}
true
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
fn is_valid_object(&self, addr: Address) -> bool {
let start = self.start();
let end = self.end();
if addr >= end || addr < start {
return false;
}
let index = (addr.diff(start) >> LOG_POINTER_SIZE) as isize;
// use header
let hdr = unsafe {addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
if !objectmodel::header_is_object_start(hdr) {
return false;
}
......
@@ -22,6 +22,8 @@ use heap::immix::ImmixMutatorLocal;
use heap::freelist;
use heap::freelist::FreeListSpace;
use utils::LinkedHashSet;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
@@ -37,7 +39,8 @@ pub struct GC {
immix_space: Arc<ImmixSpace>,
lo_space : Arc<FreeListSpace>,
gc_types : Vec<Arc<GCType>>
gc_types : Vec<Arc<GCType>>,
roots : LinkedHashSet<ObjectReference>
}
impl fmt::Debug for GC {
@@ -106,7 +109,8 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
immix_space: immix_space,
lo_space: lo_space,
gc_types: vec![]
gc_types: vec![],
roots : LinkedHashSet::new()
});
info!("heap is {} bytes (immix: {} bytes, lo: {} bytes) . ", immix_size + lo_size, immix_size, lo_size);
@@ -132,6 +136,20 @@ extern "C" {
pub fn set_low_water_mark();
}
// explicitly control roots
#[no_mangle]
pub extern fn add_to_root(obj: ObjectReference) {
let mut gc = MY_GC.write().unwrap();
gc.as_mut().unwrap().roots.insert(obj);
}
#[no_mangle]
pub extern fn remove_root(obj: ObjectReference) {
let mut gc = MY_GC.write().unwrap();
gc.as_mut().unwrap().roots.remove(&obj);
}
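A small model (std types only; this repo uses its own LinkedHashSet) of how the explicit roots added here combine with stack scanning at the start of a collection:

```rust
use std::collections::HashSet;

struct Gc {
    roots: HashSet<usize>, // explicit roots, keyed by object address
}

impl Gc {
    fn add_to_root(&mut self, obj: usize) {
        self.roots.insert(obj);
    }
    fn remove_root(&mut self, obj: usize) {
        self.roots.remove(&obj);
    }
    // At each GC, explicit roots are copied in first, then stack roots are
    // appended (mirroring the controller branch in sync_barrier above).
    fn gather_roots(&self, stack_roots: &[usize]) -> Vec<usize> {
        self.roots.iter().copied().chain(stack_roots.iter().copied()).collect()
    }
}

fn main() {
    let mut gc = Gc { roots: HashSet::new() };
    gc.add_to_root(0x1000);
    let all = gc.gather_roots(&[0x2000]);
    assert!(all.contains(&0x1000) && all.contains(&0x2000));
}
```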
#[no_mangle]
#[inline(always)]
pub extern fn yieldpoint(mutator: *mut ImmixMutatorLocal) {
@@ -153,27 +171,29 @@ pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize)
#[no_mangle]
#[inline(always)]
pub extern fn init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
pub extern fn init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = unsafe {&mut *mutator}.try_alloc_from_local(size, align);
trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}", mutator, size, align, ret);
unsafe {ret.to_object_reference()}
}
#[no_mangle]
pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {}) = {}", mutator, size, align, ret);
unsafe {ret.to_object_reference()}
}
#[no_mangle]
#[allow(unused_variables)]
pub extern fn muentry_init_large_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u8) {
pub extern fn muentry_init_large_object(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64) {
MY_GC.read().unwrap().as_ref().unwrap().lo_space.init_object(obj.to_address(), encode);
}
\ No newline at end of file
/// * use a 1-word (64-bit) header
/// * the header is stored immediately before the object reference
/// * for fix-sized types:
///   MSB 1 bit - is object start
///       1 bit - trace bit
///       1 bit - is fix-sized (set for fix-sized types)
///       1 bit - is reference map encoded?
///       ... (unused)
///
/// fix-sized with reference map (fix? = 1, ref map? = 1)
/// | start? | trace? | 1 | 1 | (unused bits) ... | reference map (32 bits) |
/// fix-sized with type ID (fix? = 1, ref map? = 0)
/// | start? | trace? | 1 | 0 | (unused bits) ... | gc type ID (32 bits) |
/// var-sized (fix? = 0)
/// | start? | trace? | 0 | hybrid length (29 bits, ~500M) | gc type ID (32 bits) |
use utils::ByteSize;
use utils::ByteOffset;
use utils::bit_utils;
use utils::{Address, ObjectReference};
use utils::POINTER_SIZE;
pub const OBJECT_HEADER_SIZE : ByteSize = 8;
pub const OBJECT_HEADER_OFFSET : ByteOffset = - (OBJECT_HEADER_SIZE as ByteOffset);
pub const BIT_IS_OBJ_START : usize = 63;
pub const BIT_IS_TRACED : usize = 62;
pub const BIT_IS_FIX_SIZE : usize = 61;
pub const BIT_HAS_REF_MAP : usize = 60;
pub const MASK_REF_MAP : u64 = 0xFFFFFFFFu64;
pub const MASK_GCTYPE_ID : u64 = 0xFFFFFFFFu64;
pub const MASK_HYBRID_LENGTH: u64 = 0x1FFFFFFF00000000u64;
pub const SHR_HYBRID_LENGTH : usize = 32;
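Putting the constants together: a hedged sketch (the helper is ours) of composing a fix-sized header whose low 32 bits carry the reference map. The result matches the test vector at the bottom of this file.

```rust
// Compose: object-start | fix-sized | has-ref-map | ref map in low 32 bits.
fn build_fixsize_refmap_header(ref_map: u32) -> u64 {
    (1u64 << 63)       // BIT_IS_OBJ_START
        | (1u64 << 61) // BIT_IS_FIX_SIZE
        | (1u64 << 60) // BIT_HAS_REF_MAP
        | (ref_map as u64)
}

fn main() {
    let hdr = build_fixsize_refmap_header(0b11);
    assert_eq!(hdr, 0xb000_0000_0000_0003u64); // same value as the test below
}
```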
#[allow(unused_variables)]
pub fn print_object(obj: Address) {
let mut cursor = obj;
trace!("OBJECT 0x{:x}", obj);
let hdr = unsafe {cursor.offset(OBJECT_HEADER_OFFSET).load::<u64>()};
trace!("- is object start? {}", header_is_object_start(hdr));
trace!("- is traced? {}", header_is_traced(hdr));
if header_is_fix_size(hdr) {
trace!("- is fix sized? true");
if header_has_ref_map(hdr) {
trace!("- has ref map: {:b}", header_get_ref_map(hdr));
} else {
trace!("- has type ID: {}", header_get_gctype_id(hdr));
}
} else {
trace!("more info about hybrid, not implemented");
}
trace!("0x{:x} | val: 0x{:15x} | hdr: {:b}",
cursor, unsafe{cursor.load::<u64>()}, hdr);
cursor = cursor.plus(POINTER_SIZE);
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
cursor = cursor.plus(POINTER_SIZE);
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
cursor = cursor.plus(POINTER_SIZE);
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
cursor = cursor.plus(POINTER_SIZE);
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
cursor = cursor.plus(POINTER_SIZE);
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
}
#[inline(always)]
pub fn mark_as_traced(obj: ObjectReference, mark_state: u8) {
unsafe {
let hdr_addr = obj.to_address().offset(OBJECT_HEADER_OFFSET);
hdr_addr.store(bit_utils::set_nth_bit_u64(hdr_addr.load::<u64>(), BIT_IS_TRACED, mark_state));
}
}
#[inline(always)]
pub fn mark_as_untraced(addr: Address, mark_state: u8) {
unsafe {
let hdr_addr = addr.offset(OBJECT_HEADER_OFFSET);
hdr_addr.store(bit_utils::set_nth_bit_u64(hdr_addr.load::<u64>(), BIT_IS_TRACED, mark_state ^ 1));
}
}
#[inline(always)]
pub fn is_traced(obj: ObjectReference, mark_state: u8) -> bool {
unsafe {
let hdr = obj.to_address().offset(OBJECT_HEADER_OFFSET).load::<u64>();
// an object counts as traced only if its trace bit matches the current
// mark state; testing the bit alone is wrong once the mark state flips
bit_utils::test_nth_bit_u64(hdr, BIT_IS_TRACED) == (mark_state == 1)
}
}
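Why mark_as_traced stores mark_state while mark_as_untraced stores mark_state ^ 1: the collector flips the global mark state each cycle instead of clearing trace bits, so last cycle's marks automatically read as untraced. A toy illustration (function names are ours):

```rust
// An object counts as traced only when its bit equals the current mark state.
fn is_traced(trace_bit: u8, mark_state: u8) -> bool {
    trace_bit == mark_state
}

fn main() {
    let mut mark_state = 1u8;
    let obj_bit = mark_state; // object marked during this cycle
    assert!(is_traced(obj_bit, mark_state));

    mark_state ^= 1; // flip at the start of the next collection
    assert!(!is_traced(obj_bit, mark_state)); // stale mark no longer counts
}
```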
#[inline(always)]
pub fn header_is_object_start(hdr: u64) -> bool {
bit_utils::test_nth_bit_u64(hdr, BIT_IS_OBJ_START)
}
#[inline(always)]
pub fn header_is_fix_size(hdr: u64) -> bool {
bit_utils::test_nth_bit_u64(hdr, BIT_IS_FIX_SIZE)
}
#[inline(always)]
pub fn header_is_traced(hdr: u64) -> bool {
bit_utils::test_nth_bit_u64(hdr, BIT_IS_TRACED)
}
#[inline(always)]
pub fn header_has_ref_map(hdr: u64) -> bool {
bit_utils::test_nth_bit_u64(hdr, BIT_HAS_REF_MAP)
}
#[inline(always)]
pub fn header_get_ref_map(hdr: u64) -> u32 {
(hdr & MASK_REF_MAP) as u32
}
#[inline(always)]
pub fn header_get_hybrid_length(hdr: u64) -> u32 {
((hdr & MASK_HYBRID_LENGTH) >> SHR_HYBRID_LENGTH) as u32
}
#[inline(always)]
pub fn header_get_gctype_id(hdr: u64) -> u32 {
(hdr & MASK_GCTYPE_ID) as u32
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fixsize_header_refmap() {
let hdr_bin = 0b10110000_00000000_00000000_00000000_00000000_00000000_00000000_00000011u64;
let hdr_hex = 0xb000000000000003u64;
println!("");
println!("binary: {:b}", hdr_bin);