Commit 65b51d5f authored by qinsoon

[wip] make hybrid work

parent b89f2e04
......@@ -1327,8 +1327,18 @@ impl <'a> InstructionSelection {
let ty_align= ty_info.alignment;
let const_size = self.make_value_int_const(size as u64, vm);
self.emit_alloc_sequence(&ty_info, const_size, ty_align, node, f_content, f_context, vm);
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), &ty_info, const_size, ty_align, node, f_content, f_context, vm);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
self.emit_runtime_entry(
&entrypoints::INIT_OBJ,
vec![tmp_allocator.clone(), tmp_res.clone(), encode],
None,
Some(node), f_content, f_context, vm
);
}
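Note: allocation and header initialisation are now decoupled — emit_alloc_sequence only returns the allocated cell, and each caller emits the matching init call itself (INIT_OBJ here, INIT_HYBRID in the NewHybrid case below). A rough sketch of the call order this case lowers to, using the names from the hunk above (not the actual emitted assembly):

    // sketch only
    // let tmp_allocator = emit_get_allocator(..);                 // &mutator.allocator via the thread-local
    // let tmp_res       = emit_alloc_sequence(tmp_allocator, ..); // small or large path, no header write
    // muentry_init_object(tmp_allocator, tmp_res, encode);        // stores the GC type encoding in the header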
Instruction_::NewHybrid(ref ty, var_len) => {
......@@ -1356,7 +1366,7 @@ impl <'a> InstructionSelection {
};
// actual size = fix_part_size + var_ty_size * len
let actual_size = {
let (actual_size, length) = {
let ops = inst.ops.read().unwrap();
let ref var_len = ops[var_len];
......@@ -1364,7 +1374,10 @@ impl <'a> InstructionSelection {
let var_len = self.node_iimm_to_i32(var_len);
let actual_size = fix_part_size + var_ty_size * (var_len as usize);
self.make_value_int_const(actual_size as u64, vm)
(
self.make_value_int_const(actual_size as u64, vm),
self.make_value_int_const(var_len as u64, vm)
)
} else {
let tmp_actual_size = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
......@@ -1414,11 +1427,21 @@ impl <'a> InstructionSelection {
}
}
tmp_actual_size
(tmp_actual_size, tmp_var_len)
}
};
self.emit_alloc_sequence(&ty_info, actual_size, ty_align, node, f_content, f_context, vm);
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), &ty_info, actual_size, ty_align, node, f_content, f_context, vm);
// ASM: call muentry_init_hybrid(%allocator, %tmp_res, %encode, %length)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
self.emit_runtime_entry(
&entrypoints::INIT_HYBRID,
vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
None,
Some(node), f_content, f_context, vm
);
}
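Note: for a compile-time-constant length the hybrid size is folded into a single constant. A minimal runnable sketch of that fold, with fix_part_size = 16 and var_ty_size = 8 chosen purely for illustration (they match the hybrid used in the gctype tests further down):

    // actual size = fix_part_size + var_ty_size * len
    fn hybrid_actual_size(fix_part_size: usize, var_ty_size: usize, var_len: usize) -> usize {
        fix_part_size + var_ty_size * var_len
    }

    fn main() {
        assert_eq!(hybrid_actual_size(16, 8, 5), 56); // same arithmetic as GCType::size_hybrid(5) below
    }

For a runtime length the same expression is emitted as a multiply-add into tmp_actual_size, and the length value is carried along so it can be passed to muentry_init_hybrid.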
Instruction_::Throw(op_index) => {
......@@ -1482,13 +1505,13 @@ impl <'a> InstructionSelection {
})
}
fn emit_alloc_sequence (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
fn emit_alloc_sequence (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
if size.is_int_const() {
// size known at compile time, we can choose to emit alloc_small or large now
if size.extract_int_const() > mm::LARGE_OBJECT_THRESHOLD as u64 {
self.emit_alloc_sequence_large(ty_info, size, align, node, f_content, f_context, vm);
self.emit_alloc_sequence_large(tmp_allocator, ty_info, size, align, node, f_content, f_context, vm)
} else {
self.emit_alloc_sequence_small(ty_info, size, align, node, f_content, f_context, vm);
self.emit_alloc_sequence_small(tmp_allocator, ty_info, size, align, node, f_content, f_context, vm)
}
} else {
// size is unknown at compile time
......@@ -1509,7 +1532,7 @@ impl <'a> InstructionSelection {
self.backend.emit_jg(blk_alloc_large.clone());
// alloc small here
let tmp_res = self.emit_alloc_sequence_small(ty_info, size.clone(), align, node, f_content, f_context, vm);
let tmp_res = self.emit_alloc_sequence_small(tmp_allocator.clone(), ty_info, size.clone(), align, node, f_content, f_context, vm);
self.backend.emit_jmp(blk_alloc_large_end.clone());
......@@ -1523,20 +1546,20 @@ impl <'a> InstructionSelection {
self.backend.start_block(blk_alloc_large.clone());
self.backend.set_block_livein(blk_alloc_large.clone(), &vec![size.clone()]);
let tmp_res = self.emit_alloc_sequence_large(ty_info, size, align, node, f_content, f_context, vm);
let tmp_res = self.emit_alloc_sequence_large(tmp_allocator.clone(), ty_info, size, align, node, f_content, f_context, vm);
self.backend.end_block(blk_alloc_large.clone());
self.backend.set_block_liveout(blk_alloc_large.clone(), &vec![tmp_res]);
self.backend.set_block_liveout(blk_alloc_large.clone(), &vec![tmp_res.clone()]);
// alloc_large_end:
self.backend.start_block(blk_alloc_large_end.clone());
self.current_block = Some(blk_alloc_large_end.clone());
tmp_res
}
}
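Note: the small/large split keys off mm::LARGE_OBJECT_THRESHOLD — decided statically when the size is a constant, otherwise via the emitted cmp/jg above. A sketch of the decision only (the threshold's concrete value is not shown in this diff, so it is a parameter here):

    // returns true when the allocation must take the large-object path
    fn is_large_alloc(size: u64, large_object_threshold: u64) -> bool {
        size > large_object_threshold
    }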
fn emit_alloc_sequence_large (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let tmp_res = self.get_result_value(node);
fn emit_get_allocator (&mut self, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// ASM: %tl = get_thread_local()
let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
......@@ -1545,6 +1568,12 @@ impl <'a> InstructionSelection {
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_immoffset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
tmp_allocator
}
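Note: the allocator handle is not loaded from memory — the emitted LEA just materialises a pointer into the thread-local mutator block. A minimal sketch of the address computation (ALLOCATOR_OFFSET's concrete value is not shown here):

    // what the LEA computes: base of the thread-local block plus the allocator field's offset
    fn allocator_address(thread_local: usize, allocator_offset: usize) -> usize {
        thread_local + allocator_offset
    }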
fn emit_alloc_sequence_large (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let tmp_res = self.get_result_value(node);
// ASM: %tmp_res = call muentry_alloc_large(%allocator, size, align)
let const_align = self.make_value_int_const(align as u64, vm);
......@@ -1555,19 +1584,10 @@ impl <'a> InstructionSelection {
Some(node), f_content, f_context, vm
);
// ASM: call muentry_init_object(%allocator, %tmp_res, encode)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
self.emit_runtime_entry(
&entrypoints::INIT_OBJ,
vec![tmp_allocator.clone(), tmp_res.clone(), encode],
None,
Some(node), f_content, f_context, vm
);
tmp_res
}
fn emit_alloc_sequence_small (&mut self, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
fn emit_alloc_sequence_small (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// emit immix allocation fast path
// ASM: %tl = get_thread_local()
......@@ -1635,9 +1655,6 @@ impl <'a> InstructionSelection {
self.backend.set_block_livein(slowpath.clone(), &vec![size.clone()]);
// arg1: allocator address
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_immoffset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// arg2: size
// arg3: align
let const_align= self.make_value_int_const(align as u64, vm);
......@@ -1659,15 +1676,6 @@ impl <'a> InstructionSelection {
self.backend.start_block(allocend.clone());
self.current_block = Some(allocend.clone());
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
self.emit_runtime_entry(
&entrypoints::INIT_OBJ,
vec![tmp_allocator.clone(), tmp_res.clone(), encode],
None,
Some(node), f_content, f_context, vm
);
tmp_res
}
......
......@@ -6,7 +6,7 @@ pub mod code_emission;
use ast::types;
use utils::ByteSize;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern, RepeatingRefPattern};
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
pub type Word = usize;
pub const WORD_SIZE : ByteSize = 8;
......@@ -128,12 +128,11 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
size : ele_ty.size * len,
alignment : ele_ty.alignment,
struct_layout: None,
gc_type : mm::add_gc_type(GCType::new(GCTYPE_INIT_ID,
gc_type : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
ele_ty.size * len,
ele_ty.alignment,
None,
Some(RepeatingRefPattern{
pattern: RefPattern::NestedType(vec![ele_ty.gc_type]),
Some(RefPattern::Repeat{
pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
count : len
})
))
......@@ -173,10 +172,8 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
}
// 2. fix gc type
let mut gctype = ret.gc_type.as_ref().clone();
gctype.repeat_refs = Some(RepeatingRefPattern {
pattern: RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]),
count : 0
});
gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
gctype.var_size = Some(var_ele_ty.size);
ret.gc_type = mm::add_gc_type(gctype);
ret
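Note: for hybrids the backend now patches var_refs/var_size onto the fixed-part GCType instead of using the removed RepeatingRefPattern. The GCType::new_hybrid constructor added below builds the same shape directly; a sketch for a hybrid whose fixed part is a single ref and whose var element is an int64 (illustrative sizes only):

    // sketch, assuming the crate-internal gctype module is in scope as in backend/mod.rs above
    use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};

    fn example_hybrid_gctype() -> GCType {
        GCType::new_hybrid(
            GCTYPE_INIT_ID,
            8,                                                    // fix_size: one 8-byte ref
            8,                                                    // alignment
            Some(RefPattern::Map { offsets: vec![0], size: 8 }),  // fixed-part ref map
            None,                                                 // var part holds no refs
            8                                                     // var_size per element
        )
    }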
......@@ -246,7 +243,7 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
size : size,
alignment : struct_align,
struct_layout: Some(offsets),
gc_type : mm::add_gc_type(GCType::new(GCTYPE_INIT_ID,
gc_type : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
size,
struct_align,
Some(if use_ref_offsets {
......@@ -256,8 +253,7 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
}
} else {
RefPattern::NestedType(gc_types)
}),
None))
})))
}
}
......@@ -277,6 +273,10 @@ pub struct BackendTypeInfo {
}
impl BackendTypeInfo {
pub fn is_hybrid(&self) -> bool {
self.gc_type.is_hybrid()
}
pub fn get_field_offset(&self, index: usize) -> ByteSize {
if self.struct_layout.is_some() {
let layout = self.struct_layout.as_ref().unwrap();
......
......@@ -11,63 +11,123 @@ pub const GCTYPE_INIT_ID: u32 = u32::MAX;
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct GCType {
pub id: u32,
pub size: ByteSize,
alignment: ByteSize,
pub non_repeat_refs: Option<RefPattern>,
pub repeat_refs : Option<RepeatingRefPattern>,
pub alignment: ByteSize,
pub fix_size: ByteSize,
pub fix_refs: Option<RefPattern>,
pub var_refs: Option<RefPattern>,
pub var_size: Option<ByteSize>
}
impl GCType {
pub fn new(id: u32, size: ByteSize, alignment: ByteSize, non_repeat_refs: Option<RefPattern>, repeat_refs: Option<RepeatingRefPattern>) -> GCType {
pub fn new_fix(id: u32, size: ByteSize, alignment: ByteSize, fix_refs: Option<RefPattern>) -> GCType {
GCType {
id: id,
alignment: objectmodel::check_alignment(alignment),
fix_refs: fix_refs,
fix_size: size,
var_refs: None,
var_size: None
}
}
pub fn new_hybrid(id: u32, size: ByteSize, alignment: ByteSize, fix_refs: Option<RefPattern>, var_refs: Option<RefPattern>, var_size: ByteSize) -> GCType {
GCType {
id: id,
size: size,
alignment: objectmodel::check_alignment(alignment),
non_repeat_refs: non_repeat_refs,
repeat_refs: repeat_refs
fix_refs: fix_refs,
fix_size: size,
var_refs: var_refs,
var_size: Some(var_size)
}
}
pub fn new_noreftype(size: ByteSize, align: ByteSize) -> GCType {
GCType {
id: GCTYPE_INIT_ID,
size: size,
alignment: align,
non_repeat_refs: None,
repeat_refs : None,
fix_refs: None,
fix_size: size,
var_refs: None,
var_size: None,
}
}
pub fn new_reftype() -> GCType {
GCType {
id: GCTYPE_INIT_ID,
size: POINTER_SIZE,
alignment: POINTER_SIZE,
non_repeat_refs: Some(RefPattern::Map{
fix_refs: Some(RefPattern::Map{
offsets: vec![0],
size: POINTER_SIZE
}),
repeat_refs: None
fix_size: POINTER_SIZE,
var_refs: None,
var_size: None
}
}
#[inline(always)]
pub fn is_hybrid(&self) -> bool {
self.var_size.is_some()
}
pub fn size(&self) -> ByteSize {
self.fix_size
}
pub fn size_hybrid(&self, length: u32) -> ByteSize {
assert!(self.var_size.is_some());
self.fix_size + self.var_size.unwrap() * (length as usize)
}
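Note: worked example of size_hybrid — with fix_size = 16 and var_size = 8 (the hybrid used in the tests below), size_hybrid(5) = 16 + 8 * 5 = 56, which is exactly what those tests assert.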
#[allow(unused_assignments)]
pub fn gen_ref_offsets(&self) -> Vec<ByteSize> {
let mut ret = vec![];
let mut cur_offset = 0;
match self.non_repeat_refs {
match self.fix_refs {
Some(ref pattern) => {
cur_offset = pattern.append_offsets(cur_offset, &mut ret);
}
None => {}
}
if self.repeat_refs.is_some() {
let repeat_refs = self.repeat_refs.as_ref().unwrap();
ret
}
pub fn gen_hybrid_ref_offsets(&self, length: u32) -> Vec<ByteSize> {
debug_assert!(self.is_hybrid());
let mut ret = vec![];
let mut cur_offset = 0;
cur_offset = repeat_refs.append_offsets(cur_offset, &mut ret);
// fix part
match self.fix_refs {
Some(ref pattern) => {
cur_offset = pattern.append_offsets(cur_offset, &mut ret);
},
None => {}
}
// var part
if self.var_refs.is_some() {
let ref var_part = self.var_refs.as_ref().unwrap();
for _ in 0..length {
cur_offset = var_part.append_offsets(cur_offset, &mut ret);
}
}
ret
......@@ -80,10 +140,30 @@ pub enum RefPattern {
offsets: Vec<ByteSize>,
size : usize
},
NestedType(Vec<Arc<GCType>>)
NestedType(Vec<Arc<GCType>>),
Repeat{
pattern: Box<RefPattern>,
count: usize
}
}
impl RefPattern {
pub fn size(&self) -> ByteSize {
match self {
&RefPattern::Map {size, ..} => size,
&RefPattern::NestedType(ref vec) => {
let mut size = 0;
for ty in vec.iter() {
size += ty.size();
}
size
},
&RefPattern::Repeat{ref pattern, count} => {
pattern.size() * count
}
}
}
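Note: RefPattern::size gives the byte footprint of one pattern instance — Map is its declared size, NestedType sums its element types, and Repeat multiplies by count. For the test types below, Repeat{Map{size: 16}, count: 10} sizes to 16 * 10 = 160 bytes, matching fix_size of type b.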
pub fn append_offsets(&self, base: ByteSize, vec: &mut Vec<ByteSize>) -> ByteSize {
match self {
&RefPattern::Map{ref offsets, size} => {
......@@ -102,30 +182,21 @@ impl RefPattern {
vec.append(&mut nested_offset);
cur_base += ty.size;
cur_base += ty.size();
}
cur_base
}
}
}
}
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct RepeatingRefPattern {
pub pattern: RefPattern,
pub count: usize
}
},
&RefPattern::Repeat{ref pattern, count} => {
let mut cur_base = base;
impl RepeatingRefPattern {
pub fn append_offsets(&self, base: ByteSize, vec: &mut Vec<ByteSize>) -> ByteSize {
let mut cur_base = base;
for _ in 0..count {
cur_base = pattern.append_offsets(cur_base, vec);
}
for _ in 0..self.count {
cur_base = self.pattern.append_offsets(cur_base, vec);
cur_base
}
}
cur_base
}
}
......@@ -139,40 +210,49 @@ mod tests {
// linked list: struct {ref, int64}
let a = GCType{
id: 0,
size: 16,
alignment: 8,
non_repeat_refs: Some(RefPattern::Map{
fix_size: 16,
fix_refs: Some(RefPattern::Map{
offsets: vec![0],
size: 16
}),
repeat_refs : None
var_size: None,
var_refs: None
};
// array of struct {ref, int64} with length 10
let b = GCType {
id: 1,
size: 160,
alignment: 8,
non_repeat_refs: None,
repeat_refs : Some(RepeatingRefPattern {
pattern: RefPattern::Map{
fix_size: 160,
fix_refs: Some(RefPattern::Repeat {
pattern: Box::new(RefPattern::Map{
offsets: vec![0],
size : 16
},
count : 10
}),
count: 10
}),
var_size: None,
var_refs: None
};
// array(10) of array(10) of struct {ref, int64}
let c = GCType {
id: 2,
size: 1600,
alignment: 8,
non_repeat_refs: None,
repeat_refs : Some(RepeatingRefPattern {
pattern: RefPattern::NestedType(vec![Arc::new(b.clone()).clone()]),
fix_size: 1600,
fix_refs: Some(RefPattern::Repeat {
pattern: Box::new(RefPattern::NestedType(vec![Arc::new(b.clone()).clone()])),
count : 10
})
}),
var_size: None,
var_refs: None
};
vec![a, b, c]
......@@ -183,6 +263,51 @@ mod tests {
create_types();
}
#[test]
fn test_hybrid_type() {
// hybrid { fix: ref, int } { var: int }
let a = GCType {
id: 10,
alignment: 8,
fix_size: 16,
fix_refs: Some(RefPattern::Map {
offsets: vec![0],
size: 16
}),
var_size: Some(8),
var_refs: None
};
assert_eq!(a.gen_hybrid_ref_offsets(5), vec![0]);
assert_eq!(a.size_hybrid(5), 56);
}
#[test]
fn test_hybrid_type2() {
// hybrid { fix: ref, int } { var: ref }
let a = GCType {
id: 10,
alignment: 8,
fix_size: 16,
fix_refs: Some(RefPattern::Map {
offsets: vec![0],
size: 16
}),
var_size: Some(8),
var_refs: Some(RefPattern::Map {
offsets: vec![0],
size: 8
})
};
assert_eq!(a.gen_hybrid_ref_offsets(5), vec![0, 16, 24, 32, 40, 48]);
assert_eq!(a.size_hybrid(5), 56);
}
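Note: the expected offsets in test_hybrid_type2 decompose as the fixed-part ref at 0, then (since the fixed part is 16 bytes) one ref per var element at 16, 24, 32, 40, 48 — five 8-byte elements each starting with a ref. The size assert is the same 16 + 8 * 5 = 56 as in test_hybrid_type.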
#[test]
fn test_ref_offsets() {
let vec = create_types();
......@@ -193,10 +318,13 @@ mod tests {
let int = GCType {
id: 3,
size: 8,
alignment: 8,
non_repeat_refs: None,
repeat_refs: None
fix_size: 8,
fix_refs: None,
var_size: None,
var_refs: None
};
assert_eq!(int.gen_ref_offsets(), vec![]);
......
......@@ -91,13 +91,14 @@ impl HeapDump {
ObjectDump {
reference_addr: obj,
mem_start : hdr_addr,
mem_size : gctype.size + objectmodel::OBJECT_HEADER_SIZE,
mem_size : gctype.size() + objectmodel::OBJECT_HEADER_SIZE,
reference_offsets: gctype.gen_ref_offsets()
}
}
} else {
// hybrids - same as above
let gctype_id = objectmodel::header_get_gctype_id(hdr);
let gctype_id = objectmodel::header_get_gctype_id(hdr);
let var_length = objectmodel::header_get_hybrid_length(hdr);
trace!("var sized, type id as {}", gctype_id);
......@@ -107,8 +108,8 @@ impl HeapDump {
ObjectDump {
reference_addr: obj,
mem_start : hdr_addr,
mem_size : gctype.size + objectmodel::OBJECT_HEADER_SIZE,
reference_offsets: gctype.gen_ref_offsets()
mem_size : gctype.size_hybrid(var_length) + objectmodel::OBJECT_HEADER_SIZE,
reference_offsets: gctype.gen_hybrid_ref_offsets(var_length)
}
}
}
......
......@@ -496,11 +496,12 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
} else {
// hybrids
let gctype_id = objectmodel::header_get_gctype_id(hdr);
let var_length = objectmodel::header_get_hybrid_length(hdr);
let gc_lock = MY_GC.read().unwrap();
let gctype : Arc<GCType> = gc_lock.as_ref().unwrap().gc_types[gctype_id as usize].clone();
for offset in gctype.gen_ref_offsets() {
for offset in gctype.gen_hybrid_ref_offsets(var_length) {
steal_process_edge(addr, offset, local_queue, job_sender, mark_state, immix_space, lo_space);
}
}
......
......@@ -185,7 +185,6 @@ impl ImmixMutatorLocal {
unimplemented!()
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn init_object(&mut self, addr: Address, encode: u64) {
......@@ -193,6 +192,20 @@ impl ImmixMutatorLocal {
addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
#[inline(always)]
#[cfg(feature = "use-sidemap")]
pub fn init_hybrid(&mut self, addr: Address, encode: u64, len: u64) {
unimplemented!()
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn init_hybrid(&mut self, addr: Address, encode: u64, len: u64) {
let encode = encode | ((len << objectmodel::SHR_HYBRID_LENGTH) & objectmodel::MASK_HYBRID_LENGTH);
unsafe {
addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
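Note: init_hybrid ORs the per-object length into the header encoding; the compiler-side get_gc_type_encode (below) deliberately encodes hybrids with a fake length of 0, so this is the only place the real length enters the header, and header_get_hybrid_length reads it back for tracing and heap dumps. A minimal sketch of the packing — the concrete SHR_HYBRID_LENGTH / MASK_HYBRID_LENGTH values are not visible in this diff, so they are parameters here:

    // encode | ((len << shift) & mask), as in init_hybrid above
    fn pack_hybrid_header(encode: u64, len: u64, shr_hybrid_length: u32, mask_hybrid_length: u64) -> u64 {
        encode | ((len << shr_hybrid_length) & mask_hybrid_length)
    }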
#[inline(never)]
pub fn try_alloc_from_local(&mut self, size : usize, align: usize) -> Address {
......
......@@ -91,7 +91,11 @@ pub extern fn get_gc_type_encode(id: u32) -> u64 {
let gc_lock = MY_GC.read().unwrap();
let ref gctype = gc_lock.as_ref().unwrap().gc_types[id as usize];
objectmodel::gen_gctype_encode(gctype)
if gctype.is_hybrid() {
objectmodel::gen_hybrid_gctype_encode(gctype, 0) // fake length
} else {
objectmodel::gen_gctype_encode(gctype)
}
}
#[no_mangle]
......@@ -189,6 +193,12 @@ pub extern fn muentry_init_object(mutator: *mut ImmixMutatorLocal, obj: ObjectRe
unsafe {&mut *mutator}.init_object(obj.to_address(), encode);
}
#[no_mangle]
#[inline(never)]
pub extern fn muentry_init_hybrid(mutator: *mut ImmixMutatorLocal, obj: ObjectReference, encode: u64, length: u64) {
unsafe {&mut *mutator}.init_hybrid(obj.to_address(), encode, length);
}
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
......
......@@ -56,41 +56,47 @@ pub const MASK_OBJ_SIZE : u64 = 0x0FFFFFFF00000000u64;
pub const SHR_OBJ_SIZE : usize = 32;
pub fn gen_gctype_encode(ty: &GCType) -> u64 {
assert!(!ty.is_hybrid());
let mut ret = 0u64;
if ty.repeat_refs.is_some() {
// var sized
let len = ty.repeat_refs.as_ref().unwrap().count;
// fix sized
ret = ret | (1 << BIT_IS_FIX_SIZE);
// encode length
ret = ret | (( (len as u64) << SHR_HYBRID_LENGTH) & MASK_HYBRID_LENGTH);
// encode gc id
ret = ret | (ty.id as u64);
// encode ref map?
if ty.size() < REF_MAP_LENGTH * POINTER_SIZE {
// has ref map
ret = ret | (1 << BIT_HAS_REF_MAP);
// encode ref map
let offsets = ty.gen_ref_offsets();
let mut ref_map = 0;