Commit cae72305 authored by qinsoon

[wip] properly deal with HEADER_SIZE and HEADER_OFFSET

parent d40b09d2
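For context: the GC reserves OBJECT_HEADER_SIZE bytes of header per object, and the reference handed back to the mutator is displaced from the raw allocation start by OBJECT_HEADER_OFFSET. A minimal sketch of that convention, under assumed values (the constant names match the diff below; the values and helper functions are illustrative, not from the Zebu sources):

    // Illustrative values only; the real ones come from the GC's object model.
    const OBJECT_HEADER_SIZE: usize = 8;    // bytes reserved in front of the payload
    const OBJECT_HEADER_OFFSET: isize = -8; // assumed: header sits below the reference

    // The mutator-facing size excludes the header, so the allocator must
    // reserve payload + header bytes ...
    fn total_alloc_size(payload: usize) -> usize {
        payload + OBJECT_HEADER_SIZE
    }

    // ... and the returned reference is biased so that the header lands at
    // OBJECT_HEADER_OFFSET from it, mirroring the `lea -HEADER_OFFSET(%res)`
    // emitted in the slowpath hunk below.
    fn object_ref(raw_start: isize) -> isize {
        raw_start - OBJECT_HEADER_OFFSET
    }

The hunks below thread this convention through both the compile-time and the run-time allocation paths.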
@@ -1793,7 +1793,9 @@ impl <'a> InstructionSelection {
     fn emit_alloc_sequence (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
         if size.is_int_const() {
             // size known at compile time, we can choose to emit alloc_small or large now
-            if size.extract_int_const() > mm::LARGE_OBJECT_THRESHOLD as u64 {
+            let size_i = size.extract_int_const();
+            if size_i + OBJECT_HEADER_SIZE as u64 > mm::LARGE_OBJECT_THRESHOLD as u64 {
                 self.emit_alloc_sequence_large(tmp_allocator, size, align, node, f_content, f_context, vm)
             } else {
                 self.emit_alloc_sequence_small(tmp_allocator, size, align, node, f_content, f_context, vm)
@@ -1813,7 +1815,15 @@ impl <'a> InstructionSelection {
         let blk_alloc_large = format!("{}_alloc_large", node.id());
         let blk_alloc_large_end = format!("{}_alloc_large_end", node.id());
-        self.backend.emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size);
+        if OBJECT_HEADER_SIZE != 0 {
+            let size_with_hdr = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
+            self.backend.emit_mov_r_r(&size_with_hdr, &size);
+            self.backend.emit_add_r_imm(&size_with_hdr, OBJECT_HEADER_SIZE as i32);
+            self.backend.emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size_with_hdr);
+        } else {
+            self.backend.emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size);
+        }
         self.backend.emit_jg(blk_alloc_large.clone());
         self.finish_block();
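(A fresh temporary holds the header-inclusive size here, presumably so the threshold check does not clobber `size`, which the small/large allocation sequences that follow still need in its original form.)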
@@ -1900,7 +1910,7 @@ impl <'a> InstructionSelection {
         // ASM: add %size, %start -> %end
         // or lea size(%start) -> %end
         let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
-        if size.is_int_const() {
+        let size = if size.is_int_const() {
             let mut offset = size.extract_int_const() as i32;
             if OBJECT_HEADER_SIZE != 0 {
@@ -1908,6 +1918,8 @@ impl <'a> InstructionSelection {
             }
             self.emit_lea_base_immoffset(&tmp_end, &tmp_start, offset, vm);
+            self.make_value_int_const(offset as u64, vm)
         } else {
             self.backend.emit_mov_r_r(&tmp_end, &tmp_start);
             if OBJECT_HEADER_SIZE != 0 {
@@ -1915,7 +1927,9 @@ impl <'a> InstructionSelection {
                 self.backend.emit_add_r_imm(&size, OBJECT_HEADER_SIZE as i32);
             }
             self.backend.emit_add_r_r(&tmp_end, &size);
-        }
+            size
+        };
         // check with limit
         // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
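(The new `let size = if ...` rebinding makes the header-inclusive size, either a fresh constant or the mutated register, the value that subsequently flows into the slowpath call, matching the `muentry_alloc_slow` contract documented further down: its size argument already includes HEADER_SIZE.)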
@@ -1976,6 +1990,11 @@ impl <'a> InstructionSelection {
             Some(node), f_content, f_context, vm
         );
+        if OBJECT_HEADER_OFFSET != 0 {
+            // ASM: lea -HEADER_OFFSET(%res) -> %result
+            self.emit_lea_base_immoffset(&tmp_res, &tmp_res, - OBJECT_HEADER_OFFSET as i32, vm);
+        }
         // end block (no liveout other than result)
         self.backend.end_block(slowpath.clone());
         self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
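Because `muentry_alloc_slow` returns the raw allocation start (see its new doc comment below), the emitted code must re-bias the slowpath result itself; that is what the added `lea` does. Roughly, the compiled sequence now behaves like this sketch (pseudocode, not actual backend output; the fastpath side of the result adjustment is outside this diff):

    // size_with_hdr = size + OBJECT_HEADER_SIZE   (folded when size is a constant)
    // start = align_up(cursor, align); end = start + size_with_hdr
    // if end > limit { res = muentry_alloc_slow(mutator, size_with_hdr, align);
    //                  res = res - OBJECT_HEADER_OFFSET }   // <- this hunk
    // else           { res = start; cursor = end }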
@@ -278,8 +278,8 @@ impl ImmixMutatorLocal {
                     self.line = 0;
                     trace!("Mutator{}: slowpath: new block starting from 0x{:x}", self.id, self.cursor);
-                    return self.alloc(size, align);
+                    return self.try_alloc_from_local(size, align);
                 },
                 None => {continue; }
             }
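(Switching the retry from `alloc` to `try_alloc_from_local` looks like the substantive fix in this file: per the doc comments added below, `alloc` takes a header-exclusive size and offsets its return value, so re-entering it from inside the slowpath would apply the HEADER_SIZE/HEADER_OFFSET adjustments a second time.)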
@@ -191,6 +191,7 @@ pub extern fn yieldpoint_slow(mutator: *mut ImmixMutatorLocal) {
 #[no_mangle]
 #[inline(always)]
+/// size doesn't include HEADER_SIZE, return value is offset by HEADER_OFFSET
 pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
     let addr = unsafe {&mut *mutator}.alloc(size, align);
     unsafe {addr.to_object_reference()}
@@ -210,6 +211,8 @@ pub extern fn muentry_init_hybrid(mutator: *mut ImmixMutatorLocal, obj: ObjectRe
 #[no_mangle]
 #[inline(never)]
+/// this function is supposed to be called by an inlined fastpath
+/// size _includes_ HEADER_SIZE, return value is _NOT_ offset by HEADER_OFFSET
 pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
     let ret = unsafe {&mut *mutator}.try_alloc_from_local(size, align);
     trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}", mutator, size, align, ret);
@@ -218,6 +221,7 @@ pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, a
 }
 #[no_mangle]
+/// size doesn't include HEADER_SIZE, return value is offset by HEADER_OFFSET
 pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
     let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
     trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {}) = {}", mutator, size, align, ret);