
Commit 8825e1d3 authored by qinsoon

for unknown-size allocation, do the calculation at runtime

parent 87048244
@@ -3424,69 +3424,22 @@ impl<'a> InstructionSelection {
                 )
             }
         } else {
             // size is unknown at compile time
-            // we need to emit both alloc small and alloc large,
-            // and it is decided at runtime
-            // emit: cmp size, THRESHOLD
-            // emit: jg ALLOC_LARGE
-            // emit: >> small object alloc
-            // emit: jmp ALLOC_LARGE_END
-            // emit: ALLOC_LARGE:
-            // emit: >> large object alloc
-            // emit: ALLOC_LARGE_END:
-            let blk_alloc_large = make_block_name(&node.name(), "alloc_large");
-            let blk_alloc_large_end = make_block_name(&node.name(), "alloc_large_end");
-            if OBJECT_HEADER_SIZE != 0 {
-                // if the header size is not zero, we need to calculate a total size to alloc
-                let size_with_hdr = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
-                self.backend.emit_mov_r_r(&size_with_hdr, &size);
-                self.backend
-                    .emit_add_r_imm(&size_with_hdr, OBJECT_HEADER_SIZE as i32);
-                self.backend
-                    .emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size_with_hdr);
-            } else {
-                self.backend
-                    .emit_cmp_imm_r(mm::LARGE_OBJECT_THRESHOLD as i32, &size);
-            }
-            self.backend.emit_jg(blk_alloc_large.clone());
-            self.finish_block();
-            let block_name = make_block_name(&node.name(), "allocsmall");
-            self.start_block(block_name);
+            // directly call 'alloc'
+            let tmp_res = self.get_result_value(node);
-            // alloc small here
-            self.emit_alloc_sequence_small(
-                tmp_allocator.clone(),
-                size.clone(),
-                align,
-                node,
-                f_content,
-                f_context,
-                vm
-            );
-            self.backend.emit_jmp(blk_alloc_large_end.clone());
-            // finishing current block
-            self.finish_block();
+            let const_align = self.make_int_const(align as u64, vm);
-            // alloc_large:
-            self.start_block(blk_alloc_large.clone());
-            self.emit_alloc_sequence_large(
-                tmp_allocator.clone(),
-                size,
-                align,
-                node,
+            self.emit_runtime_entry(
+                &entrypoints::ALLOC_ANY,
+                vec![tmp_allocator.clone(), size.clone(), const_align],
+                Some(vec![tmp_res.clone()]),
+                Some(node),
                 f_content,
                 f_context,
                 vm
             );
-            self.finish_block();
-            // alloc_large_end:
-            self.start_block(blk_alloc_large_end.clone());
-            self.get_result_value(node)
+            tmp_res
         }
     }
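In short, the compiler no longer open-codes the small/large decision; it emits a single call to the new ALLOC_ANY runtime entrypoint and lets the runtime branch on the size. For reference, here is a Rust-flavoured sketch of the control flow the deleted branch used to emit as x86-64 (the constants and the alloc_small/alloc_large helpers are illustrative stand-ins, not actual Zebu source):

```rust
// Illustrative model only: the constants and helpers are stand-ins for
// the emitted instruction sequences, not Zebu's real definitions.
const OBJECT_HEADER_SIZE: usize = 8; // assumed non-zero for this sketch
const LARGE_OBJECT_THRESHOLD: usize = 8192; // assumed value

fn alloc_small(size: usize, _align: usize) -> usize { size } // stand-in
fn alloc_large(size: usize, _align: usize) -> usize { size } // stand-in

fn lowered_alloc(size: usize, align: usize) -> usize {
    // emit: cmp size(+header), THRESHOLD ; emit: jg ALLOC_LARGE
    if size + OBJECT_HEADER_SIZE > LARGE_OBJECT_THRESHOLD {
        alloc_large(size, align) // ALLOC_LARGE: >> large object alloc
    } else {
        alloc_small(size, align) // >> small object alloc, then jmp ALLOC_LARGE_END
    }
    // ALLOC_LARGE_END: both paths join here
}
```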
@@ -267,7 +267,6 @@ pub fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> Obje
 /// allocates an object in the immix space
 // size doesn't include HEADER_SIZE
 #[no_mangle]
-#[inline(never)]
 pub extern "C" fn muentry_alloc_fast(
     mutator: *mut ImmixMutatorLocal,
     size: usize,
@@ -332,6 +331,21 @@ pub extern "C" fn muentry_alloc_large(
     unsafe { ret.to_object_reference() }
 }
 
+#[no_mangle]
+// size doesn't include HEADER_SIZE
+pub extern "C" fn muentry_alloc_any(
+    mutator: *mut ImmixMutatorLocal,
+    size: usize,
+    align: usize
+) -> ObjectReference {
+    let actual_size = size + OBJECT_HEADER_SIZE;
+    if actual_size >= LARGE_OBJECT_THRESHOLD {
+        muentry_alloc_large(mutator, actual_size, align) // large-object space
+    } else {
+        muentry_alloc_fast(mutator, actual_size, align) // fast immix path
+    }
+}
+
 /// initializes a fix-sized object
 #[no_mangle]
 #[inline(never)]
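The decision that used to be compiled inline is now a plain branch in muentry_alloc_any: the object header size is added first, and sizes at or above the threshold go to the large-object space. A standalone check of the dispatch boundary, with assumed constant values (not Zebu's actual configuration):

```rust
// Models the size test in muentry_alloc_any; both constants are
// assumptions for illustration.
const OBJECT_HEADER_SIZE: usize = 8;
const LARGE_OBJECT_THRESHOLD: usize = 8192;

fn goes_to_large_object_space(size: usize) -> bool {
    // the header size is added before comparing, as in muentry_alloc_any
    size + OBJECT_HEADER_SIZE >= LARGE_OBJECT_THRESHOLD
}

fn main() {
    assert!(!goes_to_large_object_space(64)); // 72 < 8192: fast immix path
    assert!(goes_to_large_object_space(16384)); // 16392 >= 8192: large-object space
}
```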
@@ -96,6 +96,10 @@ lazy_static! {
         "muentry_alloc_large",
         vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()],
         vec![ADDRESS_TYPE.clone()]);
+    pub static ref ALLOC_ANY : RuntimeEntrypoint = RuntimeEntrypoint::new(
+        "muentry_alloc_any",
+        vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()],
+        vec![ADDRESS_TYPE.clone()]);
     pub static ref INIT_OBJ : RuntimeEntrypoint = RuntimeEntrypoint::new(
         "muentry_init_object",
         vec![ADDRESS_TYPE.clone(), ADDRESS_TYPE.clone(), UINT64_TYPE.clone()],
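The two type vectors give the entrypoint's argument and return types, and on this 64-bit target they line up with the Rust signature of muentry_alloc_any above. Roughly, the generated code ends up calling a C-ABI symbol shaped like this (a sketch with opaque stand-in types, not Zebu's actual declarations):

```rust
// Stand-in types so the sketch is self-contained; Zebu defines the real
// ImmixMutatorLocal and ObjectReference in its runtime crate.
#[repr(C)]
pub struct ImmixMutatorLocal {
    _opaque: [u8; 0],
}
pub type ObjectReference = *mut u8; // stand-in for the runtime's type

extern "C" {
    // ADDRESS_TYPE, UINT64_TYPE, UINT64_TYPE -> ADDRESS_TYPE
    fn muentry_alloc_any(
        mutator: *mut ImmixMutatorLocal,
        size: usize,  // UINT64_TYPE on x86-64
        align: usize, // UINT64_TYPE on x86-64
    ) -> ObjectReference;
}
```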