
Commit 86c5407d authored by Isaac Oscar Gariano

Made x86-64 use malloc as well

parent 605a8a5f
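Judging from the call sites in the hunks below, the commit replaces the GC allocation sequence for New and NewHybrid (get an allocator, emit the allocation sequence, then call the INIT_OBJ / INIT_HYBRID runtime entries) with a single FAKE_NEW runtime entry that takes only a byte size and returns the object reference; the "as well" in the title suggests another backend already took this path. A minimal sketch of the malloc-backed shape such an entry would have, assuming the libc crate; the name muentry_fake_new and the exact signature are guesses for illustration, not the project's real definitions:

```rust
// Hypothetical sketch only: a C-ABI, malloc-backed entry of the shape
// FAKE_NEW appears to have (one size argument in, one object pointer out).
// The real symbol and signature live with the other runtime entrypoints.
#[no_mangle]
pub extern "C" fn muentry_fake_new(size: usize) -> *mut u8 {
    // plain heap memory: no allocator state, no GC header to initialise
    unsafe { libc::malloc(size) as *mut u8 }
}
```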
@@ -1596,15 +1596,13 @@ impl<'a> InstructionSelection {
                     let actual_size = fix_part_size + var_ty_size * (var_len as usize);
                     make_value_int_const(actual_size as u64, vm)
                 } else {
-                    let tmp_actual_size =
-                        make_temporary(f_context, UINT64_TYPE.clone(), vm);
-                    let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
+                    let tmp_actual_size = self.emit_ireg(var_len, f_content, f_context, vm);

-                    // tmp_actual_size = tmp_var_len*var_ty_size
+                    // tmp_actual_size = var_len*var_ty_size
                     emit_mul_u64(
                         self.backend.as_mut(),
                         &tmp_actual_size,
-                        &tmp_var_len,
+                        &tmp_actual_size,
                         var_ty_size as u64
                     );
                     // tmp_actual_size = tmp_var_len*var_ty_size + fix_part_size
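Old and new versions compute the same quantity; the change simply loads var_len straight into tmp_actual_size so the separate temporary, and the bookkeeping that preserved tmp_var_len, can go. The quantity itself is the standard hybrid layout size: the fixed part plus var_len variable-part elements. A self-contained illustration (the function name is a stand-in, not the compiler's API):

```rust
/// Byte size of a hybrid object: the fixed part followed by `var_len`
/// elements of the variable part.
fn hybrid_size(fix_part_size: usize, var_ty_size: usize, var_len: usize) -> usize {
    fix_part_size + var_ty_size * var_len
}

fn main() {
    // e.g. a 16-byte fixed part and four 8-byte elements
    assert_eq!(hybrid_size(16, 8, 4), 48);
}
```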
@@ -1707,32 +1707,17 @@ impl<'a> InstructionSelection {
             Instruction_::New(ref ty) => {
                 trace!("instsel on NEW");
                 assert!(!ty.is_hybrid());

                 let tmp_res = self.get_result_value(node);
                 let ty_info = vm.get_backend_type_info(ty.id());
-                let size = ty_info.size;
-                let ty_align = ty_info.alignment;
-                let const_size = self.make_int_const(size as u64, vm);
-
-                // get allocator
-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-
-                // allocate
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    const_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
+                let const_size = self.make_int_const(ty_info.size as u64, vm);

-                // muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
                 self.emit_runtime_entry(
-                    &entrypoints::INIT_OBJ,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode],
-                    None,
+                    &entrypoints::FAKE_NEW,
+                    vec![const_size],
+                    Some(vec![tmp_res]),
                     Some(node),
                     f_content,
                     f_context,
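This hunk shows the whole shape change for New: previously the size, alignment, and an allocator fed emit_alloc_sequence, and a second runtime call (INIT_OBJ) installed the GC type encoding; now a single FAKE_NEW call receives the constant size and its return value is bound directly to tmp_res. A runnable mock of that contrast, where every name (GcAllocator, init_obj, fake_new) is a stand-in rather than the project's real API:

```rust
// Illustrative mock only: contrasts the two call shapes the hunk swaps.
struct GcAllocator;

impl GcAllocator {
    fn alloc(&mut self, size: usize, _align: usize) -> *mut u8 {
        vec![0u8; size].leak().as_mut_ptr() // stand-in for the GC's alloc path
    }
}

// old path: a second call wrote the GC header so the object could be traced
fn init_obj(_alloc: &mut GcAllocator, _obj: *mut u8, _encode: u64) {}

// new path: one size-only call, no header to initialise
fn fake_new(size: usize) -> *mut u8 {
    vec![0u8; size].leak().as_mut_ptr()
}

fn main() {
    let mut a = GcAllocator;
    let old = a.alloc(24, 8);
    init_obj(&mut a, old, 0xDEAD);
    let new = fake_new(24); // single call, result used directly
    assert!(!old.is_null() && !new.is_null());
}
```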
@@ -1742,20 +1727,12 @@ impl<'a> InstructionSelection {
             Instruction_::NewHybrid(ref ty, var_len) => {
                 trace!("instsel on NEWHYBRID");
                 assert!(ty.is_hybrid());

                 let ty_info = vm.get_backend_type_info(ty.id());
-                let ty_align = ty_info.alignment;
                 let fix_part_size = ty_info.size;
                 let var_ty_size = match ty_info.elem_size {
                     Some(sz) => sz,
                     None => {
                         panic!("expect HYBRID type here with elem_size, found {}", ty_info)
                     }
                 };

                 let tmp_res = self.get_result_value(node);

                 // compute actual size (size = fix_part_size + var_ty_size * len)
-                let (actual_size, length) = {
+                let actual_size = {
                     let ref ops = inst.ops;
                     let ref var_len = ops[var_len];
@@ -1764,26 +1741,13 @@ impl<'a> InstructionSelection {
                         // at compile time
                         let var_len = self.node_iimm_to_i32(var_len);
                         let actual_size = fix_part_size + var_ty_size * (var_len as usize);
-                        (
-                            self.make_int_const(actual_size as u64, vm),
-                            self.make_int_const(var_len as u64, vm)
-                        )
+                        self.make_int_const(actual_size as u64, vm)
                     } else {
                         // otherwise we need compute it at runtime
-                        let tmp_actual_size =
-                            self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
-                        let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
+                        let tmp_actual_size = self.emit_ireg(var_len, f_content, f_context, vm);

                         match math::is_power_of_two(var_ty_size) {
                             Some(shift) => {
                                 // if the varpart type size is power of two, we can use
                                 // shift to compute the size
-
-                                // use tmp_actual_size as result
-                                // we do not want to change tmp_var_len
-                                self.backend.emit_mov_r_r(&tmp_actual_size, &tmp_var_len);

                                 if shift != 0 {
                                     // a shift-left will get the total size of var part
                                     self.backend
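On the runtime-length path, math::is_power_of_two evidently returns the shift amount when the element size is a power of two, so the multiply can be strength-reduced to a left shift, and skipped entirely when the shift is zero (one-byte elements). A plausible, self-contained version of such a helper:

```rust
/// Sketch of a helper like math::is_power_of_two: Some(log2(n)) when n is
/// a power of two, so `len * n` can be emitted as `len << shift`.
fn is_power_of_two(n: usize) -> Option<u8> {
    if n != 0 && (n & (n - 1)) == 0 {
        Some(n.trailing_zeros() as u8)
    } else {
        None
    }
}

fn main() {
    assert_eq!(is_power_of_two(8), Some(3)); // len * 8 == len << 3
    assert_eq!(is_power_of_two(1), Some(0)); // shift of 0: nothing to emit
    assert_eq!(is_power_of_two(24), None);   // falls back to a real multiply
}
```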
@@ -1802,7 +1766,7 @@ impl<'a> InstructionSelection {
                                     .emit_mov_r_imm(&x86_64::RAX, var_ty_size as i32);

                                 // mul tmp_var_len, rax -> rdx:rax
-                                self.backend.emit_mul_r(&tmp_var_len);
+                                self.backend.emit_mul_r(&tmp_actual_size);

                                 // add with fix-part size
                                 self.backend
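For element sizes that are not powers of two, the code falls back to x86-64's one-operand mul, which always multiplies by RAX and spreads the 128-bit product across RDX:RAX; only the low half matters here because object sizes fit in 64 bits. Its semantics in plain Rust:

```rust
/// x86-64 `mul r/m64`: RDX:RAX = RAX * operand (full 128-bit product).
fn mul_rdx_rax(rax: u64, operand: u64) -> (u64, u64) {
    let wide = (rax as u128) * (operand as u128);
    ((wide >> 64) as u64, wide as u64) // (rdx = high half, rax = low half)
}

fn main() {
    let (rdx, rax) = mul_rdx_rax(3, 40);
    assert_eq!((rdx, rax), (0, 120)); // small sizes leave RDX at zero
}
```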
@@ -1813,33 +1777,20 @@ impl<'a> InstructionSelection {
                             }
                         }

-                        (tmp_actual_size, tmp_var_len)
+                        tmp_actual_size
                     }
                 };

-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    actual_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
-
-                // muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
                 self.emit_runtime_entry(
-                    &entrypoints::INIT_HYBRID,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
-                    None,
+                    &entrypoints::FAKE_NEW,
+                    vec![actual_size],
+                    Some(vec![tmp_res]),
                     Some(node),
                     f_content,
                     f_context,
                     vm
                 );
             }
             /*Instruction_::AllocA(ref ty) => {
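One behavioural detail visible in the last hunk: the removed INIT_HYBRID call passed the element count (length) alongside the GC encode, presumably so the runtime could record the hybrid's length; FAKE_NEW receives only the folded byte size, so that count no longer crosses the allocation boundary. A small illustration of what the removed call communicated, with hypothetical names throughout:

```rust
// Hypothetical stand-ins: the real header layout and entry names differ.
struct HybridHeader {
    encode: u64, // GC type encoding, used for tracing
    length: u64, // element count of the variable part
}

fn init_hybrid(header: &mut HybridHeader, encode: u64, length: u64) {
    header.encode = encode;
    header.length = length;
}

fn main() {
    let mut h = HybridHeader { encode: 0, length: 0 };
    init_hybrid(&mut h, 0xBEEF, 4);
    assert_eq!((h.encode, h.length), (0xBEEF, 4));
    // With FAKE_NEW, neither field is written at allocation time.
}
```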