Commit 08160d23 authored by Isaac Oscar Gariano

Use malloc for new (note: this doesn't work)

parent d0f1788a
@@ -1580,38 +1580,13 @@ impl<'a> InstructionSelection {
             Instruction_::New(ref ty) => {
                 trace!("instsel on NEW");
-                if cfg!(debug_assertions) {
-                    match ty.v {
-                        MuType_::Hybrid(_) => {
-                            panic!("cannot use NEW for hybrid, use NEWHYBRID instead")
-                        }
-                        _ => {}
-                    }
-                }
+                let tmp_res = self.get_result_value(node, 0);
                 let ty_info = vm.get_backend_type_info(ty.id());
-                let size = ty_info.size;
-                let ty_align = ty_info.alignment;
-                let const_size = make_value_int_const(size as u64, vm);
-                let tmp_allocator = self.emit_get_allocator(f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    const_size,
-                    ty_align,
-                    node,
-                    f_context,
-                    vm
-                );
-                // ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                let size = make_value_int_const(ty_info.size as u64, vm);
                 self.emit_runtime_entry(
-                    &entrypoints::INIT_OBJ,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode],
-                    None,
+                    &entrypoints::FAKE_NEW,
+                    vec![size],
+                    Some(vec![tmp_res]),
                     Some(node),
                     f_context,
                     vm
@@ -1620,45 +1595,28 @@ impl<'a> InstructionSelection {
             Instruction_::NewHybrid(ref ty, var_len) => {
                 trace!("instsel on NEWHYBRID");
-                if cfg!(debug_assertions) {
-                    match ty.v {
-                        MuType_::Hybrid(_) => {}
-                        _ => {
-                            panic!(
-                                "NEWHYBRID is only for allocating hybrid types, \
-                                 use NEW for others"
-                            )
-                        }
-                    }
-                }
                 let ty_info = vm.get_backend_type_info(ty.id());
-                let ty_align = ty_info.alignment;
                 let fix_part_size = ty_info.size;
                 let var_ty_size = ty_info.elem_size.unwrap();
                 // actual size = fix_part_size + var_ty_size * len
-                let (actual_size, length) = {
+                let actual_size = {
                     let ref ops = inst.ops;
                     let ref var_len = ops[var_len];
                     if match_node_int_imm(var_len) {
                         let var_len = node_imm_to_u64(var_len);
                         let actual_size = fix_part_size + var_ty_size * (var_len as usize);
-                        (
-                            make_value_int_const(actual_size as u64, vm),
-                            make_value_int_const(var_len as u64, vm)
-                        )
+                        make_value_int_const(actual_size as u64, vm)
                     } else {
-                        let tmp_actual_size =
-                            make_temporary(f_context, UINT64_TYPE.clone(), vm);
-                        let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
+                        let tmp_actual_size = self.emit_ireg(var_len, f_content, f_context, vm);
-                        // tmp_actual_size = tmp_var_len*var_ty_size
+                        // tmp_actual_size = var_len*var_ty_size
                         emit_mul_u64(
                             self.backend.as_mut(),
                             &tmp_actual_size,
-                            &tmp_var_len,
+                            &tmp_actual_size,
                             var_ty_size as u64
                         );
                         // tmp_actual_size = tmp_var_len*var_ty_size + fix_part_size
@@ -1668,27 +1626,15 @@ impl<'a> InstructionSelection {
                             &tmp_actual_size,
                             fix_part_size as u64
                         );
-                        (tmp_actual_size, tmp_var_len)
+                        tmp_actual_size
                     }
                 };
-                let tmp_allocator = self.emit_get_allocator(f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    actual_size,
-                    ty_align,
-                    node,
-                    f_context,
-                    vm
-                );
-                // ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                let tmp_res = self.get_result_value(node, 0);
                 self.emit_runtime_entry(
-                    &entrypoints::INIT_HYBRID,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
-                    None,
+                    &entrypoints::FAKE_NEW,
+                    vec![actual_size],
+                    Some(vec![tmp_res]),
                     Some(node),
                     f_context,
                     vm
@@ -116,6 +116,11 @@ lazy_static! {
         "muentry_unpin_object",
         vec![ADDRESS_TYPE.clone()],
         vec![]);
+    pub static ref FAKE_NEW : RuntimeEntrypoint = RuntimeEntrypoint::new(
+        "malloc",
+        vec![UINT64_TYPE.clone()],
+        vec![ADDRESS_TYPE.clone()]);
 }
 // decl: exception.rs
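Taken together, the change makes both NEW and NEWHYBRID bypass the GC allocation path: instead of emit_get_allocator / emit_alloc_sequence followed by INIT_OBJ or INIT_HYBRID, instruction selection now emits a single call through the new FAKE_NEW entrypoint, which binds the C symbol "malloc" with one UINT64 argument and an ADDRESS result. Below is a minimal sketch of what that call amounts to at runtime, assuming a 64-bit target where size_t matches UINT64; the fake_new wrapper name is illustrative only and is not part of the commit.

use std::os::raw::c_void;

extern "C" {
    // FAKE_NEW above binds the plain C allocator: a u64-sized argument
    // and an address-sized return value.
    fn malloc(size: u64) -> *mut c_void;
}

// Illustrative wrapper: what a lowered NEW / NEWHYBRID effectively does.
// `size` is the constant backend type size for NEW, or
// fix_part_size + var_ty_size * len for NEWHYBRID.
fn fake_new(size: u64) -> *mut c_void {
    // Unlike the removed emit_alloc_sequence + INIT_OBJ/INIT_HYBRID path,
    // this returns raw, uninitialised memory with no object header written
    // and no registration with the GC, which is one plausible reason the
    // commit message notes that it doesn't work.
    unsafe { malloc(size) }
}

fn main() {
    // e.g. NEW of a 24-byte fixed-size type:
    let obj = fake_new(24);
    assert!(!obj.is_null());
}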