Commit c5500391 authored by Isaac Oscar Gariano

Implemented malloc on x86-64

parent ecfe0a36
@@ -1582,15 +1582,40 @@ impl<'a> InstructionSelection {
                 trace!("instsel on NEW");
                 let tmp_res = self.get_result_value(node, 0);
                 let ty_info = vm.get_backend_type_info(ty.id());
+                let ty_align = ty_info.alignment;
                 let size = make_value_int_const(ty_info.size as u64, vm);
-                self.emit_runtime_entry(
-                    &entrypoints::FAKE_NEW,
-                    vec![size],
-                    Some(vec![tmp_res]),
-                    Some(node),
-                    f_context,
-                    vm
-                );
+                if vm.vm_options.flag_use_malloc {
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator = self.emit_get_allocator(f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        size,
+                        ty_align,
+                        node,
+                        f_context,
+                        vm
+                    );
+                    let encode =
+                        make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_OBJ,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+                        None,
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                }
             }
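Conceptually, the new branch chooses between a single C `malloc` call and the GC's allocate-then-initialise sequence. Here is a minimal runtime-level sketch of that choice; `BumpAllocator` and `new_object` are invented stand-ins for Zebu's `emit_alloc_sequence`/`muentry_init_object` machinery, not its real implementation:

```rust
use std::alloc::{alloc, Layout};

/// Toy bump allocator standing in for the GC fast path (sketch only).
struct BumpAllocator {
    start: *mut u8,
    cursor: usize,
    limit: usize,
}

impl BumpAllocator {
    fn new(limit: usize) -> Self {
        let start = unsafe { alloc(Layout::from_size_align(limit, 16).unwrap()) };
        BumpAllocator { start, cursor: 0, limit }
    }

    /// Align the cursor, bump it, and return the object address;
    /// a real allocator would fall back to a slow path on overflow.
    fn alloc(&mut self, size: usize, align: usize) -> *mut u8 {
        let aligned = (self.cursor + align - 1) & !(align - 1);
        assert!(aligned + size <= self.limit, "slow path elided in this sketch");
        self.cursor = aligned + size;
        unsafe { self.start.add(aligned) }
    }
}

/// The decision the instruction selector bakes in at compile time.
fn new_object(use_malloc: bool, gc: &mut BumpAllocator, size: usize, align: usize) -> *mut u8 {
    if use_malloc {
        // MALLOC path: one call into the C allocator; no GC header is
        // written, so the collector never traces or reclaims this object.
        unsafe { alloc(Layout::from_size_align(size, align).unwrap()) }
    } else {
        // GC path: allocate in the managed space, then initialise the header
        // (the job INIT_OBJ does with the `encode` constant above; elided here).
        gc.alloc(size, align)
    }
}

fn main() {
    let mut gc = BumpAllocator::new(4096);
    let a = new_object(true, &mut gc, 24, 8);  // --use-malloc path
    let b = new_object(false, &mut gc, 24, 8); // normal GC path
    println!("{:p} {:p}", a, b);
}
```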
Instruction_::NewHybrid(ref ty, var_len) => {
@@ -1598,16 +1623,20 @@ impl<'a> InstructionSelection {
                 let ty_info = vm.get_backend_type_info(ty.id());
                 let fix_part_size = ty_info.size;
                 let var_ty_size = ty_info.elem_size.unwrap();
+                let ty_align = ty_info.alignment;
                 // actual size = fix_part_size + var_ty_size * len
-                let actual_size = {
+                let (actual_size, length) = {
                     let ref ops = inst.ops;
                     let ref var_len = ops[var_len];
                     if match_node_int_imm(var_len) {
                         let var_len = node_imm_to_u64(var_len);
                         let actual_size = fix_part_size + var_ty_size * (var_len as usize);
-                        make_value_int_const(actual_size as u64, vm)
+                        (
+                            make_value_int_const(actual_size as u64, vm),
+                            make_value_int_const(var_len as u64, vm)
+                        )
                     } else {
                         let var_len = self.emit_ireg(var_len, f_content, f_context, vm);
                         emit_zext(self.backend.as_mut(), &var_len); // this will zero
@@ -1627,19 +1656,45 @@
                             &tmp_actual_size,
                             fix_part_size as u64
                         );
-                        tmp_actual_size
+                        (tmp_actual_size, var_len)
                     }
                 };
-                let tmp_res = self.get_result_value(node, 0);
-                self.emit_runtime_entry(
-                    &entrypoints::FAKE_NEW,
-                    vec![actual_size],
-                    Some(vec![tmp_res]),
-                    Some(node),
-                    f_context,
-                    vm
-                );
+                if vm.vm_options.flag_use_malloc {
+                    let tmp_res = self.get_result_value(node, 0);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![actual_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator = self.emit_get_allocator(f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        actual_size,
+                        ty_align,
+                        node,
+                        f_context,
+                        vm
+                    );
+                    // ASM: call muentry_init_hybrid(%allocator, %tmp_res, %encode, %length)
+                    let encode =
+                        make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_HYBRID,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
+                        None,
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                }
             }
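When the hybrid's length is a compile-time immediate, the branch above folds `actual_size` into a constant; otherwise the same formula is emitted as a zero-extend, a multiply, and an add. A worked example of the formula (the numbers are invented):

```rust
// actual size = fix_part_size + var_ty_size * len
fn hybrid_size(fix_part_size: usize, var_ty_size: usize, len: usize) -> usize {
    fix_part_size + var_ty_size * len
}

fn main() {
    // e.g. a hybrid with a 16-byte fixed part and 8-byte elements, length 10:
    assert_eq!(hybrid_size(16, 8, 10), 96); // folded to a constant at compile time
}
```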
Instruction_::AllocA(ref ty) => {
......
@@ -1723,30 +1723,42 @@ impl<'a> InstructionSelection {
                 let ty_align = ty_info.alignment;
                 let const_size = self.make_int_const(size as u64, vm);
-                // get allocator
-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-                // allocate
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    const_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
-                // muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
-                self.emit_runtime_entry(
-                    &entrypoints::INIT_OBJ,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode],
-                    None,
-                    Some(node),
-                    f_content,
-                    f_context,
-                    vm
-                );
+                if vm.vm_options.flag_use_malloc {
+                    // bind malloc's return value to the instruction result,
+                    // as on the x86-64 path
+                    let tmp_res = self.get_result_value(node, 0);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![const_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                } else {
+                    // get allocator
+                    let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
+                    // allocate
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        const_size,
+                        ty_align,
+                        node,
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                    // muentry_init_object(%allocator, %tmp_res, %encode)
+                    let encode =
+                        self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_OBJ,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+                        None,
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                }
             }
Instruction_::NewHybrid(ref ty, var_len) => {
@@ -1826,29 +1838,41 @@ impl<'a> InstructionSelection {
                     }
                 };
-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    actual_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
+                if vm.vm_options.flag_use_malloc {
+                    // bind malloc's return value to the instruction result,
+                    // as on the x86-64 path
+                    let tmp_res = self.get_result_value(node, 0);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![actual_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        actual_size,
+                        ty_align,
+                        node,
+                        f_content,
+                        f_context,
+                        vm
+                    );
-                // muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
-                self.emit_runtime_entry(
-                    &entrypoints::INIT_HYBRID,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
-                    None,
-                    Some(node),
-                    f_content,
-                    f_context,
-                    vm
-                );
+                    // muentry_init_hybrid(%allocator, %tmp_res, %encode, %length)
+                    let encode =
+                        self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_HYBRID,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
+                        None,
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                }
             }
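Note the extra `length` operand that both backends pass to INIT_HYBRID but not to INIT_OBJ. A hedged sketch of the reason (the header layout and field names here are invented; the real layout lives in the GC code):

```rust
// Invented layout: illustrates only that a hybrid records its variable-part
// length at initialisation, so the collector can later compute its size.
#[allow(dead_code)]
struct ObjectHeader {
    gc_type_encode: u64, // what the `encode` constant carries
}

struct HybridHeader {
    gc_type_encode: u64, // same encoding as a fixed-size object
    var_len: u64,        // what the extra `length` argument carries
}

fn main() {
    let h = HybridHeader { gc_type_encode: 0, var_len: 10 };
    // without a stored length, a hybrid's total size would be unknowable to the GC
    println!("hybrid length = {}", h.var_len);
}
```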
/*Instruction_::AllocA(ref ty) => {
......
@@ -117,7 +117,7 @@ lazy_static! {
         vec![ADDRESS_TYPE.clone()],
         vec![]);
-    pub static ref FAKE_NEW : RuntimeEntrypoint = RuntimeEntrypoint::new(
+    pub static ref MALLOC : RuntimeEntrypoint = RuntimeEntrypoint::new(
         "malloc",
         vec![UINT64_TYPE.clone()],
         vec![ADDRESS_TYPE.clone()]);
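The entrypoint is renamed from FAKE_NEW to MALLOC but still binds the C symbol `malloc`, taking one 64-bit argument and returning an address. In plain Rust FFI terms the declaration corresponds to the following sketch (on the 64-bit targets involved, `u64` matches C's `size_t`):

```rust
use std::ffi::c_void;

extern "C" {
    // one UINT64 argument, one ADDRESS result, as declared above
    fn malloc(size: u64) -> *mut c_void;
}

fn main() {
    let p = unsafe { malloc(64) };
    assert!(!p.is_null());
    // nothing in this diff ever frees such objects, hence
    // "for testing purposes only" in the option help text
}
```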
......
@@ -34,6 +34,7 @@ Compiler:
     --disable-inline                disable compiler function inlining
     --disable-regalloc-validate     disable register allocation validation
     --disable-ir-validate           disable IR validation
+    --use-malloc                    use malloc (for testing purposes only)
     --emit-debug-info               emit debugging information

 AOT Compiler:
@@ -64,6 +65,7 @@ pub struct VMOptions {
     pub flag_disable_inline: bool,
     pub flag_disable_regalloc_validate: bool,
     pub flag_disable_ir_validate: bool,
+    pub flag_use_malloc: bool,
     pub flag_emit_debug_info: bool,

     // AOT compiler
@@ -156,6 +158,11 @@ impl VMOptions {
             ret.flag_disable_regalloc_validate = true;
         }

+        if !ret.flag_use_malloc {
+            warn!("use-malloc is forced to true (opposite to user setting)");
+            ret.flag_use_malloc = true;
+        }
+
         if cfg!(target_os = "macos") {
             if !ret.flag_aot_link_static {
                 warn!("link-statically is forced to true (opposite to user setting)");
......
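The `flag_use_malloc` field and the usage line follow the same docopt conventions as the surrounding options (the `flag_` prefix is how docopt-style parsers name boolean flags). A minimal sketch, assuming the docopt crate and a shortened, invented usage string, of how such a flag reaches the struct:

```rust
use docopt::Docopt;
use serde::Deserialize;

const USAGE: &str = "
Usage: zebu [options]

Options:
    --use-malloc    use malloc (for testing purposes only)
";

#[derive(Debug, Deserialize)]
struct Args {
    flag_use_malloc: bool, // docopt maps --use-malloc onto this field
}

fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.deserialize())
        .unwrap_or_else(|e| e.exit());
    // VMOptions::init would then force this to true, as the hunk above shows
    println!("use malloc: {}", args.flag_use_malloc);
}
```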