GitLab will be upgraded to version 12.10.14-ce.0 on 28 Sept 2020, from 2.00pm (AEDT) to 2.30pm (AEDT). During the upgrade, the GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to us at N110 (b), CSIT building.

Commit 10e9b6f1 authored by Isaac Oscar Gariano

Merge branch 'malloc' into threadlocal_reg2

parents 044d9d4b 84cfb281
......@@ -1571,34 +1571,35 @@ impl<'a> InstructionSelection {
Instruction_::New(ref ty) => {
trace!("instsel on NEW");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => {
panic!("cannot use NEW for hybrid, use NEWHYBRID instead")
}
_ => {}
}
}
let tmp_res = self.get_result_value(node, 0);
let ty_info = vm.get_backend_type_info(ty.id());
let size = ty_info.size;
let ty_align = ty_info.alignment;
let size = make_value_int_const(ty_info.size as u64, vm);
let const_size = make_value_int_const(size as u64, vm);
if !vm.vm_options.flag_use_alloc {
self.emit_runtime_entry(
&entrypoints::MALLOC,
vec![size],
Some(vec![tmp_res]),
Some(node),
f_context,
vm
);
} else {
let tmp_allocator = self.emit_get_allocator(f_context, vm);
let tmp_res = self.emit_alloc_sequence(
tmp_allocator.clone(),
const_size,
size,
ty_align,
node,
f_context,
vm
);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode =
make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
let encode = make_value_int_const(
mm::get_gc_type_encode(ty_info.gc_type.id),
vm
);
self.emit_runtime_entry(
&entrypoints::INIT_OBJ,
vec![tmp_allocator.clone(), tmp_res.clone(), encode],
......@@ -1608,25 +1609,14 @@ impl<'a> InstructionSelection {
vm
);
}
}
Instruction_::NewHybrid(ref ty, var_len) => {
trace!("instsel on NEWHYBRID");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => {}
_ => {
panic!(
"NEWHYBRID is only for allocating hybrid types, \
use NEW for others"
)
}
}
}
let ty_info = vm.get_backend_type_info(ty.id());
let ty_align = ty_info.alignment;
let fix_part_size = ty_info.size;
let var_ty_size = ty_info.elem_size.unwrap();
let ty_align = ty_info.alignment;
// actual size = fix_part_size + var_ty_size * len
let (actual_size, length) = {
......@@ -1641,15 +1631,16 @@ impl<'a> InstructionSelection {
make_value_int_const(var_len as u64, vm)
)
} else {
let var_len = self.emit_ireg(var_len, f_content, f_context, vm);
emit_zext(self.backend.as_mut(), &var_len); // this will zero
let tmp_actual_size =
make_temporary(f_context, UINT64_TYPE.clone(), vm);
let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
// tmp_actual_size = tmp_var_len*var_ty_size
// tmp_actual_size = var_len*var_ty_size
emit_mul_u64(
self.backend.as_mut(),
&tmp_actual_size,
&tmp_var_len,
&cast_value(&var_len, &UINT64_TYPE),
var_ty_size as u64
);
// tmp_actual_size = tmp_var_len*var_ty_size + fix_part_size
......@@ -1659,10 +1650,22 @@ impl<'a> InstructionSelection {
&tmp_actual_size,
fix_part_size as u64
);
(tmp_actual_size, tmp_var_len)
(tmp_actual_size, var_len)
}
};
if !vm.vm_options.flag_use_alloc {
let tmp_res = self.get_result_value(node, 0);
self.emit_runtime_entry(
&entrypoints::MALLOC,
vec![actual_size],
Some(vec![tmp_res]),
Some(node),
f_context,
vm
);
} else {
let tmp_allocator = self.emit_get_allocator(f_context, vm);
let tmp_res = self.emit_alloc_sequence(
tmp_allocator.clone(),
......@@ -1674,8 +1677,10 @@ impl<'a> InstructionSelection {
);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode =
make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
let encode = make_value_int_const(
mm::get_gc_type_encode(ty_info.gc_type.id),
vm
);
self.emit_runtime_entry(
&entrypoints::INIT_HYBRID,
vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
......@@ -1684,6 +1689,8 @@ impl<'a> InstructionSelection {
f_context,
vm
);
}
}
Instruction_::AllocA(ref ty) => {
......
......@@ -1726,8 +1726,21 @@ impl<'a> InstructionSelection {
let ty_align = ty_info.alignment;
let const_size = self.make_int_const(size as u64, vm);
if !vm.vm_options.flag_use_alloc {
let tmp_res = self.get_result_value(node);
self.emit_runtime_entry(
&entrypoints::MALLOC,
vec![const_size],
Some(vec![tmp_res]),
Some(node),
f_content,
f_context,
vm
);
} else {
// get allocator
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
let tmp_allocator =
self.emit_get_allocator(node, f_content, f_context, vm);
// allocate
let tmp_res = self.emit_alloc_sequence(
tmp_allocator.clone(),
......@@ -1751,6 +1764,7 @@ impl<'a> InstructionSelection {
vm
);
}
}
Instruction_::NewHybrid(ref ty, var_len) => {
trace!("instsel on NEWHYBRID");
......@@ -1829,7 +1843,20 @@ impl<'a> InstructionSelection {
}
};
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
if !vm.vm_options.flag_use_alloc {
let tmp_res = self.get_result_value(node);
self.emit_runtime_entry(
&entrypoints::MALLOC,
vec![actual_size],
Some(vec![tmp_res]),
Some(node),
f_content,
f_context,
vm
);
} else {
let tmp_allocator =
self.emit_get_allocator(node, f_content, f_context, vm);
let tmp_res = self.emit_alloc_sequence(
tmp_allocator.clone(),
actual_size,
......@@ -1853,6 +1880,7 @@ impl<'a> InstructionSelection {
vm
);
}
}
/*Instruction_::AllocA(ref ty) => {
trace!("instsel on AllocA");
......
......@@ -123,11 +123,15 @@ fn emit_muir_dot_inner(file: &mut File, f_name: MuName, f_content: &FunctionCont
// all the instructions
for inst in block_content.body.iter() {
<<<<<<< HEAD
write!(
file,
" {}\\l",
escape_string(format!("{}", inst.as_inst()))
).unwrap();
=======
write!(file, " {}\\l", escape_string(format!("{}", inst.as_inst_ref()))).unwrap();
>>>>>>> malloc
}
// "];
......
......@@ -116,6 +116,11 @@ lazy_static! {
"muentry_unpin_object",
vec![ADDRESS_TYPE.clone()],
vec![]);
pub static ref MALLOC : RuntimeEntrypoint = RuntimeEntrypoint::new(
"alloc_mem_zero",
vec![UINT64_TYPE.clone()],
vec![ADDRESS_TYPE.clone()]);
}
// decl: exception.rs
......
......@@ -82,3 +82,7 @@ int32_t c_check_result() {
/* Allocate `size` bytes of uninitialised heap memory.
 * Thin wrapper over malloc; returns NULL on allocation failure. */
char * alloc_mem(size_t size){
    void *block = malloc(size);
    return (char *) block;
}
/* Allocate `size` bytes of zero-initialised heap memory.
 * Wrapper over calloc; returns NULL on allocation failure. */
void* alloc_mem_zero(size_t size){
    void *block = calloc(size, 1);
    return block;
}
......@@ -54,3 +54,7 @@ void* resolve_symbol(const char* sym) {
// printf("%s\n", sym);
return dlsym(RTLD_DEFAULT, sym);
}
/* Zero-initialised allocation helper: returns a pointer to `size`
 * bytes all cleared to 0, or NULL if the allocation fails. */
void* alloc_mem_zero(size_t size){
    return calloc(1, size);
}
......@@ -34,6 +34,7 @@ Compiler:
--disable-inline disable compiler function inlining
--disable-regalloc-validate disable register allocation validation
--disable-ir-validate disable IR validation
--use-alloc Use alloc (instead of the faster calloc)
--emit-debug-info emit debugging information
AOT Compiler:
......@@ -64,6 +65,7 @@ pub struct VMOptions {
pub flag_disable_inline: bool,
pub flag_disable_regalloc_validate: bool,
pub flag_disable_ir_validate: bool,
pub flag_use_alloc: bool,
pub flag_emit_debug_info: bool,
// AOT compiler
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment