Commit 10e9b6f1 authored by Isaac Oscar Gariano

Merge branch 'malloc' into threadlocal_reg2

parents 044d9d4b 84cfb281
@@ -1571,62 +1571,52 @@ impl<'a> InstructionSelection {
             Instruction_::New(ref ty) => {
                 trace!("instsel on NEW");
-                if cfg!(debug_assertions) {
-                    match ty.v {
-                        MuType_::Hybrid(_) => {
-                            panic!("cannot use NEW for hybrid, use NEWHYBRID instead")
-                        }
-                        _ => {}
-                    }
-                }
+                let tmp_res = self.get_result_value(node, 0);
                 let ty_info = vm.get_backend_type_info(ty.id());
-                let size = ty_info.size;
                 let ty_align = ty_info.alignment;
-                let const_size = make_value_int_const(size as u64, vm);
-                let tmp_allocator = self.emit_get_allocator(f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    const_size,
-                    ty_align,
-                    node,
-                    f_context,
-                    vm
-                );
-                // ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
-                self.emit_runtime_entry(
-                    &entrypoints::INIT_OBJ,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode],
-                    None,
-                    Some(node),
-                    f_context,
-                    vm
-                );
+                let size = make_value_int_const(ty_info.size as u64, vm);
+                if !vm.vm_options.flag_use_alloc {
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator = self.emit_get_allocator(f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        size,
+                        ty_align,
+                        node,
+                        f_context,
+                        vm
+                    );
+                    let encode = make_value_int_const(
+                        mm::get_gc_type_encode(ty_info.gc_type.id),
+                        vm
+                    );
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_OBJ,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+                        None,
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                }
             }
             Instruction_::NewHybrid(ref ty, var_len) => {
                 trace!("instsel on NEWHYBRID");
-                if cfg!(debug_assertions) {
-                    match ty.v {
-                        MuType_::Hybrid(_) => {}
-                        _ => {
-                            panic!(
-                                "NEWHYBRID is only for allocating hybrid types, \
-                                 use NEW for others"
-                            )
-                        }
-                    }
-                }
                 let ty_info = vm.get_backend_type_info(ty.id());
+                let ty_align = ty_info.alignment;
                 let fix_part_size = ty_info.size;
                 let var_ty_size = ty_info.elem_size.unwrap();
-                let ty_align = ty_info.alignment;
                 // actual size = fix_part_size + var_ty_size * len
                 let (actual_size, length) = {
@@ -1641,15 +1631,16 @@ impl<'a> InstructionSelection {
                             make_value_int_const(var_len as u64, vm)
                         )
                     } else {
+                        let var_len = self.emit_ireg(var_len, f_content, f_context, vm);
+                        emit_zext(self.backend.as_mut(), &var_len); // this will zero extend var_len
                         let tmp_actual_size =
                             make_temporary(f_context, UINT64_TYPE.clone(), vm);
-                        let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
-                        // tmp_actual_size = tmp_var_len*var_ty_size
+                        // tmp_actual_size = var_len*var_ty_size
                         emit_mul_u64(
                             self.backend.as_mut(),
                             &tmp_actual_size,
-                            &tmp_var_len,
+                            &cast_value(&var_len, &UINT64_TYPE),
                             var_ty_size as u64
                         );
                         // tmp_actual_size = tmp_var_len*var_ty_size + fix_part_size
@@ -1659,31 +1650,47 @@ impl<'a> InstructionSelection {
                             &tmp_actual_size,
                             fix_part_size as u64
                         );
-                        (tmp_actual_size, tmp_var_len)
+                        (tmp_actual_size, var_len)
                     }
                 };
-                let tmp_allocator = self.emit_get_allocator(f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    actual_size,
-                    ty_align,
-                    node,
-                    f_context,
-                    vm
-                );
-                // ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
-                self.emit_runtime_entry(
-                    &entrypoints::INIT_HYBRID,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
-                    None,
-                    Some(node),
-                    f_context,
-                    vm
-                );
+                if !vm.vm_options.flag_use_alloc {
+                    let tmp_res = self.get_result_value(node, 0);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![actual_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator = self.emit_get_allocator(f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        actual_size,
+                        ty_align,
+                        node,
+                        f_context,
+                        vm
+                    );
+                    // ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
+                    let encode = make_value_int_const(
+                        mm::get_gc_type_encode(ty_info.gc_type.id),
+                        vm
+                    );
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_HYBRID,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
+                        None,
+                        Some(node),
+                        f_context,
+                        vm
+                    );
+                }
             }
             Instruction_::AllocA(ref ty) => {
...
@@ -1726,30 +1726,44 @@ impl<'a> InstructionSelection {
                 let ty_align = ty_info.alignment;
                 let const_size = self.make_int_const(size as u64, vm);
-                // get allocator
-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-                // allocate
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    const_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
-                // muentry_init_object(%allocator, %tmp_res, %encode)
-                let encode =
-                    self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
-                self.emit_runtime_entry(
-                    &entrypoints::INIT_OBJ,
-                    vec![tmp_allocator.clone(), tmp_res.clone(), encode],
-                    None,
-                    Some(node),
-                    f_content,
-                    f_context,
-                    vm
-                );
+                if !vm.vm_options.flag_use_alloc {
+                    let tmp_res = self.get_result_value(node);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![const_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                } else {
+                    // get allocator
+                    let tmp_allocator =
+                        self.emit_get_allocator(node, f_content, f_context, vm);
+                    // allocate
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        const_size,
+                        ty_align,
+                        node,
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                    // muentry_init_object(%allocator, %tmp_res, %encode)
+                    let encode =
+                        self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
+                    self.emit_runtime_entry(
+                        &entrypoints::INIT_OBJ,
+                        vec![tmp_allocator.clone(), tmp_res.clone(), encode],
+                        None,
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                }
             }
             Instruction_::NewHybrid(ref ty, var_len) => {
@@ -1829,29 +1843,43 @@ impl<'a> InstructionSelection {
                     }
                 };
-                let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-                let tmp_res = self.emit_alloc_sequence(
-                    tmp_allocator.clone(),
-                    actual_size,
-                    ty_align,
-                    node,
-                    f_content,
-                    f_context,
-                    vm
-                );
+                if !vm.vm_options.flag_use_alloc {
+                    let tmp_res = self.get_result_value(node);
+                    self.emit_runtime_entry(
+                        &entrypoints::MALLOC,
+                        vec![actual_size],
+                        Some(vec![tmp_res]),
+                        Some(node),
+                        f_content,
+                        f_context,
+                        vm
+                    );
+                } else {
+                    let tmp_allocator =
+                        self.emit_get_allocator(node, f_content, f_context, vm);
+                    let tmp_res = self.emit_alloc_sequence(
+                        tmp_allocator.clone(),
+                        actual_size,
+                        ty_align,
+                        node,
+                        f_content,
+                        f_context,
+                        vm
+                    );
                 // muentry_init_object(%allocator, %tmp_res, %encode)
                 let encode =
                     self.make_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
                 self.emit_runtime_entry(
                     &entrypoints::INIT_HYBRID,
                     vec![tmp_allocator.clone(), tmp_res.clone(), encode, length],
                     None,
                     Some(node),
                     f_content,
                     f_context,
                     vm
                 );
+                }
             }
             /*Instruction_::AllocA(ref ty) => {
...
@@ -123,11 +123,15 @@ fn emit_muir_dot_inner(file: &mut File, f_name: MuName, f_content: &FunctionCont
         // all the instructions
         for inst in block_content.body.iter() {
+<<<<<<< HEAD
             write!(
                 file,
                 " {}\\l",
                 escape_string(format!("{}", inst.as_inst()))
            ).unwrap();
+=======
+            write!(file, " {}\\l", escape_string(format!("{}", inst.as_inst_ref()))).unwrap();
+>>>>>>> malloc
         }
         // "];
...
@@ -116,6 +116,11 @@ lazy_static! {
         "muentry_unpin_object",
         vec![ADDRESS_TYPE.clone()],
         vec![]);
+
+    pub static ref MALLOC : RuntimeEntrypoint = RuntimeEntrypoint::new(
+        "alloc_mem_zero",
+        vec![UINT64_TYPE.clone()],
+        vec![ADDRESS_TYPE.clone()]);
 }
 // decl: exception.rs
...
@@ -82,3 +82,7 @@ int32_t c_check_result() {
 char * alloc_mem(size_t size){
     return (char *) malloc(size);
 }
+
+void* alloc_mem_zero(size_t size){
+    return calloc(size, 1);
+}
@@ -54,3 +54,7 @@ void* resolve_symbol(const char* sym) {
     // printf("%s\n", sym);
     return dlsym(RTLD_DEFAULT, sym);
 }
+
+void* alloc_mem_zero(size_t size){
+    return calloc(size, 1);
+}
@@ -185,4 +185,4 @@ macro_rules! error_if {
             error!($($arg)*)
         }
     }
-}
\ No newline at end of file
+}
@@ -29,4 +29,4 @@ pub mod built_info;
 pub mod api;

 /// handle type for client. This handle type is opaque to the client
-pub mod handle;
\ No newline at end of file
+pub mod handle;
@@ -34,6 +34,7 @@ Compiler:
     --disable-inline                disable compiler function inlining
     --disable-regalloc-validate     disable register allocation validation
     --disable-ir-validate           disable IR validation
+    --use-alloc                     Use alloc (instead of the faster calloc)
     --emit-debug-info               emit debugging information

 AOT Compiler:
@@ -64,6 +65,7 @@ pub struct VMOptions {
     pub flag_disable_inline: bool,
     pub flag_disable_regalloc_validate: bool,
     pub flag_disable_ir_validate: bool,
+    pub flag_use_alloc: bool,
     pub flag_emit_debug_info: bool,

     // AOT compiler
...