Commit a684c62a authored by Isaac Oscar Gariano

Updated type layout and ALLOCA implementation

parent 7f88d839
@@ -41,6 +41,7 @@ libloading = "*"
 lazy_static = "*"
 log = "*"
 stderrlog = "*"
+num = "*"
 hprof = "*"
 memmap = "*"
 memsec = "0.1.9"
...
@@ -2165,6 +2165,13 @@ impl CodeGenerator for ASMCodeGen {
         let asm = format!("B {}", mangle_name(dest_name.clone()));
         self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Unconditional(dest_name), None);
     }
+    fn emit_b_func(&mut self, dest_name: MuName)
+    {
+        trace!("emit: \tB {}", dest_name);
+        let asm = format!("B {}", mangle_name(dest_name.clone()));
+        self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Return, None);
+    }
     fn emit_b_cond(&mut self, cond: &str, dest_name: MuName)
     {
         trace!("emit: \tB.{} {}", cond, dest_name);
@@ -2181,6 +2188,14 @@ impl CodeGenerator for ASMCodeGen {
         let asm = format!("BR {}", reg1);
         self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{id1 => vec![loc1]}, false, ASMBranchTarget::UnconditionalReg(id1), None);
     }
+    fn emit_br_func(&mut self, func_address: Reg)
+    {
+        trace!("emit: \tBR {}", func_address);
+        let (reg1, id1, loc1) = self.prepare_reg(func_address, 2 + 1);
+        let asm = format!("BR {}", reg1);
+        self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Return, None);
+    }
     fn emit_cbnz(&mut self, src: Reg, dest_name: MuName) { self.internal_branch_op("CBNZ", src, dest_name); }
     fn emit_cbz(&mut self, src: Reg, dest_name: MuName) { self.internal_branch_op("CBZ", src, dest_name); }
     fn emit_tbnz(&mut self, src1: Reg, src2: u8, dest_name: MuName) { self.internal_branch_op_imm("TBNZ", src1, src2, dest_name); }
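The two new emitters mirror emit_b and emit_br, but record the branch as ASMBranchTarget::Return rather than as an intra-function jump, presumably so later control-flow and liveness analysis treats a branch that leaves the current function (for example a tail position) like a return. A hedged sketch of how a lowering pass might use them; this call site is illustrative and not part of the commit:

    // Illustrative only: a direct branch to a known function uses emit_b_func,
    // an indirect one through a register uses emit_br_func.
    fn lower_tail_branch(backend: &mut dyn CodeGenerator, direct: Option<MuName>, indirect: Reg) {
        match direct {
            Some(name) => backend.emit_b_func(name), // B <mangled name>
            None => backend.emit_br_func(indirect),  // BR <reg>
        }
    }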
...
@@ -110,8 +110,11 @@ pub trait CodeGenerator {
     // Branches
     fn emit_b(&mut self, dest_name: MuName);
+    fn emit_b_func(&mut self, func: MuName);
     fn emit_b_cond(&mut self, cond: &str, dest_name: MuName);
     fn emit_br(&mut self, dest_address: Reg);
+    fn emit_br_func(&mut self, func_address: Reg);
     fn emit_ret(&mut self, src: Reg);
     fn emit_cbnz(&mut self, src: Reg, dest_name: MuName);
     fn emit_cbz(&mut self, src: Reg, dest_name: MuName);
...
@@ -44,6 +44,7 @@ use std::collections::HashMap;
 use std::collections::LinkedList;
 use std::mem;
 use std::any::Any;
+use num::integer::lcm;
 const INLINE_FASTPATH : bool = false;
@@ -1171,16 +1172,7 @@ impl <'a> InstructionSelection {
         let ty_info = vm.get_backend_type_info(ty.id());
         let ty_align = ty_info.alignment;
         let fix_part_size = ty_info.size;
-        let var_ty_size = match ty.v {
-            MuType_::Hybrid(ref name) => {
-                let map_lock = HYBRID_TAG_MAP.read().unwrap();
-                let hybrid_ty_ = map_lock.get(name).unwrap();
-                let var_ty = hybrid_ty_.get_var_ty();
-                vm.get_backend_type_size((var_ty.id()))
-            },
-            _ => panic!("only expect HYBRID type here")
-        };
+        let var_ty_size = ty_info.elem_size.unwrap();
         // actual size = fix_part_size + var_ty_size * len
         let (actual_size, length) = {
@@ -1256,7 +1248,6 @@ impl <'a> InstructionSelection {
             },
             _ => panic!("only expect HYBRID type here")
         };
         let res = self.get_result_value(node, 0);
         let ref ops = inst.ops;
@@ -1265,15 +1256,19 @@ impl <'a> InstructionSelection {
             let var_len = node_imm_to_u64(var_len) as usize;
             self.emit_alloca_const(&res, var_ty_size*var_len + fix_part_size, ty_align, f_context, vm, node);
         } else {
-            assert!(ty_align % 16 == 0);
+            let align = lcm(ty_align, 16) as u64; // This is always going to be 16
+            assert!(align.is_power_of_two());
             let var_len = self.emit_ireg(var_len, f_content, f_context, vm);
             // set res to the total size of the object (i.e. var_ty_size*var_len + fix_part_size)
             emit_madd_u64_u64(self.backend.as_mut(), &res, &var_len, f_context, vm, var_ty_size as u64, fix_part_size as u64);
             // Grow the stack by 'res' bytes
-            self.backend.emit_sub(&SP, &SP, &res);
-            // Align the stack pointer down to the nearest multiple of 16
-            self.backend.emit_and_imm(&SP, &SP, !(16 - 1));
+            // Note: the SP can't be used as the source of the emit_and, so we have to make a temporary
+            let tmp_sp = make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
+            self.backend.emit_sub(&tmp_sp, &SP, &res);
+            // Align the stack pointer down to the nearest multiple of align (which should be a power of two)
+            self.backend.emit_and_imm(&SP, &tmp_sp, !(align - 1));
             // Zero out 'res' bytes starting at the stack pointer
             self.emit_runtime_entry(&entrypoints::MEM_ZERO, vec![SP.clone(), res.clone()], None, Some(node), f_context, vm);
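For the variable-length case, the emitted code computes the total size with a MADD, subtracts it from SP into a temporary, and then rounds down to the required alignment with an AND. A minimal sketch of that arithmetic in plain Rust; the helper names are illustrative, not Zebu's:

    // Round an address down to a power-of-two alignment, as the emitted
    // SUB + AND-immediate sequence does for the stack pointer.
    fn align_down(addr: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        addr & !(align - 1)
    }

    // New SP after an ALLOCA of a hybrid with a run-time length:
    // total = fix_part_size + var_ty_size * var_len (the MADD),
    // then subtract and align down (the SUB and AND).
    fn sp_after_alloca(sp: u64, fix_part_size: u64, var_ty_size: u64, var_len: u64, align: u64) -> u64 {
        align_down(sp - (fix_part_size + var_ty_size * var_len), align)
    }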
@@ -2751,28 +2746,33 @@ impl <'a> InstructionSelection {
     }
     fn emit_alloca_const(&mut self, res: &P<Value>, size: usize, align: usize, f_context: &mut FunctionContext, vm: &VM, node: &TreeNode) {
-        assert!(16 % align == 0);
+        let align = lcm(align, 16); // This is always going to be 16
         // The stack pointer has to be 16 bytes aligned
-        let size = round_up(size, 16) as u64;
+        let alloc_size = round_up(size, align) as u64;
         if size <= 64 {
             // Note: this is the same threshold clang -O3 uses to decide whether to call memset
-            // Allocate 'size' bytes on the stack
-            emit_sub_u64(self.backend.as_mut(), &SP, &SP, size);
+            // Allocate 'alloc_size' bytes on the stack
+            emit_sub_u64(self.backend.as_mut(), &SP, &SP, alloc_size);
             // Just push pairs of the zero register to the stack
+            // TODO: Optimise for the case where the region to zero-initialise is not a multiple of 16 bytes
             for i in 0..size/16 {
                 // Push pairs of 0's on the stack
                 let dest = make_value_base_offset(&SP, (16*i) as i64, &UINT128_TYPE, vm);
                 let dest = emit_mem(self.backend.as_mut(), &dest, get_type_alignment(&UINT128_TYPE, vm), f_context, vm);
                 self.backend.emit_stp(&dest, &XZR, &XZR);
             }
+            let leftover = size % 16;
+            if leftover != 0 { // Zero the remaining bytes with a single store
+                let offset = 16*(size/16);
+                let src = cast_value(&XZR, &get_alignment_type(leftover.next_power_of_two()));
+                emit_store_base_offset(self.backend.as_mut(), &SP, offset as i64, &src, f_context, vm);
+            }
             self.backend.emit_mov(&res, &SP);
         } else {
-            // Allocate 'size' bytes on the stack
-            emit_sub_u64(self.backend.as_mut(), &res, &SP, size);
+            // Allocate 'alloc_size' bytes on the stack
+            emit_sub_u64(self.backend.as_mut(), &res, &SP, alloc_size);
             self.emit_runtime_entry(&entrypoints::MEM_ZERO, vec![res.clone(), make_value_int_const(size as u64, vm)], None, Some(node), f_context, vm);
             self.backend.emit_mov(&SP, &res);
         };
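In the small constant-size case, zeroing is done inline: one STP of XZR, XZR per full 16-byte chunk, plus one narrower store for any remainder (round_up presumably guarantees room for a store that is wider than the remainder, since the allocation is padded to 16 bytes). A rough sketch of that breakdown with an illustrative helper, not Zebu code:

    // Illustrative: how a constant-size stack allocation of `size` bytes is
    // zeroed when size <= 64: full 16-byte STP XZR, XZR stores first, then one
    // power-of-two-sized store covering the remainder.
    fn zeroing_plan(size: u64) -> (u64, u64) {
        let stp_pairs = size / 16;
        let leftover = size % 16;
        let remainder_store_bytes = if leftover == 0 { 0 } else { leftover.next_power_of_two() };
        (stp_pairs, remainder_store_bytes)
    }

For example, zeroing_plan(40) gives (2, 8): two 16-byte pairs and one 8-byte store.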
@@ -4210,8 +4210,8 @@ impl <'a> InstructionSelection {
     }
     // GETELEMIREF <T1 T2> opnd index = opnd + index*element_size(T1)
     Instruction_::GetElementIRef{base, index, ..} => {
-        let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap().get_elem_ty().unwrap();
-        let element_size = vm.get_backend_type_size(element_type.id());
+        let array_type = ops[base].clone_value().ty.get_referent_ty().unwrap();
+        let element_size = vm.get_backend_type_info(array_type.id()).elem_size.unwrap();
         self.emit_shift_ref(&ops[base], &ops[index], element_size, f_content, f_context, vm)
     }
...
@@ -1467,9 +1467,9 @@ impl <'a> InstructionSelection {
     let ty_info = vm.get_backend_type_info(ty.id());
     let ty_align = ty_info.alignment;
     let fix_part_size = ty_info.size;
-    let var_ty_size = match ty_info.elem_padded_size {
+    let var_ty_size = match ty_info.elem_size {
         Some(sz) => sz,
-        None => panic!("expect HYBRID type here with elem_padded_size, found {}", ty_info)
+        None => panic!("expect HYBRID type here with elem_size, found {}", ty_info)
     };
     // compute actual size (size = fix_part_size + var_ty_size * len)
@@ -4580,9 +4580,9 @@ impl <'a> InstructionSelection {
         Some(ty) => ty,
         None => panic!("expected base in GetElemIRef to be type IRef, found {}", iref_array_ty)
     };
-    let ele_ty_size = match vm.get_backend_type_info(array_ty.id()).elem_padded_size {
+    let ele_ty_size = match vm.get_backend_type_info(array_ty.id()).elem_size {
         Some(sz) => sz,
-        None => panic!("array backend type should have a elem_padded_size, found {}", array_ty)
+        None => panic!("array backend type should have an elem_size, found {}", array_ty)
     };
     if self.match_iimm(index) {
...
@@ -26,6 +26,7 @@ use utils::ByteSize;
 use utils::math::align_up;
 use runtime::mm;
 use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
+use num::integer::lcm;
 /// for ahead-of-time compilation (boot image making), the file contains a persisted VM, a persisted
 /// heap, constants. This allows the VM to resume execution with the same status as before persisting.
@@ -204,16 +205,14 @@ pub struct BackendType {
     pub alignment: ByteSize,
     /// struct layout of the type, None if this is not a struct/hybrid type
     pub struct_layout: Option<Vec<ByteSize>>,
-    /// element padded size for hybrid/array type
-    /// for hybrid/array, every element needs to be properly aligned
-    /// thus it may take more space than it actually needs
-    pub elem_padded_size: Option<ByteSize>,
+    /// element size for hybrid/array type
+    pub elem_size: Option<ByteSize>,
     /// GC type, containing information for GC (this is a temporary design)
     /// See Issue#12
     pub gc_type: P<GCType>
 }
-rodal_struct!(BackendType{size, alignment, struct_layout, elem_padded_size, gc_type});
+rodal_struct!(BackendType{size, alignment, struct_layout, elem_size, gc_type});
 impl BackendType {
     /// gets field offset of a struct/hybrid type. Panics if this is not struct/hybrid type
@@ -234,23 +233,23 @@ impl BackendType {
         MuType_::Int(size_in_bit) => {
             match size_in_bit {
                 1 ... 8 => BackendType{
-                    size: 1, alignment: 1, struct_layout: None, elem_padded_size: None,
+                    size: 1, alignment: 1, struct_layout: None, elem_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                 },
                 9 ... 16 => BackendType{
-                    size: 2, alignment: 2, struct_layout: None, elem_padded_size: None,
+                    size: 2, alignment: 2, struct_layout: None, elem_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
                 },
                 17 ... 32 => BackendType{
-                    size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
+                    size: 4, alignment: 4, struct_layout: None, elem_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
                 },
                 33 ... 64 => BackendType{
-                    size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
+                    size: 8, alignment: 8, struct_layout: None, elem_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
                 },
                 128 => BackendType {
-                    size: 16, alignment: 16, struct_layout: None, elem_padded_size: None,
+                    size: 16, alignment: 16, struct_layout: None, elem_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(16, 16))
                 },
                 _ => unimplemented!()
@@ -260,7 +259,7 @@ impl BackendType {
         MuType_::Ref(_)
         | MuType_::IRef(_)
         | MuType_::WeakRef(_) => BackendType{
-            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
+            size: 8, alignment: 8, struct_layout: None, elem_size: None,
             gc_type: mm::add_gc_type(GCType::new_reftype())
         },
         // pointer/opque ref
@@ -269,36 +268,46 @@ impl BackendType {
         | MuType_::FuncRef(_)
         | MuType_::ThreadRef
         | MuType_::StackRef => BackendType{
-            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
+            size: 8, alignment: 8, struct_layout: None, elem_size: None,
            gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
         },
         // tagref
         MuType_::Tagref64 => BackendType {
-            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
+            size: 8, alignment: 8, struct_layout: None, elem_size: None,
             gc_type: mm::add_gc_type(GCType::new_reftype())
         },
         // floating point
         MuType_::Float => BackendType{
-            size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
+            size: 4, alignment: 4, struct_layout: None, elem_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
         },
         MuType_::Double => BackendType {
-            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
+            size: 8, alignment: 8, struct_layout: None, elem_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
         },
         // array
         MuType_::Array(ref ty, len) => {
             let ele_ty = vm.get_backend_type_info(ty.id());
-            let ele_padded_size = align_up(ele_ty.size, ele_ty.alignment);
+            let elem_size = ele_ty.size;
+            let mut size = ele_ty.size*len;
+            let mut align = ele_ty.alignment;
+            if cfg!(target_arch = "x86_64") && size >= 16 {
+                // According to the AMD64 SysV ABI Version 0.99.8, a
+                // 'local or global array variable of at least 16 bytes ... always has alignment of at least 16 bytes'.
+                // An array may be allocated in various ways, and it is unclear which of those count as a local
+                // or global variable, so to be safe we assume this rule applies to all array allocations.
+                align = lcm(align, 16);
+                size = align_up(size, align);
+            }
             BackendType{
-                size         : ele_padded_size * len,
-                alignment    : ele_ty.alignment,
+                size         : size,
+                alignment    : align,
                 struct_layout: None,
-                elem_padded_size : Some(ele_padded_size),
-                gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
-                    ele_padded_size * len,
-                    ele_ty.alignment,
+                elem_size    : Some(elem_size),
+                gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID, size, align,
                     Some(RefPattern::Repeat{
                         pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
                         count  : len
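With element padding gone, an array's backend layout is now derived from the raw element size, and on x86-64 the whole array is over-aligned and padded to 16 bytes once it reaches 16 bytes, per the SysV ABI rule quoted above. A self-contained sketch of that computation; the helper names are illustrative, not Zebu's:

    use num::integer::lcm;

    fn align_up(x: usize, align: usize) -> usize {
        ((x + align - 1) / align) * align
    }

    // Illustrative: size and alignment of an array backend type,
    // mirroring the MuType_::Array arm above.
    fn array_layout(elem_size: usize, elem_align: usize, len: usize) -> (usize, usize) {
        let mut size = elem_size * len;
        let mut align = elem_align;
        if cfg!(target_arch = "x86_64") && size >= 16 {
            align = lcm(align, 16);       // at least 16-byte alignment for >= 16-byte arrays
            size = align_up(size, align); // pad the total size to that alignment
        }
        (size, align)
    }

For example, array_layout(8, 8, 3) on x86-64 yields (32, 16): 24 bytes of elements padded up to the 16-byte alignment.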
@@ -331,27 +340,32 @@ impl BackendType {
             // treat var_ty as array (getting its alignment)
             let var_ele_ty = vm.get_backend_type_info(var_ty.id());
-            let var_align = var_ele_ty.alignment;
-            let var_padded_size = align_up(var_ele_ty.size, var_ele_ty.alignment);
-            ret.elem_padded_size = Some(var_padded_size);
+            let var_size = var_ele_ty.size;
+            ret.elem_size = Some(var_size);
+            let var_align = if cfg!(target_arch = "x86_64") {
-            // fix type info as hybrid
-            // 1. check alignment
-            if ret.alignment < var_align {
-                ret.alignment = var_align;
-            }
-            // 2. fix gc type
+                // According to the AMD64 SysV ABI Version 0.99.8,
+                // a 'C99 variable-length array variable always has alignment of at least 16 bytes'.
+                // Whether the var part of a hybrid counts as a variable-length array is unknown,
+                // so to be safe, we assume this rule always applies to the hybrid's var part.
+                lcm(var_ele_ty.alignment, 16)
+            } else {
+                var_ele_ty.alignment
+            };
+            ret.alignment = lcm(ret.alignment, var_align);
+            ret.size = align_up(ret.size, ret.alignment);
             let mut gctype = ret.gc_type.as_ref().clone();
             gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
-            gctype.var_size = Some(var_padded_size);
+            gctype.var_size = Some(var_size);
             ret.gc_type = mm::add_gc_type(gctype);
             ret
         }
         // void
         MuType_::Void => BackendType{
-            size: 0, alignment: 8, struct_layout: None, elem_padded_size: None,
-            gc_type: mm::add_gc_type(GCType::new_noreftype(0, 8))
+            size: 0, alignment: 1, struct_layout: None, elem_size: None,
+            gc_type: mm::add_gc_type(GCType::new_noreftype(0, 1))
         },
         // vector
         MuType_::Vector(_, _) => unimplemented!()
@@ -375,15 +389,8 @@ impl BackendType {
             trace!("examining field: {}, {:?}", ty, ty_info);
             let align = ty_info.alignment;
-            if struct_align < align {
-                struct_align = align;
-            }
-            if cur % align != 0 {
-                // move cursor to next aligned offset
-                cur = (cur / align + 1) * align;
-            }
+            struct_align = lcm(struct_align, align);
+            cur = align_up(cur, align);
             offsets.push(cur);
             trace!("aligned to {}", cur);
@@ -405,17 +412,13 @@ impl BackendType {
         }
         // if we need padding at the end
-        let size = if cur % struct_align != 0 {
-            (cur / struct_align + 1) * struct_align
-        } else {
-            cur
-        };
+        let size = align_up(cur, struct_align);
         BackendType {
             size         : size,
             alignment    : struct_align,
             struct_layout: Some(offsets),
-            elem_padded_size: None,
+            elem_size: None,
             gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
                 size,
                 struct_align,
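The field-offset loop above now expresses struct layout entirely in terms of lcm (for the overall alignment) and align_up (for each field's offset and the final padding). A self-contained sketch of the same computation, with the field list given as hypothetical (size, alignment) pairs:

    use num::integer::lcm;

    fn align_up(x: usize, align: usize) -> usize {
        ((x + align - 1) / align) * align
    }

    // Illustrative: field offsets, total size and alignment of a struct,
    // mirroring the loop and end-padding above.
    fn struct_layout(fields: &[(usize, usize)]) -> (Vec<usize>, usize, usize) {
        let mut offsets = Vec::new();
        let mut cur = 0;
        let mut struct_align = 1;
        for &(field_size, field_align) in fields {
            struct_align = lcm(struct_align, field_align);
            cur = align_up(cur, field_align); // move to the next aligned offset
            offsets.push(cur);
            cur += field_size;
        }
        (offsets, align_up(cur, struct_align), struct_align)
    }

For instance, struct_layout(&[(8, 8), (8, 8), (8, 8)]), roughly the <int<64> double ref<void>> struct used in the new test below, gives offsets [0, 8, 16] and a 24-byte size with 8-byte alignment.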
...
@@ -28,6 +28,7 @@ extern crate maplit;
 #[macro_use]
 extern crate field_offset;
 extern crate extprim;
+extern crate num;
 #[macro_use]
 pub extern crate ast;
...
@@ -16,7 +16,19 @@ from util import execute, compile_bundle, load_bundle, get_function;
 import pytest;
 import ctypes;
-# Tests that zebu can handle wierd, but valid mu names
+def test_alloca_simple():
+    compile_bundle(
+        """
+        .funcdef test_alloca_simple <main_sig>
+        {
+            entry(<int<32>>argc <uptr<uptr<char>>>argv):
+                a = ALLOCA <struct<int<64> double ref<void>>>
+                RET <int<32>>0
+        }
+        """, "test_alloca_simple");
+    assert(execute("test_alloca_simple") == 0);
+
 def test_alloca():
     lib = load_bundle(
         """
@@ -54,4 +66,95 @@ def test_alloca():
     """, "test_alloca");