Commit b0968ef6 authored by qinsoon

array/hybrid elements should be properly aligned; added elem_padded_size to their backend type info

parent f12b89da
Pipeline #386 passed in 69 minutes and 22 seconds
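In short: an array or hybrid element whose size is not a multiple of its alignment is now padded up to the next multiple, and that padded size is cached as `elem_padded_size` on the backend type info so both allocation and element addressing use the same stride. A minimal standalone sketch of the arithmetic (not the project's actual API):

```rust
// Standalone sketch: pad an element's size up to a multiple of its alignment
// so that consecutive array/hybrid elements stay properly aligned.
fn align_up(x: usize, align: usize) -> usize {
    if x % align == 0 { x } else { (x / align + 1) * align }
}

fn main() {
    // e.g. a struct {int64, int8}: size 9 bytes, alignment 8 bytes
    let (size, align) = (9, 8);
    let elem_padded_size = align_up(size, align);
    assert_eq!(elem_padded_size, 16);
    // an array of 3 such elements then occupies 3 * 16 = 48 bytes
    assert_eq!(elem_padded_size * 3, 48);
}
```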
......@@ -933,8 +933,9 @@ impl <'a> InstructionSelection {
Instruction_::GetIRef(_)
| Instruction_::GetFieldIRef{..}
| Instruction_::GetVarPartIRef{..}
| Instruction_::ShiftIRef{..} => {
trace!("instsel on GET/FIELD/VARPARTIREF, SHIFTIREF");
| Instruction_::ShiftIRef{..}
| Instruction_::GetElementIRef{..} => {
trace!("instsel on GET/FIELD/VARPART/SHIFT/ELEM IREF");
let mem_addr = self.emit_get_mem_from_inst(node, f_content, f_context, vm);
let tmp_res = self.get_result_value(node);
......@@ -1059,15 +1060,9 @@ impl <'a> InstructionSelection {
let ty_info = vm.get_backend_type_info(ty.id());
let ty_align = ty_info.alignment;
let fix_part_size = ty_info.size;
let var_ty_size = match ty.v {
MuType_::Hybrid(ref name) => {
let map_lock = HYBRID_TAG_MAP.read().unwrap();
let hybrid_ty_ = map_lock.get(name).unwrap();
let var_ty = hybrid_ty_.get_var_ty();
vm.get_backend_type_info(var_ty.id()).size
},
_ => panic!("only expect HYBRID type here")
let var_ty_size = match ty_info.elem_padded_size {
Some(sz) => sz,
None => panic!("expect HYBRID type here with elem_padded_size, found {}", ty_info)
};
// actual size = fix_part_size + var_ty_size * len
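To illustrate the size formula in the comment above with assumed numbers (an 8-byte fixed part and a `{int64, int8}` variable element padded from 9 to 16 bytes):

```rust
// Hypothetical numbers, purely to illustrate the allocation-size formula above.
fn hybrid_alloc_size(fix_part_size: usize, elem_padded_size: usize, len: usize) -> usize {
    fix_part_size + elem_padded_size * len
}

fn main() {
    // 8-byte fixed part; variable element {int64, int8} padded from 9 to 16 bytes
    assert_eq!(hybrid_alloc_size(8, 16, 3), 56);
}
```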
......@@ -3327,7 +3322,8 @@ impl <'a> InstructionSelection {
Some(ty) => ty,
None => panic!("expected op in ShiftIRef of type IRef, found type: {}", base_ty)
};
let ele_ty_size = vm.get_backend_type_info(ele_ty.id()).size;
let ele_backend_ty = vm.get_backend_type_info(ele_ty.id());
let ele_ty_size = math::align_up(ele_backend_ty.size, ele_backend_ty.alignment);
if self.match_iimm(offset) {
let index = self.node_iimm_to_i32(offset);
......@@ -3419,11 +3415,10 @@ impl <'a> InstructionSelection {
Some(ty) => ty,
None => panic!("expected base in GetElemIRef to be type IRef, found {}", iref_array_ty)
};
let ele_ty = match array_ty.get_elem_ty() {
Some(ty) => ty,
None => panic!("expected base in GetElemIRef to be type Array, found {}", array_ty)
let ele_ty_size = match vm.get_backend_type_info(array_ty.id()).elem_padded_size {
Some(sz) => sz,
None => panic!("array backend type should have a elem_padded_size, found {}", array_ty)
};
let ele_ty_size = vm.get_backend_type_info(ele_ty.id()).size;
if self.match_iimm(index) {
let index = self.node_iimm_to_i32(index);
......@@ -3463,16 +3458,30 @@ impl <'a> InstructionSelection {
}
} else {
let tmp_index = self.emit_ireg(index, f_content, f_context, vm);
// make a copy of the index
// (because we may need to alter it, and we don't want to change the original value)
let tmp_index_copy = self.make_temporary(f_context, tmp_index.ty.clone(), vm);
self.emit_move_value_to_value(&tmp_index_copy, &tmp_index);
let scale : u8 = match ele_ty_size {
8 | 4 | 2 | 1 => ele_ty_size as u8,
_ => unimplemented!()
16 | 32 | 64 => {
let shift = math::is_power_of_two(ele_ty_size).unwrap();
// tmp_index_copy = tmp_index_copy << shift
self.backend.emit_shl_r_imm8(&tmp_index_copy, shift as i8);
1
}
_ => panic!("unexpected var ty size: {}", ele_ty_size)
};
match base.v {
// GETELEMIREF(IREF, ireg) -> add index and scale
TreeNode_::Instruction(Instruction{v: Instruction_::GetIRef(_), ..}) => {
let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ret = self.addr_append_index_scale(mem, tmp_index, scale, vm);
let ret = self.addr_append_index_scale(mem, tmp_index_copy, scale, vm);
trace!("MEM from GETELEMIREF(GETIREF, ireg): {}", ret);
ret
......@@ -3480,7 +3489,7 @@ impl <'a> InstructionSelection {
// GETELEMIREF(GETFIELDIREF, ireg) -> add index and scale
TreeNode_::Instruction(Instruction{v: Instruction_::GetFieldIRef{..}, ..}) => {
let mem = self.emit_get_mem_from_inst_inner(base, f_content, f_context, vm);
let ret = self.addr_append_index_scale(mem, tmp_index, scale, vm);
let ret = self.addr_append_index_scale(mem, tmp_index_copy, scale, vm);
trace!("MEM from GETELEMIREF(GETFIELDIREF, ireg): {}", ret);
ret
......@@ -3492,7 +3501,7 @@ impl <'a> InstructionSelection {
let ret = MemoryLocation::Address {
base: tmp,
offset: None,
index: Some(tmp_index),
index: Some(tmp_index_copy),
scale: Some(scale)
};
......
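The scale handling above is needed because x86-64 scaled addressing only supports scales of 1, 2, 4 and 8; for larger power-of-two element sizes the index copy is pre-shifted and the address is formed with scale 1. A hedged sketch of that decision (helper names here are illustrative, not the compiler's API):

```rust
// Illustrative sketch of the scale selection: x86-64 addressing supports
// scales 1, 2, 4 and 8 only, so larger power-of-two element sizes shift the
// index register first and then address with scale 1.
fn pick_scale(elem_size: usize) -> (u8, Option<u8>) {
    match elem_size {
        1 | 2 | 4 | 8 => (elem_size as u8, None),
        16 | 32 | 64 => {
            let shift = elem_size.trailing_zeros() as u8; // log2 of a power of two
            (1, Some(shift)) // emit `shl index, shift`, then use scale 1
        }
        _ => panic!("unexpected element size: {}", elem_size),
    }
}

fn main() {
    assert_eq!(pick_scale(8), (8, None));
    assert_eq!(pick_scale(16), (1, Some(4))); // index << 4, then scale 1
}
```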
......@@ -5,6 +5,7 @@ pub mod code_emission;
use ast::types;
use utils::ByteSize;
use utils::math::align_up;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
......@@ -73,23 +74,23 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
MuType_::Int(size_in_bit) => {
match size_in_bit {
1 => BackendTypeInfo{
size: 1, alignment: 1, struct_layout: None,
size: 1, alignment: 1, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
},
8 => BackendTypeInfo{
size: 1, alignment: 1, struct_layout: None,
size: 1, alignment: 1, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
},
16 => BackendTypeInfo{
size: 2, alignment: 2, struct_layout: None,
size: 2, alignment: 2, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
},
32 => BackendTypeInfo{
size: 4, alignment: 4, struct_layout: None,
size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
},
64 => BackendTypeInfo{
size: 8, alignment: 8, struct_layout: None,
size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
},
_ => unimplemented!()
......@@ -99,7 +100,7 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
MuType_::Ref(_)
| MuType_::IRef(_)
| MuType_::WeakRef(_) => BackendTypeInfo{
size: 8, alignment: 8, struct_layout: None,
size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_reftype())
},
// pointer
......@@ -108,30 +109,32 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
| MuType_::FuncRef(_)
| MuType_::ThreadRef
| MuType_::StackRef => BackendTypeInfo{
size: 8, alignment: 8, struct_layout: None,
size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
},
// tagref
MuType_::Tagref64 => unimplemented!(),
// floating point
MuType_::Float => BackendTypeInfo{
size: 4, alignment: 4, struct_layout: None,
size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
},
MuType_::Double => BackendTypeInfo {
size: 8, alignment: 8, struct_layout: None,
size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
},
// array
MuType_::Array(ref ty, len) => {
let ele_ty = vm.get_backend_type_info(ty.id());
let ele_padded_size = align_up(ele_ty.size, ele_ty.alignment);
BackendTypeInfo{
size : ele_ty.size * len,
size : ele_padded_size * len,
alignment : ele_ty.alignment,
struct_layout: None,
elem_padded_size : Some(ele_padded_size),
gc_type : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
ele_ty.size * len,
ele_padded_size * len,
ele_ty.alignment,
Some(RefPattern::Repeat{
pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
......@@ -166,6 +169,8 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
// treat var_ty as array (getting its alignment)
let var_ele_ty = vm.get_backend_type_info(var_ty.id());
let var_align = var_ele_ty.alignment;
let var_padded_size = align_up(var_ele_ty.size, var_ele_ty.alignment);
ret.elem_padded_size = Some(var_padded_size);
// fix type info as hybrid
// 1. check alignment
......@@ -175,14 +180,14 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
// 2. fix gc type
let mut gctype = ret.gc_type.as_ref().clone();
gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
gctype.var_size = Some(var_ele_ty.size);
gctype.var_size = Some(var_padded_size);
ret.gc_type = mm::add_gc_type(gctype);
ret
}
// void
MuType_::Void => BackendTypeInfo{
size: 0, alignment: 8, struct_layout: None,
size: 0, alignment: 8, struct_layout: None, elem_padded_size: None,
gc_type: mm::add_gc_type(GCType::new_noreftype(0, 8))
},
// vector
......@@ -245,6 +250,7 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
size : size,
alignment : struct_align,
struct_layout: Some(offsets),
elem_padded_size: None,
gc_type : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
size,
struct_align,
......@@ -269,7 +275,11 @@ pub fn sequetial_layout(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, ByteSize, V
pub struct BackendTypeInfo {
pub size: ByteSize,
pub alignment: ByteSize,
pub struct_layout: Option<Vec<ByteSize>>,
// for hybrid/array types, every element needs to be properly aligned,
// so an element may take more space (padding) than its raw size
pub elem_padded_size: Option<ByteSize>,
pub gc_type: P<GCType>
}
......
......@@ -13,4 +13,12 @@ pub fn is_power_of_two(x: usize) -> Option<u8> {
} else {
None
}
}
pub fn align_up(x: usize, align: usize) -> usize {
if x % align == 0 {
x
} else {
(x / align + 1) * align
}
}
\ No newline at end of file
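A quick sanity check of `align_up` as added above (a standalone copy for illustration); for power-of-two alignments the same result can also be computed with a mask:

```rust
// Standalone copy of align_up for a quick check; the mask form only works
// when `align` is a power of two.
fn align_up(x: usize, align: usize) -> usize {
    if x % align == 0 { x } else { (x / align + 1) * align }
}

fn main() {
    assert_eq!(align_up(9, 8), 16);
    assert_eq!(align_up(16, 8), 16);
    assert_eq!(align_up(0, 8), 0);
    // power-of-two mask equivalent
    let mask_align = |x: usize, a: usize| (x + a - 1) & !(a - 1);
    assert_eq!(mask_align(9, 8), align_up(9, 8));
}
```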
......@@ -1489,6 +1489,66 @@ fn shift_iref_ele_8bytes() -> VM {
vm
}
#[test]
fn test_shift_iref_ele_9bytes() {
let lib = testutil::compile_fnc("shift_iref_ele_9bytes", &shift_iref_ele_9bytes);
unsafe {
let shift_iref_ele_9bytes : libloading::Symbol<unsafe extern fn(u64, u64) -> u64> = lib.get(b"shift_iref_ele_9bytes").unwrap();
let res = shift_iref_ele_9bytes(0, 0);
println!("shift_iref_ele_9bytes(0, 0) = {}", res);
assert_eq!(res, 0);
let res = shift_iref_ele_9bytes(0, 1);
println!("shift_iref_ele_9bytes(0, 1) = {}", res);
assert_eq!(res, 16);
let res = shift_iref_ele_9bytes(0, 2);
println!("shift_iref_ele_9bytes(0, 2) = {}", res);
assert_eq!(res, 32);
}
}
fn shift_iref_ele_9bytes() -> VM {
let vm = VM::new();
typedef! ((vm) int8 = mu_int(8));
typedef! ((vm) int64 = mu_int(64));
typedef! ((vm) elem = mu_struct(int64, int8));
typedef! ((vm) iref_elem = mu_iref(elem));
funcsig! ((vm) sig = (iref_elem, int64) -> (iref_elem));
funcdecl! ((vm) <sig> shift_iref_ele_9bytes);
funcdef! ((vm) <sig> shift_iref_ele_9bytes VERSION shift_iref_ele_9bytes_v1);
// blk entry
block! ((vm, shift_iref_ele_9bytes_v1) blk_entry);
ssa! ((vm, shift_iref_ele_9bytes_v1) <iref_elem> base);
ssa! ((vm, shift_iref_ele_9bytes_v1) <int64> index);
ssa! ((vm, shift_iref_ele_9bytes_v1) <iref_elem> res);
inst! ((vm, shift_iref_ele_9bytes_v1) blk_entry_shiftiref:
res = SHIFTIREF base index (is_ptr: false)
);
inst! ((vm, shift_iref_ele_9bytes_v1) blk_entry_ret:
RET (res)
);
define_block! ((vm, shift_iref_ele_9bytes_v1) blk_entry(base, index) {
blk_entry_shiftiref, blk_entry_ret
});
define_func_ver!((vm) shift_iref_ele_9bytes_v1 (entry: blk_entry) {
blk_entry
});
vm
}
#[test]
fn test_shift_iref_ele_16bytes() {
let lib = testutil::compile_fnc("shift_iref_ele_16bytes", &shift_iref_ele_16bytes);
......@@ -1549,7 +1609,6 @@ fn shift_iref_ele_16bytes() -> VM {
}
#[test]
#[ignore]
fn test_get_elem_iref_array_ele_9bytes() {
let lib = testutil::compile_fnc("get_elem_iref_array_ele_9bytes", &get_elem_iref_array_ele_9bytes);
......