Commit b844bfc3 authored by Isaac Oscar Gariano

Fixed some aarch64 bugs

parent 1f230479
......@@ -1050,6 +1050,13 @@ impl Value {
}
pub fn is_func_const(&self) -> bool {
match self.v {
Value_::Constant(Constant::FuncRef(_)) => true,
_ => false
}
}
pub fn is_int_const(&self) -> bool {
match self.v {
Value_::Constant(Constant::Int(_)) => true,
......
......@@ -50,7 +50,8 @@ struct ASMCode {
entry: MuName,
blocks: LinkedHashMap<MuName, ASMBlock>,
frame_size_patchpoints: Vec<ASMLocation>
frame_size_lower_patchpoints: Vec<ASMLocation>,
frame_size_upper_patchpoints: Vec<ASMLocation>
}
unsafe impl Send for ASMCode {}
......@@ -126,7 +127,8 @@ impl ASMCode {
entry: self.entry.clone(),
code: vec![],
blocks: linked_hashmap!{},
frame_size_patchpoints: vec![]
frame_size_lower_patchpoints: vec![],
frame_size_upper_patchpoints: vec![]
};
// iterate through old machine code
......@@ -217,7 +219,7 @@ impl ASMCode {
}
// fix patchpoint
for patchpoint in self.frame_size_patchpoints.iter() {
for patchpoint in self.frame_size_lower_patchpoints.iter() {
let new_patchpoint = ASMLocation {
line: *location_map.get(&patchpoint.line).unwrap(),
index: patchpoint.index,
......@@ -225,9 +227,20 @@ impl ASMCode {
oplen: patchpoint.oplen
};
ret.frame_size_patchpoints.push(new_patchpoint);
ret.frame_size_lower_patchpoints.push(new_patchpoint);
}
// fix patchpoint
for patchpoint in self.frame_size_upper_patchpoints.iter() {
let new_patchpoint = ASMLocation {
line: *location_map.get(&patchpoint.line).unwrap(),
index: patchpoint.index,
len: patchpoint.len,
oplen: patchpoint.oplen
};
ret.frame_size_upper_patchpoints.push(new_patchpoint);
}
ret.control_flow_analysis();
Box::new(ret)
......@@ -511,9 +524,13 @@ impl ASMCode {
}
}
fn add_frame_size_patchpoint(&mut self, patchpoint: ASMLocation) {
self.frame_size_patchpoints.push(patchpoint);
fn add_frame_size_lower_patchpoint(&mut self, patchpoint: ASMLocation) {
self.frame_size_lower_patchpoints.push(patchpoint);
}
fn add_frame_size_upper_patchpoint(&mut self, patchpoint: ASMLocation) {
self.frame_size_upper_patchpoints.push(patchpoint);
}
}
use std::any::Any;
......@@ -707,9 +724,10 @@ impl MachineCode for ASMCode {
}
fn set_inst_nop(&mut self, index: usize) {
self.code[index].code.clear();
// self.code.remove(index);
// self.code.insert(index, ASMInst::nop());
let ref mut inst = self.code[index];
inst.code.clear();
inst.defines.clear();
inst.uses.clear();
}
fn remove_unnecessary_callee_saved(&mut self, used_callee_saved: Vec<MuID>) -> HashSet<MuID> {
......@@ -758,15 +776,31 @@ impl MachineCode for ASMCode {
fn patch_frame_size(&mut self, size: usize) {
debug_assert!(size % 16 == 0);
let size = size as u64;
let lower_size = (size & bits_ones(12)).to_string();
let upper_size = (size >> 12).to_string();
assert!(size <= bits_ones(24)); // Maximum frame size (16 MiB - 1B)
let size = size.to_string();
debug_assert!(size.len() <= FRAME_SIZE_PLACEHOLDER_LEN);
for loc in self.frame_size_patchpoints.iter() {
for loc in self.frame_size_lower_patchpoints.iter() {
let ref mut inst = self.code[loc.line];
string_utils::replace(&mut inst.code, loc.index, &size, size.len());
if size & bits_ones(12) == 0 {
//self.set_inst_nop(loc.line);
inst.code.clear();
inst.defines.clear();
inst.uses.clear();
} else {
string_utils::replace(&mut inst.code, loc.index, &lower_size, lower_size.len());
}
}
for loc in self.frame_size_upper_patchpoints.iter() {
let ref mut inst = self.code[loc.line];
if size >> 12 == 0 {
inst.code.clear();
inst.defines.clear();
inst.uses.clear();
} else {
string_utils::replace(&mut inst.code, loc.index, &upper_size, upper_size.len());
}
}
}
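For context, AArch64's SUB (immediate) instruction only encodes a 12-bit immediate, optionally shifted left by 12 bits, which is why the frame size is now patched in two parts. A minimal standalone sketch of the arithmetic follows; the split_frame_size helper is illustrative and not part of the codebase, while the 16-byte alignment and the bits_ones(24) limit are taken from the diff above.

    // Illustrative sketch: splitting a frame size into the two immediates that are
    // patched into 'SUB SP,SP,#lower' and 'SUB SP,SP,#upper,LSL #12'.
    fn split_frame_size(size: u64) -> (u64, u64) {
        assert!(size % 16 == 0); // SP must stay 16-byte aligned
        assert!(size <= (1 << 24) - 1); // bits_ones(24): maximum patchable frame size
        (size & 0xFFF, size >> 12) // (lower 12 bits, upper 12 bits)
    }

    fn main() {
        let (lower, upper) = split_frame_size(0x12340);
        assert_eq!((lower, upper), (0x340, 0x12));
        // The two SUBs together subtract the full frame size from SP.
        assert_eq!((upper << 12) + lower, 0x12340);
        // If either part is zero, patch_frame_size blanks the corresponding
        // instruction (clearing its code/defines/uses) instead of emitting SUB #0.
    }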
......@@ -1007,13 +1041,9 @@ lazy_static! {
};
}
const FRAME_SIZE_PLACEHOLDER_LEN: usize = 10; // a frame is smaller than 1 << 10
lazy_static! {
pub static ref FRAME_SIZE_PLACEHOLDER : String = {
let blank_spaces = [' ' as u8; FRAME_SIZE_PLACEHOLDER_LEN];
format!("{}", str::from_utf8(&blank_spaces).unwrap())
};
}
// Maximum frame size is (4095 << 12) + 4095 bytes (16 MiB - 1), patched in two parts:
// 'SUB SP, SP, #lower' followed by 'SUB SP, SP, #upper, LSL #12'
const FRAME_SIZE_PART_PLACEHOLDER_LEN: usize = 4; // maximum 'frame' size part is 4095
const FRAME_SIZE_PART_PLACEHOLDER: &str = "    "; // maximum 'frame' size part is 4095
impl ASMCodeGen {
pub fn new() -> ASMCodeGen {
......@@ -1185,7 +1215,8 @@ impl ASMCodeGen {
let n = offset.ty.get_int_length().unwrap();
let shift_type = if n == 64 {
if signed {
"SXTX"
"LSL"
//"SXTX"
} else {
"LSL"
}
......@@ -1515,10 +1546,10 @@ impl ASMCodeGen {
)
}
// dest <= inst(src1, src2)
fn internal_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>) {
let inst = inst.to_string();
trace_emit!("\t{} {}, {} -> {}", inst, src1, src2, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1);
......@@ -2183,7 +2214,8 @@ impl CodeGenerator for ASMCodeGen {
entry: entry,
code: vec![],
blocks: linked_hashmap!{},
frame_size_patchpoints: vec![]
frame_size_lower_patchpoints: vec![],
frame_size_upper_patchpoints: vec![]
}));
// to link with C sources via gcc
......@@ -2233,7 +2265,8 @@ impl CodeGenerator for ASMCodeGen {
entry: Arc::new("none".to_string()),
code: vec![],
blocks: linked_hashmap!{},
frame_size_patchpoints: vec![]
frame_size_lower_patchpoints: vec![],
frame_size_upper_patchpoints: vec![]
}));
}
......@@ -2330,19 +2363,30 @@ impl CodeGenerator for ASMCodeGen {
}
fn emit_frame_grow(&mut self) {
trace_emit!("\tSUB SP, SP, #FRAME_SIZE_PLACEHOLDER");
let asm = format!("SUB SP,SP,#{}", FRAME_SIZE_PLACEHOLDER.clone());
trace_emit!("\tFRAME GROW");
let asm = format!("SUB SP,SP,#{}", FRAME_SIZE_PART_PLACEHOLDER.clone());
let line = self.line();
self.cur_mut()
.add_frame_size_lower_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PART_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{}, // let reg alloc ignore this instruction
linked_hashmap!{},
false
);
let asm = format!("SUB SP,SP,#{},LSL #12", FRAME_SIZE_PART_PLACEHOLDER.clone());
let line = self.line();
self.cur_mut()
.add_frame_size_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PLACEHOLDER_LEN, 0));
.add_frame_size_upper_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PART_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{}, // let reg alloc ignore this instruction
linked_hashmap!{},
false
)
);
}
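As a rough picture of what emit_frame_grow leaves behind: both placeholder instructions reserve a four-character immediate field starting at string index 11, which is where the recorded ASMLocation patchpoints later write the decimal value of each part. The sketch below assumes FRAME_SIZE_PART_PLACEHOLDER is four spaces (consistent with FRAME_SIZE_PART_PLACEHOLDER_LEN = 4 above) and is illustrative only.

    // Illustrative only: the placeholder strings and the patch column they imply.
    fn main() {
        let placeholder = "    "; // FRAME_SIZE_PART_PLACEHOLDER (four spaces)
        let lower = format!("SUB SP,SP,#{}", placeholder);
        let upper = format!("SUB SP,SP,#{},LSL #12", placeholder);
        // "SUB SP,SP,#" is 11 characters long, so the immediate text begins at index 11,
        // the offset passed to ASMLocation::new(line, 11, FRAME_SIZE_PART_PLACEHOLDER_LEN, 0).
        assert_eq!("SUB SP,SP,#".len(), 11);
        // patch_frame_size later writes the decimal text of each part into that field,
        // e.g. 832 (0x340) and 18 (0x12) for a 0x12340-byte frame.
        println!("{}\n{}", lower, upper);
    }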
// Pushes a pair of registers onto the given stack (uses the STP instruction)
......
......@@ -1060,11 +1060,11 @@ pub fn is_valid_arithmetic_imm(val: u64) -> bool {
// (the resulting value will be valid iff 'val' is valid, and the lower 'n' bits will equal val)
pub fn replicate_logical_imm(val: u64, n: usize) -> u64 {
let op_size = if n <= 32 { 32 } else { 64 };
let mut val = val;
let mut new_val = val;
for i in 1..op_size / n {
val |= val << i * n;
new_val |= val << i * n;
}
val
new_val
}
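The rename above matters because the loop previously OR-ed shifted copies of the partially built result rather than the original pattern, smearing bits past the operand size. A self-contained sketch of the fixed behaviour, as a free function mirroring the diff's logic:

    // Replicate an n-bit pattern across the 32- or 64-bit operand size,
    // always shifting the *original* value, as the fixed code does.
    fn replicate_logical_imm(val: u64, n: usize) -> u64 {
        let op_size = if n <= 32 { 32 } else { 64 };
        let mut new_val = val;
        for i in 1..op_size / n {
            new_val |= val << (i * n);
        }
        new_val
    }

    fn main() {
        // 0xAB replicated as an 8-bit pattern fills exactly the 32-bit operand size:
        assert_eq!(replicate_logical_imm(0xAB, 8), 0xABAB_ABAB);
        // The pre-fix version shifted the partially built result instead and
        // produced 0x00AB_ABAB_ABAB_ABAB here, setting bits above bit 31.
    }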
......@@ -1389,6 +1389,15 @@ pub fn match_value_int_imm(op: &P<Value>) -> bool {
_ => false
}
}
pub fn match_value_zero(op: &P<Value>) -> bool {
match op.v {
Value_::Constant(Constant::Int(x)) => x == 0,
Value_::Constant(Constant::NullRef) => true,
_ => false
}
}
pub fn match_value_ref_imm(op: &P<Value>) -> bool {
match op.v {
Value_::Constant(Constant::NullRef) => true,
......@@ -1416,6 +1425,13 @@ pub fn match_node_int_imm(op: &TreeNode) -> bool {
}
}
pub fn match_node_zero(op: &TreeNode) -> bool {
match op.v {
TreeNode_::Value(ref pv) => match_value_zero(pv),
_ => false
}
}
// The only valid ref immediate is a null ref
pub fn match_node_ref_imm(op: &TreeNode) -> bool {
match op.v {
......@@ -1576,6 +1592,21 @@ and arg is the name of the argument that can't be the zero register (do so for e
let arg = replace_zero_register(backend, &arg, f_context, vm);
*/
pub fn replace_unexpected_zero_register(
backend: &mut CodeGenerator,
val: &P<Value>,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
if is_zero_register(&val) {
let temp = make_temporary(f_context, val.ty.clone(), vm);
backend.emit_mov_imm(&temp, 0);
temp
} else {
val.clone()
}
}
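Background for the helper above (stated here as general AArch64 knowledge rather than something in the diff): in the arithmetic-immediate encodings used by CMP and CMN, which are aliases of SUBS/ADDS with XZR as destination, register number 31 in the source field means SP rather than XZR, so a zero-register operand cannot be passed through directly and must first be materialised as 0 in a scratch register. A toy model of the decision, without the compiler's Value/FunctionContext machinery:

    // Toy model only: mirrors the shape of replace_unexpected_zero_register.
    #[derive(Clone, Debug, PartialEq)]
    enum Operand {
        Reg(&'static str),
        ZeroReg,
    }

    // If the operand is the zero register, pretend to allocate a scratch register
    // holding 0 (the real code uses make_temporary + emit_mov_imm); otherwise
    // pass the operand through unchanged.
    fn replace_unexpected_zero_register(op: &Operand) -> Operand {
        match op {
            Operand::ZeroReg => Operand::Reg("scratch"),
            other => other.clone(),
        }
    }

    fn main() {
        assert_eq!(replace_unexpected_zero_register(&Operand::ZeroReg), Operand::Reg("scratch"));
        assert_eq!(replace_unexpected_zero_register(&Operand::Reg("x1")), Operand::Reg("x1"));
    }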
pub fn replace_zero_register(
backend: &mut CodeGenerator,
val: &P<Value>,
......@@ -1896,6 +1927,7 @@ fn emit_cmp_u64(
} else if is_valid_arithmetic_imm(val) {
let imm_shift = val > 4096;
let imm_val = if imm_shift { val >> 12 } else { val };
let src1 = replace_unexpected_zero_register(backend, src1, f_context, vm);
backend.emit_cmp_imm(&src1, imm_val as u16, imm_shift);
} else {
let tmp = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -1919,6 +1951,7 @@ fn emit_cmn_u64(
} else if is_valid_arithmetic_imm(val) {
let imm_shift = val > 4096;
let imm_val = if imm_shift { val >> 12 } else { val };
let src1 = replace_unexpected_zero_register(backend, src1, f_context, vm);
backend.emit_cmn_imm(&src1, imm_val as u16, imm_shift);
} else {
let tmp = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -1986,61 +2019,12 @@ fn emit_reg_value(
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
match pv.v {
Value_::SSAVar(_) => pv.clone(),
Value_::Constant(ref c) => {
match c {
&Constant::Int(val) => {
/*if val == 0 {
// TODO emit the zero register (NOTE: it can't be used by all instructions)
// Use the zero register (saves having to use a temporary)
get_alias_for_length(XZR.id(), get_bit_size(&pv.ty, vm))
} else {*/
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
debug!("tmp's ty: {}", tmp.ty);
emit_mov_u64(backend, &tmp, val);
tmp
//}
}
&Constant::IntEx(ref val) => {
assert!(val.len() == 2);
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let (tmp_l, tmp_h) = split_int128(&tmp, f_context, vm);
emit_mov_u64(backend, &tmp_l, val[0]);
emit_mov_u64(backend, &tmp_h, val[1]);
tmp
}
&Constant::FuncRef(func_id) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem =
make_value_symbolic(vm.get_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, vm);
tmp
}
&Constant::NullRef => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
backend.emit_mov_imm(&tmp, 0);
tmp
//get_alias_for_length(XZR.id(), get_bit_size(&pv.ty, vm))
}
&Constant::Double(val) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
emit_mov_f64(backend, &tmp, f_context, vm, val);
tmp
}
&Constant::Float(val) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
emit_mov_f32(backend, &tmp, f_context, vm, val);
tmp
}
_ => panic!("expected fpreg or ireg")
}
}
_ => panic!("expected fpreg or ireg")
if is_int_reg(&pv) || is_int_ex_reg(&pv) {
emit_ireg_value(backend, pv, f_context, vm)
} else if is_fp_reg(&pv) {
emit_fpreg_value(backend, pv, f_context, vm)
} else {
unreachable!();
}
}
......@@ -2056,10 +2040,8 @@ pub fn emit_ireg_value(
Value_::Constant(ref c) => {
match c {
&Constant::Int(val) => {
// TODO Deal with zero case
// TODO: Deal with zero case
/*if val == 0 {
// TODO: Are there any (integer) instructions that can't use the Zero reg?
// Use the zero register (saves having to use a temporary)
get_alias_for_length(XZR.id(), get_bit_size(&pv.ty, vm))
} else {*/
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
......@@ -2762,6 +2744,13 @@ fn emit_move_value_to_value(
if src.is_int_const() {
let imm = value_imm_to_u64(src);
emit_mov_u64(backend, dest, imm);
} else if src.is_func_const() {
let func_id = match src.v {
Value_::Constant(Constant::FuncRef(id)) => id,
_ => unreachable!()
};
let mem = make_value_symbolic(vm.get_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &dest, &mem, vm);
} else if is_int_reg(&src) {
backend.emit_mov(dest, src);
} else if src.is_mem() {
......
......@@ -1283,6 +1283,7 @@ struct BundleLoader<'lb, 'lvm> {
built_ref_of: IdPMap<MuType>,
built_iref_of: IdPMap<MuType>,
built_uptr_of: IdPMap<MuType>,
built_strong_variant: IdPMap<MuType>,
built_constint_of: HashMap<u64, P<Value>>,
current_sig: Option<P<MuFuncSig>>,
......@@ -1324,6 +1325,7 @@ fn load_bundle(b: &mut MuIRBuilder) {
built_ref_of: Default::default(),
built_iref_of: Default::default(),
built_uptr_of: Default::default(),
built_strong_variant: Default::default(),
built_constint_of: Default::default(),
current_sig: Default::default(),
current_entry: Default::default(),
......@@ -1389,6 +1391,27 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
impl_refvoid
}
fn ensure_strong_variant(&mut self, ty: &P<MuType>) -> P<MuType> {
if let Some(ref sty) = self.built_strong_variant.get(&ty.id()) {
return (*sty).clone();
}
let sty = match &ty.v {
&MuType_::WeakRef(ref t) => {
let id = self.vm.next_id();
let sty = P(MuType::new(id, MuType_::muref(t.clone())));
self.built_types.insert(id, sty.clone());
sty
},
_ => ty.clone()
};
trace!("Ensure strong variant is defined: {} {:?}", sty.id(), sty);
self.built_strong_variant.insert(ty.id(), sty.clone());
sty
}
fn ensure_refi64(&mut self) -> P<MuType> {
if let Some(ref refi64) = self.built_refi64 {
return refi64.clone();
......@@ -2725,9 +2748,10 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
impl_to_ty.is_double())
}
ConvOp::REFCAST => {
(impl_from_ty.is_ref() && impl_to_ty.is_ref()) ||
(impl_from_ty.is_iref() && impl_to_ty.is_iref()) ||
(impl_from_ty.is_funcref() && impl_to_ty.is_funcref())
(impl_from_ty.is_ref() || impl_from_ty.is_iref() ||
impl_from_ty.is_funcref()) &&
(impl_to_ty.is_ref() || impl_to_ty.is_iref() ||
impl_to_ty.is_funcref())
}
ConvOp::PTRCAST => {
(impl_from_ty.is_ptr() || impl_from_ty.is_int()) &&
......@@ -3296,8 +3320,8 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
let impl_ord = self.build_mem_ord(ord);
let impl_loc = self.get_treenode(fcb, loc);
let impl_rvtype = self.get_built_type(refty);
let impl_rv = self.new_ssa(fcb, result_id, self.vm.make_strong_type(impl_rvtype))
.clone_value();
let impl_actual_rvtype = self.ensure_strong_variant(&impl_rvtype);
let impl_rv = self.new_ssa(fcb, result_id, impl_actual_rvtype).clone_value();
let impl_refty = self.get_built_type(refty);
assert_ir!(impl_ord != MemoryOrder::Release && impl_ord != MemoryOrder::AcqRel);
......
......@@ -77,8 +77,6 @@ pub struct VM {
name_id_map: RwLock<HashMap<MuName, MuID>>, // +64
/// types declared to the VM
types: RwLock<HashMap<MuID, P<MuType>>>, // +120
/// Ref types declared by 'make_strong_type', the key is the ID of the Referant
ref_types: RwLock<HashMap<MuID, P<MuType>>>,
/// types that are resolved as BackendType
backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>,
/// constants declared to the VM
......@@ -137,7 +135,6 @@ unsafe impl rodal::Dump for VM {
dumper.dump_object(&self.id_name_map);
dumper.dump_object(&self.name_id_map);
dumper.dump_object(&self.types);
dumper.dump_object(&self.ref_types);
dumper.dump_object(&self.backend_type_info);
dumper.dump_object(&self.constants);
dumper.dump_object(&self.globals);
......@@ -232,7 +229,6 @@ impl<'a> VM {
name_id_map: RwLock::new(HashMap::new()),
constants: RwLock::new(HashMap::new()),
types: RwLock::new(HashMap::new()),
ref_types: RwLock::new(HashMap::new()),
backend_type_info: RwLock::new(HashMap::new()),
globals: RwLock::new(HashMap::new()),
global_locations: RwLock::new(hashmap!{}),
......@@ -815,27 +811,6 @@ impl<'a> VM {
}
}
pub fn make_strong_type(&self, ty: P<MuType>) -> P<MuType> {
match &ty.v {
&MuType_::WeakRef(ref t) => {
let res = self.ref_types
.read()
.unwrap()
.get(&t.id())
.map(|x| x.clone());
match res {
Some(ty) => ty,
None => {
let ty = P(MuType::new(self.next_id(), MuType_::muref(t.clone())));
self.ref_types.write().unwrap().insert(t.id(), ty.clone());
ty
}
}
}
_ => ty.clone()
}
}
/// adds a new bundle into VM.
/// This function will drain the contents of all arguments. Ideally, this function should
/// happen atomically. e.g. The client should not see a new type added without also seeing
......
......@@ -178,3 +178,38 @@ def test_double_inline():
        RET (a b)
    }
    """, "test_double_inline");

def test_funcref():
    lib = load_bundle(
        """
        .funcdef triv<()->()>
        {
            entry():
                RET
        }

        .funcdef ret_funcref <()->(funcref<()->()>)>
        {
            entry():
                RET triv
        }
        """, "test_funcref");

def test_load():
    lib = load_bundle(
        """
        .funcdef load <(iref<weakref<void>>)->(ref<void>)>
        {
            entry(<iref<weakref<void>>>a):
                r = LOAD<weakref<void>> a
                RET r
        }
        """, "test_load");

def test_xor():
    lib = load_bundle(
        """
        .funcdef xor<(int<8>)->(int<8>)>
        {
            entry(<int<8>>v):
                r = XOR<int<8>> v <int<8>>1
                RET r
        }
        """, "test_xor");