Commit 9c65b72a authored by Isaac Oscar Gariano

Inlined thread_local access, fixed a bug in 128-bit integer passing, and prevented future bugs with emit_mem.

parent cf845b8c
......@@ -1112,9 +1112,9 @@ impl ASMCodeGen {
result_str.push_str(",");
let n = offset.ty.get_int_length().unwrap();
let shift_type =
// Note: LSL (which is an unsigned extension from 64 bits to 64 bits) is equivalent to SXTX (which is a signed extension from 64 bits to 64 bits, but is encoded differently)
// Unfortunately there is a bug in valgrind that treats the SXTX version as an illegal instruction, so instead we emit LSL for the 64-bit signed case (instead of SXTX)
if n == 64 { if signed { "LSL" } else { "LSL" } } else if n == 32 { if signed { "SXTW" } else { "UXTW" } } else { panic!("Unexpected size for offset register") };
if n == 64 { if signed { "SXTX" } else { "LSL" } }
else if n == 32 { if signed { "SXTW" } else { "UXTW" } }
else { panic!("Unexpected size for offset register") };
result_str.push_str(&shift_type);
result_str.push_str(" #");
......@@ -1337,6 +1337,23 @@ impl ASMCodeGen {
)
}
fn internal_binop_str(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &str) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} -> {}", inst, src1, src2, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(src1, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{},#{}", inst, reg1, reg2, src2);
self.add_asm_inst(
asm,
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
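For reference, a minimal sketch of the text this helper ends up formatting, assuming an ADD with a relocation string as the immediate (register names here are illustrative; the real code resolves them via prepare_reg and records the instruction via add_asm_inst):
fn main() {
    // Mirrors the format!("{} {},{},#{}", ...) call in internal_binop_str above
    let (inst, reg1, reg2, src2) = ("ADD", "X9", "X9", ":tprel_hi12:mu_tls");
    println!("{} {},{},#{}", inst, reg1, reg2, src2); // prints: ADD X9,X9,#:tprel_hi12:mu_tls
}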
// dest <= inst(src1, src2)
fn internal_unop_shift(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
......@@ -1692,7 +1709,6 @@ impl ASMCodeGen {
}
};
trace!("emit: \t{} {} -> {}", inst, src, dest);
let (reg, id, loc) = self.prepare_reg(dest, inst.len() + 1);
......@@ -2125,6 +2141,8 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str) {self.internal_binop_str("ADD", dest, src1, src2)}
// Pushes a pair of registers on the given stack (uses the STP instruction)
fn emit_push_pair(&mut self, src1: &P<Value>, src2: &P<Value>, stack: &P<Value>) {
trace!("emit: \tpush_pair {},{} -> {}[-8,-16]", src1, src2, stack);
......@@ -2986,7 +3004,7 @@ pub fn spill_rewrite(
codegen.start_code_sequence();
let spill_mem = emit_mem(&mut codegen, &spill_mem, &mut func.context, vm);
let spill_mem = emit_mem(&mut codegen, &spill_mem, get_type_alignment(&temp.ty, vm), &mut func.context, vm);
codegen.emit_ldr_spill(&temp, &spill_mem);
codegen.finish_code_sequence_asm()
......@@ -3033,7 +3051,7 @@ pub fn spill_rewrite(
let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence();
let spill_mem = emit_mem(&mut codegen, &spill_mem, &mut func.context, vm);
let spill_mem = emit_mem(&mut codegen, &spill_mem, get_type_alignment(&temp.ty, vm), &mut func.context, vm);
codegen.emit_str_spill(&spill_mem, &temp);
codegen.finish_code_sequence_asm()
......
......@@ -37,6 +37,10 @@ pub trait CodeGenerator {
fn emit_frame_grow(&mut self); // Emits a SUB
fn emit_frame_shrink(&mut self); // Emits an ADD
// Used to pass a string that the assembler will interpret as an immediate argument
// (This is necessary to support the use of ELF relocations like ':tprel_hi12:foo')
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str);
// stack manipulation
fn emit_push_pair(&mut self, src1: Reg, src2: Reg, stack: Reg); // Emits a STP
fn emit_pop_pair(&mut self, dest1: Reg, dest2: Reg, stack: Reg); // Emits a LDP
......
......@@ -633,7 +633,7 @@ impl <'a> InstructionSelection {
_ => self.backend.emit_ldar(&res, &temp_loc)
};
} else {
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, f_context, vm);
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, get_type_alignment(&res.ty, vm), f_context, vm);
self.backend.emit_ldr(&res, &temp_loc, false);
}
} else if self.match_ireg_ex(node) {
......@@ -641,7 +641,7 @@ impl <'a> InstructionSelection {
match order {
MemoryOrder::NotAtomic => {
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, f_context, vm);
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, get_type_alignment(&res.ty, vm), f_context, vm);
self.backend.emit_ldp(&res_l, &res_h, &temp_loc);
}
......@@ -735,7 +735,7 @@ impl <'a> InstructionSelection {
_ => self.backend.emit_stlr(&temp_loc, &val)
};
} else {
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, f_context, vm);
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, get_type_alignment(&val.ty, vm), f_context, vm);
self.backend.emit_str(&temp_loc, &val);
}
} else if self.match_ireg_ex(val_op) {
......@@ -743,7 +743,7 @@ impl <'a> InstructionSelection {
match order {
MemoryOrder::NotAtomic => {
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, f_context, vm);
let temp_loc = emit_mem(self.backend.as_mut(), &resolved_loc, 16, f_context, vm);
self.backend.emit_stp(&temp_loc, &val_l, &val_h);
}
......@@ -981,7 +981,7 @@ impl <'a> InstructionSelection {
// emit a call to swap_back_to_native_stack(sp_loc: Address)
// get thread local and add offset to get sp_loc
let tl = self.emit_get_threadlocal(Some(node), f_context, vm);
let tl = self.emit_get_threadlocal(f_context, vm);
self.backend.emit_add_imm(&tl, &tl, *thread::NATIVE_SP_LOC_OFFSET as u16, false);
self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], None, Some(node), f_context, vm);
......@@ -991,7 +991,7 @@ impl <'a> InstructionSelection {
Instruction_::CommonInst_GetThreadLocal => {
trace!("instsel on GETTHREADLOCAL");
// get thread local
let tl = self.emit_get_threadlocal(Some(node), f_context, vm);
let tl = self.emit_get_threadlocal(f_context, vm);
let tmp_res = self.get_result_value(node, 0);
......@@ -1010,7 +1010,7 @@ impl <'a> InstructionSelection {
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
// get thread local
let tl = self.emit_get_threadlocal(Some(node), f_context, vm);
let tl = self.emit_get_threadlocal(f_context, vm);
// store tmp_op -> [tl + USER_TLS_OFFSET]
emit_store_base_offset(self.backend.as_mut(), &tl, *thread::USER_TLS_OFFSET as i64, &tmp_op, f_context, vm);
......@@ -1065,7 +1065,7 @@ impl <'a> InstructionSelection {
let const_size = make_value_int_const(size as u64, vm);
let tmp_allocator = self.emit_get_allocator(node, f_context, vm);
let tmp_allocator = self.emit_get_allocator(f_context, vm);
let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), const_size, ty_align, node, f_context, vm);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
......@@ -1125,7 +1125,7 @@ impl <'a> InstructionSelection {
}
};
let tmp_allocator = self.emit_get_allocator(node, f_context, vm);
let tmp_allocator = self.emit_get_allocator(f_context, vm);
let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), actual_size, ty_align, node, f_context, vm);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
......@@ -2416,9 +2416,9 @@ impl <'a> InstructionSelection {
}
}
fn emit_get_allocator(&mut self, node: &TreeNode, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
fn emit_get_allocator(&mut self, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// ASM: %tl = get_thread_local()
let tmp_tl = self.emit_get_threadlocal(Some(node), f_context, vm);
let tmp_tl = self.emit_get_threadlocal(f_context, vm);
// ASM: lea [%tl + allocator_offset] -> %tmp_allocator
let allocator_offset = *thread::ALLOCATOR_OFFSET;
......@@ -2463,15 +2463,21 @@ impl <'a> InstructionSelection {
}
}
// TODO: Inline this function call (it's like 4 lines of assembly...)
fn emit_get_threadlocal(
&mut self,
cur_node: Option<&TreeNode>,
f_context: &mut FunctionContext,
vm: &VM) -> P<Value> {
let mut rets = self.emit_runtime_entry(&entrypoints::GET_THREAD_LOCAL, vec![], None, cur_node, f_context, vm);
// This generates code identical to that of the function muentry_get_thread_local (though it may use different registers)
fn emit_get_threadlocal(&mut self, f_context: &mut FunctionContext, vm: &VM) -> P<Value>
{
let tmp = make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
// Read the start address of thread local storage
self.backend.emit_mrs(&tmp, "TPIDR_EL0");
// Add the offset of mu_tls
self.backend.emit_add_str(&tmp, &tmp, ":tprel_hi12:mu_tls");
self.backend.emit_add_str(&tmp, &tmp, ":tprel_lo12_nc:mu_tls");
rets.pop().unwrap()
// Load tmp with the value of mu_tls
emit_load(self.backend.as_mut(), &tmp, &tmp, f_context, vm);
tmp
}
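As a rough illustration, the inlined sequence above is intended to assemble to a standard AArch64 local-exec TLS access along these lines (a sketch only: the actual register is chosen by the register allocator, and X9 is a placeholder):
fn main() {
    // Approximate instruction sequence for reading mu_tls relative to the thread pointer
    let asm = [
        "MRS X9, TPIDR_EL0",                  // read the thread pointer (start of TLS)
        "ADD X9, X9, #:tprel_hi12:mu_tls",    // high 12 bits of the TP-relative offset of mu_tls
        "ADD X9, X9, #:tprel_lo12_nc:mu_tls", // low 12 bits, no overflow check
        "LDR X9, [X9]",                       // load the value stored in mu_tls
    ];
    for line in &asm {
        println!("{}", line);
    }
}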
// ret: Option<Vec<P<Value>>
......@@ -2652,7 +2658,7 @@ impl <'a> InstructionSelection {
let i = i as usize;
let t = if reference[i] { P(MuType::new(new_internal_id(), MuType_::IRef(arg_types[i].clone()))) } else { arg_types[i].clone() };
let size = round_up(vm.get_type_size(t.id()), 8);
let align = vm.get_backend_type_info(t.id()).alignment;
let align = get_type_alignment(&t, vm);
match t.v {
Hybrid(_) => panic!("hybrid argument not supported"),
......@@ -2774,6 +2780,7 @@ impl <'a> InstructionSelection {
_ => {
// Need to pass in two registers
if is_int_ex_reg(&arg_val) && arg_loc.is_reg() {
let arg_val = emit_reg_value(self.backend.as_mut(), &arg_val, f_context, vm);
let (val_l, val_h) = split_int128(&arg_val, f_context, vm);
let arg_loc_h = get_register_from_id(arg_loc.id() + 2);
......@@ -3277,7 +3284,7 @@ impl <'a> InstructionSelection {
trace!("allocate frame slot for reg {}", reg);
let loc = self.current_frame.as_mut().unwrap().alloc_slot_for_callee_saved_reg(reg.clone(), vm);
let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
let loc = emit_mem(self.backend.as_mut(), &loc, get_type_alignment(&reg.ty, vm), f_context, vm);
self.backend.emit_str_callee_saved(&loc, &reg);
}
for i in 0..CALLEE_SAVED_GPRs.len() {
......@@ -3285,8 +3292,9 @@ impl <'a> InstructionSelection {
trace!("allocate frame slot for regs {}", reg);
let loc = self.current_frame.as_mut().unwrap().alloc_slot_for_callee_saved_reg(reg.clone(), vm);
let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
let loc = emit_mem(self.backend.as_mut(), &loc, get_type_alignment(&reg.ty, vm), f_context, vm);
self.backend.emit_str_callee_saved(&loc, &reg);
}
// unload arguments
......@@ -3361,7 +3369,7 @@ impl <'a> InstructionSelection {
let ref reg = CALLEE_SAVED_GPRs[i];
let reg_id = reg.extract_ssa_id().unwrap();
let loc = self.current_frame.as_mut().unwrap().allocated.get(&reg_id).unwrap().make_memory_op(reg.ty.clone(), vm);
let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
let loc = emit_mem(self.backend.as_mut(), &loc, get_type_alignment(&reg.ty, vm), f_context, vm);
self.backend.emit_ldr_callee_saved(reg, &loc);
}
for i in (0..CALLEE_SAVED_FPRs.len()).rev() {
......@@ -3369,7 +3377,7 @@ impl <'a> InstructionSelection {
let reg_id = reg.extract_ssa_id().unwrap();
let loc = self.current_frame.as_mut().unwrap().allocated.get(&reg_id).unwrap().make_memory_op(reg.ty.clone(), vm);
let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
let loc = emit_mem(self.backend.as_mut(), &loc, get_type_alignment(&reg.ty, vm), f_context, vm);
self.backend.emit_ldr_callee_saved(reg, &loc);
}
......@@ -3453,16 +3461,11 @@ impl <'a> InstructionSelection {
let mut imm_val = 0 as u64;
// Is one of the arguments a valid immediate?
let emit_imm = if match_node_int_imm(&op2) {
imm_val = node_imm_to_u64(&op2);
if op.is_signed() {
imm_val = get_signed_value(imm_val, n) as u64;
}
imm_val = node_imm_to_i64(&op2, op.is_signed());
is_valid_arithmetic_imm(imm_val)
} else if match_node_int_imm(&op1) {
imm_val = node_imm_to_u64(&op1);
if op.is_signed() {
imm_val = get_signed_value(imm_val, n) as u64;
}
imm_val = node_imm_to_i64(&op1, op.is_signed());
// if op1 is a valid immediate, swap it with op2
if is_valid_arithmetic_imm(imm_val) {
std::mem::swap(&mut op1, &mut op2);
......@@ -4018,7 +4021,7 @@ impl <'a> InstructionSelection {
fn emit_landingpad(&mut self, exception_arg: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
// get thread local and add offset to get exception_obj
let tl = self.emit_get_threadlocal(None, f_context, vm);
let tl = self.emit_get_threadlocal(f_context, vm);
emit_load_base_offset(self.backend.as_mut(), exception_arg, &tl, *thread::EXCEPTION_OBJ_OFFSET as i64, f_context, vm);
}
......
// TODO: CHECK THAT THE TYPE OF EVERY MEMORY LOCATION HAS THE CORRECT SIZE
// (the size should be the size of the area in memory that it refers to, and will indicate
// how much data any load/store instruction that uses it will operate on)
// (so it should be [1], 8, 16, 32, 64, or 128 bits in size; when using emit_mem, it can have other sizes before this)
#![allow(non_upper_case_globals)]
// TODO: Move architecture independent codes in here, inst_sel and asm_backend to somewhere else...
......@@ -284,6 +290,12 @@ pub fn get_bit_size(ty : &P<MuType>, vm: &VM) -> usize
}
}
#[inline(always)]
pub fn get_type_alignment(ty: &P<MuType>, vm: &VM) -> usize
{
vm.get_backend_type_info(ty.id()).alignment
}
#[inline(always)]
pub fn primitive_byte_size(ty : &P<MuType>) -> usize
{
......@@ -978,12 +990,12 @@ pub fn is_valid_logical_imm(val : u64, n : usize) -> bool {
return true;
}
// Returns the value of 'val' truncated to 'size', interpreted as an unsigned integer
// Returns the value of 'val' truncated to 'size', and then zero extended
pub fn get_unsigned_value(val: u64, size: usize) -> u64 {
(val & bits_ones(size)) as u64 // clears all but the lowest 'size' bits of val
}
// Returns the value of 'val' truncated to 'size', interpreted as a signed integer
// Returns the value of 'val' truncated to 'size', and then sign extended
pub fn get_signed_value(val: u64, size: usize) -> i64 {
if size == 64 {
val as i64
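A self-contained sketch of the truncate-then-extend behaviour the two helpers above describe, assuming bits_ones(n) returns a mask of the lowest n bits (matching its use in this file):
fn bits_ones(n: usize) -> u64 {
    if n == 64 { u64::MAX } else { (1u64 << n) - 1 }
}
// Truncate to `size` bits, then zero extend
fn get_unsigned_value(val: u64, size: usize) -> u64 {
    val & bits_ones(size)
}
// Truncate to `size` bits, then sign extend
fn get_signed_value(val: u64, size: usize) -> i64 {
    if size == 64 {
        val as i64
    } else if val & (1u64 << (size - 1)) != 0 {
        ((val & bits_ones(size)) | !bits_ones(size)) as i64 // negative: fill the upper bits with ones
    } else {
        (val & bits_ones(size)) as i64
    }
}
fn main() {
    assert_eq!(get_unsigned_value(0xFF, 4), 0xF);
    assert_eq!(get_signed_value(0xF, 4), -1);
    assert_eq!(get_signed_value(0x7, 4), 7);
}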
......@@ -1105,12 +1117,12 @@ pub fn bits_ones(n: usize) -> u64 {
#[inline(always)]
pub fn is_valid_immediate_offset(val: i64, n : usize) -> bool {
use std;
let n_align = std::cmp::max(n, 8);
let n_align = std::cmp::max(n, 8);
if n <= 8 {
(val >= -(1 << 8) && val < (1 << 8)) || // Valid 9 bit signed unscaled offset
// Valid unsigned 12-bit scaled offset
(val >= 0 && (val as u64) % (n_align as u64) == 0 && ((val as u64) / (n_align as u64) < (1 << 12)))
} else { // Will use a load-pair instead
} else { // Will be using a load/store-pair
// Is val a signed 7-bit multiple of n_align
(val as u64) % (n_align as u64) == 0 && ((val as u64)/(n_align as u64) < (1 << 7))
}
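A standalone restatement of the unpaired (n <= 8) case above, with a few concrete offsets for an 8-byte access (a sketch under the assumption that n is the access size in bytes; the paired case is analogous with a 7-bit scaled field):
fn valid_unpaired_offset(val: i64, n: u64) -> bool {
    let n_align = std::cmp::max(n, 8);
    // 9-bit signed unscaled offset, or unsigned 12-bit scaled offset
    (val >= -256 && val < 256)
        || (val >= 0 && (val as u64) % n_align == 0 && (val as u64) / n_align < (1 << 12))
}
fn main() {
    assert!(valid_unpaired_offset(-8, 8));     // fits the 9-bit unscaled form
    assert!(valid_unpaired_offset(32760, 8));  // 8 * 4095, fits the 12-bit scaled form
    assert!(!valid_unpaired_offset(32768, 8)); // 8 * 4096, too large for either form
}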
......@@ -1270,6 +1282,18 @@ pub fn node_imm_to_u64(op: &TreeNode) -> u64 {
_ => panic!("expected imm")
}
}
pub fn node_imm_to_i64(op: &TreeNode, signed: bool) -> u64 {
match op.v {
TreeNode_::Value(ref pv) => value_imm_to_i64(pv, signed),
_ => panic!("expected imm")
}
}
pub fn node_imm_to_s64(op: &TreeNode) -> i64 {
match op.v {
TreeNode_::Value(ref pv) => value_imm_to_s64(pv),
_ => panic!("expected imm")
}
}
pub fn node_imm_to_f64(op: &TreeNode) -> f64 {
match op.v {
......@@ -1321,7 +1345,20 @@ pub fn value_imm_to_u64(op: &P<Value>) -> u64 {
}
}
pub fn value_imm_to_i64(op: &P<Value>) -> i64 {
pub fn value_imm_to_i64(op: &P<Value>, signed: bool) -> u64 {
match op.v {
Value_::Constant(Constant::Int(val)) =>
if signed {
get_signed_value(val as u64, op.ty.get_int_length().unwrap()) as u64
} else {
get_unsigned_value(val as u64, op.ty.get_int_length().unwrap())
},
Value_::Constant(Constant::NullRef) => 0,
_ => panic!("expected imm int")
}
}
pub fn value_imm_to_s64(op: &P<Value>) -> i64 {
match op.v {
Value_::Constant(Constant::Int(val)) =>
get_signed_value(val as u64, op.ty.get_int_length().unwrap()),
......@@ -1594,6 +1631,26 @@ fn emit_add_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_
}
}
// dest = src1*val + src2
fn emit_madd_u64(backend: &mut CodeGenerator, dest: &P<Value>, src1: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64, src2: &P<Value>)
{
if val == 0 {
// dest = src2
backend.emit_mov(&dest, &src2);
} else if val == 1 {
// dest = src1 + src2
backend.emit_add(&dest, &src1, &src2);
} else if val.is_power_of_two() {
// dest = (src1 << log2(val)) + src2
backend.emit_lsl_imm(&dest, &src1, log2(val as u64) as u8);
backend.emit_add(&dest, &dest, &src2);
} else {
// dest = src1 * val + src2
let temp_mul = make_temporary(f_context, src1.ty.clone(), vm);
emit_mov_u64(backend, &temp_mul, val as u64);
backend.emit_madd(&dest, &src1, &temp_mul, &src2);
}
}
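The helper above strength-reduces the multiply instead of always emitting MADD; a small standalone sketch of the same case analysis (the instruction text is purely illustrative, the real code emits through the backend):
fn madd_strategy(val: u64) -> String {
    if val == 0 {
        "MOV dest, src2".to_string()       // src1*0 + src2 == src2
    } else if val == 1 {
        "ADD dest, src1, src2".to_string() // src1*1 + src2
    } else if val.is_power_of_two() {
        // shift-and-add: dest = (src1 << log2(val)) + src2
        format!("LSL dest, src1, #{}; ADD dest, dest, src2", val.trailing_zeros())
    } else {
        // general case: materialise the constant, then use MADD
        format!("MOV tmp, #{}; MADD dest, src1, tmp, src2", val)
    }
}
fn main() {
    for v in [0u64, 1, 8, 10] {
        println!("{:2} -> {}", v, madd_strategy(v));
    }
}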
// Compare register with value
fn emit_cmp_u64(backend: &mut CodeGenerator, src1: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64)
{
......@@ -1842,8 +1899,7 @@ pub fn emit_ireg_ex_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context:
}
}
pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let n = vm.get_backend_type_info(pv.ty.id()).alignment;
pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, alignment: usize, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
match pv.v {
Value_::Memory(ref mem) => {
match mem {
......@@ -1853,14 +1909,15 @@ pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Func
if offset.is_some() {
let offset = offset.as_ref().unwrap();
if match_value_int_imm(offset) {
let mut offset_val = value_imm_to_i64(offset);
let mut offset_val = value_imm_to_i64(offset, signed) as i64;
offset_val *= scale as i64;
if is_valid_immediate_offset(offset_val, n) {
if is_valid_immediate_offset(offset_val, alignment) {
Some(make_value_int_const(offset_val as u64, vm))
} else if n <= 8 {
let offset = make_temporary(f_context, UINT64_TYPE.clone(), vm);
emit_mov_u64(backend, &offset, offset_val as u64);
Some(offset)
} else if alignment <= 8 {
let offset = make_temporary(f_context, UINT64_TYPE.clone(), vm);
emit_mov_u64(backend, &offset, offset_val as u64);
Some(offset)
} else {
// We will be using a store/load pair, which doesn't support register offsets
return emit_mem_base(backend, &pv, f_context, vm);
......@@ -1869,7 +1926,7 @@ pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Func
let offset = emit_ireg_value(backend, offset, f_context, vm);
// TODO: If scale == (2^n)*m (for some m), set shift = n, and multiply index by m
if !is_valid_immediate_scale(scale, n) {
if !is_valid_immediate_scale(scale, alignment) {
let temp = make_temporary(f_context, offset.ty.clone(), vm);
emit_mul_u64(backend, &temp, &offset, f_context, vm, scale);
......@@ -1917,7 +1974,11 @@ pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Func
_ => pv.clone()
}
}
_ => panic!("expected memory")
_ => // Use the value as the base register
{
let tmp_mem = make_value_base_offset(&pv, 0, &pv.ty, vm);
emit_mem(backend, &tmp_mem, alignment, f_context, vm)
}
}
}
......@@ -1931,7 +1992,7 @@ fn emit_mem_base(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fun
if offset.is_some() {
let offset = offset.as_ref().unwrap();
if match_value_int_imm(offset) {
let offset_val = value_imm_to_i64(offset);
let offset_val = value_imm_to_i64(offset, signed) as i64;
if offset_val == 0 {
base.clone() // trivial
} else {
......@@ -2011,7 +2072,11 @@ fn emit_mem_base(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fun
})
})
}
_ => panic!("expected memory")
_ => // Use the value as the base register
{
let tmp_mem = make_value_base_offset(&pv, 0, &pv.ty, vm);
emit_mem_base(backend, &tmp_mem, f_context, vm)
}
}
}
......@@ -2063,8 +2128,20 @@ pub fn emit_addr_sym(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value
}
fn emit_calculate_address(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let src = emit_mem(backend, &src, f_context, vm);
match src.v {
Value_::Memory(MemoryLocation::VirtualAddress{ref base, ref offset, scale, signed}) => {
if offset.is_some() {
let offset = offset.as_ref().unwrap();
if match_value_int_imm(offset) {
emit_add_u64(backend, &dest, &base, f_context, vm, ((value_imm_to_i64(offset, signed) as i64)*(scale as i64)) as u64);
} else {
// dest = offset * scale + base
emit_madd_u64(backend, &dest, &offset, f_context, vm, scale as u64, &base);
}
} else {
backend.emit_mov(&dest, &base)
}
}
// offset(base,index,scale)
Value_::Memory(MemoryLocation::Address{ref base, ref offset, shift, signed}) => {
if offset.is_some() {
......@@ -2192,7 +2269,7 @@ fn memory_location_shift(backend: &mut CodeGenerator, mem: MemoryLocation, more_
// Returns a memory location that points to 'Base + offset*scale + more_offset*new_scale'
fn memory_location_shift_scale(backend: &mut CodeGenerator, mem: MemoryLocation, more_offset: &P<Value>, new_scale: u64, f_context: &mut FunctionContext, vm: &VM) -> MemoryLocation {
if match_value_int_imm(&more_offset) {
let more_offset = value_imm_to_i64(&more_offset);
let more_offset = value_imm_to_s64(&more_offset);
memory_location_shift(backend, mem, more_offset * (new_scale as i64), f_context, vm)
} else {
let mut new_scale = new_scale;
......@@ -2353,7 +2430,7 @@ fn emit_move_value_to_value(backend: &mut CodeGenerator, dest: &P<Value>, src: &
}
fn emit_load(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let src = emit_mem(backend, &src, f_context, vm);
let src = emit_mem(backend, &src, get_type_alignment(&dest.ty, vm), f_context, vm);
if is_int_reg(dest) || is_fp_reg(dest) {
backend.emit_ldr(&dest, &src, false);
} else if is_int_ex_reg(dest) {
......@@ -2366,7 +2443,7 @@ fn emit_load(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_con
}
fn emit_store(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let dest = emit_mem(backend, &dest, f_context, vm);
let dest = emit_mem(backend, &dest, get_type_alignment(&src.ty, vm), f_context, vm);
if is_int_reg(src) || is_fp_reg(src) {
backend.emit_str(&dest, &src);
} else if is_int_ex_reg(src) {
......@@ -2385,7 +2462,6 @@ fn emit_load_base_offset(backend: &mut CodeGenerator, dest: &P<Value>, base: &P<
fn emit_store_base_offset(backend: &mut CodeGenerator, base: &P<Value>, offset: i64, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let mem = make_value_base_offset(base, offset, &src.ty, vm);
let mem = emit_mem(backend, &mem, f_context, vm);
emit_store(backend, &mem, src, f_context, vm);
}
......
......@@ -186,7 +186,7 @@ impl FrameSlot {
base: aarch64::FP.clone(),
offset: Some(Value::make_int_const(vm.next_id(), self.offset as u64)),
scale: 1,
signed: false
signed: true
}
)
})
......