Commit b1f44703 authored by Yi Lin

[wip] pass compilation on aarch64, cannot run though

parent c9b13533
@@ -34,7 +34,7 @@ gcc = "*"
ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "*" }
libc="*"
field-offset = "*"
libloading = "*"
@@ -25,4 +25,4 @@ utils = {path = "../utils"}
lazy_static = "*"
log = "*"
simple_logger = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "*" }
@@ -1128,6 +1128,10 @@ pub enum MemoryLocation
}
}
#[cfg(target_arch = "aarch64")]
rodal_enum!(MemoryLocation{{VirtualAddress: base, offset, signed, scale},
{Address: base, offset, shift, signed}, {Symbolic: label, is_global}});
#[cfg(target_arch = "aarch64")]
impl fmt::Display for MemoryLocation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
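For orientation, the aarch64 `MemoryLocation` registered with `rodal_enum!` above presumably has the shape sketched below. The field types are assumptions inferred from the field names (and the x86_64 counterpart), not the actual `ast` crate definition:

```rust
// Sketch only: field types are guesses; Value and MuName are stubbed so the
// sketch stands alone. P<T> is Zebu's Arc-style shared pointer.
use std::sync::Arc;
type P<T> = Arc<T>;
pub struct Value; // stand-in for the ast crate's Value
pub type MuName = String; // stand-in for the real name type

pub enum MemoryLocation {
    // base + offset*scale, resolved later during instruction selection
    VirtualAddress { base: P<Value>, offset: Option<P<Value>>, signed: bool, scale: u64 },
    // base + (offset << shift), mirroring AArch64 addressing modes
    Address { base: P<Value>, offset: Option<P<Value>>, shift: u8, signed: bool },
    // a label reference; is_global presumably selects GOT-style access
    Symbolic { label: MuName, is_global: bool },
}
```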
@@ -1323,4 +1327,4 @@ impl MuEntity for TreeNode {
TreeNode_::Value(ref pv) => pv.as_entity()
}
}
}
\ No newline at end of file
}
@@ -2845,13 +2845,13 @@ pub fn emit_context_with_reloc(vm: &VM,
file.write_fmt(format_args!("{}:\n", dump_label)).unwrap();
let base = obj_dump.reference_addr;
let end = obj_dump.mem_start.plus(obj_dump.mem_size);
let end = obj_dump.mem_start + obj_dump.mem_size;
assert!(base.is_aligned_to(POINTER_SIZE));
let mut offset = 0;
while offset < obj_dump.mem_size {
let cur_addr = base.plus(offset);
let cur_addr = base + offset;
if obj_dump.reference_offsets.contains(&offset) {
// write ref with label
@@ -2874,7 +2874,7 @@ pub fn emit_context_with_reloc(vm: &VM,
file.write_fmt(format_args!(".xword {}\n", label.clone())).unwrap();
} else {
// write plain word (as bytes)
let next_word_addr = cur_addr.plus(POINTER_SIZE);
let next_word_addr = cur_addr + POINTER_SIZE;
if next_word_addr <= end {
write_data_bytes(&mut file, cur_addr, next_word_addr);
@@ -2930,7 +2930,7 @@ fn write_data_bytes(f: &mut File, from: Address, to: Address) {
let byte = unsafe {cursor.load::<u8>()};
f.write_fmt(format_args!("0x{:x}", byte)).unwrap();
cursor = cursor.plus(1);
cursor = cursor + 1 as ByteSize;
if cursor != to {
f.write(",".as_bytes()).unwrap();
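The `.plus(n)` → `+ n` rewrites in these hunks suggest `utils::Address` gained a `std::ops::Add<ByteSize>` impl in place of the old `.plus()` method. A minimal self-contained sketch of what that operator presumably does, assuming `Address` wraps a raw `usize`:

```rust
use std::ops::Add;

pub type ByteSize = usize;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Address(usize);

impl Add<ByteSize> for Address {
    type Output = Address;
    fn add(self, bytes: ByteSize) -> Address {
        // plain byte offsetting, as the old .plus() presumably did
        Address(self.0 + bytes)
    }
}

fn main() {
    let base = Address(0x1000);
    assert_eq!(base + 8 as ByteSize, Address(0x1008)); // matches the diff's style
}
```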
@@ -2617,7 +2617,7 @@ impl <'a> InstructionSelection {
fn emit_alloc_sequence(&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
if size.is_int_const() {
// size known at compile time, we can choose to emit alloc_small or large now
let size_i = size.extract_int_const();
let size_i = size.extract_int_const().unwrap();
if size_i + OBJECT_HEADER_SIZE as u64 > mm::LARGE_OBJECT_THRESHOLD as u64 {
self.emit_alloc_sequence_large(tmp_allocator, size, align, node, f_context, vm)
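The newly added `.unwrap()` implies `extract_int_const` now returns `Option<u64>` rather than panicking internally on non-constants. A self-contained sketch of the assumed new signature (type shapes modelled loosely on the `ast` crate):

```rust
pub enum Constant { Int(u64), Other }
pub enum Value_ { Constant(Constant), Other }
pub struct Value { pub v: Value_ }

impl Value {
    // Presumed new shape: None when the value is not an integer constant,
    // so call sites like the one above must unwrap explicitly.
    pub fn extract_int_const(&self) -> Option<u64> {
        match self.v {
            Value_::Constant(Constant::Int(val)) => Some(val),
            _ => None,
        }
    }
}

fn main() {
    let size = Value { v: Value_::Constant(Constant::Int(42)) };
    assert_eq!(size.extract_int_const().unwrap(), 42);
}
```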
@@ -2804,7 +2804,7 @@ impl <'a> InstructionSelection {
if hfa_n > 0 {
let mut res = vec![get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(&t, vm)/hfa_n)];
for i in 1..hfa_n {
res.push(get_alias_for_length(RETURN_FPR[i].id(), get_bit_size(&t, vm)/hfa_n));
res.push(get_alias_for_length(RETURN_FPRS[i].id(), get_bit_size(&t, vm)/hfa_n));
}
res
} else if size <= 8 {
@@ -2823,7 +2823,7 @@ impl <'a> InstructionSelection {
Void => vec![], // Nothing to return
Int(128) => // Return in 2 GPRs
vec![RETURN_GPRs[0].clone(), RETURN_GPRs[0].clone()],
vec![RETURN_GPRS[0].clone(), RETURN_GPRS[1].clone()],
// Integral or pointer type
_ =>
@@ -2965,7 +2965,7 @@ impl <'a> InstructionSelection {
ngrn = round_up(ngrn, 2); // align NGRN to the next even number
if ngrn < 7 {
locations.push(ARGUMENT_GPRs[ngrn].clone());
locations.push(ARGUMENT_GPRS[ngrn].clone());
ngrn += 2;
} else {
ngrn = 8;
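For context, this hunk sits in the AAPCS64 argument-assignment logic: a 128-bit integer is passed in an even-aligned pair of GPRs while two remain, otherwise on the stack. A self-contained sketch of that rule, using register indices only (the second register of the pair is an assumption read off the `ngrn += 2`):

```rust
// Returns the GPR index pair for an i128 argument, or None when it must go
// on the stack. Mirrors the round-up/bounds logic shown above.
fn assign_i128_pair(mut ngrn: usize) -> Option<(usize, usize)> {
    ngrn = (ngrn + 1) & !1; // align NGRN to the next even number
    if ngrn < 7 {
        Some((ngrn, ngrn + 1)) // e.g. (x0, x1), (x2, x3), ...
    } else {
        None // NGRN exhausted (set to 8): pass on the stack
    }
}

fn main() {
    assert_eq!(assign_i128_pair(0), Some((0, 1)));
    assert_eq!(assign_i128_pair(1), Some((2, 3))); // odd NGRN rounds up
    assert_eq!(assign_i128_pair(7), None);
}
```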
@@ -3286,8 +3286,13 @@ impl <'a> InstructionSelection {
// record exception block (CCall may have an exception block)
if cur_node.is_some() {
let cur_node = cur_node.unwrap();
if cur_node.op == OpCode::CCall {
unimplemented!()
match cur_node.v {
TreeNode_::Instruction(Instruction {v: Instruction_::CCall{..}, ..}) => {
unimplemented!()
}
_ => {
// won't have an exception branch; ignore
}
}
}
}
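The exception-block check now destructures the tree node instead of comparing a flat opcode field. A self-contained sketch of the nested `..` pattern idiom it uses (type shapes modelled loosely on the `ast` crate):

```rust
// Sketch only: the real Instruction and TreeNode_ carry many more fields.
struct Instruction { v: Instruction_ }
enum Instruction_ { CCall {}, Return }
enum TreeNode_ { Instruction(Instruction), Value }

fn is_ccall(node: &TreeNode_) -> bool {
    // `..` ignores the remaining fields at each nesting level
    matches!(node,
             TreeNode_::Instruction(Instruction { v: Instruction_::CCall { .. }, .. }))
}

fn main() {
    let call = TreeNode_::Instruction(Instruction { v: Instruction_::CCall {} });
    let ret = TreeNode_::Instruction(Instruction { v: Instruction_::Return });
    assert!(is_ccall(&call));
    assert!(!is_ccall(&ret));
}
```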
@@ -3526,8 +3531,8 @@ impl <'a> InstructionSelection {
let loc = emit_mem(self.backend.as_mut(), &loc, get_type_alignment(&reg.ty, vm), f_context, vm);
self.backend.emit_str_callee_saved(&loc, &reg);
}
for i in 0..CALLEE_SAVED_FPRs.len() {
let ref reg = CALLEE_SAVED_FPRs[i];
for i in 0..CALLEE_SAVED_FPRS.len() {
let ref reg = CALLEE_SAVED_FPRS[i];
trace!("allocate frame slot for reg {}", reg);
let loc = self.current_frame.as_mut().unwrap().alloc_slot_for_callee_saved_reg(reg.clone(), vm);
@@ -3669,7 +3674,7 @@ impl <'a> InstructionSelection {
match inst.v {
Instruction_::CmpOp(op, op1, ..) => {
if op::is_int_cmp(op) {
if op.is_int_cmp() {
node_type(&ops[op1]).get_int_length().unwrap() == 128 &&
!op.is_symmetric()
} else {
@@ -44,6 +44,7 @@ use ast::op;
use compiler::backend::RegGroup;
use vm::VM;
use utils::ByteSize;
use utils::LinkedHashMap;
use std::collections::HashMap;
@@ -668,7 +669,7 @@ lazy_static! {
// put caller-saved regs first (they impose no overhead if there is no call instruction)
pub static ref ALL_USABLE_MACHINE_REGs : Vec<P<Value>> = vec![
pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = vec![
X0.clone(),
X1.clone(),
X2.clone(),
@@ -788,13 +789,13 @@ pub fn get_previous_frame_pointer(frame_pointer: Address) -> Address {
// Gets the return address for the current frame pointer
#[inline(always)]
pub fn get_return_address(frame_pointer: Address) -> Address {
unsafe { frame_pointer.plus(8).load::<Address>() }
unsafe { (frame_pointer + 8 as ByteSize).load::<Address>() }
}
// Gets the stack pointer before the current frame was created
#[inline(always)]
pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
frame_pointer.plus(16)
frame_pointer + 16 as ByteSize
}
#[inline(always)]
@@ -805,16 +806,16 @@ pub fn set_previous_frame_pointer(frame_pointer: Address, value: Address) {
// Gets the return address for the current frame pointer
#[inline(always)]
pub fn set_return_address(frame_pointer: Address, value: Address) {
unsafe { frame_pointer.plus(8).store::<Address>(value) }
unsafe { (frame_pointer + 8 as ByteSize).store::<Address>(value) }
}
// Reg should be a 64-bit callee saved GPR or FPR
pub fn get_callee_saved_offset(reg: MuID) -> isize {
debug_assert!(is_callee_saved(reg));
let id = if reg < FPR_ID_START {
(reg - CALLEE_SAVED_GPRs[0].id())/2
(reg - CALLEE_SAVED_GPRS[0].id())/2
} else {
(reg - CALLEE_SAVED_FPRs[0].id()) / 2 + CALLEE_SAVED_GPRs.len()
(reg - CALLEE_SAVED_FPRS[0].id()) / 2 + CALLEE_SAVED_GPRS.len()
};
(id as isize + 1)*(-8)
}
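Taken together, these helpers pin down the frame layout the backend assumes on AArch64: `fp` points at the saved previous frame pointer, the return address lives at `fp + 8`, the caller's stack pointer is `fp + 16`, and callee-saved spill slots grow downward from `fp`. The division by 2 in `get_callee_saved_offset` presumably reflects each architectural register owning two consecutive MuIDs. A self-contained sketch of the offset arithmetic, with illustrative register indices:

```rust
// Frame layout assumed by the helpers above (higher addresses upward):
//   fp + 16 : caller's stack (previous SP)
//   fp + 8  : return address
//   fp + 0  : previous frame pointer
//   fp - 8  : first callee-saved slot
//   fp - 16 : second callee-saved slot, and so on
fn offset_for_callee_saved_index(index: isize) -> isize {
    (index + 1) * -8 // same formula as get_callee_saved_offset
}

fn main() {
    assert_eq!(offset_for_callee_saved_index(0), -8);  // e.g. X19
    assert_eq!(offset_for_callee_saved_index(1), -16); // e.g. X20
}
```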
@@ -1862,7 +1863,7 @@ fn emit_reg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fu
&Constant::FuncRef(func_id) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
let mem = make_value_symbolic(vm.get_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
tmp
},
@@ -1922,7 +1923,7 @@ pub fn emit_ireg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &m
&Constant::FuncRef(func_id) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
let mem = make_value_symbolic(vm.get_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
tmp
},
@@ -2317,7 +2318,7 @@ fn memory_location_shift(backend: &mut CodeGenerator, mem: MemoryLocation, more_
if offset.is_some() {
let offset = offset.as_ref().unwrap();
if match_value_int_imm(&offset) {
let offset = offset.extract_int_const()*scale + (more_offset as u64);
let offset = offset.extract_int_const().unwrap() * scale + (more_offset as u64);
make_value_int_const(offset as u64, vm)
} else {
let offset = emit_ireg_value(backend, &offset, f_context, vm);
@@ -2372,7 +2373,7 @@ fn memory_location_shift_scale(backend: &mut CodeGenerator, mem: MemoryLocation,
let offset = offset.as_ref().unwrap();
if match_value_int_imm(&offset) {
let temp = make_temporary(f_context, offset.ty.clone(), vm);
let offset_scaled = (offset.extract_int_const() as i64)*(scale as i64);
let offset_scaled = (offset.extract_int_const().unwrap() as i64)*(scale as i64);
if offset_scaled % (new_scale as i64) == 0 {
emit_add_u64(backend, &temp, &more_offset, f_context, vm, (offset_scaled / (new_scale as i64)) as u64);
// temp = more_offset + (offset*scale)/new_scale, i.e. new_scale*temp = new_scale*more_offset + offset*scale
@@ -182,9 +182,9 @@ pub use compiler::backend::aarch64::emit_context_with_reloc;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::spill_rewrite;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;
pub use compiler::backend::aarch64::ARGUMENT_FPRS;
use vm::VM;
use ast::types::*;
@@ -497,4 +497,4 @@ impl RegGroup {
pub fn get_from_value(val: &P<Value>) -> RegGroup {
RegGroup::get_from_ty(&val.ty)
}
}
\ No newline at end of file
}
@@ -38,4 +38,4 @@ simple_logger = "*"
aligned_alloc = "*"
crossbeam = "*"
field-offset = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "*" }
@@ -24,6 +24,6 @@ crate-type = ["rlib"]
memmap = "*"
memsec = "0.1.9"
byteorder = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "*" }
log = "*"
doubly = "1.1.3"
\ No newline at end of file
doubly = "1.1.3"