Commit 4e35ae29 authored by Isaac Oscar Gariano

Re-added deleted tests.

Added support for generation of unscaled immediate memory offsets.
Fixed compilation errors.
Modified calls to the C 'exit' function in tests so that it expects an int<64> instead of an int<32>.
Modified the formatting of various IR constructs (like Values and Instructions) to be more easily readable.
parent 93c4cc59
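
A note on the formatting changes below: they replace the old "+({ty} {value})" wrapper in the IR's Display output with a more compact call-style "{ty}({value})" form. A minimal sketch of the new shape, using simplified stand-in types rather than the crate's real Value and MuEntityHeader (illustration only, not the project's code):

use std::fmt;

// Simplified stand-ins for two of the crate's Value_ variants.
enum Value {
    SsaVar { ty: String, name: String },
    Constant { ty: String, val: String },
}

impl fmt::Display for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            // New format "type(%name)" replaces the old "+(type %name)".
            Value::SsaVar { ref ty, ref name } => write!(f, "{}(%{})", ty, name),
            // New format "type(const)" replaces the old "+(type const @name)".
            Value::Constant { ref ty, ref val } => write!(f, "{}({})", ty, val),
        }
    }
}

fn main() {
    let v = Value::SsaVar { ty: "int<64>".to_string(), name: "x".to_string() };
    let c = Value::Constant { ty: "int<64>".to_string(), val: "42".to_string() };
    println!("{} {}", v, c); // prints: int<64>(%x) int<64>(42)
}
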
@@ -7,7 +7,7 @@ Cargo.lock
*.log
*.DS_Store
*.swp
.idea
*.pyc
*.o
*.dylib
...
@@ -576,8 +576,8 @@ pub struct Destination {
impl Destination {
fn debug_str(&self, ops: &Vec<P<TreeNode>>) -> String {
let mut ret = format!("{} with ", self.target);
ret.push('[');
let mut ret = format!("{}", self.target);
ret.push('(');
for i in 0..self.args.len() {
let ref arg = self.args[i];
ret.push_str(arg.debug_str(ops).as_str());
@@ -585,7 +585,7 @@ impl Destination {
ret.push_str(", ");
}
}
ret.push(']');
ret.push(')');
ret
}
...
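
For reference, the rewritten debug_str above renders a destination as a call, target(arg1, arg2), where it previously printed target with [arg1, arg2]. A self-contained sketch of the same join loop over plain strings (the real code walks the ops vector of P<TreeNode>):

// Sketch of the new call-style formatting; &str args stand in for operands.
fn debug_str(target: &str, args: &[&str]) -> String {
    let mut ret = target.to_string();
    ret.push('(');
    for i in 0..args.len() {
        ret.push_str(args[i]);
        if i != args.len() - 1 {
            ret.push_str(", ");
        }
    }
    ret.push(')');
    ret
}

fn main() {
    assert_eq!(debug_str("%bb1", &["%x", "%y"]), "%bb1(%x, %y)");
}
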
@@ -667,7 +667,7 @@ impl fmt::Display for TreeNode {
match self.v {
TreeNode_::Value(ref pv) => pv.fmt(f),
TreeNode_::Instruction(ref inst) => {
write!(f, "+({})", inst)
write!(f, "{}", inst)
}
}
}
@@ -777,16 +777,16 @@ impl fmt::Display for Value {
if DISPLAY_TYPE {
match self.v {
Value_::SSAVar(_) => {
write!(f, "+({} %{})", self.ty, self.hdr)
write!(f, "{}(%{})", self.ty, self.hdr)
},
Value_::Constant(ref c) => {
write!(f, "+({} {} @{})", self.ty, c, self.hdr)
write!(f, "{}({})", self.ty, c)
},
Value_::Global(ref ty) => {
write!(f, "+(GLOBAL {} @{})", ty, self.hdr)
write!(f, "{}(@{})", ty, self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "+(MEM {} %{})", mem, self.hdr)
write!(f, "{}(%{})", mem, self.hdr)
}
}
} else {
@@ -798,10 +798,10 @@ impl fmt::Display for Value {
write!(f, "{}", c)
},
Value_::Global(_) => {
write!(f, "GLOBAL @{}", self.hdr)
write!(f, "@{}", self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "MEM {} %{}", mem, self.hdr)
write!(f, "{}(%{})", mem, self.hdr)
}
}
}
@@ -1170,15 +1170,28 @@ impl PartialEq for MuEntityHeader {
}
}
const DISPLAY_ID : bool = false;
impl fmt::Display for MuEntityHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.name().is_none() {
write!(f, "UNNAMED #{}", self.id)
if DISPLAY_ID {
if self.name().is_none() {
write!(f, "UNAMED #{}", self.id)
} else {
if PRINT_ABBREVIATE_NAME {
write!(f, "{} #{}", self.abbreviate_name().unwrap(), self.id)
} else {
write!(f, "{} #{}", self.name().unwrap(), self.id)
}
}
} else {
if PRINT_ABBREVIATE_NAME {
write!(f, "{} #{}", self.abbreviate_name().unwrap(), self.id)
if self.name().is_none() {
write!(f, "{}", self.id)
} else {
write!(f, "{} #{}", self.name().unwrap(), self.id)
if PRINT_ABBREVIATE_NAME {
write!(f, "{}", self.abbreviate_name().unwrap())
} else {
write!(f, "{}", self.name().unwrap())
}
}
}
}
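
Condensed, the new MuEntityHeader formatting gates the old "#id" suffix behind the DISPLAY_ID constant: by default only the name is printed, or the bare id when the entity is unnamed. A sketch with a hypothetical simplified header type, omitting the PRINT_ABBREVIATE_NAME branch:

use std::fmt;

// Hypothetical stand-in for MuEntityHeader; the real type lives in the IR crate.
struct Header { id: usize, name: Option<String> }

const DISPLAY_ID: bool = false;

impl fmt::Display for Header {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if DISPLAY_ID {
            // Old behaviour, now behind the flag: always append "#id".
            match self.name {
                None => write!(f, "UNNAMED #{}", self.id),
                Some(ref n) => write!(f, "{} #{}", n, self.id),
            }
        } else {
            // New default: just the name, or the bare id when unnamed.
            match self.name {
                None => write!(f, "{}", self.id),
                Some(ref n) => write!(f, "{}", n),
            }
        }
    }
}

fn main() {
    println!("{}", Header { id: 42, name: None });                     // 42
    println!("{}", Header { id: 7, name: Some("@main".to_string()) }); // @main
}
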
...
#![allow(unused_variables)]
use compiler::backend::AOT_EMIT_CONTEXT_FILE;
use compiler::backend::RegGroup;
use utils::ByteSize;
use utils::Address;
use utils::POINTER_SIZE;
use compiler::backend::aarch64;
use compiler::backend::aarch64::*;
use compiler::backend::{Reg, Mem};
@@ -1163,7 +1161,7 @@ impl ASMCodeGen {
op.extract_ssa_id().unwrap()
}
fn aarch64_prepare_mem(&self, op: &P<Value>, loc: usize) -> (String, LinkedHashMap<MuID, Vec<ASMLocation>>) {
fn prepare_mem(&self, op: &P<Value>, loc: usize) -> (String, LinkedHashMap<MuID, Vec<ASMLocation>>) {
if cfg!(debug_assertions) {
match op.v {
Value_::Memory(_) => {},
@@ -1287,10 +1285,10 @@ impl ASMCodeGen {
self.cur.take().unwrap()
}
fn emit_ldr_spill(&mut self, dest: Reg, src: Mem) { self.aarch64_internal_load("LDR", dest, src, false, true); }
fn emit_str_spill(&mut self, dest: Mem, src: Reg) { self.aarch64_internal_store("STR", dest, src, true) }
fn emit_ldr_spill(&mut self, dest: Reg, src: Mem) { self.internal_load("LDR", dest, src, false, true); }
fn emit_str_spill(&mut self, dest: Mem, src: Reg) { self.internal_store("STR", dest, src, true) }
fn aarch64_internal_simple(&mut self, inst: &str) {
fn internal_simple(&mut self, inst: &str) {
let inst = inst.to_string();
trace!("emit: \t{}", inst);
@@ -1304,7 +1302,7 @@ impl ASMCodeGen {
)
}
fn aarch64_internal_simple_imm(&mut self, inst: &str, val: u64) {
fn internal_simple_imm(&mut self, inst: &str, val: u64) {
let inst = inst.to_string();
trace!("emit: \t{} {}", inst, val);
@@ -1318,7 +1316,7 @@ impl ASMCodeGen {
)
}
fn aarch64_internal_simple_str(&mut self, inst: &str, option: &str) {
fn internal_simple_str(&mut self, inst: &str, option: &str) {
let inst = inst.to_string();
let option = option.to_string();
trace!("emit: \t{} {}", inst, option);
@@ -1334,7 +1332,7 @@ impl ASMCodeGen {
}
#[warn(unused_variables)] // A system instruction
fn aarch64_internal_system(&mut self, inst: &str, option: &str, src: &P<Value>) {
fn internal_system(&mut self, inst: &str, option: &str, src: &P<Value>) {
let inst = inst.to_string();
let option = option.to_string();
trace!("emit: \t{} {} {}", inst, option, src);
@@ -1346,12 +1344,12 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
false
)
}
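
The renamed helper ignore_zero_register (previously aarch64_ignore_zero_register) is not itself shown in this diff; judging from its call sites it builds a one-entry def/use map while dropping AArch64's hard-wired zero register, which the register allocator must never see. A hedged sketch under that assumption (the register id and map type are stand-ins; the real code returns a LinkedHashMap):

use std::collections::HashMap;

type MuID = usize;
type ASMLocation = (usize, usize); // hypothetical (offset, length) into the asm string

// Hypothetical id for XZR/WZR; the real code consults the register table.
const ZERO_REG_ID: MuID = 31;

// Presumed behaviour: map id -> locations, unless id names the zero register.
fn ignore_zero_register(id: MuID, locs: Vec<ASMLocation>) -> HashMap<MuID, Vec<ASMLocation>> {
    let mut map = HashMap::new();
    if id != ZERO_REG_ID {
        map.insert(id, locs);
    }
    map
}

fn main() {
    assert!(ignore_zero_register(ZERO_REG_ID, vec![(4, 3)]).is_empty());
    assert_eq!(ignore_zero_register(0, vec![(4, 3)]).len(), 1);
}
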
fn aarch64_internal_branch_op(&mut self, inst: &str, src: &P<Value>, dest_name: MuName) {
fn internal_branch_op(&mut self, inst: &str, src: &P<Value>, dest_name: MuName) {
trace!("emit: \t{} {}, {}", inst, src, dest_name);
let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
@@ -1360,7 +1358,7 @@ impl ASMCodeGen {
self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{ id1 => vec![loc1]}, false, ASMBranchTarget::Conditional(dest_name), None);
}
fn aarch64_internal_branch_op_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, dest_name: MuName) {
fn internal_branch_op_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, dest_name: MuName) {
trace!("emit: \t{} {},{},{}", inst, src1, src2, dest_name);
let (reg1, id1, loc1) = self.prepare_reg(src1, inst.len() + 1);
@@ -1370,7 +1368,7 @@ impl ASMCodeGen {
}
#[warn(unused_variables)] // Same as internal_binop except it extends the second source register
fn aarch64_internal_binop_ext(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
fn internal_binop_ext(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
let inst = inst.to_string();
let ext_s = if signed { "S" } else { "U" };
let ext_p = match src2.ty.get_int_length() {
@@ -1396,13 +1394,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_create_hash_map(vec![(id2, loc2), (id3, loc3)]),
ignore_zero_register(id1, vec![loc1]),
create_hash_map(vec![(id2, loc2), (id3, loc3)]),
false
)
}
fn aarch64_internal_binop_imm(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: u64, shift: u8) {
fn internal_binop_imm(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: u64, shift: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} LSL {} -> {}", inst, src1, src2, shift, dest);
@@ -1417,14 +1415,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_ignore_zero_register(id2, vec![loc2]),
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
fn aarch64_internal_unop_shift(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, shift: &str, amount: u8) {
fn internal_unop_shift(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} {} -> {}", inst, src, shift, amount, dest);
@@ -1435,14 +1433,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_ignore_zero_register(id2, vec![loc2]),
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
#[warn(unused_variables)] // dest <= inst(src)
fn aarch64_internal_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
fn internal_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {} -> {}", inst, src, dest);
@@ -1453,14 +1451,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_ignore_zero_register(id2, vec![loc2]),
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
// Note: different instructions have different allowed src values
fn aarch64_internal_unop_imm(&mut self, inst: &str, dest: &P<Value>, src: u64, shift: u8) {
fn internal_unop_imm(&mut self, inst: &str, dest: &P<Value>, src: u64, shift: u8) {
debug_assert!(shift == 0 || shift == 16 || shift == 32 || shift == 48);
let inst = inst.to_string();
trace!("emit: \t{} {} LSL {} -> {}", inst, src, shift, dest);
@@ -1474,7 +1472,7 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
linked_hashmap!{},
false
)
@@ -1482,7 +1480,7 @@ impl ASMCodeGen {
#[warn(unused_variables)] // dest <= inst(src1, src2)
fn aarch64_internal_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>) {
fn internal_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} -> {}", inst, src1, src2, dest);
@@ -1494,14 +1492,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_create_hash_map(vec![(id2, loc2), (id3, loc3)]),
ignore_zero_register(id1, vec![loc1]),
create_hash_map(vec![(id2, loc2), (id3, loc3)]),
false
)
}
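
Similarly, create_hash_map (previously aarch64_create_hash_map) only appears at call sites here; it presumably groups (id, location) pairs into the use map, merging locations when the same register occurs twice (e.g. ADD x0, x1, x1), and may apply the same zero-register filtering. A sketch under those assumptions:

use std::collections::HashMap;

type MuID = usize;
type ASMLocation = (usize, usize); // hypothetical (offset, length)

// Presumed behaviour: group locations by register id.
// (The real code returns a LinkedHashMap and may also skip the zero register.)
fn create_hash_map(pairs: Vec<(MuID, ASMLocation)>) -> HashMap<MuID, Vec<ASMLocation>> {
    let mut map: HashMap<MuID, Vec<ASMLocation>> = HashMap::new();
    for (id, loc) in pairs {
        map.entry(id).or_insert_with(Vec::new).push(loc);
    }
    map
}

fn main() {
    // The same source register used twice yields one entry with two locations.
    let uses = create_hash_map(vec![(5, (4, 2)), (5, (7, 2))]);
    assert_eq!(uses[&5].len(), 2);
}
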
#[warn(unused_variables)] // dest <= inst(src1, src2)
fn aarch64_internal_binop_shift(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
fn internal_binop_shift(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {}, {} {} -> {}", inst, src1, src2, shift, amount, dest);
@@ -1513,14 +1511,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_create_hash_map(vec![(id2, loc2), (id3, loc3)]),
ignore_zero_register(id1, vec![loc1]),
create_hash_map(vec![(id2, loc2), (id3, loc3)]),
false
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2, src3)
fn aarch64_internal_ternop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, src3 : &P<Value>) {
fn internal_ternop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, src3 : &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {}, {} -> {}", inst, src3, src1, src2, dest);
@@ -1533,13 +1531,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_create_hash_map(vec![(id2, loc2), (id3, loc3), (id4, loc4)]),
ignore_zero_register(id1, vec![loc1]),
create_hash_map(vec![(id2, loc2), (id3, loc3), (id4, loc4)]),
false
)
}
fn aarch64_internal_ternop_imm(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: u64, src3: u64) {
fn internal_ternop_imm(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: u64, src3: u64) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {}, {} -> {}", inst, src1, src2, src3, dest);
@@ -1550,14 +1548,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_ignore_zero_register(id2, vec![loc2]),
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, src2)
fn aarch64_internal_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>)
fn internal_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>)
{
let inst = inst.to_string();
trace!("emit: \t{} {}, {}", inst, src1, src2);
@@ -1570,13 +1568,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
aarch64_create_hash_map(vec![(id1, loc1), (id2, loc2)]),
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
fn aarch64_internal_cmpop_shift(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
fn internal_cmpop_shift(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {},{}, {} {}", inst, src1, src2, shift, amount);
@@ -1588,13 +1586,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
aarch64_create_hash_map(vec![(id1, loc1), (id2, loc2)]),
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // Same as internal_cmpop except it extends the second source register
fn aarch64_internal_cmpop_ext(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
fn internal_cmpop_ext(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
let inst = inst.to_string();
let ext_s = if signed { "S" } else { "U" };
let ext_p = match src2.ty.get_int_length() {
@@ -1617,12 +1615,12 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
aarch64_create_hash_map(vec![(id1, loc1), (id2, loc2)]),
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, src2 [<< 12])
fn aarch64_internal_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u64, shift: u8)
fn internal_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u64, shift: u8)
{
let inst = inst.to_string();
trace!("emit: \t{} {}, {} LSL {}", inst, src1, src2, shift);
@@ -1638,13 +1636,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, 0.0)
fn aarch64_internal_cmpop_f0(&mut self, inst: &str, src1: &P<Value>)
fn internal_cmpop_f0(&mut self, inst: &str, src1: &P<Value>)
{
let inst = inst.to_string();
trace!("emit: \t{} {}, 0.0", inst, src1);
@@ -1656,13 +1654,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
false
)
}
#[warn(unused_variables)] // dest <= inst<cond>()
fn aarch64_internal_cond_op(&mut self, inst: &str, dest: &P<Value>, cond: &str) {
fn internal_cond_op(&mut self, inst: &str, dest: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
trace!("emit: \t{} {} -> {}", inst, cond, dest);
@@ -1673,14 +1671,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
linked_hashmap!{},
false
)
}
#[warn(unused_variables)] // dest <= inst<cond>(src)
fn aarch64_internal_cond_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, cond: &str) {
fn internal_cond_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
trace!("emit: \t{} {} {} -> {}", inst, cond, src, dest);
@@ -1692,14 +1690,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_ignore_zero_register(id2, vec![loc2]),
ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id2, vec![loc2]),
false
)
}
#[warn(unused_variables)] // dest <= inst<cond>(src1, src2)
fn aarch64_internal_cond_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, cond: &str) {
fn internal_cond_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
trace!("emit: \t{} {}, {}, {} -> {}", inst, cond, src1, src2, dest);
@@ -1712,14 +1710,14 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id1, vec![loc1]),
aarch64_create_hash_map(vec![(id2, loc2), (id3, loc3)]),
ignore_zero_register(id1, vec![loc1]),
create_hash_map(vec![(id2, loc2), (id3, loc3)]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
fn aarch64_internal_cond_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, flags: u8, cond: &str)
fn internal_cond_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, flags: u8, cond: &str)
{
let inst = inst.to_string();
let cond = cond.to_string();
@@ -1733,13 +1731,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
aarch64_create_hash_map(vec![(id1, loc1), (id2, loc2)]),
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
fn aarch64_internal_cond_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, flags: u8, cond: &str)
fn internal_cond_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, flags: u8, cond: &str)
{
let inst = inst.to_string();
let cond = cond.to_string();
@@ -1752,12 +1750,12 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
aarch64_ignore_zero_register(id1, vec![loc1]),
ignore_zero_register(id1, vec![loc1]),
false
)
}
fn aarch64_internal_load(&mut self, inst: &str, dest: &P<Value>, src: Mem, signed: bool, is_spill_related: bool)
fn internal_load(&mut self, inst: &str, dest: &P<Value>, src: Mem, signed: bool, is_spill_related: bool)
{
let op_len = primitive_byte_size(&dest.ty);
let inst = inst.to_string() + if signed {
@@ -1782,14 +1780,14 @@ impl ASMCodeGen {
trace!("emit: \t{} {} -> {}", inst, src, dest);
let (reg, id, loc) = self.prepare_reg(dest, inst.len() + 1);
let (mem, uses) = self.aarch64_prepare_mem(src, inst.len() + 1 + reg.len() + 1);
let (mem, uses) = self.prepare_mem(src, inst.len() + 1 + reg.len() + 1);
let asm = format!("{} {},{}", inst, reg, mem);
if is_spill_related {
self.add_asm_inst_with_spill(
asm,
aarch64_ignore_zero_register(id, vec![loc]),
ignore_zero_register(id, vec![loc]),
uses,
true,
SpillMemInfo::Load(src.clone())
@@ -1797,7 +1795,7 @@ impl ASMCodeGen {
} else {
self.add_asm_inst(
asm,
aarch64_ignore_zero_register(id, vec![loc]),
ignore_zero_register(id, vec![loc]),
uses,
true
)
@@ -1806,26 +1804,26 @@ impl ASMCodeGen {
}
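
On the "unscaled immediate memory offsets" item from the commit message: AArch64's plain LDR/STR immediate forms scale an unsigned 12-bit offset by the access size, while LDUR/STUR accept a signed 9-bit unscaled offset. A hedged sketch of the selection test that support implies (hypothetical helper, not the crate's actual code):

// True when a byte offset needs the unscaled LDUR/STUR form: it fails the
// scaled LDR/STR encoding but fits the signed 9-bit unscaled range.
fn needs_unscaled(offset: i64, access_size: i64) -> bool {
    let scaled_ok = offset >= 0
        && offset % access_size == 0
        && offset / access_size < 4096; // unsigned 12-bit, scaled by access size
    let unscaled_ok = (-256..256).contains(&offset); // signed 9-bit
    !scaled_ok && unscaled_ok
}

fn main() {
    assert!(!needs_unscaled(8, 8)); // LDR x0, [x1, #8] (scaled form fits)
    assert!(needs_unscaled(-8, 8)); // LDUR x0, [x1, #-8] (negative offset)
    assert!(needs_unscaled(3, 8));  // LDUR (offset not a multiple of 8)
}
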
// TODO: What to do when src1/src2/stack are the same???
fn aarch64_internal_load_pair(&mut self, inst: &str, dest1: &P<Value>, dest2: &P<Value>, src: &P<Value>) {
fn internal_load_pair(&mut self, inst: &str, dest1: &P<Value>, dest2: &P<Value>, src: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {} -> {},{}", inst, src, dest1, dest2);
let (reg1, id1, loc1) = self.prepare_reg(dest1, 3 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest2, 3 + 1 + reg1.len() + 1);
let (mem, uses) = self.aarch64_prepare_mem(src, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let (mem, uses) = self.prepare_mem(src, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let asm = format!("{} {},{},{}", inst, reg1, reg2, mem);
self.add_asm_inst(
asm,
aarch64_create_hash_map(vec![(id1, loc1), (id2, loc2)]),
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
uses,
true
)
}
fn aarch64_internal_store(&mut self, inst: &str, dest: Mem, src : &P<Value>, is_spill_related: bool)
fn internal_store(&mut self, inst: &str, dest: Mem, src : &P<Value>, is_spill_related: bool)
{
let op_len = primitive_byte_size(&src.ty);
let inst = inst.to_string() + match op_len {
@@ -1839,7 +1837,7 @@ impl ASMCodeGen {
trace!("emit: \t{} {} -> {}", inst, src, dest);
let (reg, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
let (mem, mut uses) = self.aarch64_prepare_mem(dest, inst.len() + 1 + reg.len() + 1);
let (mem, mut uses) = self.prepare_mem(dest, inst.len() + 1 + reg.len() + 1);
// the register we used for the memory location is counted as 'use'
// use the vec from mem as 'use' (push use reg from src to it)
...