
Commit f31749c9 authored by Isaac Oscar Gariano

Fixed all errors that were found when running the Python tests.

Also modified some of the Rust tests so that they are applicable to aarch64.
parent c313f4d7
@@ -667,7 +667,7 @@ impl fmt::Display for TreeNode {
match self.v {
TreeNode_::Value(ref pv) => pv.fmt(f),
TreeNode_::Instruction(ref inst) => {
write!(f, "{}", inst)
write!(f, "({})", inst)
}
}
}
@@ -763,7 +763,8 @@ impl Value {
}
}
const DISPLAY_TYPE : bool = true;
const DISPLAY_ID : bool = true;
const DISPLAY_TYPE : bool = false;
const PRINT_ABBREVIATE_NAME: bool = true;
impl fmt::Debug for Value {
@@ -786,7 +787,7 @@ impl fmt::Display for Value {
write!(f, "{}(@{})", ty, self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "{}(%{})", mem, self.hdr)
write!(f, "%{}{})", self.hdr, mem)
}
}
} else {
@@ -801,7 +802,7 @@ impl fmt::Display for Value {
write!(f, "@{}", self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "{}(%{})", mem, self.hdr)
write!(f, "%{}{}", self.hdr, mem)
}
}
}
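
To see the net effect of these format-string changes, here is a minimal standalone sketch; the `"foo"` and `"[X0,#8]"` strings are made-up placeholder renderings of `self.hdr` and `mem`, not values from the compiler:

```rust
// Illustrative before/after of the Memory display change.
fn main() {
    let (hdr, mem) = ("foo", "[X0,#8]");
    // old: write!(f, "{}(%{})", mem, self.hdr)
    assert_eq!(format!("{}(%{})", mem, hdr), "[X0,#8](%foo)");
    // new: write!(f, "%{}{}", self.hdr, mem)
    assert_eq!(format!("%{}{}", hdr, mem), "%foo[X0,#8]");
}
```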
@@ -1170,12 +1171,11 @@ impl PartialEq for MuEntityHeader {
}
}
const DISPLAY_ID : bool = false;
impl fmt::Display for MuEntityHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if DISPLAY_ID {
if self.name().is_none() {
write!(f, "UNAMED #{}", self.id)
write!(f, "{}", self.id)
} else {
if PRINT_ABBREVIATE_NAME {
write!(f, "{} #{}", self.abbreviate_name().unwrap(), self.id)
@@ -527,13 +527,22 @@ impl MachineCode for ASMCode {
fn is_jmp(&self, index: usize) -> Option<MuName> {
let inst = self.code.get(index);
match inst {
Some(inst) if inst.code.starts_with("B.") || inst.code.starts_with("B ")
|| inst.code.starts_with("CBNZ ") || inst.code.starts_with("CBZ ")
|| inst.code.starts_with("TBNZ ") || inst.code.starts_with("TBZ ")=> {
Some(inst) if inst.code.starts_with("B.") || inst.code.starts_with("B ") => {
// Destination is the first argument
let split : Vec<&str> = inst.code.split(' ').collect();
Some(ASMCodeGen::unmangle_block_label(self.name.clone(), String::from(split[1])))
}
Some(inst) if inst.code.starts_with("CBNZ ") || inst.code.starts_with("CBZ ") => {
// Destination is the second argument
let split : Vec<&str> = inst.code.split(',').collect();
Some(ASMCodeGen::unmangle_block_label(self.name.clone(), String::from(split[1])))
}
Some(inst) if inst.code.starts_with("TBNZ ") || inst.code.starts_with("TBZ ") => {
// Destination is the third argument
let split : Vec<&str> = inst.code.split(',').collect();
Some(ASMCodeGen::unmangle_block_label(self.name.clone(), String::from(split[2])))
}
_ => None
}
}
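
The operand positions the new match arms rely on differ per instruction: `B`/`B.cond` take the target as the first operand, `CBZ`/`CBNZ` as the second, and `TBZ`/`TBNZ` as the third. A minimal standalone sketch of that extraction logic (the helper name and `main` are illustrative, not part of the commit):

```rust
// Sketch: pulling the branch-target label out of AArch64 textual instructions,
// mirroring the operand positions the new match arms depend on.
fn branch_target(code: &str) -> Option<&str> {
    if code.starts_with("B.") || code.starts_with("B ") {
        // "B label" / "B.EQ label": the target is the only operand.
        code.split_whitespace().nth(1)
    } else if code.starts_with("CBNZ ") || code.starts_with("CBZ ") {
        // "CBZ X0, label": the target is the second operand.
        code.split(',').nth(1).map(str::trim)
    } else if code.starts_with("TBNZ ") || code.starts_with("TBZ ") {
        // "TBZ X0, #3, label": the target is the third operand.
        code.split(',').nth(2).map(str::trim)
    } else {
        None
    }
}

fn main() {
    assert_eq!(branch_target("B.EQ some_block"), Some("some_block"));
    assert_eq!(branch_target("CBZ X0, some_block"), Some("some_block"));
    assert_eq!(branch_target("TBZ X0, #3, some_block"), Some("some_block"));
    assert_eq!(branch_target("ADD X0, X1, X2"), None);
}
```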
@@ -649,8 +658,8 @@ impl MachineCode for ASMCode {
fn set_inst_nop(&mut self, index: usize) {
self.code[index].code.clear();
// self.code.remove(index);
// self.code.insert(index, ASMInst::nop());
// self.code.remove(index);
// self.code.insert(index, ASMInst::nop());
}
fn remove_unnecessary_callee_saved(&mut self, used_callee_saved: Vec<MuID>) -> HashSet<MuID> {
@@ -755,8 +764,8 @@ impl MachineCode for ASMCode {
fn trace_inst(&self, i: usize) {
trace!("#{}\t{:30}\t\tdefine: {:?}\tuses: {:?}\tpred: {:?}\tsucc: {:?}",
i, self.code[i].code, self.get_inst_reg_defines(i), self.get_inst_reg_uses(i),
self.code[i].preds, self.code[i].succs);
i, self.code[i].code, self.get_inst_reg_defines(i), self.get_inst_reg_uses(i),
self.code[i].preds, self.code[i].succs);
}
fn get_ir_block_livein(&self, block: &str) -> Option<&Vec<MuID>> {
@@ -944,7 +953,6 @@ impl ASMBlock {
}
}
// EXPORTED
pub struct ASMCodeGen {
cur: Option<Box<ASMCode>>
}
@@ -997,7 +1005,7 @@ impl ASMCodeGen {
self.cur_mut().code.push(ASMInst::symbolic(code));
}
fn add_asm_symbolic(&mut self, code: String){
fn add_asm_symbolic(&mut self, code: String) {
trace!("emit: {}", code);
self.cur_mut().code.push(ASMInst::symbolic(code));
}
@@ -1009,20 +1017,20 @@ impl ASMCodeGen {
fn add_asm_call(&mut self, code: String, potentially_excepting: Option<MuName>, target: Option<(MuID, ASMLocation)>) {
// a call instruction will use all the argument registers
// do not need
let mut uses : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
if target.is_some() {
let (id, loc) = target.unwrap();
uses.insert(id, vec![loc]);
}
// for reg in ARGUMENT_GPRs.iter() {
// uses.insert(reg.id(), vec![]);
// }
// for reg in ARGUMENT_FPRs.iter() {
// uses.insert(reg.id(), vec![]);
// }
// for reg in ARGUMENT_GPRs.iter() {
// uses.insert(reg.id(), vec![]);
// }
// for reg in ARGUMENT_FPRs.iter() {
// uses.insert(reg.id(), vec![]);
// }
// defines: return registers
let mut defines : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
let mut defines: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
for reg in RETURN_GPRs.iter() {
defines.insert(reg.id(), vec![]);
}
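
A call is modeled as defining the return registers even though no textual operand names them, so the register allocator treats their previous values as dead across the call. A minimal sketch of the pattern using plain `std` types in place of `LinkedHashMap` (the register IDs are made up):

```rust
use std::collections::HashMap;

// Sketch: "a call defines the return registers"; IDs 0 and 1 stand in
// for X0/X1, and the empty Vec means "defined at no concrete location
// in the assembly string".
fn main() {
    let return_gprs: [usize; 2] = [0, 1];
    let mut defines: HashMap<usize, Vec<usize>> = HashMap::new();
    for reg in return_gprs.iter() {
        defines.insert(*reg, vec![]);
    }
    assert!(defines.values().all(|locs| locs.is_empty()));
}
```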
@@ -1054,15 +1062,15 @@ impl ASMCodeGen {
// otherwise it will keep RETURN REGS alive
// and if there is no actual move into RETURN REGS, it will keep RETURN REGS alive for very long
// and prevents anything from using those registers
self.add_asm_inst_internal(code, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Return, None);
self.add_asm_inst_internal(code, linked_hashmap! {}, linked_hashmap! {}, false, ASMBranchTarget::Return, None);
}
fn add_asm_branch(&mut self, code: String, target: MuName) {
self.add_asm_inst_internal(code, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Unconditional(target), None);
self.add_asm_inst_internal(code, linked_hashmap! {}, linked_hashmap! {}, false, ASMBranchTarget::Unconditional(target), None);
}
fn add_asm_branch2(&mut self, code: String, target: MuName) {
self.add_asm_inst_internal(code, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Conditional(target), None);
self.add_asm_inst_internal(code, linked_hashmap! {}, linked_hashmap! {}, false, ASMBranchTarget::Conditional(target), None);
}
fn add_asm_inst(
@@ -1160,14 +1168,14 @@ impl ASMCodeGen {
}
}
let mut ids : Vec<MuID> = vec![];
let mut locs : Vec<ASMLocation> = vec![];
let mut result_str : String = "".to_string();
let mut ids: Vec<MuID> = vec![];
let mut locs: Vec<ASMLocation> = vec![];
let mut result_str: String = "".to_string();
let mut loc_cursor : usize = loc;
let mut loc_cursor: usize = loc;
match op.v {
// offset(base,index,scale)
Value_::Memory(MemoryLocation::Address{ref base, ref offset, shift, signed}) => {
Value_::Memory(MemoryLocation::Address { ref base, ref offset, shift, signed }) => {
result_str.push('[');
loc_cursor += 1;
// deal with base, base is ssa
@@ -1195,15 +1203,12 @@ impl ASMCodeGen {
result_str.push_str(",");
let n = offset.ty.get_int_length().unwrap();
let shift_type =
if n == 64 { if signed {"SXTX"} else { "LSL" } }
else if n == 32 { if signed {"SXTW"} else { "UXTW" } }
else { panic!("Unexpected size for offset register") };
if n == 64 { if signed { "SXTX" } else { "LSL" } } else if n == 32 { if signed { "SXTW" } else { "UXTW" } } else { panic!("Unexpected size for offset register") };
result_str.push_str(&shift_type);
result_str.push_str(" #");
let shift_str = shift.to_string();
result_str.push_str(&shift_str);
},
Value_::Constant(Constant::Int(val)) => {
let str = (val as i32).to_string();
@@ -1220,23 +1225,26 @@ impl ASMCodeGen {
}
// scale (for LSL type)
if shift != 0 {
}
if shift != 0 {}
result_str.push(']');
},
Value_::Memory(MemoryLocation::Symbolic{ref label, is_global}) => {
Value_::Memory(MemoryLocation::Symbolic { ref label, is_global }) => {
let label =
if is_global { pic_symbol(label.clone()) }
else { symbol(label.clone()) };
if is_global { format!(":got:{}", label.clone()) }
else { label.clone() };
result_str.push_str(label.as_str());
},
Value_::Memory(MemoryLocation::VirtualAddress {..}) => {
panic!("Can't directly use a virtual adress (try calling emit_mem first)");
}
_ => panic!("expect mem location as value")
}
let uses : LinkedHashMap<MuID, Vec<ASMLocation>> = {
let mut map : LinkedHashMap<MuID, Vec<ASMLocation>> = linked_hashmap!{};
let uses: LinkedHashMap<MuID, Vec<ASMLocation>> = {
let mut map: LinkedHashMap<MuID, Vec<ASMLocation>> = linked_hashmap! {};
for i in 0..ids.len() {
let id = ids[i];
let loc = locs[i].clone();
@@ -1272,7 +1280,13 @@ impl ASMCodeGen {
fn unmangle_block_label(fn_name: MuName, label: String) -> MuName {
// input: _fn_name_BLOCK_NAME
// return BLOCK_NAME
let split : Vec<&str> = label.splitn(2, &(fn_name + "_")).collect();
let split: Vec<&str> = label.splitn(2, &(fn_name.clone() + "_")).collect();
// TODO: Why was this if statement unnecessary on x86 (perhaps block names are wrong)
if split.len() == 0 {
trace!("unmangle_block_label: fn_name '{}', label '{}', split ", fn_name, label);
} else if split.len() == 1 {
trace!("unmangle_block_label: fn_name '{}', label '{}', split '{}'", fn_name, label, split[0]);
};
String::from(split[1])
}
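
For context on the new `trace!` probes: `str::splitn` always yields at least one element (even for an empty input), so the `len() == 0` branch can never fire; the interesting case is `len() == 1`, where the separator was not found and the subsequent `split[1]` would panic. A minimal sketch of the unmangling itself (function and names are illustrative, not from the commit):

```rust
// Sketch of the splitn-based unmangling, assuming labels of the
// form "_<fn_name>_<block_name>".
fn unmangle(fn_name: &str, label: &str) -> String {
    // For fn_name "foo" and label "_foo_entry", splitn yields
    // ["_", "entry"]; a label without the "foo_" separator yields a
    // single element, and indexing split[1] would then panic.
    let split: Vec<&str> = label.splitn(2, &format!("{}_", fn_name)).collect();
    String::from(split[1])
}

fn main() {
    assert_eq!(unmangle("foo", "_foo_entry"), "entry");
}
```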
@@ -1280,9 +1294,6 @@ impl ASMCodeGen {
self.cur.take().unwrap()
}
fn emit_ldr_spill(&mut self, dest: Reg, src: Mem) { self.internal_load("LDR", dest, src, false, true, false); }
fn emit_str_spill(&mut self, dest: Mem, src: Reg) { self.internal_store("STR", dest, src, true, false) }
fn internal_simple(&mut self, inst: &str) {
let inst = inst.to_string();
trace!("emit: \t{}", inst);
@@ -1326,7 +1337,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // A system instruction
// A system instruction
fn internal_system(&mut self, inst: &str, option: &str, src: &P<Value>) {
let inst = inst.to_string();
let option = option.to_string();
@@ -1338,7 +1349,7 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap! {},
ignore_zero_register(id1, vec![loc1]),
false
)
@@ -1349,8 +1360,8 @@ impl ASMCodeGen {
let (reg1, id1, loc1) = self.prepare_reg(src, inst.len() + 1);
// symbolic label, we don't need to patch it
let asm = format!("{} {},{}", inst, reg1, symbol(self.mangle_block_label(dest_name.clone())));
self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{ id1 => vec![loc1]}, false, ASMBranchTarget::Conditional(dest_name), None);
let asm = format!("{} {},{}", inst, reg1, self.mangle_block_label(dest_name.clone()));
self.add_asm_inst_internal(asm, linked_hashmap! {}, linked_hashmap! { id1 => vec![loc1]}, false, ASMBranchTarget::Conditional(dest_name), None);
}
fn internal_branch_op_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, dest_name: MuName) {
@@ -1358,20 +1369,20 @@ impl ASMCodeGen {
let (reg1, id1, loc1) = self.prepare_reg(src1, inst.len() + 1);
// symbolic label, we don't need to patch it
let asm = format!("{} {},#{},{}", inst, reg1, src2, symbol(self.mangle_block_label(dest_name.clone())));
self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{ id1 => vec![loc1]}, false, ASMBranchTarget::Conditional(dest_name), None);
let asm = format!("{} {},#{},{}", inst, reg1, src2, self.mangle_block_label(dest_name.clone()));
self.add_asm_inst_internal(asm, linked_hashmap! {}, linked_hashmap! { id1 => vec![loc1]}, false, ASMBranchTarget::Conditional(dest_name), None);
}
#[warn(unused_variables)] // Same as internal_binop except extends the second source register
fn internal_binop_ext(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
// Same as internal_binop except extends the second source register
fn internal_binop_ext(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, signed: bool, shift: u8) {
let inst = inst.to_string();
let ext_s = if signed { "S" } else { "U" };
let ext_p = match src2.ty.get_int_length() {
Some(8) => "B",
Some(8) => "B",
Some(16) => "H",
Some(32) => "W",
Some(64) => "X",
_ => panic!("op size: {} dose not support extension", src2.ty.get_int_length().unwrap())
_ => panic!("op size: {} dose not support extension", src2.ty.get_int_length().unwrap())
};
let ext = ext_s.to_string() + "XT" + ext_p;
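
The extend specifier is assembled from a signedness prefix and a width suffix, giving the standard AArch64 extend mnemonics (`SXTB`, `UXTW`, and so on). A standalone sketch of the same construction (hypothetical helper, not from the commit):

```rust
// Sketch: "S"/"U" for signed/unsigned, "XT", then a width letter,
// matching the ext_s/ext_p logic above.
fn extend_specifier(signed: bool, bits: u64) -> String {
    let s = if signed { "S" } else { "U" };
    let w = match bits {
        8 => "B",
        16 => "H",
        32 => "W",
        64 => "X",
        _ => panic!("unsupported extension width: {}", bits),
    };
    format!("{}XT{}", s, w)
}

fn main() {
    assert_eq!(extend_specifier(true, 8), "SXTB");   // sign-extend byte
    assert_eq!(extend_specifier(false, 32), "UXTW"); // zero-extend word
}
```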
@@ -1383,8 +1394,7 @@ impl ASMCodeGen {
let (reg3, id3, loc3) = self.prepare_reg(src2, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let asm =
if shift == 0 { format!("{} {},{},{},{}", inst, reg1, reg2, reg3, ext) }
else { format!("{} {},{},{},{} #{}", inst, reg1, reg2, reg3, ext, shift) };
if shift == 0 { format!("{} {},{},{},{}", inst, reg1, reg2, reg3, ext) } else { format!("{} {},{},{},{} #{}", inst, reg1, reg2, reg3, ext, shift) };
self.add_asm_inst(
@@ -1416,7 +1426,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
// dest <= inst(src1, src2)
fn internal_unop_shift(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} {} -> {}", inst, src, shift, amount, dest);
@@ -1434,7 +1444,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // dest <= inst(src)
// dest <= inst(src)
fn internal_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {} -> {}", inst, src, dest);
@@ -1468,13 +1478,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
ignore_zero_register(id1, vec![loc1]),
linked_hashmap!{},
linked_hashmap! {},
false
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
// dest <= inst(src1, src2)
fn internal_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {} -> {}", inst, src1, src2, dest);
@@ -1493,7 +1503,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
// dest <= inst(src1, src2)
fn internal_binop_shift(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {}, {} {} -> {}", inst, src1, src2, shift, amount, dest);
@@ -1512,8 +1522,8 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2, src3)
fn internal_ternop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, src3 : &P<Value>) {
// dest <= inst(src1, src2, src3)
fn internal_ternop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, src3: &P<Value>) {
let inst = inst.to_string();
trace!("emit: \t{} {}, {}, {} -> {}", inst, src3, src1, src2, dest);
@@ -1549,7 +1559,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, src2)
// PSTATE.<NZCV> = inst(src1, src2)
fn internal_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>)
{
let inst = inst.to_string();
@@ -1562,13 +1572,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap! {},
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // dest <= inst(src1, src2)
// dest <= inst(src1, src2)
fn internal_cmpop_shift(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, shift: &str, amount: u8) {
let inst = inst.to_string();
trace!("emit: \t{} {},{}, {} {}", inst, src1, src2, shift, amount);
@@ -1580,22 +1590,22 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap! {},
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // Same as internal_binop except extends the second source register
fn internal_cmpop_ext(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, signed : bool, shift: u8) {
// Same as internal_binop except extends the second source register
fn internal_cmpop_ext(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, signed: bool, shift: u8) {
let inst = inst.to_string();
let ext_s = if signed { "S" } else { "U" };
let ext_p = match src2.ty.get_int_length() {
Some(8) => "B",
Some(8) => "B",
Some(16) => "H",
Some(32) => "W",
Some(64) => "X",
_ => panic!("op size: {} dose not support extension", src2.ty.get_int_length().unwrap())
_ => panic!("op size: {} dose not support extension", src2.ty.get_int_length().unwrap())
};
let ext = ext_s.to_string() + "XT" + ext_p;
@@ -1609,12 +1619,12 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap! {},
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, src2 [<< 12])
// PSTATE.<NZCV> = inst(src1, src2 [<< 12])
fn internal_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u64, shift: u8)
{
let inst = inst.to_string();
@@ -1630,13 +1640,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
linked_hashmap! { },
ignore_zero_register(id1, vec![loc1]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst(src1, 0.0)
// PSTATE.<NZCV> = inst(src1, 0.0)
fn internal_cmpop_f0(&mut self, inst: &str, src1: &P<Value>)
{
let inst = inst.to_string();
@@ -1648,13 +1658,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
linked_hashmap! { },
ignore_zero_register(id1, vec![loc1]),
false
)
}
#[warn(unused_variables)] // dest <= inst<cond>()
// dest <= inst<cond>()
fn internal_cond_op(&mut self, inst: &str, dest: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
@@ -1667,12 +1677,12 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
ignore_zero_register(id1, vec![loc1]),
linked_hashmap!{},
linked_hashmap! {},
false
)
}
#[warn(unused_variables)] // dest <= inst<cond>(src)
// dest <= inst<cond>(src)
fn internal_cond_unop(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
@@ -1691,7 +1701,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // dest <= inst<cond>(src1, src2)
// dest <= inst<cond>(src1, src2)
fn internal_cond_binop(&mut self, inst: &str, dest: &P<Value>, src1: &P<Value>, src2: &P<Value>, cond: &str) {
let inst = inst.to_string();
let cond = cond.to_string();
@@ -1711,7 +1721,7 @@ impl ASMCodeGen {
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
// PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
fn internal_cond_cmpop(&mut self, inst: &str, src1: &P<Value>, src2: &P<Value>, flags: u8, cond: &str)
{
let inst = inst.to_string();
@@ -1725,13 +1735,13 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap! {},
create_hash_map(vec![(id1, loc1), (id2, loc2)]),
false
)
}
#[warn(unused_variables)] // PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
// PSTATE.<NZCV> = inst<cond>(src1, src2, flags)
fn internal_cond_cmpop_imm(&mut self, inst: &str, src1: &P<Value>, src2: u8, flags: u8, cond: &str)
{
let inst = inst.to_string();
@@ -1744,7 +1754,7 @@ impl ASMCodeGen {
self.add_asm_inst(
asm,
linked_hashmap!{ },
linked_hashmap! { },
ignore_zero_register(id1, vec![loc1]),
false
)
@@ -1759,7 +1769,7 @@ impl ASMCodeGen {
2 => "SH",
4 => "SW",
8 => "",
_ => panic!("unexpected op size: {}", op_len)
_ => panic!("unexpected op size: {}", op_len)
}
} else {
match op_len {
@@ -1802,7 +1812,6 @@ impl ASMCodeGen {
true
)
}
}
// TODO: What to do when src1/src2/stack are the same???
@@ -1822,10 +1831,9 @@ impl ASMCodeGen {
uses,
true
)
}
fn internal_store(&mut self, inst: &str, dest: Mem, src : &P<Value>, is_spill_related: bool, is_callee_saved: bool)
fn internal_store(&mut self, inst: &str, dest: Mem, src: &P<Value>, is_spill_related: bool, is_callee_saved: bool)
{
let op_len = primitive_byte_size(&src.ty);
let inst = inst.to_string() + match op_len {
@@ -1843,11 +1851,12 @@ impl ASMCodeGen {
// the register we used for the memory location is counted as 'use'
// use the vec from mem as 'use' (push use reg from src to it)
if uses.contains_key(&id1) {
if is_zero_register_id(id1) {
// zero register, ignore
} else if uses.contains_key(&id1) {
let mut locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1);
} else if id1 == XZR.extract_ssa_id().unwrap() || id1 == WZR.extract_ssa_id().unwrap() {
// zero register, ignore
} else {
uses.insert(id1, vec![loc1]);
}
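
The reordered branches make the zero-register check take priority, so reads of XZR/WZR never enter the use map. A minimal sketch of the resulting update logic with plain `std` types (the IDs and the predicate are illustrative):

```rust
use std::collections::HashMap;

// Sketch: record a register use at a location, skipping the zero register
// first so it never pollutes liveness information.
fn record_use(
    uses: &mut HashMap<usize, Vec<usize>>,
    id: usize,
    loc: usize,
    is_zero_reg: impl Fn(usize) -> bool,
) {
    if is_zero_reg(id) {
        // XZR/WZR reads are architectural zeros; ignore them.
    } else if let Some(locs) = uses.get_mut(&id) {
        if !locs.contains(&loc) {
            locs.push(loc); // add_unique
        }
    } else {
        uses.insert(id, vec![loc]);
    }
}

fn main() {
    let mut uses = HashMap::new();
    record_use(&mut uses, 31, 0, |id| id == 31); // ignored: zero register
    record_use(&mut uses, 5, 1, |id| id == 31);
    assert_eq!(uses.get(&5), Some(&vec![1]));
    assert!(!uses.contains_key(&31));
}
```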
@@ -1857,14 +1866,14 @@ impl ASMCodeGen {
if is_callee_saved {
self.add_asm_inst_with_callee_saved(
asm,
linked_hashmap!{},
linked_hashmap! {},
uses,
true,
)
} else if is_spill_related {
self.add_asm_inst_with_spill(
asm,
linked_hashmap!{},
linked_hashmap! {},
uses,
true,
SpillMemInfo::Store(dest.clone())
@@ -1879,7 +1888,7 @@ impl ASMCodeGen {
}
}
fn internal_store_exclusive(&mut self, inst: &str, dest: Mem, status : &P<Value>, src : &P<Value>)
fn internal_store_exclusive(&