Commit 07c70394 authored by Isaac Oscar Gariano

Remove live-in and live-out for aarch64

parent c0089a9f
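
In short: this commit drops the per-block live-in/live-out bookkeeping from the aarch64 backend. The two set_block_live* methods are removed from the CodeGenerator trait (and its ASM implementation), and the instruction selection helper start_block no longer takes a live-in list. A condensed before/after sketch of the affected signatures, copied from the hunks below:

    // Before:
    fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>);
    fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>);
    fn start_block(&mut self, block: String, live_in: &Vec<P<Value>>)

    // After: a block is opened by name alone.
    fn start_block(&mut self, block: String)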
@@ -2035,59 +2035,6 @@ impl CodeGenerator for ASMCodeGen {
self.cur().blocks.contains_key(&block_name)
}
-fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>) {
-let cur = self.cur_mut();
-match cur.blocks.get_mut(&block_name) {
-Some(ref mut block) => {
-if block.livein.is_empty() {
-let mut live_in = {
-let mut ret = vec![];
-for p in live_in {
-match p.extract_ssa_id() {
-Some(id) => ret.push(id),
-// this should not happen
-None => error!("{} as live-in of block {} is not SSA", p, block_name)
-}
-}
-ret
-};
-block.livein.append(&mut live_in);
-} else {
-panic!("seems we are inserting livein to block {} twice", block_name);
-}
-}
-None => panic!("haven't created ASMBlock for {}", block_name)
-}
-}
-fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>) {
-let cur = self.cur_mut();
-match cur.blocks.get_mut(&block_name) {
-Some(ref mut block) => {
-if block.liveout.is_empty() {
-let mut live_out = {
-let mut ret = vec![];
-for p in live_out {
-match p.extract_ssa_id() {
-Some(id) => ret.push(id),
-// the liveout are actually args out of this block
-// (they can be constants)
-None => trace!("{} as live-out of block {} is not SSA", p, block_name)
-}
-}
-ret
-};
-block.liveout.append(&mut live_out);
-} else {
-panic!("seems we are inserting liveout to block {} twice", block_name);
-}
-}
-None => panic!("haven't created ASMBlock for {}", block_name)
-}
-}
fn add_cfi_sections(&mut self, arg: &str) { self.add_asm_symbolic(format!(".cfi_sections {}", arg)); }
fn add_cfi_startproc(&mut self) {
self.add_asm_symbolic(".cfi_startproc".to_string());
@@ -32,8 +32,6 @@ pub trait CodeGenerator {
fn start_block(&mut self, block_name: MuName);
fn block_exists(&self, block_name: MuName) -> bool;
fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
-fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>);
-fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>);
fn end_block(&mut self, block_name: MuName);
// add CFI info
@@ -190,7 +190,7 @@ impl <'a> InstructionSelection {
// we need to explicitly jump to it
self.finish_block();
let fallthrough_temp_block = make_block_name(&self.current_fv_name, node.id(), "branch_fallthrough", );
-self.start_block(fallthrough_temp_block, &vec![]);
+self.start_block(fallthrough_temp_block);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name();
self.backend.emit_b(fallthrough_target);
@@ -344,7 +344,7 @@ impl <'a> InstructionSelection {
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), format!("switch_not_met_case_{}", case_op_index).as_str());
-self.start_block(block_name, &vec![]);
+self.start_block(block_name);
}
// emit default
@@ -637,7 +637,7 @@ impl <'a> InstructionSelection {
self.backend.emit_tbnz(&tmp_res, (to_ty_size - 1) as u8, blk_negative.clone());
self.finish_block();
-self.start_block(blk_positive.clone(), &vec![]);
+self.start_block(blk_positive.clone());
{
// check to see if the higher bits are the same as the
// sign bit (which is 0); if they're not, there's an overflow
@@ -651,7 +651,7 @@ impl <'a> InstructionSelection {
self.backend.emit_b(blk_end.clone());
self.finish_block();
}
-self.start_block(blk_negative.clone(), &vec![]);
+self.start_block(blk_negative.clone());
{
self.backend.emit_mvn(&tmp, &tmp_res);
// check to see if the higher bits of temp are the same as the
@@ -665,7 +665,7 @@ impl <'a> InstructionSelection {
self.backend.emit_csel(&tmp_res, &tmp, &tmp_res, "EQ");
self.finish_block();
}
-self.start_block(blk_end.clone(), &vec![]);
+self.start_block(blk_end.clone());
}
}
},
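
The overflow test described in the comments above is an ordinary sign-extension check; a small illustrative helper (not part of this commit) showing the same idea on plain integers:

    // A value fits in a signed `bits`-wide integer iff every bit above the
    // sign bit is a copy of the sign bit.
    fn fits_in_signed(value: i64, bits: u32) -> bool {
        debug_assert!(bits >= 1 && bits < 64);
        let high = value >> (bits - 1); // arithmetic shift replicates the sign bit
        high == 0 || high == -1        // all zeros (non-negative) or all ones (negative)
    }
    // fits_in_signed(127, 8) == true; fits_in_signed(128, 8) == false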
@@ -751,7 +751,7 @@ impl <'a> InstructionSelection {
let blk_load_start = make_block_name(&self.current_fv_name, node.id(), "load_start");
// load_start:
-self.start_block(blk_load_start.clone(), &vec![temp_loc.clone()]);
+self.start_block(blk_load_start.clone());
// Load the value:
@@ -853,7 +853,7 @@ impl <'a> InstructionSelection {
let blk_store_start = make_block_name(&self.current_fv_name, node.id(), "store_start");
// store_start:
-self.start_block(blk_store_start.clone(), &vec![temp_loc.clone()]);
+self.start_block(blk_store_start.clone());
let success = make_temporary(f_context, UINT1_TYPE.clone(), vm);
let discard_reg = cast_value(&success, &UINT64_TYPE);
@@ -924,7 +924,7 @@ impl <'a> InstructionSelection {
self.finish_block();
// cmpxchg_start:
-self.start_block(blk_cmpxchg_start.clone(), &vec![loc.clone(),expected.clone(), desired.clone()]);
+self.start_block(blk_cmpxchg_start.clone());
if use_acquire {
match res_value.ty.v {
@@ -1011,7 +1011,7 @@ impl <'a> InstructionSelection {
self.finish_block();
// cmpxchg_failed:
-self.start_block(blk_cmpxchg_failed.clone(), &vec![res_success.clone(), res_value.clone()]);
+self.start_block(blk_cmpxchg_failed.clone());
self.backend.emit_clrex();
// Set res_success to 1 (the same value STXR/STLXR uses to indicate failure)
@@ -1020,7 +1020,7 @@ impl <'a> InstructionSelection {
self.finish_block();
// cmpxchg_succeded:
-self.start_block(blk_cmpxchg_succeded.clone(), &vec![res_success.clone(), res_value.clone()]);
+self.start_block(blk_cmpxchg_succeded.clone());
// this NOT is needed as STXR/STLXR returns success as '0', whereas the Mu spec says it should be 1
self.backend.emit_eor_imm(&res_success, &res_success, 1);
}
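
For context, the cmpxchg_start/cmpxchg_failed/cmpxchg_succeded blocks form the usual load-exclusive/store-exclusive retry pattern, and the EOR above only reconciles flag conventions; an illustrative sketch of that fix-up (not from the commit):

    // STXR/STLXR write 0 to the status register on success and 1 on failure,
    // while the Mu spec wants success == 1, hence the EOR with immediate 1.
    fn mu_success_flag(stxr_status: u64) -> u64 {
        stxr_status ^ 1 // 0 (hardware success) -> 1 (Mu success), and vice versa
    }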
@@ -1218,7 +1218,82 @@ impl <'a> InstructionSelection {
Some(node), f_context, vm
);
}
+/*Instruction_::AllocA(ref ty) => {
+trace!("instsel on ALLOCA");
+if cfg!(debug_assertions) {
+match ty.v {
+MuType_::Hybrid(_) => panic!("cannot use ALLOCA for hybrid, use ALLOCAHYBRID instead"),
+_ => {}
+}
+}
+let ty_info = vm.get_backend_type_info(ty.id());
+let size = ty_info.size;
+let ty_align = ty_info.alignment;
+assert!(16 % ty_align == 0);
+// The stack pointer has to be 16-byte aligned
+emit_sub_u64(self.backend.as_mut(), &SP, &SP, f_context, vm, round_up(size, 16) as usize);
+let tmp_res = self.get_result_value(node, 0);
+self.backend.emit_mov(&tmp_res, &SP);
+}*/
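
The round_up helper is assumed here to be the usual power-of-two round-up; a minimal sketch of that assumption:

    // Assumed behaviour (not shown in this commit): round `n` up to the next
    // multiple of `align`, where `align` is a power of two.
    fn round_up(n: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        (n + align - 1) & !(align - 1)
    }
    // round_up(24, 16) == 32; round_up(16, 16) == 16, so SP stays 16-byte aligned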
+/*Instruction_::NewHybrid(ref ty, var_len) => {
+trace!("instsel on ALLOCAHYBRID");
+if cfg!(debug_assertions) {
+match ty.v {
+MuType_::Hybrid(_) => {},
+_ => panic!("ALLOCAHYBRID is only for allocating hybrid types, use ALLOCA for others")
+}
+}
+let ty_info = vm.get_backend_type_info(ty.id());
+let ty_align = ty_info.alignment;
+let fix_part_size = ty_info.size;
+let var_ty_size = match ty.v {
+MuType_::Hybrid(ref name) => {
+let map_lock = HYBRID_TAG_MAP.read().unwrap();
+let hybrid_ty_ = map_lock.get(name).unwrap();
+let var_ty = hybrid_ty_.get_var_ty();
+vm.get_backend_type_info(var_ty.id()).size
+},
+_ => panic!("only expect HYBRID type here")
+};
+// actual size = fix_part_size + var_ty_size * len
+let (actual_size, length) = {
+let ref ops = inst.ops;
+let ref var_len = ops[var_len];
+if match_node_int_imm(var_len) {
+let var_len = node_imm_to_u64(var_len);
+let actual_size = fix_part_size + var_ty_size * (var_len as usize);
+emit_sub_u64(self.backend.as_mut(), &SP, &SP, f_context, vm, round_up(actual_size, 16));
+(
+make_value_int_const(actual_size as u64, vm),
+make_value_int_const(var_len as u64, vm)
+)
+} else {
+let tmp_actual_size = make_temporary(f_context, UINT64_TYPE.clone(), vm);
+let tmp_var_len = self.emit_ireg(var_len, f_content, f_context, vm);
+// tmp_actual_size = tmp_var_len*var_ty_size
+emit_mul_u64(self.backend.as_mut(), &tmp_actual_size, &tmp_var_len, f_context, vm, var_ty_size as u64);
+// tmp_actual_size = tmp_var_len*var_ty_size + fix_part_size
+emit_add_u64(self.backend.as_mut(), &tmp_actual_size, &tmp_actual_size, f_context, vm, fix_part_size as u64);
+// SP -= (fix_part_size + var_ty_size * var_len + 15) & !15
+(tmp_actual_size, tmp_var_len)
+}
+};
+let tmp_res = self.get_result_value(node, 0);
+self.backend.emit_mov(&tmp_res, &SP);
+}*/
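
Putting the size computation together (reusing the round_up sketch above; the numbers are illustrative, not from the commit):

    fn hybrid_alloc_size(fix_part_size: usize, var_ty_size: usize, len: usize) -> usize {
        // fixed part + variable part, rounded up so SP stays 16-byte aligned
        round_up(fix_part_size + var_ty_size * len, 16)
    }
    // e.g. hybrid_alloc_size(16, 8, 3) == round_up(40, 16) == 48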
// Runtime Entry
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
@@ -2721,19 +2796,19 @@ impl <'a> InstructionSelection {
self.finish_block();
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
-self.start_block(block_name, &vec![]);
+self.start_block(block_name);
self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_context, vm);
self.backend.emit_b(blk_alloc_large_end.clone());
self.finish_block();
// alloc_large:
-self.start_block(blk_alloc_large.clone(), &vec![size.clone()]);
+self.start_block(blk_alloc_large.clone());
self.emit_alloc_sequence_large(tmp_allocator.clone(), size, align, node, f_context, vm);
self.finish_block();
// alloc_large_end:
-self.start_block(blk_alloc_large_end.clone(), &vec![]);
+self.start_block(blk_alloc_large_end.clone());
self.get_result_value(node, 0)
}
@@ -2860,50 +2935,6 @@ impl <'a> InstructionSelection {
}
}
-// Returns a list of registers used for return values (used to set the 'livein' for the epilogue block)
-fn compute_return_registers(&mut self, t: &P<MuType>, vm: &VM) -> Vec<P<Value>>
-{
-use ast::types::MuType_::*;
-let size = round_up(vm.get_type_size(t.id()), 8);
-match t.v {
-Vector(_, _) => unimplemented!(),
-Float | Double =>
-vec![get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(&t, vm))],
-Hybrid(_) => panic!("can't return a hybrid"),
-Struct(_) | Array(_, _) => {
-let hfa_n = hfa_length(t.clone());
-if hfa_n > 0 {
-let mut res = vec![get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(&t, vm)/hfa_n)];
-for i in 1..hfa_n {
-res.push(get_alias_for_length(RETURN_FPRs[i].id(), get_bit_size(&t, vm)/hfa_n));
-}
-res
-} else if size <= 8 {
-// Return in a single GPR
-vec![get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(&t, vm))]
-} else if size <= 16 {
-// Return in 2 GPRs
-vec![RETURN_GPRs[0].clone(), RETURN_GPRs[1].clone()]
-} else {
-// Returned on the stack
-vec![]
-}
-}
-Void => vec![], // Nothing to return
-Int(128) => // Return in 2 GPRs
-vec![RETURN_GPRs[0].clone(), RETURN_GPRs[1].clone()],
-// Integral or pointer type
-_ =>
-// can return in a single GPR
-vec![get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(&t, vm))]
-}
-}
fn compute_return_locations(&mut self, t: &P<MuType>, loc: &P<Value>, vm: &VM) -> P<Value>
{
use ast::types::MuType_::*;
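
The removed compute_return_registers follows the AArch64 calling convention's return rules (HFAs in floating-point registers, small aggregates in one or two general registers, larger ones through memory). A condensed, illustrative restatement of the non-HFA aggregate cases (not part of the commit):

    // Where an aggregate return value lives, by size (sizes already rounded up to 8 bytes).
    enum RetLoc {
        OneGpr,  // size <= 8:  a single general-purpose register
        TwoGprs, // size <= 16: a pair of general-purpose registers
        Stack,   // larger:     returned through memory
    }

    fn classify_aggregate(size: usize) -> RetLoc {
        if size <= 8 {
            RetLoc::OneGpr
        } else if size <= 16 {
            RetLoc::TwoGprs
        } else {
            RetLoc::Stack
        }
    }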
@@ -3563,7 +3594,7 @@ impl <'a> InstructionSelection {
fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, sig: &P<CFuncSig>, f_context: &mut FunctionContext, vm: &VM) {
let prologue_block = format!("{}:{}", self.current_fv_name, PROLOGUE_BLOCK_NAME);
-self.start_block(prologue_block, &vec![]);
+self.start_block(prologue_block);
// Push the frame pointer and link register onto the stack
self.backend.emit_push_pair(&LR, &FP, &SP);
@@ -3671,9 +3702,8 @@ impl <'a> InstructionSelection {
// Live in are the registers that hold the return values
// (if the value is returned through 'XR' then the caller is responsible for managing its lifetime)
-let livein = self.compute_return_registers(&ret_type, vm);
let epilogue_block = format!("{}:{}", self.current_fv_name, EPILOGUE_BLOCK_NAME);
-self.start_block(epilogue_block, &livein);
+self.start_block(epilogue_block);
// pop all callee-saved registers
for i in (0..CALLEE_SAVED_FPRs.len()).rev() {
@@ -4373,11 +4403,9 @@ impl <'a> InstructionSelection {
self.backend.end_block(cur_block.clone());
}
-// TODO: Do we need live_in
-fn start_block(&mut self, block: String, live_in: &Vec<P<Value>>) {
+fn start_block(&mut self, block: String) {
self.current_block = Some(block.clone());
self.backend.start_block(block.clone());
-self.backend.set_block_livein(block, &live_in);
}
}
@@ -4449,20 +4477,11 @@ impl CompilerPass for InstructionSelection {
if block.is_receiving_exception_arg() {
// this block uses exception arguments
// we need to add it to livein, and also emit landingpad for it
let exception_arg = block_content.exn_arg.as_ref().unwrap();
-// live in is args of the block + exception arg
-let mut livein = block_content.args.to_vec();
-livein.push(exception_arg.clone());
-self.backend.set_block_livein(block_label.clone(), &livein);
// need to insert a landing pad
self.emit_landingpad(&exception_arg, &mut func.context, vm);
-} else {
-// live in is args of the block
-self.backend.set_block_livein(block_label.clone(), &block_content.args);
}
// doing the actual instruction selection