Commit a428732b authored by Isaac Oscar Gariano

Fixed bug where duplicate blocks were created

parent ce779815
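
Note: judging from the hunks below, the fix replaces every call of the form make_block_name(node, label) with make_block_name(&self.current_fv_name, node.id(), label), so generated block names are qualified by the current function version and node id rather than by the node alone. A minimal standalone sketch of why that avoids duplicate names; the signature shape is taken from the diff, but the separator characters and name format are assumptions, not the project's actual make_block_name:

// Illustrative only: qualify a block label with the function-version name and
// node id so that the same label in two function versions cannot collide.
fn make_block_name(fv_name: &str, node_id: usize, label: &str) -> String {
    // e.g. "foo.v1#42:branch_fallthrough" -- exact format is an assumption
    format!("{}#{}:{}", fv_name, node_id, label)
}

fn main() {
    let a = make_block_name("foo.v1", 42, "branch_fallthrough");
    let b = make_block_name("foo.v2", 42, "branch_fallthrough");
    assert_ne!(a, b); // same label and node id, different function versions
    println!("{}\n{}", a, b);
}
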
......@@ -34,7 +34,7 @@ gcc = "*"
ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
libc="*"
field-offset = "*"
libloading = "*"
......
......@@ -25,4 +25,4 @@ utils = {path = "../utils"}
lazy_static = "*"
log = "*"
simple_logger = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
......@@ -2127,22 +2127,6 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_frame_shrink(&mut self) {
trace!("emit: \tframe shrink");
let asm = format!("ADD SP,SP,#{}", FRAME_SIZE_PLACEHOLDER.clone());
let line = self.line();
self.cur_mut().add_frame_size_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap!{},
false
)
}
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str) {self.internal_binop_str("ADD", dest, src1, src2)}
// Pushes a pair of registers onto the given stack (uses the STP instruction)
......
......@@ -49,7 +49,6 @@ pub trait CodeGenerator {
// emit code to adjust frame
fn emit_frame_grow(&mut self); // Emits a SUB
fn emit_frame_shrink(&mut self); // Emits an ADD
// Used to pass a string that the assembler will interpret as an immediate argument
// (This is necessary to support the use of ELF relocations like ':tprel_hi12:foo')
......
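
The trait comment above explains that the *_str emitters exist so a raw string can be passed through as the assembler immediate, which is how ELF relocation operators such as ':tprel_hi12:foo' reach the output. A hedged sketch of the idea; the helper below is mine and the emitted text is an assumption, not verified against the real ASMCodeGen:

// Illustrative only: the string operand is spliced into the immediate field
// unchanged, so a plain number and a relocation operator are handled alike.
fn emit_add_str(dest: &str, src1: &str, src2: &str) -> String {
    format!("ADD {},{},#{}", dest, src1, src2)
}

fn main() {
    println!("{}", emit_add_str("x0", "x1", "16"));
    println!("{}", emit_add_str("x0", "x1", ":tprel_hi12:foo"));
}
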
......@@ -189,7 +189,7 @@ impl <'a> InstructionSelection {
// it is possible that the fallthrough block is scheduled somewhere else
// we need to explicitly jump to it
self.finish_block();
let fallthrough_temp_block = make_block_name(node, "branch_fallthrough", );
let fallthrough_temp_block = make_block_name(&self.current_fv_name, node.id(), "branch_fallthrough", );
self.start_block(fallthrough_temp_block, &vec![]);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name();
......@@ -343,7 +343,8 @@ impl <'a> InstructionSelection {
self.backend.emit_b_cond("EQ", target);
self.finish_block();
self.start_block(make_block_name(node, format!("switch_not_met_case_{}", case_op_index).as_str()), &vec![]);
let block_name = make_block_name(&self.current_fv_name, node.id(), format!("switch_not_met_case_{}", case_op_index).as_str());
self.start_block(block_name, &vec![]);
}
// emit default
......@@ -628,9 +629,9 @@ impl <'a> InstructionSelection {
// overflows to_ty_size, but not to_ty_reg_size
let to_ty_reg_size = check_op_len(&tmp_res.ty); // The size of the aarch64 register
if to_ty_size != to_ty_reg_size {
let blk_positive = make_block_name(node, "positive");
let blk_negative = make_block_name(node, "negative");
let blk_end = make_block_name(node, "end");
let blk_positive = make_block_name(&self.current_fv_name, node.id(), "positive");
let blk_negative = make_block_name(&self.current_fv_name, node.id(), "negative");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "end");
let tmp = make_temporary(f_context, to_ty.clone(), vm);
self.backend.emit_tbnz(&tmp_res, (to_ty_size - 1) as u8, blk_negative.clone());
......@@ -747,7 +748,7 @@ impl <'a> InstructionSelection {
self.finish_block();
let blk_load_start = make_block_name(node, "load_start");
let blk_load_start = make_block_name(&self.current_fv_name, node.id(), "load_start");
// load_start:
self.start_block(blk_load_start.clone(), &vec![temp_loc.clone()]);
......@@ -849,7 +850,7 @@ impl <'a> InstructionSelection {
self.finish_block();
let blk_store_start = make_block_name(node, "store_start");
let blk_store_start = make_block_name(&self.current_fv_name, node.id(), "store_start");
// store_start:
self.start_block(blk_store_start.clone(), &vec![temp_loc.clone()]);
......@@ -916,9 +917,9 @@ impl <'a> InstructionSelection {
let res_value = self.get_result_value(node, 0);
let res_success = self.get_result_value(node, 1);
let blk_cmpxchg_start = make_block_name(node, "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(node, "cmpxchg_failed");
let blk_cmpxchg_succeded = make_block_name(node, "cmpxchg_succeded");
let blk_cmpxchg_start = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_failed");
let blk_cmpxchg_succeded = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_succeded");
self.finish_block();
......@@ -1217,7 +1218,7 @@ impl <'a> InstructionSelection {
Some(node), f_context, vm
);
}
// Runtime Entry
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
......@@ -2706,8 +2707,8 @@ impl <'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(node, "alloc_large");
let blk_alloc_large_end = make_block_name(node, "alloc_large_end");
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end = make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -2719,7 +2720,8 @@ impl <'a> InstructionSelection {
self.backend.emit_b_cond("GT", blk_alloc_large.clone());
self.finish_block();
self.start_block(make_block_name(node, "allocsmall"), &vec![]);
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
self.start_block(block_name, &vec![]);
self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_context, vm);
self.backend.emit_b(blk_alloc_large_end.clone());
......@@ -3690,10 +3692,6 @@ impl <'a> InstructionSelection {
self.backend.emit_ldr_callee_saved(reg, &loc);
}
//self.backend.emit_frame_shrink();
// Pop the link register and frame pointers
// Pop the frame record
self.backend.emit_mov(&SP, &FP);
self.backend.emit_pop_pair(&FP, &LR, &SP);
......@@ -4361,7 +4359,7 @@ impl <'a> InstructionSelection {
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = {
if cur_node.is_some() {
make_block_name(cur_node.unwrap(), format!("callsite_{}", self.current_callsite_id).as_str())
make_block_name(&self.current_fv_name, cur_node.unwrap().id(), format!("callsite_{}", self.current_callsite_id).as_str())
} else {
format!("{}:callsite_{}", self.current_fv_name, self.current_callsite_id)
}
......
......@@ -2194,22 +2194,6 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_frame_shrink(&mut self) {
trace!("emit frame shrink");
let asm = format!("addq ${},%rsp", FRAME_SIZE_PLACEHOLDER.clone());
let line = self.line();
self.cur_mut().add_frame_size_patchpoint(ASMLocation::new(line, 6, FRAME_SIZE_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap!{},
false
)
}
fn emit_nop(&mut self, bytes: usize) {
trace!("emit: nop ({} bytes)", bytes);
......@@ -3528,7 +3512,7 @@ pub fn emit_context_with_reloc(vm: &VM,
use std::path;
use std::io::prelude::*;
emit_mu_types(vm);
emit_mu_types("", vm);
debug!("---Emit VM Context---");
create_emit_directory(vm);
......
......@@ -44,8 +44,7 @@ pub trait CodeGenerator {
// emit code to adjust frame
fn emit_frame_grow(&mut self);
fn emit_frame_shrink(&mut self);
fn emit_nop(&mut self, bytes: usize);
// comparison
......
......@@ -347,7 +347,7 @@ impl <'a> InstructionSelection {
// we need to explicitly jump to it
self.finish_block();
let fallthrough_temp_block = make_block_name(node, "branch_fallthrough", );
let fallthrough_temp_block = make_block_name(&self.current_fv_name, node.id(), "branch_fallthrough", );
self.start_block(fallthrough_temp_block);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name();
......@@ -417,9 +417,9 @@ impl <'a> InstructionSelection {
}
// jcc
_ => {
let blk_true = make_block_name(node, "select_true");
let blk_false = make_block_name(node, "select_false");
let blk_end = make_block_name(node, "select_end");
let blk_true = make_block_name(&self.current_fv_name, node.id(), "select_true");
let blk_false = make_block_name(&self.current_fv_name, node.id(), "select_false");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -470,9 +470,9 @@ impl <'a> InstructionSelection {
} else if self.match_fpreg(true_val) {
let tmp_res = self.get_result_value(node);
let blk_true = make_block_name(node, "select_true");
let blk_false = make_block_name(node, "select_false");
let blk_end = make_block_name(node, "select_end");
let blk_true = make_block_name(&self.current_fv_name, node.id(), "select_true");
let blk_false = make_block_name(&self.current_fv_name, node.id(), "select_false");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -610,7 +610,8 @@ impl <'a> InstructionSelection {
}
self.finish_block();
self.start_block(make_block_name(node, format!("switch_not_met_case_{}", case_op_index).as_str()));
let block_name = make_block_name(&self.current_fv_name, node.id(), format!("switch_not_met_case_{}", case_op_index).as_str());
self.start_block(block_name);
}
// emit default
......@@ -945,9 +946,9 @@ impl <'a> InstructionSelection {
// testq %tmp_op %tmp_op
self.backend.emit_test_r_r(&tmp_op, &tmp_op);
let blk_if_signed = make_block_name(node, "uitofp_float_if_signed",);
let blk_if_not_signed = make_block_name(node, "uitofp_float_if_not_signed");
let blk_done = make_block_name(node, "uitofp_float_done");
let blk_if_signed = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_if_signed",);
let blk_if_not_signed = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_if_not_signed");
let blk_done = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_done");
// js %if_signed
self.backend.emit_js(blk_if_signed.clone());
......@@ -1424,7 +1425,28 @@ impl <'a> InstructionSelection {
Some(node), f_content, f_context, vm
);
}
/*Instruction_::AllocA(ref ty) => {
trace!("instsel on AllocA");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => panic!("cannot use ALLOCA for hybrid, use ALLOCAHYBRID instead"),
_ => {}
}
}
let ty_info = vm.get_backend_type_info(ty.id());
let size = ty_info.size;
let ty_align= ty_info.alignment;
if 16 % ty_align != 0 {
// It's not trivial to align this type...
unimplemented!()
}
// Round size up to the nearest multiple of 16
let size = ((size + 16 - 1)/16)*16;
}*/
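
The commented-out ALLOCA sketch above rounds the allocation size up to the next multiple of 16 with ((size + 16 - 1)/16)*16. A small standalone check of that arithmetic; the helper name is mine, not the project's:

// Round `size` up to the nearest multiple of `align` (align must be non-zero).
// Same formula as the commented-out code above, just parameterised.
fn align_up(size: usize, align: usize) -> usize {
    ((size + align - 1) / align) * align
}

fn main() {
    assert_eq!(align_up(1, 16), 16);
    assert_eq!(align_up(16, 16), 16);
    assert_eq!(align_up(17, 16), 32);
    println!("ok");
}
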
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
......@@ -2534,8 +2556,8 @@ impl <'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(node, "alloc_large");
let blk_alloc_large_end = make_block_name(node, "alloc_large_end");
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end = make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -2549,7 +2571,8 @@ impl <'a> InstructionSelection {
self.backend.emit_jg(blk_alloc_large.clone());
self.finish_block();
self.start_block(make_block_name(node, "allocsmall"));
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
self.start_block(block_name);
// alloc small here
self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_content, f_context, vm);
......@@ -2652,12 +2675,13 @@ impl <'a> InstructionSelection {
// branch to slow path if end > limit (end - limit > 0)
// ASM: jg alloc_slow
let slowpath = make_block_name(node, "allocslow");
let slowpath = make_block_name(&self.current_fv_name, node.id(), "allocslow");
self.backend.emit_jg(slowpath.clone());
// finish current block
self.finish_block();
self.start_block(make_block_name(node, "updatecursor"));
let block_name = make_block_name(&self.current_fv_name, node.id(), "updatecursor");
self.start_block(block_name);
// update cursor
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
......@@ -2674,7 +2698,7 @@ impl <'a> InstructionSelection {
}
// ASM jmp alloc_end
let allocend = make_block_name(node, "alloc_small_end");
let allocend = make_block_name(&self.current_fv_name, node.id(), "alloc_small_end");
self.backend.emit_jmp(allocend.clone());
// finishing current block
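
The hunk above emits the usual thread-local bump-pointer fast path: compute the new cursor, branch to "allocslow" if it would pass the limit, otherwise write the new cursor back ("updatecursor") and jump to "alloc_small_end". A hedged Rust sketch of the same control flow; the struct, field names and slow-path hook are illustrative assumptions, not the runtime's actual allocator API, and alignment handling is omitted for brevity:

// Illustrative bump-pointer fast path mirroring the emitted assembly:
// cursor + size > limit => slow path, otherwise bump the cursor and return.
struct ThreadAllocator {
    cursor: usize,
    limit: usize,
}

impl ThreadAllocator {
    fn alloc(&mut self, size: usize, slow_path: impl FnOnce(usize) -> usize) -> usize {
        let start = self.cursor;
        let end = start + size;
        if end > self.limit {
            // corresponds to "jg allocslow"
            return slow_path(size);
        }
        // corresponds to "updatecursor": store the new cursor back
        self.cursor = end;
        start // falls through towards "alloc_small_end"
    }
}

fn main() {
    let mut tl = ThreadAllocator { cursor: 0x1000, limit: 0x1040 };
    let a = tl.alloc(32, |_| panic!("unexpected slow path"));
    let b = tl.alloc(64, |sz| { println!("slow path for {} bytes", sz); 0xdead_0000 });
    println!("fast: {:#x}, slow: {:#x}", a, b);
}
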
......@@ -3377,7 +3401,8 @@ impl <'a> InstructionSelection {
// insert an intermediate block to branch to normal
// the branch is inserted later (because we need to deal with postcall convention)
self.finish_block();
self.start_block(make_block_name(cur_node, "normal_cont_for_call"));
let block_name = make_block_name(&self.current_fv_name, cur_node.id(), "normal_cont_for_call");
self.start_block(block_name);
} else {
self.current_callsites.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
......@@ -3643,7 +3668,8 @@ impl <'a> InstructionSelection {
}
}
// frame shrink
self.backend.emit_frame_shrink();
// RBP -> RSP
self.backend.emit_mov_r_r(&x86_64::RSP, &x86_64::RBP);
// pop rbp
self.backend.emit_pop_r64(&x86_64::RBP);
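
With emit_frame_shrink removed from the code generators above, the x86-64 epilogue shown in this hunk appears to restore RSP directly from RBP and then pop the saved RBP, which needs no patched frame-size constant at all. A minimal sketch contrasting the two strategies; the helpers are mine and the strings are assembly text only, not the backend API:

// Illustrative comparison of the two epilogue shapes in this hunk.
fn epilogue_with_patchpoint(frame_size: u64) -> Vec<String> {
    // Old approach: the frame size had to be patched into the addq later.
    vec![format!("addq ${},%rsp", frame_size), "popq %rbp".to_string()]
}

fn epilogue_via_frame_pointer() -> Vec<String> {
    // New approach: RBP already points at the saved frame record,
    // so no frame-size placeholder (and no patch point) is required.
    vec!["movq %rbp,%rsp".to_string(), "popq %rbp".to_string()]
}

fn main() {
    println!("{:?}", epilogue_with_patchpoint(48));
    println!("{:?}", epilogue_via_frame_pointer());
}
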
......@@ -4736,7 +4762,7 @@ impl <'a> InstructionSelection {
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = {
if cur_node.is_some() {
make_block_name(cur_node.unwrap(), format!("callsite_{}", self.current_callsite_id).as_str())
make_block_name(&self.current_fv_name, cur_node.unwrap().id(), format!("callsite_{}", self.current_callsite_id).as_str())
} else {
format!("{}:callsite_{}", self.current_fv_name, self.current_callsite_id)
}
......
......@@ -36,37 +36,14 @@ pub fn create_emit_directory(vm: &VM) {
}
}
fn create_emit_file(name: String, vm: &VM) -> File {
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(name);
match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create emit file {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
}
}
pub struct CodeEmission {
name: &'static str
}
impl CodeEmission {
pub fn new() -> CodeEmission {
CodeEmission {
name: "Code Emission"
}
}
}
#[allow(dead_code)]
pub fn emit_mu_types(vm: &VM) {
pub fn emit_mu_types(suffix: &str, vm: &VM) {
if EMIT_MUIR {
create_emit_directory(vm);
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push("___types.muty");
file_path.push("___types".to_string() + suffix + ".muty");
let mut file = match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create mu types file {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
......@@ -101,217 +78,27 @@ pub fn emit_mu_types(vm: &VM) {
}
}
#[allow(dead_code)]
fn emit_muir(func: &MuFunctionVersion, vm: &VM) {
let func_name = func.name();
// create emit directory
create_emit_directory(vm);
// final IR
{
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(func_name.clone() + ".muir");
let mut file = match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create muir file {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
};
write!(file, "{:?}", func).unwrap();
}
// original IR (source/input)
{
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(func_name.clone() + "_orig.muir");
let mut file = match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create muir file {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
};
fn create_emit_file(name: String, vm: &VM) -> File {
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(name);
writeln!(file, "FuncVer {} of Func #{}", func.hdr, func.func_id).unwrap();
writeln!(file, "Signature: {}", func.sig).unwrap();
writeln!(file, "IR:").unwrap();
if func.get_orig_ir().is_some() {
writeln!(file, "{:?}", func.get_orig_ir().as_ref().unwrap()).unwrap();
} else {
writeln!(file, "Empty").unwrap();
}
match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create emit file {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
}
}
fn emit_muir_dot(func: &MuFunctionVersion, vm: &VM) {
let func_name = func.name();
// create emit directory
create_emit_directory(vm);
// original
{
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(func_name.clone() + ".orig.dot");
let mut file = match File::create(file_path.as_path()) {
Err(why) => panic!("couldn't create muir dot {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
};
emit_muir_dot_inner(&mut file, func_name.clone(), func.get_orig_ir().unwrap());
}
// final
{
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
file_path.push(func_name.clone() + ".dot");
let mut file = match File::create(file_path.as_path()) {
Err(why) => panic!("couldnt create muir dot {}: {}", file_path.to_str().unwrap(), why),
Ok(file) => file
};
emit_muir_dot_inner(&mut file, func_name.clone(), func.content.as_ref().unwrap());
}
pub struct CodeEmission {
name: &'static str
}
fn emit_muir_dot_inner(file: &mut File,
f_name: String,
f_content: &FunctionContent) {
use utils::vec_utils;
// digraph func {
writeln!(file, "digraph {} {{", mangle_name(f_name)).unwrap();
// node shape: rect
writeln!(file, "node [shape=rect];").unwrap();
// every graph node (basic block)
for (id, block) in f_content.blocks.iter() {
let block_name = block.name();
// BBid [label = "name
write!(file, "BB{} [label = \"[{}]{} ", *id, *id, &block_name).unwrap();
let block_content = block.content.as_ref().unwrap();
// (args)
write!(file, "{}", vec_utils::as_str(&block_content.args)).unwrap();
if block_content.exn_arg.is_some() {
// [exc_arg]
write!(file, "[{}]", block_content.exn_arg.as_ref().unwrap()).unwrap();
}
write!(file, ":\\l\\l").unwrap();
// all the instructions
for inst in block_content.body.iter() {
write!(file, "{}\\l", inst).unwrap();
}
// "];
writeln!(file, "\"];").unwrap();
}
// every edge
for (id, block) in f_content.blocks.iter() {
use ast::inst::Instruction_::*;
let cur_block = *id;
// get last instruction
let last_inst = block.content.as_ref().unwrap().body.last().unwrap();
match last_inst.v {
TreeNode_::Instruction(ref inst) => {
let ref ops = inst.ops;
match inst.v {
Branch1(ref dest) => {
writeln!(file, "BB{} -> BB{} [label = \"{}\"];",
cur_block, dest.target, vec_utils::as_str(&dest.get_arguments(&ops))
).unwrap();
}
Branch2{ref true_dest, ref false_dest, ..} => {
writeln!(file, "BB{} -> BB{} [label = \"true: {}\"]",
cur_block, true_dest.target, vec_utils::as_str(&true_dest.get_arguments(&ops))
).unwrap();
writeln!(file, "BB{} -> BB{} [label = \"false: {}\"]",
cur_block, false_dest.target, vec_utils::as_str(&false_dest.get_arguments(&ops))
).unwrap();
}
Switch{ref default, ref branches, ..} => {
for &(op, ref dest) in branches.iter() {
writeln!(file, "BB{} -> BB{} [label = \"case {}: {}\"]",
cur_block, dest.target, ops[op], vec_utils::as_str(&dest.get_arguments(&ops))
).unwrap();
}
writeln!(file, "BB{} -> BB{} [label = \"default: {}\"]",
cur_block, default.target, vec_utils::as_str(&default.get_arguments(&ops))
).unwrap();
}
Call{ref resume, ..}
| CCall{ref resume, ..}
| SwapStack{ref resume, ..}
| ExnInstruction{ref resume, ..} => {
let ref normal = resume.normal_dest;
let ref exn = resume.exn_dest;
writeln!(file, "BB{} -> BB{} [label = \"normal: {}\"];",
cur_block, normal.target, vec_utils::as_str(&normal.get_arguments(&ops))
).unwrap();
writeln!(file, "BB{} -> BB{} [label = \"exception: {}\"];",
cur_block, exn.target, vec_utils::as_str(&exn.get_arguments(&ops))
).unwrap();
}
Watchpoint{ref id, ref disable_dest, ref resume, ..} if id.is_some() => {
let ref normal = resume.normal_dest;
let ref exn = resume.exn_dest;
if id.is_some() {
let disable_dest = disable_dest.as_ref().unwrap();
writeln!(file, "BB{} -> {} [label = \"disabled: {}\"];",
cur_block, disable_dest.target, vec_utils::as_str(&disable_dest.get_arguments(&ops))
).unwrap();