Commit a428732b authored by Isaac Oscar Gariano

Fixed bug where duplicate blocks were created

parent ce779815
......@@ -34,7 +34,7 @@ gcc = "*"
ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
libc="*"
field-offset = "*"
libloading = "*"
......
......@@ -25,4 +25,4 @@ utils = {path = "../utils"}
lazy_static = "*"
log = "*"
simple_logger = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
......@@ -2127,22 +2127,6 @@ impl CodeGenerator for ASMCodeGen {
)
}
/// Emits the AArch64 instruction that tears down (shrinks) the current stack
/// frame on function exit: `ADD SP, SP, #<frame size>`.
///
/// The final frame size is not known at emission time, so a textual
/// placeholder is emitted instead and a patchpoint is recorded; the backend
/// later rewrites the placeholder with the real frame size.
fn emit_frame_shrink(&mut self) {
trace!("emit: \tframe shrink");
// Frame size is unknown here — emit the placeholder to be patched later.
let asm = format!("ADD SP,SP,#{}", FRAME_SIZE_PLACEHOLDER.clone());
let line = self.line();
// Offset 11 is where the immediate begins in "ADD SP,SP,#..." above.
self.cur_mut().add_frame_size_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{}, // no register defines
linked_hashmap!{}, // no register uses
false // not a move instruction
)
}
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str) {self.internal_binop_str("ADD", dest, src1, src2)}
// Pushes a pair of registers onto the given stack (uses the STP instruction)
......
......@@ -49,7 +49,6 @@ pub trait CodeGenerator {
// emit code to adjust frame
fn emit_frame_grow(&mut self); // Emits a SUB
fn emit_frame_shrink(&mut self); // Emits an ADD
// Used to pass a string that the assembler will interpret as an immediate argument
// (This is necessary to support the use of ELF relocations like ':tprel_hi12:foo')
......
......@@ -189,7 +189,7 @@ impl <'a> InstructionSelection {
// it is possible that the fallthrough block is scheduled somewhere else
// we need to explicitly jump to it
self.finish_block();
let fallthrough_temp_block = make_block_name(node, "branch_fallthrough", );
let fallthrough_temp_block = make_block_name(&self.current_fv_name, node.id(), "branch_fallthrough", );
self.start_block(fallthrough_temp_block, &vec![]);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name();
......@@ -343,7 +343,8 @@ impl <'a> InstructionSelection {
self.backend.emit_b_cond("EQ", target);
self.finish_block();
self.start_block(make_block_name(node, format!("switch_not_met_case_{}", case_op_index).as_str()), &vec![]);
let block_name = make_block_name(&self.current_fv_name, node.id(), format!("switch_not_met_case_{}", case_op_index).as_str());
self.start_block(block_name, &vec![]);
}
// emit default
......@@ -628,9 +629,9 @@ impl <'a> InstructionSelection {
// overflows to_ty_size, but not to_ty_reg_size
let to_ty_reg_size = check_op_len(&tmp_res.ty); // The size of the aarch64 register
if to_ty_size != to_ty_reg_size {
let blk_positive = make_block_name(node, "positive");
let blk_negative = make_block_name(node, "negative");
let blk_end = make_block_name(node, "end");
let blk_positive = make_block_name(&self.current_fv_name, node.id(), "positive");
let blk_negative = make_block_name(&self.current_fv_name, node.id(), "negative");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "end");
let tmp = make_temporary(f_context, to_ty.clone(), vm);
self.backend.emit_tbnz(&tmp_res, (to_ty_size - 1) as u8, blk_negative.clone());
......@@ -747,7 +748,7 @@ impl <'a> InstructionSelection {
self.finish_block();
let blk_load_start = make_block_name(node, "load_start");
let blk_load_start = make_block_name(&self.current_fv_name, node.id(), "load_start");
// load_start:
self.start_block(blk_load_start.clone(), &vec![temp_loc.clone()]);
......@@ -849,7 +850,7 @@ impl <'a> InstructionSelection {
self.finish_block();
let blk_store_start = make_block_name(node, "store_start");
let blk_store_start = make_block_name(&self.current_fv_name, node.id(), "store_start");
// store_start:
self.start_block(blk_store_start.clone(), &vec![temp_loc.clone()]);
......@@ -916,9 +917,9 @@ impl <'a> InstructionSelection {
let res_value = self.get_result_value(node, 0);
let res_success = self.get_result_value(node, 1);
let blk_cmpxchg_start = make_block_name(node, "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(node, "cmpxchg_failed");
let blk_cmpxchg_succeded = make_block_name(node, "cmpxchg_succeded");
let blk_cmpxchg_start = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_start");
let blk_cmpxchg_failed = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_failed");
let blk_cmpxchg_succeded = make_block_name(&self.current_fv_name, node.id(), "cmpxchg_succeded");
self.finish_block();
......@@ -1217,7 +1218,7 @@ impl <'a> InstructionSelection {
Some(node), f_context, vm
);
}
// Runtime Entry
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
......@@ -2706,8 +2707,8 @@ impl <'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(node, "alloc_large");
let blk_alloc_large_end = make_block_name(node, "alloc_large_end");
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end = make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -2719,7 +2720,8 @@ impl <'a> InstructionSelection {
self.backend.emit_b_cond("GT", blk_alloc_large.clone());
self.finish_block();
self.start_block(make_block_name(node, "allocsmall"), &vec![]);
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
self.start_block(block_name, &vec![]);
self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_context, vm);
self.backend.emit_b(blk_alloc_large_end.clone());
......@@ -3690,10 +3692,6 @@ impl <'a> InstructionSelection {
self.backend.emit_ldr_callee_saved(reg, &loc);
}
//self.backend.emit_frame_shrink();
// Pop the link register and frame pointers
// Pop the frame record
self.backend.emit_mov(&SP, &FP);
self.backend.emit_pop_pair(&FP, &LR, &SP);
......@@ -4361,7 +4359,7 @@ impl <'a> InstructionSelection {
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = {
if cur_node.is_some() {
make_block_name(cur_node.unwrap(), format!("callsite_{}", self.current_callsite_id).as_str())
make_block_name(&self.current_fv_name, cur_node.unwrap().id(), format!("callsite_{}", self.current_callsite_id).as_str())
} else {
format!("{}:callsite_{}", self.current_fv_name, self.current_callsite_id)
}
......
......@@ -2194,22 +2194,6 @@ impl CodeGenerator for ASMCodeGen {
)
}
/// Emits the x86-64 instruction that tears down (shrinks) the current stack
/// frame on function exit: `addq $<frame size>, %rsp`.
///
/// The final frame size is not known at emission time, so a textual
/// placeholder is emitted instead and a patchpoint is recorded; the backend
/// later rewrites the placeholder with the real frame size.
fn emit_frame_shrink(&mut self) {
trace!("emit frame shrink");
// Frame size is unknown here — emit the placeholder to be patched later.
let asm = format!("addq ${},%rsp", FRAME_SIZE_PLACEHOLDER.clone());
let line = self.line();
// Offset 6 is where the immediate begins in "addq $..." above.
self.cur_mut().add_frame_size_patchpoint(ASMLocation::new(line, 6, FRAME_SIZE_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{}, // no register defines
linked_hashmap!{}, // no register uses
false // not a move instruction
)
}
fn emit_nop(&mut self, bytes: usize) {
trace!("emit: nop ({} bytes)", bytes);
......@@ -3528,7 +3512,7 @@ pub fn emit_context_with_reloc(vm: &VM,
use std::path;
use std::io::prelude::*;
emit_mu_types(vm);
emit_mu_types("", vm);
debug!("---Emit VM Context---");
create_emit_directory(vm);
......
......@@ -44,8 +44,7 @@ pub trait CodeGenerator {
// emit code to adjust frame
fn emit_frame_grow(&mut self);
fn emit_frame_shrink(&mut self);
fn emit_nop(&mut self, bytes: usize);
// comparison
......
......@@ -347,7 +347,7 @@ impl <'a> InstructionSelection {
// we need to explicitly jump to it
self.finish_block();
let fallthrough_temp_block = make_block_name(node, "branch_fallthrough", );
let fallthrough_temp_block = make_block_name(&self.current_fv_name, node.id(), "branch_fallthrough", );
self.start_block(fallthrough_temp_block);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name();
......@@ -417,9 +417,9 @@ impl <'a> InstructionSelection {
}
// jcc
_ => {
let blk_true = make_block_name(node, "select_true");
let blk_false = make_block_name(node, "select_false");
let blk_end = make_block_name(node, "select_end");
let blk_true = make_block_name(&self.current_fv_name, node.id(), "select_true");
let blk_false = make_block_name(&self.current_fv_name, node.id(), "select_false");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -470,9 +470,9 @@ impl <'a> InstructionSelection {
} else if self.match_fpreg(true_val) {
let tmp_res = self.get_result_value(node);
let blk_true = make_block_name(node, "select_true");
let blk_false = make_block_name(node, "select_false");
let blk_end = make_block_name(node, "select_end");
let blk_true = make_block_name(&self.current_fv_name, node.id(), "select_true");
let blk_false = make_block_name(&self.current_fv_name, node.id(), "select_false");
let blk_end = make_block_name(&self.current_fv_name, node.id(), "select_end");
// jump to blk_true if true
match cmpop {
......@@ -610,7 +610,8 @@ impl <'a> InstructionSelection {
}
self.finish_block();
self.start_block(make_block_name(node, format!("switch_not_met_case_{}", case_op_index).as_str()));
let block_name = make_block_name(&self.current_fv_name, node.id(), format!("switch_not_met_case_{}", case_op_index).as_str());
self.start_block(block_name);
}
// emit default
......@@ -945,9 +946,9 @@ impl <'a> InstructionSelection {
// testq %tmp_op %tmp_op
self.backend.emit_test_r_r(&tmp_op, &tmp_op);
let blk_if_signed = make_block_name(node, "uitofp_float_if_signed",);
let blk_if_not_signed = make_block_name(node, "uitofp_float_if_not_signed");
let blk_done = make_block_name(node, "uitofp_float_done");
let blk_if_signed = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_if_signed",);
let blk_if_not_signed = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_if_not_signed");
let blk_done = make_block_name(&self.current_fv_name, node.id(), "uitofp_float_done");
// js %if_signed
self.backend.emit_js(blk_if_signed.clone());
......@@ -1424,7 +1425,28 @@ impl <'a> InstructionSelection {
Some(node), f_content, f_context, vm
);
}
/*Instruction_::AllocA(ref ty) => {
trace!("instsel on AllocA");
if cfg!(debug_assertions) {
match ty.v {
MuType_::Hybrid(_) => panic!("cannot use ALLOCA for hybrid, use ALLOCAHYBRID instead"),
_ => {}
}
}
let ty_info = vm.get_backend_type_info(ty.id());
let size = ty_info.size;
let ty_align= ty_info.alignment;
if 16 % ty_align != 0 {
// It's not trivial to align this type...
unimplemented!()
}
// Round size up to the nearest multiple of 16
let size = ((size + 16 - 1)/16)*16;
}*/
Instruction_::Throw(op_index) => {
trace!("instsel on THROW");
......@@ -2534,8 +2556,8 @@ impl <'a> InstructionSelection {
// emit: ALLOC_LARGE:
// emit: >> large object alloc
// emit: ALLOC_LARGE_END:
let blk_alloc_large = make_block_name(node, "alloc_large");
let blk_alloc_large_end = make_block_name(node, "alloc_large_end");
let blk_alloc_large = make_block_name(&self.current_fv_name, node.id(), "alloc_large");
let blk_alloc_large_end = make_block_name(&self.current_fv_name, node.id(), "alloc_large_end");
if OBJECT_HEADER_SIZE != 0 {
let size_with_hdr = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
......@@ -2549,7 +2571,8 @@ impl <'a> InstructionSelection {
self.backend.emit_jg(blk_alloc_large.clone());
self.finish_block();
self.start_block(make_block_name(node, "allocsmall"));
let block_name = make_block_name(&self.current_fv_name, node.id(), "allocsmall");
self.start_block(block_name);
// alloc small here
self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_content, f_context, vm);
......@@ -2652,12 +2675,13 @@ impl <'a> InstructionSelection {
// branch to slow path if end > limit (end - limit > 0)
// ASM: jg alloc_slow
let slowpath = make_block_name(node, "allocslow");
let slowpath = make_block_name(&self.current_fv_name, node.id(), "allocslow");
self.backend.emit_jg(slowpath.clone());
// finish current block
self.finish_block();
self.start_block(make_block_name(node, "updatecursor"));
let block_name = make_block_name(&self.current_fv_name, node.id(), "updatecursor");
self.start_block(block_name);
// update cursor
// ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
......@@ -2674,7 +2698,7 @@ impl <'a> InstructionSelection {
}
// ASM jmp alloc_end
let allocend = make_block_name(node, "alloc_small_end");
let allocend = make_block_name(&self.current_fv_name, node.id(), "alloc_small_end");
self.backend.emit_jmp(allocend.clone());
// finishing current block
......@@ -3377,7 +3401,8 @@ impl <'a> InstructionSelection {
// insert an intermediate block to branch to normal
// the branch is inserted later (because we need to deal with postcall convention)
self.finish_block();
self.start_block(make_block_name(cur_node, "normal_cont_for_call"));
let block_name = make_block_name(&self.current_fv_name, cur_node.id(), "normal_cont_for_call");
self.start_block(block_name);
} else {
self.current_callsites.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
......@@ -3643,7 +3668,8 @@ impl <'a> InstructionSelection {
}
}
// frame shrink
self.backend.emit_frame_shrink();
// RBP -> RSP
self.backend.emit_mov_r_r(&x86_64::RSP, &x86_64::RBP);
// pop rbp
self.backend.emit_pop_r64(&x86_64::RBP);
......@@ -4736,7 +4762,7 @@ impl <'a> InstructionSelection {
fn new_callsite_label(&mut self, cur_node: Option<&TreeNode>) -> String {
let ret = {
if cur_node.is_some() {
make_block_name(cur_node.unwrap(), format!("callsite_{}", self.current_callsite_id).as_str())
make_block_name(&self.current_fv_name, cur_node.unwrap().id(), format!("callsite_{}", self.current_callsite_id).as_str())
} else {
format!("{}:callsite_{}", self.current_fv_name, self.current_callsite_id)
}
......
This diff is collapsed.
......@@ -418,6 +418,6 @@ impl RegGroup {
}
}
fn make_block_name<T: MuEntity>(entity: &T, label: &str) -> MuName {
format!("{}:{}", entity.name(), label)
/// Builds a machine-block name of the form
/// `<function-version name>.#<node id>:<label>`.
///
/// Embedding the node id makes the name unique per IR node, so two nodes in
/// the same function version that use the same label can never collide (this
/// is what prevents duplicate blocks from being created).
///
/// Takes `&str` rather than `&String` (clippy `ptr_arg`); existing callers
/// passing `&self.current_fv_name` still work via deref coercion.
fn make_block_name(fv_name: &str, id: MuID, label: &str) -> MuName {
    format!("{}.#{}:{}", fv_name, id, label)
}
\ No newline at end of file
......@@ -86,6 +86,7 @@ impl CompilerPolicy {
impl Default for CompilerPolicy {
fn default() -> Self {
let mut passes : Vec<Box<CompilerPass>> = vec![];
passes.push(Box::new(passes::DotGen::new(".orig")));
passes.push(Box::new(passes::Inlining::new()));
// ir level passes
passes.push(Box::new(passes::DefUse::new()));
......@@ -93,6 +94,7 @@ impl Default for CompilerPolicy {
passes.push(Box::new(passes::GenMovPhi::new()));
passes.push(Box::new(passes::ControlFlowAnalysis::new()));
passes.push(Box::new(passes::TraceGen::new()));
passes.push(Box::new(passes::DotGen::new(".transformed")));
// compilation
passes.push(Box::new(backend::inst_sel::InstructionSelection::new()));
......
......@@ -21,6 +21,7 @@ mod control_flow;
mod trace_gen;
mod gen_mov_phi;
mod inlining;
mod dot_gen;
pub use compiler::passes::inlining::Inlining;
pub use compiler::passes::def_use::DefUse;
......@@ -28,6 +29,7 @@ pub use compiler::passes::tree_gen::TreeGen;
pub use compiler::passes::control_flow::ControlFlowAnalysis;
pub use compiler::passes::trace_gen::TraceGen;
pub use compiler::passes::gen_mov_phi::GenMovPhi;
pub use compiler::passes::dot_gen::DotGen;
use std::any::Any;
......
......@@ -38,4 +38,4 @@ simple_logger = "*"
aligned_alloc = "*"
crossbeam = "*"
field-offset = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
......@@ -24,5 +24,5 @@ crate-type = ["rlib"]
memmap = "*"
memsec = "0.1.9"
byteorder = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = ">= 0.0.5" }
log = "*"
......@@ -685,7 +685,7 @@ fn test_coalesce_branch_moves() {
// check
let fv_id = func_ver.id();
assert!(get_number_of_moves(fv_id, &vm) == 1, "The function should not yield any mov instructions other than mov %rsp->%rbp (some possible coalescing failed)");
assert!(get_number_of_moves(fv_id, &vm) == 2, "The function should not yield any mov instructions other than mov %rsp->%rbp and mov %rbp->%rsp (some possible coalescing failed)");
}
}
......@@ -752,7 +752,7 @@ fn test_coalesce_args() {
// check
let fv_id = func_ver.id();
assert!(get_number_of_moves(fv_id, &vm) == 1, "The function should not yield any mov instructions other than mov %rsp->%rbp (or MOV SP -> FP on aarch64) (some possible coalescing failed)");
assert!(get_number_of_moves(fv_id, &vm) == 2, "The function should not yield any mov instructions other than mov %rsp->%rbp and mov %rbp->%rsp (or MOV SP -> FP on aarch64) (some possible coalescing failed)");
}
}
......@@ -811,7 +811,7 @@ fn test_coalesce_branch2_moves() {
// check
let fv_id = func_ver.id();
assert!(get_number_of_moves(fv_id, &vm) <= 3, "too many moves (some possible coalescing failed)");
assert!(get_number_of_moves(fv_id, &vm) <= 4, "too many moves (some possible coalescing failed)");
}
backend::emit_context(&vm);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment