Commit b7d7851a authored by qinsoon

keep refactoring on compiler module

parent 68af6af6
......@@ -15,6 +15,7 @@ use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass;
use compiler::backend;
use compiler::backend::BackendType;
use compiler::backend::RegGroup;
use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
......@@ -2976,7 +2977,7 @@ impl <'a> InstructionSelection {
stack_args.reverse();
let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
let (stack_arg_size, _, stack_arg_offsets) = backend::sequetial_layout(&stack_arg_tys, vm);
let (stack_arg_size, _, stack_arg_offsets) = BackendType::sequential_layout(&stack_arg_tys, vm);
let mut stack_arg_size_with_padding = stack_arg_size;
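The hunk above replaces the misspelled free function backend::sequetial_layout with BackendType::sequential_layout when laying out stack-passed arguments. For orientation, here is a standalone sketch of what a sequential (C-style) layout typically computes; it is not Zebu's implementation, and the (size, alignment) inputs plus the assumption that the middle element of the returned tuple is the overall alignment are mine:

// Standalone sketch of a sequential layout: each field goes at the next
// offset satisfying its alignment; total size is padded to the largest
// alignment seen. Illustrative only -- not Zebu's BackendType API.
fn align_up(x: usize, align: usize) -> usize {
    (x + align - 1) / align * align
}

fn sequential_layout(fields: &[(usize, usize)]) -> (usize, usize, Vec<usize>) {
    // fields are (size, alignment) pairs; returns (total size, alignment, offsets)
    let mut offsets = Vec::with_capacity(fields.len());
    let mut cur = 0;
    let mut max_align = 1;
    for &(size, align) in fields {
        let off = align_up(cur, align);
        offsets.push(off);
        cur = off + size;
        if align > max_align {
            max_align = align;
        }
    }
    (align_up(cur, max_align), max_align, offsets)
}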
......@@ -3463,7 +3464,7 @@ impl <'a> InstructionSelection {
// old RBP <- RBP
let stack_arg_base_offset : i32 = 16;
let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect();
let (_, _, stack_arg_offsets) = backend::sequetial_layout(&arg_by_stack_tys, vm);
let (_, _, stack_arg_offsets) = BackendType::sequential_layout(&arg_by_stack_tys, vm);
// unload the args
let mut i = 0;
......
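In the callee-side hunk above, stack_arg_base_offset is 16 because, after the standard x86-64 prologue, [RBP] holds the caller's saved RBP and [RBP + 8] holds the return address, so the first stack-passed argument starts at [RBP + 16]. A minimal sketch of that address arithmetic, with made-up names (the real code works on the project's Address/memory-operand types):

// Illustrative only: where the i-th stack-passed argument lives, given the
// per-argument offsets from sequential_layout and a frame where
// [rbp] = saved RBP and [rbp + 8] = return address.
fn stack_arg_addr(rbp: u64, stack_arg_offsets: &[usize], i: usize) -> u64 {
    const STACK_ARG_BASE_OFFSET: u64 = 16; // skip saved RBP + return address
    rbp + STACK_ARG_BASE_OFFSET + stack_arg_offsets[i] as u64
}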
......@@ -11,9 +11,47 @@ use std::io::prelude::*;
use std::fs::File;
use std::collections::HashMap;
/// should emit Mu IR dot graph?
pub const EMIT_MUIR : bool = true;
/// should emit machine code dot graph?
pub const EMIT_MC_DOT : bool = true;
pub struct CodeEmission {
name: &'static str
}
impl CodeEmission {
pub fn new() -> CodeEmission {
CodeEmission {
name: "Code Emission"
}
}
}
impl CompilerPass for CodeEmission {
fn name(&self) -> &'static str {
self.name
}
fn as_any(&self) -> &Any {
self
}
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
// emit the actual code
emit_code(func, vm);
// emit debug graphs
if EMIT_MUIR {
emit_muir_dot(func, vm);
}
if EMIT_MC_DOT {
emit_mc_dot(func, vm);
}
}
}
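emit_muir_dot and emit_mc_dot, gated by the two new constants, write Graphviz dot files for the Mu IR and the machine code. As a rough idea of the kind of output involved, here is a self-contained toy CFG writer; the function name, path handling, and block representation are invented for illustration and are not the project's emitter:

use std::fs::File;
use std::io::Write;

// Toy sketch: dump a control-flow graph as a Graphviz dot file.
// `blocks` maps a block name to the names of its successor blocks.
fn emit_cfg_dot(path: &str, blocks: &[(&str, Vec<&str>)]) -> std::io::Result<()> {
    let mut file = File::create(path)?;
    writeln!(file, "digraph cfg {{")?;
    for (name, succs) in blocks {
        writeln!(file, "    {} [shape=box];", name)?;
        for succ in succs {
            writeln!(file, "    {} -> {};", name, succ)?;
        }
    }
    writeln!(file, "}}")
}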
/// creates the emit directory (if it doesn't exist)
pub fn create_emit_directory(vm: &VM) {
use std::fs;
match fs::create_dir(&vm.vm_options.flag_aot_emit_dir) {
......@@ -22,6 +60,7 @@ pub fn create_emit_directory(vm: &VM) {
}
}
/// creates a file to write; panics if the creation fails
fn create_emit_file(name: String, vm: &VM) -> File {
let mut file_path = path::PathBuf::new();
file_path.push(&vm.vm_options.flag_aot_emit_dir);
......@@ -33,18 +72,6 @@ fn create_emit_file(name: String, vm: &VM) -> File {
}
}
pub struct CodeEmission {
name: &'static str
}
impl CodeEmission {
pub fn new() -> CodeEmission {
CodeEmission {
name: "Code Emission"
}
}
}
#[allow(dead_code)]
pub fn emit_mu_types(vm: &VM) {
if EMIT_MUIR {
......@@ -382,25 +409,3 @@ fn emit_mc_dot(func: &MuFunctionVersion, vm: &VM) {
file.write("}\n".as_bytes()).unwrap();
}
impl CompilerPass for CodeEmission {
fn name(&self) -> &'static str {
self.name
}
fn as_any(&self) -> &Any {
self
}
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
emit_code(func, vm);
if EMIT_MUIR {
emit_muir_dot(func, vm);
}
if EMIT_MC_DOT {
emit_mc_dot(func, vm);
}
}
}
......@@ -10,6 +10,31 @@ pub struct PeepholeOptimization {
name: &'static str
}
impl CompilerPass for PeepholeOptimization {
fn name(&self) -> &'static str {
self.name
}
fn as_any(&self) -> &Any {
self
}
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
for i in 0..cf.mc().number_of_insts() {
// a move whose source and destination resolve to the same machine register is redundant and can be eliminated
self.remove_redundant_move(i, &mut cf);
// if a branch targets a block that immediately follows it, it can be eliminated
self.remove_unnecessary_jump(i, &mut cf);
}
trace!("after peephole optimization:");
cf.mc().trace_mc();
}
}
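For context, a minimal standalone sketch of the first rule (removing register-to-register moves that become no-ops after register allocation). It uses a toy instruction type and a plain temp-to-register map rather than Zebu's CompiledFunction and MachineCode:

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Inst {
    Mov { dst: usize, src: usize }, // register-to-register move
    Nop,
    Other,
}

// Toy version of remove_redundant_move: if source and destination end up in
// the same machine register, the move does nothing and can be dropped.
fn remove_redundant_moves(insts: &mut [Inst], temp_to_reg: &HashMap<usize, usize>) {
    let assigned = |id: usize| *temp_to_reg.get(&id).unwrap_or(&id);
    for inst in insts.iter_mut() {
        if let Inst::Mov { dst, src } = *inst {
            if assigned(dst) == assigned(src) {
                *inst = Inst::Nop;
            }
        }
    }
}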
impl PeepholeOptimization {
pub fn new() -> PeepholeOptimization {
PeepholeOptimization {
......@@ -17,10 +42,12 @@ impl PeepholeOptimization {
}
}
pub fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
// if this instruction is a register-to-register move (no memory operands)
if cf.mc().is_move(inst) && !cf.mc().is_using_mem_op(inst) {
cf.mc().trace_inst(inst);
// get source reg/temp ID
let src : MuID = {
let uses = cf.mc().get_inst_reg_uses(inst);
if uses.len() == 0 {
......@@ -29,8 +56,11 @@ impl PeepholeOptimization {
}
uses[0]
};
// get dest reg/temp ID
let dst : MuID = cf.mc().get_inst_reg_defines(inst)[0];
// map the temps to their allocated machine registers
let src_machine_reg : MuID = {
match cf.temps.get(&src) {
Some(reg) => *reg,
......@@ -43,7 +73,8 @@ impl PeepholeOptimization {
None => dst
}
};
// check if two registers are aliased
if backend::is_aliased(src_machine_reg, dst_machine_reg) {
trace!("move between {} and {} is redundant! removed", src_machine_reg, dst_machine_reg);
// redundant, remove this move
......@@ -54,7 +85,7 @@ impl PeepholeOptimization {
}
}
pub fn remove_unnecessary_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
fn remove_unnecessary_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
let mut mc = cf.mc_mut();
// if this is last instruction, return
......@@ -83,26 +114,3 @@ impl PeepholeOptimization {
}
}
}
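Similarly, a standalone sketch of the second rule: an unconditional jump whose target label starts the very next block is a fall-through and can be removed. Again this is a toy model, not Zebu's MachineCode trait:

#[derive(Clone, Debug, PartialEq)]
enum MInst {
    Jmp(String),   // unconditional jump to a label
    Label(String), // start of a basic block
    Nop,
    Other,
}

// Toy version of remove_unnecessary_jump.
fn remove_unnecessary_jumps(insts: &mut Vec<MInst>) {
    for i in 0..insts.len().saturating_sub(1) {
        let redundant = match (&insts[i], &insts[i + 1]) {
            (MInst::Jmp(target), MInst::Label(next)) => target == next,
            _ => false,
        };
        if redundant {
            insts[i] = MInst::Nop;
        }
    }
}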
impl CompilerPass for PeepholeOptimization {
fn name(&self) -> &'static str {
self.name
}
fn as_any(&self) -> &Any {
self
}
fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
for i in 0..cf.mc().number_of_insts() {
self.remove_redundant_move(i, &mut cf);
self.remove_unnecessary_jump(i, &mut cf);
}
trace!("after peephole optimization:");
cf.mc().trace_mc();
}
}
......@@ -9,7 +9,7 @@ use ast::ptr::*;
use ast::types::*;
use utils::Address;
use compiler::backend::RegGroup;
use compiler::backend::BackendTypeInfo;
use compiler::backend::BackendType;
use runtime::ValueLocation;
use runtime::thread::MuThread;
......@@ -46,7 +46,7 @@ fn allocate(allocator: *mut Mutator, size: ByteSize, align: ByteSize, encode: u6
ret
}
pub fn allocate_fixed(ty: P<MuType>, backendtype: Box<BackendTypeInfo>) -> Address {
pub fn allocate_fixed(ty: P<MuType>, backendtype: Box<BackendType>) -> Address {
let gctype = backendtype.gc_type.clone();
let encode = get_gc_type_encode(gctype.id);
......@@ -57,7 +57,7 @@ pub fn allocate_fixed(ty: P<MuType>, backendtype: Box<BackendTypeInfo>) -> Addre
check_allocator(gctype.size(), gctype.alignment, encode, None).to_address()
}
pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendTypeInfo>) -> Address {
pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendType>) -> Address {
let gctype = backendtype.gc_type.clone();
let encode = get_gc_type_encode(gctype.id);
......@@ -68,7 +68,7 @@ pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendTypeInfo
check_allocator(gctype.size_hybrid(len as u32), gctype.alignment, encode, Some(len)).to_address()
}
pub fn allocate_global(iref_global: P<Value>, backendtype: Box<BackendTypeInfo>) -> ValueLocation {
pub fn allocate_global(iref_global: P<Value>, backendtype: Box<BackendType>) -> ValueLocation {
let referenced_type = match iref_global.ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expected global to be an iref type, found {}", iref_global.ty)
......
use utils;
use utils::Word;
use utils::Address;
use ast::ir::*;
use vm::VM;
use compiler::backend::Word;
use compiler::backend::RegGroup;
use utils::Address;
use std::fmt;
use std::os::raw::c_int;
......
......@@ -168,8 +168,8 @@ impl MuStack {
#[cfg(target_arch = "x86_64")]
pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
use compiler::backend::Word;
use compiler::backend::WORD_SIZE;
use utils::Word;
use utils::WORD_SIZE;
use compiler::backend::RegGroup;
use compiler::backend::x86_64;
......@@ -223,8 +223,8 @@ impl MuStack {
}
pub fn print_stack(&self, n_entries: Option<usize>) {
use compiler::backend::Word;
use compiler::backend::WORD_SIZE;
use utils::Word;
use utils::WORD_SIZE;
let mut cursor = self.upper_bound.sub(WORD_SIZE);
let mut count = 0;
......@@ -250,10 +250,6 @@ impl MuStack {
debug!("0x{:x} | LOWER_BOUND", self.lower_bound);
}
pub fn print_backtrace(&self) {
}
}
pub enum MuStackState {
......
......@@ -7,7 +7,7 @@ use ast::types;
use ast::types::*;
use compiler::{Compiler, CompilerPolicy};
use compiler::backend;
use compiler::backend::BackendTypeInfo;
use compiler::backend::BackendType;
use compiler::machine_code::CompiledFunction;
use runtime::thread::*;
use runtime::ValueLocation;
......@@ -42,7 +42,7 @@ pub struct VM {
// 3
types: RwLock<HashMap<MuID, P<MuType>>>,
// 4
backend_type_info: RwLock<HashMap<MuID, Box<BackendTypeInfo>>>,
backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>,
// 5
constants: RwLock<HashMap<MuID, P<Value>>>,
// 6
......@@ -527,7 +527,7 @@ impl <'a> VM {
// restore gc types
{
let type_info_guard = vm.backend_type_info.read().unwrap();
let mut type_info_vec: Vec<Box<BackendTypeInfo>> = type_info_guard.values().map(|x| x.clone()).collect();
let mut type_info_vec: Vec<Box<BackendType>> = type_info_guard.values().map(|x| x.clone()).collect();
type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));
let mut expect_id = 0;
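The restore loop above sorts the saved backend type info by gc_type.id and then walks it with expect_id, which apparently assumes the ids are dense once sorted. A tiny standalone sketch of that check, with invented names:

// Sketch: after sorting, ids are expected to be 0, 1, 2, ...; any gap means
// a type is missing from the persisted image.
fn check_dense_ids(sorted_ids: &[usize]) -> Result<(), usize> {
    for (expect_id, &id) in sorted_ids.iter().enumerate() {
        if id != expect_id {
            return Err(expect_id); // first missing id
        }
    }
    Ok(())
}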
......@@ -905,7 +905,7 @@ impl <'a> VM {
self.compiled_funcs.write().unwrap().insert(func.func_ver_id, RwLock::new(func));
}
pub fn get_backend_type_info(&self, tyid: MuID) -> Box<BackendTypeInfo> {
pub fn get_backend_type_info(&self, tyid: MuID) -> Box<BackendType> {
{
let read_lock = self.backend_type_info.read().unwrap();
......@@ -920,7 +920,7 @@ impl <'a> VM {
Some(ty) => ty,
None => panic!("invalid type id during get_backend_type_info(): {}", tyid)
};
let resolved = Box::new(backend::resolve_backend_type_info(ty, self));
let resolved = Box::new(backend::BackendType::resolve(ty, self));
let mut write_lock = self.backend_type_info.write().unwrap();
write_lock.insert(tyid, resolved.clone());
......
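get_backend_type_info above uses a read-then-write caching pattern: look up the map under a shared read lock, and only on a miss resolve the type and insert it under an exclusive write lock. A self-contained sketch of the same pattern over plain types (u64 keys, String values) rather than the VM's MuID/BackendType:

use std::collections::HashMap;
use std::sync::RwLock;

struct Cache {
    map: RwLock<HashMap<u64, Box<String>>>,
}

impl Cache {
    // Cheap shared read lock on the fast path; exclusive write lock only
    // when the entry has to be computed.
    fn get_or_resolve(&self, key: u64, resolve: impl Fn(u64) -> String) -> Box<String> {
        {
            let read_lock = self.map.read().unwrap();
            if let Some(v) = read_lock.get(&key) {
                return v.clone();
            }
        }
        let resolved = Box::new(resolve(key));
        let mut write_lock = self.map.write().unwrap();
        write_lock.insert(key, resolved.clone());
        resolved
    }
}

Two threads that miss on the same key may both resolve it; the later insert simply overwrites the earlier one, which is harmless as long as resolution is deterministic.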