Commit 5fbf8fa4 authored by qinsoon

Frame done.

Partially serialize compiled functions into the boot image.
parent 14b3ee5f
@@ -517,6 +517,14 @@ pub struct Value {
 }
 impl Value {
+    pub fn make_int_const(id: MuID, val: u64) -> P<Value> {
+        P(Value{
+            hdr: MuEntityHeader::unnamed(id),
+            ty: UINT32_TYPE.clone(),
+            v: Value_::Constant(Constant::Int(val))
+        })
+    }
     pub fn is_mem(&self) -> bool {
         match self.v {
             Value_::Memory(_) => true,
...
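Note: `make_int_const` mirrors the pass-local `make_value_int_const` further down in this commit, but is callable wherever a fresh `MuID` is available. A usage sketch (call site hypothetical; not standalone) — note the constant is typed `UINT32_TYPE` even though the parameter is `u64`:

```rust
// Hypothetical call site: build the constant 16 as an instruction operand.
let sixteen: P<Value> = Value::make_int_const(vm.next_id(), 16);
assert!(!sixteen.is_mem()); // it is a Constant, not a Memory operand
```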
 use ptr::P;
 use ir::*;
+use utils::POINTER_SIZE;
 use utils::vec_utils;
 use std::fmt;
 use std::collections::HashMap;
 use std::sync::RwLock;
+lazy_static! {
+    pub static ref ADDRESS_TYPE : P<MuType> = P(
+        MuType::new(new_internal_id(), MuType_::int(POINTER_SIZE * 8))
+    );
+    pub static ref UINT32_TYPE : P<MuType> = P(
+        MuType::new(new_internal_id(), MuType_::int(32))
+    );
+    pub static ref UINT64_TYPE : P<MuType> = P(
+        MuType::new(new_internal_id(), MuType_::int(64))
+    );
+    pub static ref DOUBLE_TYPE : P<MuType> = P(
+        MuType::new(new_internal_id(), MuType_::double())
+    );
+    pub static ref INTERNAL_TYPES : Vec<P<MuType>> = vec![
+        ADDRESS_TYPE.clone(),
+        UINT32_TYPE.clone(),
+        UINT64_TYPE.clone(),
+        DOUBLE_TYPE.clone()
+    ];
+}
 #[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
 pub struct MuType {
     pub hdr: MuEntityHeader,
...
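Note: these internal types are lazily initialized, process-wide singletons shared by the whole compiler. A minimal self-contained sketch of the same `lazy_static!` pattern (the `POINTER_SIZE` value of 8 bytes is an assumption for x86_64; the repo reads it from `utils`):

```rust
#[macro_use]
extern crate lazy_static;

const POINTER_SIZE: usize = 8; // bytes; assumption for x86_64

lazy_static! {
    // Initialized once, on first dereference, like ADDRESS_TYPE above.
    static ref ADDRESS_BITS: usize = POINTER_SIZE * 8;
}

fn main() {
    assert_eq!(*ADDRESS_BITS, 64); // addresses are plain 64-bit ints here
}
```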
@@ -3,12 +3,14 @@
 use compiler::backend;
 use compiler::backend::AOT_EMIT_CONTEXT_FILE;
 use compiler::backend::AOT_EMIT_DIR;
+use compiler::backend::RegGroup;
 use utils::ByteSize;
 use compiler::backend::x86_64;
 use compiler::backend::x86_64::CodeGenerator;
 use compiler::machine_code::CompiledFunction;
 use compiler::machine_code::MachineCode;
 use vm::VM;
+use runtime::ValueLocation;
 use utils::string_utils;
@@ -629,7 +631,7 @@ impl ASMCodeGen {
 }
 impl CodeGenerator for ASMCodeGen {
-    fn start_code(&mut self, func_name: MuName) {
+    fn start_code(&mut self, func_name: MuName) -> ValueLocation {
         self.cur = Some(Box::new(ASMCode {
             name: func_name.clone(),
             code: vec![],
@@ -655,13 +657,27 @@ impl CodeGenerator for ASMCodeGen {
         }));
         // to link with C sources via gcc
-        self.add_asm_symbolic(directive_globl(symbol(func_name.clone())));
-        self.add_asm_symbolic(format!("{}:", symbol(func_name.clone())));
+        let func_symbol = symbol(func_name.clone());
+        self.add_asm_symbolic(directive_globl(func_symbol.clone()));
+        self.add_asm_symbolic(format!("{}:", func_symbol.clone()));
+        ValueLocation::Relocatable(RegGroup::GPR, func_symbol)
     }
-    fn finish_code(&mut self) -> Box<MachineCode> {
+    fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode>, ValueLocation) {
+        let func_end_symbol = {
+            let mut symbol = symbol(func_name.clone());
+            symbol.push_str("_end");
+            symbol
+        };
+        self.add_asm_symbolic(directive_globl(func_end_symbol.clone()));
         self.control_flow_analysis();
-        self.cur.take().unwrap()
+        (
+            self.cur.take().unwrap(),
+            ValueLocation::Relocatable(RegGroup::GPR, func_end_symbol)
+        )
     }
     fn print_cur_code(&self) {
@@ -1207,7 +1223,7 @@ pub fn emit_code(fv: &mut MuFunctionVersion, vm: &VM) {
     let compiled_funcs = vm.compiled_funcs().read().unwrap();
     let cf = compiled_funcs.get(&fv.id()).unwrap().read().unwrap();
-    let code = cf.mc.emit();
+    let code = cf.mc.as_ref().unwrap().emit();
     // create 'emit' directory
     create_emit_directory();
...
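Note: with this change each emitted function is bracketed by its entry symbol and a matching `_end` symbol, both handed back as relocatable `ValueLocation`s so the boot image can record the extent of the function's machine code. The naming convention as a standalone sketch (the real `symbol()` mangling is elided; symbol names illustrative):

```rust
// Derive the end label from a function's already-mangled symbol, matching
// finish_code() above.
fn end_symbol(func_symbol: &str) -> String {
    format!("{}_end", func_symbol)
}

fn main() {
    assert_eq!(end_symbol("_mu_fib"), "_mu_fib_end");
}
```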
 use ast::ptr::P;
 use ast::ir::*;
+use runtime::ValueLocation;
 use compiler::machine_code::MachineCode;
 pub trait CodeGenerator {
-    fn start_code(&mut self, func_name: MuName);
-    fn finish_code(&mut self) -> Box<MachineCode>;
+    fn start_code(&mut self, func_name: MuName) -> ValueLocation;
+    fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode>, ValueLocation);
     fn print_cur_code(&self);
...
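Note: a sketch of how a pass is expected to drive the revised trait (types from this file; not compilable on its own):

```rust
fn gen(backend: &mut Box<CodeGenerator>, name: MuName) {
    let start = backend.start_code(name.clone()); // ValueLocation of the entry symbol
    // ... instruction selection emits code here ...
    let (mc, end) = backend.finish_code(name);    // machine code + end location
    // start/end flow into CompiledFunction::{start, end} (see inst_sel below)
}
```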
@@ -30,7 +30,9 @@ pub struct InstructionSelection {
     backend: Box<CodeGenerator>,
-    current_block: Option<MuName>
+    current_frame: Option<Frame>,
+    current_block: Option<MuName>,
+    current_func_start: Option<ValueLocation>
 }
 impl <'a> InstructionSelection {
@@ -38,7 +40,10 @@ impl <'a> InstructionSelection {
         InstructionSelection{
             name: "Instruction Selection (x64)",
             backend: Box::new(ASMCodeGen::new()),
-            current_block: None
+            current_frame: None,
+            current_block: None,
+            current_func_start: None,
         }
     }
@@ -461,26 +466,26 @@ impl <'a> InstructionSelection {
         // ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
         let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
-        let tmp_cursor = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
+        let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
         self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
         // alignup cursor (cursor + align - 1 & !(align - 1))
         // ASM: lea align-1(%cursor) -> %start
         let align = ty_info.alignment as i32;
-        let tmp_start = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
+        let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
         self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
         // ASM: and %start, !(align-1) -> %start
         self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
         // bump cursor
         // ASM: lea size(%start) -> %end
-        let tmp_end = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
+        let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
         self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
         // check with limit
         // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
         let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
-        let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, runtime::ADDRESS_TYPE.clone(), vm);
+        let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
         self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
         // branch to slow path if end > limit
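Note: the "alignup" comment is the standard power-of-two rounding trick, emitted above as an `lea` plus an `and`. The same arithmetic as a self-contained, runnable check:

```rust
// Round cursor up to the next multiple of align (align must be a power of
// two, which type alignments are).
fn align_up(cursor: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (cursor + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(0x1001, 8), 0x1008); // bumped to the next 8-byte boundary
    assert_eq!(align_up(0x1008, 8), 0x1008); // already aligned: unchanged
}
```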
@@ -513,7 +518,7 @@ impl <'a> InstructionSelection {
         // arg1: allocator address
         let allocator_offset = *thread::ALLOCATOR_OFFSET;
-        let tmp_allocator = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
+        let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
         self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
         // arg2: size
         let const_size = self.make_value_int_const(ty_size as u64, vm);
@@ -569,7 +574,7 @@ impl <'a> InstructionSelection {
     fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
         P(Value{
             hdr: MuEntityHeader::unnamed(vm.next_id()),
-            ty: runtime::UINT64_TYPE.clone(),
+            ty: UINT64_TYPE.clone(),
             v: Value_::Constant(Constant::Int(val))
         })
     }
@@ -587,7 +592,7 @@ impl <'a> InstructionSelection {
     }
     fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
-        let mem = self.make_memory_op_base_offset(base, offset, runtime::ADDRESS_TYPE.clone(), vm);
+        let mem = self.make_memory_op_base_offset(base, offset, ADDRESS_TYPE.clone(), vm);
         self.backend.emit_lea_r64(dest, &mem);
     }
@@ -742,7 +747,7 @@ impl <'a> InstructionSelection {
         }
     }
-    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
+    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, vm: &VM) {
         let block_name = "prologue".to_string();
         self.backend.start_block(block_name.clone());
@@ -757,11 +762,15 @@ impl <'a> InstructionSelection {
         self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
         // push all callee-saved registers
-        for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
-            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
-            // not pushing rbp (as we have done taht)
-            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
-                self.backend.emit_push_r64(&reg);
+        {
+            let frame = self.current_frame.as_mut().unwrap();
+            for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
+                let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
+                // not pushing rbp (as we have done taht)
+                if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
+                    self.backend.emit_push_r64(&reg);
+                    frame.alloc_slot_for_callee_saved_reg(reg.clone(), vm);
+                }
             }
         }
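Note: `Frame::alloc_slot_for_callee_saved_reg` is not shown in this diff; presumably it records where each pushed register lives relative to the frame so the frame can be serialized with the compiled function. A minimal sketch of plausible bookkeeping (struct and field names hypothetical):

```rust
use std::collections::HashMap;

struct FrameSketch {
    cur_offset: isize,            // next free offset from %rbp; grows downward
    slots: HashMap<usize, isize>, // register id -> frame offset
}

impl FrameSketch {
    // One 8-byte slot per pushed callee-saved GPR, mirroring the pushes above.
    fn alloc_slot_for_callee_saved_reg(&mut self, reg_id: usize) -> isize {
        self.cur_offset -= 8;
        self.slots.insert(reg_id, self.cur_offset);
        self.cur_offset
    }
}
```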
@@ -998,7 +1007,7 @@ impl <'a> InstructionSelection {
             Instruction_::GetIRef(op_index) => {
                 let ref op = ops[op_index];
-                self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, runtime::ADDRESS_TYPE.clone(), vm)
+                self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, ADDRESS_TYPE.clone(), vm)
             }
             _ => unimplemented!()
         }
@@ -1095,14 +1104,17 @@ impl CompilerPass for InstructionSelection {
     fn start_function(&mut self, vm: &VM, func_ver: &mut MuFunctionVersion) {
         debug!("{}", self.name());
-        let funcs = vm.funcs().read().unwrap();
-        let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
-        self.backend.start_code(func.name().unwrap());
+        self.current_frame = Some(Frame::new());
+        self.current_func_start = Some({
+            let funcs = vm.funcs().read().unwrap();
+            let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
+            self.backend.start_code(func.name().unwrap())
+        });
         // prologue (get arguments from entry block first)
         let entry_block = func_ver.content.as_ref().unwrap().get_entry_block();
         let ref args = entry_block.content.as_ref().unwrap().args;
-        self.emit_common_prologue(args);
+        self.emit_common_prologue(args, vm);
     }
     #[allow(unused_variables)]
@@ -1143,13 +1155,21 @@ impl CompilerPass for InstructionSelection {
     fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
         self.backend.print_cur_code();
-        let mc = self.backend.finish_code();
+        let func_name = {
+            let funcs = vm.funcs().read().unwrap();
+            let func = funcs.get(&func.func_id).unwrap().read().unwrap();
+            func.name().unwrap()
+        };
+        let (mc, func_end) = self.backend.finish_code(func_name);
         let compiled_func = CompiledFunction {
             func_id: func.func_id,
             func_ver_id: func.id(),
             temps: HashMap::new(),
-            mc: mc,
-            frame: Frame::new()
+            mc: Some(mc),
+            frame: self.current_frame.take().unwrap(),
+            start: self.current_func_start.take().unwrap(),
+            end: func_end
         };
         vm.add_compiled_func(compiled_func);
...
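Note: `mc` becoming an `Option` is what allows a `CompiledFunction` to be partially serialized into the boot image without its machine code, per the commit message. The `cf.mc()` / `cf.mc_mut()` call sites in the following hunks imply accessors along these lines (a sketch; the actual bodies are not in this diff):

```rust
impl CompiledFunction {
    // Borrow the machine code; panics if it has already been taken, which
    // compiler passes treat as a programming error.
    pub fn mc(&self) -> &Box<MachineCode> {
        self.mc.as_ref().expect("CompiledFunction::mc accessed after being taken")
    }

    pub fn mc_mut(&mut self) -> &mut Box<MachineCode> {
        self.mc.as_mut().expect("CompiledFunction::mc accessed after being taken")
    }
}
```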
@@ -24,7 +24,7 @@ macro_rules! GPR {
             let id = new_machine_id();
             P(Value {
                 hdr: MuEntityHeader::named(id, $name.to_string()),
-                ty: GPR_TY.clone(),
+                ty: UINT64_TYPE.clone(),
                 v: Value_::SSAVar(id)
             })
         }
@@ -37,18 +37,13 @@ macro_rules! FPR {
             let id = new_machine_id();
             P(Value {
                 hdr: MuEntityHeader::named(id, $name.to_string()),
-                ty: FPR_TY.clone(),
+                ty: DOUBLE_TYPE.clone(),
                 v: Value_::SSAVar(id)
             })
         }
     };
 }
-lazy_static! {
-    pub static ref GPR_TY : P<MuType> = P(MuType::new(new_internal_id(), MuType_::int(64)));
-    pub static ref FPR_TY : P<MuType> = P(MuType::new(new_internal_id(), MuType_::double()));
-}
 // put into several segments to avoid 'recursion limit reached' error
 lazy_static! {
     pub static ref RAX : P<Value> = GPR!("rax");
...
@@ -152,9 +152,9 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
 #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
 pub struct BackendTypeInfo {
-    size: ByteSize,
-    alignment: ByteSize,
-    struct_layout: Option<Vec<ByteSize>>
+    pub size: ByteSize,
+    pub alignment: ByteSize,
+    pub struct_layout: Option<Vec<ByteSize>>
 }
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
...
@@ -15,18 +15,18 @@ impl PeepholeOptimization {
     }
     pub fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
-        if cf.mc.is_move(inst) && !cf.mc.is_using_mem_op(inst) {
-            cf.mc.trace_inst(inst);
+        if cf.mc().is_move(inst) && !cf.mc().is_using_mem_op(inst) {
+            cf.mc().trace_inst(inst);
             let src : MuID = {
-                let uses = cf.mc.get_inst_reg_uses(inst);
+                let uses = cf.mc().get_inst_reg_uses(inst);
                 if uses.len() != 1 {
                     // moving immediate to register, its not redundant
                     return;
                 }
                 uses[0]
             };
-            let dst : MuID = cf.mc.get_inst_reg_defines(inst)[0];
+            let dst : MuID = cf.mc().get_inst_reg_defines(inst)[0];
             let src_machine_reg : MuID = {
                 match cf.temps.get(&src) {
@@ -44,7 +44,7 @@ impl PeepholeOptimization {
             if src_machine_reg == dst_machine_reg {
                 trace!("Redundant! removed");
                 // redundant, remove this move
-                cf.mc.set_inst_nop(inst);
+                cf.mc_mut().set_inst_nop(inst);
             }
         }
     }
@@ -59,11 +59,11 @@ impl CompilerPass for PeepholeOptimization {
         let compiled_funcs = vm.compiled_funcs().read().unwrap();
         let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
-        for i in 0..cf.mc.number_of_insts() {
+        for i in 0..cf.mc().number_of_insts() {
             self.remove_redundant_move(i, &mut cf);
         }
         trace!("after peephole optimization:");
-        cf.mc.trace_mc();
+        cf.mc().trace_mc();
     }
 }
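Note: the redundancy arises after register allocation, when the source and destination temporaries of a move are colored to the same machine register. A condensed, runnable sketch of the decision (the real pass resolves temps through `cf.temps`; ids here are illustrative):

```rust
use std::collections::HashMap;

// temps maps a temporary's id to its assigned machine register's id.
// A temp absent from the map is taken to already be a machine register.
fn resolve(temps: &HashMap<usize, usize>, reg: usize) -> usize {
    *temps.get(&reg).unwrap_or(&reg)
}

fn is_redundant_move(temps: &HashMap<usize, usize>, src: usize, dst: usize) -> bool {
    resolve(temps, src) == resolve(temps, dst) // e.g. mov %rax -> %rax
}

fn main() {
    let mut temps = HashMap::new();
    temps.insert(100, 1); // t100 colored to machine reg 1 (say, RAX)
    temps.insert(101, 1); // t101 colored to RAX as well
    assert!(is_redundant_move(&temps, 100, 101)); // becomes a nop
}
```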
@@ -262,7 +262,7 @@ pub fn is_machine_reg(reg: MuID) -> bool {
 #[allow(unused_variables)]
 fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
-    let n_insts = cf.mc.number_of_insts();
+    let n_insts = cf.mc().number_of_insts();
     let mut livein : Vec<Vec<MuID>> = vec![vec![]; n_insts];
     let mut liveout : Vec<Vec<MuID>> = vec![vec![]; n_insts];
@@ -280,10 +280,10 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
             // in[n] <- use[n] + (out[n] - def[n])
             // (1) in[n] = use[n]
             let mut in_set_new = vec![];
-            in_set_new.extend_from_slice(&cf.mc.get_inst_reg_uses(n));
+            in_set_new.extend_from_slice(&cf.mc().get_inst_reg_uses(n));
             // (2) diff = out[n] - def[n]
             let mut diff = liveout[n].to_vec();
-            for def in cf.mc.get_inst_reg_defines(n) {
+            for def in cf.mc().get_inst_reg_defines(n) {
                 vec_utils::remove_value(&mut diff, *def);
             }
             // (3) in[n] = in[n] + diff
@@ -295,7 +295,7 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
             // out[n] <- union(in[s] for every successor s of n)
             let mut union = vec![];
-            for s in cf.mc.get_succs(n) {
+            for s in cf.mc().get_succs(n) {
                 vec_utils::append_clone_unique(&mut union, &livein[*s]);
             }
@@ -309,15 +309,15 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
         }
     }
-    for block in cf.mc.get_all_blocks().to_vec() {
-        if cf.mc.get_ir_block_livein(&block).is_none() {
-            let start_inst = cf.mc.get_block_range(&block).unwrap().start;
-            cf.mc.set_ir_block_livein(&block, livein[start_inst].to_vec());
+    for block in cf.mc().get_all_blocks().to_vec() {
+        if cf.mc().get_ir_block_livein(&block).is_none() {
+            let start_inst = cf.mc().get_block_range(&block).unwrap().start;
+            cf.mc_mut().set_ir_block_livein(&block, livein[start_inst].to_vec());
         }
-        if cf.mc.get_ir_block_liveout(&block).is_none() {
-            let end_inst = cf.mc.get_block_range(&block).unwrap().end;
-            cf.mc.set_ir_block_liveout(&block, liveout[end_inst].to_vec());
+        if cf.mc().get_ir_block_liveout(&block).is_none() {
+            let end_inst = cf.mc().get_block_range(&block).unwrap().end;
+            cf.mc_mut().set_ir_block_liveout(&block, liveout[end_inst].to_vec());
         }
     }
 }
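Note: `build_live_set` iterates the backward dataflow equations in the comments above to a fixed point. The same computation as a self-contained, runnable sketch over plain integer sets (the real pass works on `MachineCode` and `MuID`s):

```rust
use std::collections::HashSet;

// in[n]  = use[n] + (out[n] - def[n])
// out[n] = union(in[s] for every successor s of n)
fn live_sets(
    uses: &[HashSet<usize>],
    defs: &[HashSet<usize>],
    succs: &[Vec<usize>],
) -> (Vec<HashSet<usize>>, Vec<HashSet<usize>>) {
    let n = uses.len();
    let mut livein: Vec<HashSet<usize>> = vec![HashSet::new(); n];
    let mut liveout: Vec<HashSet<usize>> = vec![HashSet::new(); n];
    loop {
        let mut changed = false;
        for i in (0..n).rev() {
            let out: HashSet<usize> = succs[i]
                .iter()
                .flat_map(|s| livein[*s].iter().cloned())
                .collect();
            let mut inn: HashSet<usize> = uses[i].clone();
            inn.extend(out.difference(&defs[i]).cloned());
            if inn != livein[i] || out != liveout[i] {
                livein[i] = inn;
                liveout[i] = out;
                changed = true;
            }
        }
        if !changed {
            return (livein, liveout); // fixed point reached
        }
    }
}

fn main() {
    // Two instructions: 0 defines r1; 1 uses r1. r1 is live across the edge.
    let uses = vec![HashSet::new(), [1].iter().cloned().collect()];
    let defs = vec![[1].iter().cloned().collect(), HashSet::new()];
    let succs = vec![vec![1], vec![]];
    let (livein, liveout) = live_sets(&uses, &defs, &succs);
    assert!(liveout[0].contains(&1) && livein[1].contains(&1));
    assert!(!livein[0].contains(&1)); // defined at 0, so not live before it
}
```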
@@ -336,13 +336,13 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
     }
     // Initialize and creates nodes for all the involved temps/regs
-    for i in 0..cf.mc.number_of_insts() {
-        for reg_id in cf.mc.get_inst_reg_defines(i) {
+    for i in 0..cf.mc().number_of_insts() {
+        for reg_id in cf.mc().get_inst_reg_defines(i) {
             let reg_id = *reg_id;
             ig.new_node(reg_id, &func.context);
         }
-        for reg_id in cf.mc.get_inst_reg_uses(i) {
+        for reg_id in cf.mc().get_inst_reg_uses(i) {
             let reg_id = *reg_id;
             ig.new_node(reg_id, &func.context);
         }
@@ -351,14 +351,14 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
     // all nodes has been added, we init graph (create adjacency matrix)
     ig.init_graph();
-    for block in cf.mc.get_all_blocks() {
+    for block in cf.mc().get_all_blocks() {
         // Current_Live(B) = LiveOut(B)
-        let mut current_live = LinkedHashSet::from_vec(match cf.mc.get_ir_block_liveout(&block) {
+        let mut current_live = LinkedHashSet::from_vec(match cf.mc().get_ir_block_liveout(&block) {
             Some(liveout) => liveout.to_vec(),
             None => panic!("cannot find liveout for block {}", block)
         });
-        let range = cf.mc.get_block_range(&block);
+        let range = cf.mc().get_block_range(&block);
         if range.is_none() {
             continue;
         }
@@ -366,14 +366,14 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
         // for every inst I in reverse order
         for i in range.unwrap().rev() {
             let src : Option<MuID> = {
-                if cf.mc.is_move(i) {
-                    let src = cf.mc.get_inst_reg_uses(i);
-                    let dst = cf.mc.get_inst_reg_defines(i);
+                if cf.mc().is_move(i) {
+                    let src = cf.mc().get_inst_reg_uses(i);
+                    let dst = cf.mc().get_inst_reg_defines(i);
                     // src: reg/imm/mem
                     // dest: reg/mem
                     // we dont care if src/dest is mem
-                    if cf.mc.is_using_mem_op(i) {
+                    if cf.mc().is_using_mem_op(i) {
                         None
                     } else {
                         if src.len() == 1 {
@@ -392,7 +392,7 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
             };
             // for every definition D in I