Commit 5fbf8fa4 authored by qinsoon

Frame done.

Partially serialize compiled functions into the boot image.
parent 14b3ee5f
......@@ -517,6 +517,14 @@ pub struct Value {
}
impl Value {
pub fn make_int_const(id: MuID, val: u64) -> P<Value> {
P(Value{
hdr: MuEntityHeader::unnamed(id),
ty: UINT32_TYPE.clone(),
v: Value_::Constant(Constant::Int(val))
})
}
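A usage sketch (hypothetical call site; note the constant is typed as UINT32_TYPE regardless of the u64 payload, exactly as the code above does):

```rust
// Hypothetical: wrap 42 as an int constant value using a fresh VM id.
let forty_two = Value::make_int_const(vm.next_id(), 42);
assert!(!forty_two.is_mem()); // constants are not memory operands
```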
pub fn is_mem(&self) -> bool {
match self.v {
Value_::Memory(_) => true,
......
use ptr::P;
use ir::*;
use utils::POINTER_SIZE;
use utils::vec_utils;
use std::fmt;
use std::collections::HashMap;
use std::sync::RwLock;
lazy_static! {
pub static ref ADDRESS_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(POINTER_SIZE * 8))
);
pub static ref UINT32_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(32))
);
pub static ref UINT64_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(64))
);
pub static ref DOUBLE_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::double())
);
pub static ref INTERNAL_TYPES : Vec<P<MuType>> = vec![
ADDRESS_TYPE.clone(),
UINT32_TYPE.clone(),
UINT64_TYPE.clone(),
DOUBLE_TYPE.clone()
];
}
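These statics are created once on first use and shared everywhere. A small usage sketch (assuming `P` behaves like an `Arc`-style shared pointer, which is how it is used throughout this codebase):

```rust
// Cloning a P<MuType> is a cheap handle copy; both handles refer to the
// single MuType built by the lazy_static initializer.
let a = ADDRESS_TYPE.clone();
let b = ADDRESS_TYPE.clone();
// INTERNAL_TYPES gathers all internal types so the VM can iterate them in
// one place (e.g. to register them at startup -- an assumption here).
for ty in INTERNAL_TYPES.iter() {
    let _ = ty;
}
```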
#[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct MuType {
pub hdr: MuEntityHeader,
......
......@@ -3,12 +3,14 @@
use compiler::backend;
use compiler::backend::AOT_EMIT_CONTEXT_FILE;
use compiler::backend::AOT_EMIT_DIR;
use compiler::backend::RegGroup;
use utils::ByteSize;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::machine_code::CompiledFunction;
use compiler::machine_code::MachineCode;
use vm::VM;
use runtime::ValueLocation;
use utils::string_utils;
......@@ -629,7 +631,7 @@ impl ASMCodeGen {
}
impl CodeGenerator for ASMCodeGen {
fn start_code(&mut self, func_name: MuName) {
fn start_code(&mut self, func_name: MuName) -> ValueLocation {
self.cur = Some(Box::new(ASMCode {
name: func_name.clone(),
code: vec![],
......@@ -655,13 +657,27 @@ impl CodeGenerator for ASMCodeGen {
}));
// to link with C sources via gcc
self.add_asm_symbolic(directive_globl(symbol(func_name.clone())));
self.add_asm_symbolic(format!("{}:", symbol(func_name.clone())));
let func_symbol = symbol(func_name.clone());
self.add_asm_symbolic(directive_globl(func_symbol.clone()));
self.add_asm_symbolic(format!("{}:", func_symbol.clone()));
ValueLocation::Relocatable(RegGroup::GPR, func_symbol)
}
fn finish_code(&mut self) -> Box<MachineCode> {
fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode>, ValueLocation) {
let func_end_symbol = {
let mut symbol = symbol(func_name.clone());
symbol.push_str("_end");
symbol
};
self.add_asm_symbolic(directive_globl(func_end_symbol.clone()));
self.control_flow_analysis();
self.cur.take().unwrap()
(
self.cur.take().unwrap(),
ValueLocation::Relocatable(RegGroup::GPR, func_end_symbol)
)
}
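With this change, start_code and finish_code bracket each function between a global start symbol and a global `{name}_end` symbol, and hand back relocatable locations for both, so the boot image can describe a function's code extent symbolically instead of baking in addresses. A sketch of the shape ValueLocation takes here (the variant set is an assumption; only Relocatable appears in this diff):

```rust
// Sketch only: Relocatable pairs a register group with a symbol name that
// is resolved when the boot image is loaded/linked.
pub enum ValueLocation {
    Relocatable(RegGroup, MuName), // resolved by symbol at load time
    // other variants (machine register, stack slot, constant) elided --
    // assumptions, not shown in this diff
}
```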
fn print_cur_code(&self) {
......@@ -1207,7 +1223,7 @@ pub fn emit_code(fv: &mut MuFunctionVersion, vm: &VM) {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let cf = compiled_funcs.get(&fv.id()).unwrap().read().unwrap();
let code = cf.mc.emit();
let code = cf.mc.as_ref().unwrap().emit();
// create 'emit' directory
create_emit_directory();
......
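The `cf.mc` field is now an Option (later hunks switch call sites from `cf.mc` to `cf.mc()`/`cf.mc_mut()`), so the machine code can be taken out of the CompiledFunction when it is serialized into the boot image. A plausible shape for those accessors, which do not appear in this diff (an assumption, matching the call sites below):

```rust
impl CompiledFunction {
    // Borrow the machine code; panic with a clear message if it has
    // already been moved out (e.g. after boot-image serialization).
    pub fn mc(&self) -> &Box<MachineCode> {
        self.mc.as_ref().expect("machine code has been taken")
    }
    pub fn mc_mut(&mut self) -> &mut Box<MachineCode> {
        self.mc.as_mut().expect("machine code has been taken")
    }
}
```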
use ast::ptr::P;
use ast::ir::*;
use runtime::ValueLocation;
use compiler::machine_code::MachineCode;
pub trait CodeGenerator {
fn start_code(&mut self, func_name: MuName);
fn finish_code(&mut self) -> Box<MachineCode>;
fn start_code(&mut self, func_name: MuName) -> ValueLocation;
fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode>, ValueLocation);
fn print_cur_code(&self);
......
......@@ -30,7 +30,9 @@ pub struct InstructionSelection {
backend: Box<CodeGenerator>,
current_block: Option<MuName>
current_frame: Option<Frame>,
current_block: Option<MuName>,
current_func_start: Option<ValueLocation>
}
impl <'a> InstructionSelection {
......@@ -38,7 +40,10 @@ impl <'a> InstructionSelection {
InstructionSelection{
name: "Instruction Selection (x64)",
backend: Box::new(ASMCodeGen::new()),
current_block: None
current_frame: None,
current_block: None,
current_func_start: None,
}
}
......@@ -461,26 +466,26 @@ impl <'a> InstructionSelection {
// ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
let tmp_cursor = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
// align up cursor: (cursor + align - 1) & !(align - 1)
// ASM: lea align-1(%cursor) -> %start
let align = ty_info.alignment as i32;
let tmp_start = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
// ASM: and %start, !(align-1) -> %start
self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
// bump cursor
// ASM: lea size(%start) -> %end
let tmp_end = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
// check with limit
// ASM: cmp %end, [%tl + allocator_offset + limit_offset]
let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, runtime::ADDRESS_TYPE.clone(), vm);
let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
// branch to slow path if end > limit
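Taken together, this hunk emits the classic bump-pointer fast path: load the cursor, align it up, bump it by the object size, and fall into the slow path if the new cursor passes the limit. A plain-Rust rendering of the logic the emitted code performs (a sketch, not VM code; align must be a power of two):

```rust
fn alloc_fast(cursor: &mut usize, limit: usize, size: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two());
    let start = (*cursor + align - 1) & !(align - 1); // align up: e.g. (13, 8) -> 16
    let end = start + size;                           // bump
    if end > limit {
        None                                          // branch to slow path
    } else {
        *cursor = end;
        Some(start)
    }
}
```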
......@@ -513,7 +518,7 @@ impl <'a> InstructionSelection {
// arg1: allocator address
let allocator_offset = *thread::ALLOCATOR_OFFSET;
let tmp_allocator = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
// arg2: size
let const_size = self.make_value_int_const(ty_size as u64, vm);
......@@ -569,7 +574,7 @@ impl <'a> InstructionSelection {
fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: runtime::UINT64_TYPE.clone(),
ty: UINT64_TYPE.clone(),
v: Value_::Constant(Constant::Int(val))
})
}
......@@ -587,7 +592,7 @@ impl <'a> InstructionSelection {
}
fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
let mem = self.make_memory_op_base_offset(base, offset, runtime::ADDRESS_TYPE.clone(), vm);
let mem = self.make_memory_op_base_offset(base, offset, ADDRESS_TYPE.clone(), vm);
self.backend.emit_lea_r64(dest, &mem);
}
......@@ -742,7 +747,7 @@ impl <'a> InstructionSelection {
}
}
fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, vm: &VM) {
let block_name = "prologue".to_string();
self.backend.start_block(block_name.clone());
......@@ -757,11 +762,15 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
// push all callee-saved registers
for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
// not pushing rbp (as we have already done that)
if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
self.backend.emit_push_r64(&reg);
{
let frame = self.current_frame.as_mut().unwrap();
for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
// not pushing rbp (as we have already done that)
if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
self.backend.emit_push_r64(&reg);
frame.alloc_slot_for_callee_saved_reg(reg.clone(), vm);
}
}
}
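Besides pushing each callee-saved register, the prologue now records a frame slot for it, so later phases (and eventually the boot image) know where each register was saved relative to %rbp. Roughly, the bookkeeping amounts to the following (illustrative only; offsets assume 8-byte words on x86-64):

```rust
// Illustrative, not VM code: each push moves %rsp down one word and the
// frame records the matching rbp-relative slot. rbp itself is skipped
// because the prologue already pushed it.
fn main() {
    let mut offset = -8isize; // first free slot below the saved RBP
    for reg in &["rbx", "r12", "r13", "r14", "r15"] {
        println!("push %{:<3} ; frame slot at rbp{}", reg, offset);
        offset -= 8; // POINTER_SIZE on x86-64
    }
}
```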
......@@ -998,7 +1007,7 @@ impl <'a> InstructionSelection {
Instruction_::GetIRef(op_index) => {
let ref op = ops[op_index];
self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, runtime::ADDRESS_TYPE.clone(), vm)
self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, ADDRESS_TYPE.clone(), vm)
}
_ => unimplemented!()
}
......@@ -1095,14 +1104,17 @@ impl CompilerPass for InstructionSelection {
fn start_function(&mut self, vm: &VM, func_ver: &mut MuFunctionVersion) {
debug!("{}", self.name());
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
self.backend.start_code(func.name().unwrap());
self.current_frame = Some(Frame::new());
self.current_func_start = Some({
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
self.backend.start_code(func.name().unwrap())
});
// prologue (get arguments from entry block first)
let entry_block = func_ver.content.as_ref().unwrap().get_entry_block();
let ref args = entry_block.content.as_ref().unwrap().args;
self.emit_common_prologue(args);
self.emit_common_prologue(args, vm);
}
#[allow(unused_variables)]
......@@ -1143,13 +1155,21 @@ impl CompilerPass for InstructionSelection {
fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
self.backend.print_cur_code();
let mc = self.backend.finish_code();
let func_name = {
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func.func_id).unwrap().read().unwrap();
func.name().unwrap()
};
let (mc, func_end) = self.backend.finish_code(func_name);
let compiled_func = CompiledFunction {
func_id: func.func_id,
func_ver_id: func.id(),
temps: HashMap::new(),
mc: mc,
frame: Frame::new()
mc: Some(mc),
frame: self.current_frame.take().unwrap(),
start: self.current_func_start.take().unwrap(),
end: func_end
};
vm.add_compiled_func(compiled_func);
......
......@@ -24,7 +24,7 @@ macro_rules! GPR {
let id = new_machine_id();
P(Value {
hdr: MuEntityHeader::named(id, $name.to_string()),
ty: GPR_TY.clone(),
ty: UINT64_TYPE.clone(),
v: Value_::SSAVar(id)
})
}
......@@ -37,18 +37,13 @@ macro_rules! FPR {
let id = new_machine_id();
P(Value {
hdr: MuEntityHeader::named(id, $name.to_string()),
ty: FPR_TY.clone(),
ty: DOUBLE_TYPE.clone(),
v: Value_::SSAVar(id)
})
}
};
}
lazy_static! {
pub static ref GPR_TY : P<MuType> = P(MuType::new(new_internal_id(), MuType_::int(64)));
pub static ref FPR_TY : P<MuType> = P(MuType::new(new_internal_id(), MuType_::double()));
}
// split into several segments to avoid the 'recursion limit reached' error
lazy_static! {
pub static ref RAX : P<Value> = GPR!("rax");
......
......@@ -152,9 +152,9 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct BackendTypeInfo {
size: ByteSize,
alignment: ByteSize,
struct_layout: Option<Vec<ByteSize>>
pub size: ByteSize,
pub alignment: ByteSize,
pub struct_layout: Option<Vec<ByteSize>>
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
......
......@@ -15,18 +15,18 @@ impl PeepholeOptimization {
}
pub fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
if cf.mc.is_move(inst) && !cf.mc.is_using_mem_op(inst) {
cf.mc.trace_inst(inst);
if cf.mc().is_move(inst) && !cf.mc().is_using_mem_op(inst) {
cf.mc().trace_inst(inst);
let src : MuID = {
let uses = cf.mc.get_inst_reg_uses(inst);
let uses = cf.mc().get_inst_reg_uses(inst);
if uses.len() != 1 {
// moving an immediate into a register is not redundant
return;
}
uses[0]
};
let dst : MuID = cf.mc.get_inst_reg_defines(inst)[0];
let dst : MuID = cf.mc().get_inst_reg_defines(inst)[0];
let src_machine_reg : MuID = {
match cf.temps.get(&src) {
......@@ -44,7 +44,7 @@ impl PeepholeOptimization {
if src_machine_reg == dst_machine_reg {
trace!("Redundant! removed");
// redundant, remove this move
cf.mc.set_inst_nop(inst);
cf.mc_mut().set_inst_nop(inst);
}
}
}
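After register allocation, a move is redundant exactly when its source and destination temporaries were colored to the same machine register; the pass rewrites such instructions to nops rather than deleting them, so instruction indices stay stable. The test reduces to the following (a toy sketch; the fallback for ids that are already machine registers is an assumption, since the match above is truncated in this diff):

```rust
use std::collections::HashMap;

// cf.temps maps temporaries to their assigned machine registers;
// machine registers are assumed to map to themselves.
fn is_redundant_move(src: MuID, dst: MuID, temps: &HashMap<MuID, MuID>) -> bool {
    let machine = |t: MuID| *temps.get(&t).unwrap_or(&t);
    machine(src) == machine(dst)
}
```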
......@@ -59,11 +59,11 @@ impl CompilerPass for PeepholeOptimization {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
for i in 0..cf.mc.number_of_insts() {
for i in 0..cf.mc().number_of_insts() {
self.remove_redundant_move(i, &mut cf);
}
trace!("after peephole optimization:");
cf.mc.trace_mc();
cf.mc().trace_mc();
}
}
......@@ -262,7 +262,7 @@ pub fn is_machine_reg(reg: MuID) -> bool {
#[allow(unused_variables)]
fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
let n_insts = cf.mc.number_of_insts();
let n_insts = cf.mc().number_of_insts();
let mut livein : Vec<Vec<MuID>> = vec![vec![]; n_insts];
let mut liveout : Vec<Vec<MuID>> = vec![vec![]; n_insts];
......@@ -280,10 +280,10 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
// in[n] <- use[n] + (out[n] - def[n])
// (1) in[n] = use[n]
let mut in_set_new = vec![];
in_set_new.extend_from_slice(&cf.mc.get_inst_reg_uses(n));
in_set_new.extend_from_slice(&cf.mc().get_inst_reg_uses(n));
// (2) diff = out[n] - def[n]
let mut diff = liveout[n].to_vec();
for def in cf.mc.get_inst_reg_defines(n) {
for def in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(&mut diff, *def);
}
// (3) in[n] = in[n] + diff
......@@ -295,7 +295,7 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
// out[n] <- union(in[s] for every successor s of n)
let mut union = vec![];
for s in cf.mc.get_succs(n) {
for s in cf.mc().get_succs(n) {
vec_utils::append_clone_unique(&mut union, &livein[*s]);
}
......@@ -309,15 +309,15 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
}
}
for block in cf.mc.get_all_blocks().to_vec() {
if cf.mc.get_ir_block_livein(&block).is_none() {
let start_inst = cf.mc.get_block_range(&block).unwrap().start;
cf.mc.set_ir_block_livein(&block, livein[start_inst].to_vec());
for block in cf.mc().get_all_blocks().to_vec() {
if cf.mc().get_ir_block_livein(&block).is_none() {
let start_inst = cf.mc().get_block_range(&block).unwrap().start;
cf.mc_mut().set_ir_block_livein(&block, livein[start_inst].to_vec());
}
if cf.mc.get_ir_block_liveout(&block).is_none() {
let end_inst = cf.mc.get_block_range(&block).unwrap().end;
cf.mc.set_ir_block_liveout(&block, liveout[end_inst].to_vec());
if cf.mc().get_ir_block_liveout(&block).is_none() {
let end_inst = cf.mc().get_block_range(&block).unwrap().end;
cf.mc_mut().set_ir_block_liveout(&block, liveout[end_inst].to_vec());
}
}
}
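The loop above is the textbook backward liveness fixpoint: in[n] = use[n] ∪ (out[n] − def[n]) and out[n] = ∪ in[s] over successors s. A self-contained check on a two-instruction straight line (i0: a = 1; i1: ret a):

```rust
use std::collections::HashSet;

fn main() {
    // def/use sets, with register `a` as id 0.
    let defs = [HashSet::from([0usize]), HashSet::new()];
    let uses = [HashSet::new(), HashSet::from([0usize])];
    let mut live_in: [HashSet<usize>; 2] = [HashSet::new(), HashSet::new()];
    let mut live_out: [HashSet<usize>; 2] = [HashSet::new(), HashSet::new()];
    // one backward pass reaches the fixpoint on straight-line code
    for n in (0..2).rev() {
        if n + 1 < 2 {
            live_out[n] = live_in[n + 1].clone(); // out[n] = in[n+1]
        }
        let diff: HashSet<usize> = live_out[n].difference(&defs[n]).cloned().collect();
        live_in[n] = uses[n].union(&diff).cloned().collect();
    }
    assert_eq!(live_out[0], HashSet::from([0])); // `a` is live across i0
    assert!(live_in[0].is_empty());              // nothing live on entry
}
```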
......@@ -336,13 +336,13 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
}
// Initialize and creates nodes for all the involved temps/regs
for i in 0..cf.mc.number_of_insts() {
for reg_id in cf.mc.get_inst_reg_defines(i) {
for i in 0..cf.mc().number_of_insts() {
for reg_id in cf.mc().get_inst_reg_defines(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
for reg_id in cf.mc.get_inst_reg_uses(i) {
for reg_id in cf.mc().get_inst_reg_uses(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
......@@ -351,14 +351,14 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// all nodes have been added; init the graph (create the adjacency matrix)
ig.init_graph();
for block in cf.mc.get_all_blocks() {
for block in cf.mc().get_all_blocks() {
// Current_Live(B) = LiveOut(B)
let mut current_live = LinkedHashSet::from_vec(match cf.mc.get_ir_block_liveout(&block) {
let mut current_live = LinkedHashSet::from_vec(match cf.mc().get_ir_block_liveout(&block) {
Some(liveout) => liveout.to_vec(),
None => panic!("cannot find liveout for block {}", block)
});
let range = cf.mc.get_block_range(&block);
let range = cf.mc().get_block_range(&block);
if range.is_none() {
continue;
}
......@@ -366,14 +366,14 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// for every inst I in reverse order
for i in range.unwrap().rev() {
let src : Option<MuID> = {
if cf.mc.is_move(i) {
let src = cf.mc.get_inst_reg_uses(i);
let dst = cf.mc.get_inst_reg_defines(i);
if cf.mc().is_move(i) {
let src = cf.mc().get_inst_reg_uses(i);
let dst = cf.mc().get_inst_reg_defines(i);
// src: reg/imm/mem
// dest: reg/mem
// we don't care if src/dest is mem
if cf.mc.is_using_mem_op(i) {
if cf.mc().is_using_mem_op(i) {
None
} else {
if src.len() == 1 {
......@@ -392,7 +392,7 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
};
// for every definition D in I
for d in cf.mc.get_inst_reg_defines(i) {
for d in cf.mc().get_inst_reg_defines(i) {
// add an interference from D to every element E in Current_Live - {D}
// creating nodes if necessary
for e in current_live.iter() {
......@@ -413,13 +413,13 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
}
// for every definition D in I
for d in cf.mc.get_inst_reg_defines(i) {
for d in cf.mc().get_inst_reg_defines(i) {
// remove D from Current_Live
current_live.remove(d);
}
// for every use U in I
for u in cf.mc.get_inst_reg_uses(i) {
for u in cf.mc().get_inst_reg_uses(i) {
// add U to Current_Live
current_live.insert(*u);
}
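Each reverse step implements the standard interference update: every definition D conflicts with everything currently live, except the source of a move (so move-related pairs stay coalescable); then definitions are killed and uses become live. Condensed sketch (names hypothetical; `add_edge` stands in for the real interference-graph update):

```rust
use std::collections::HashSet;

// One backward step over instruction I, given its def/use sets and the
// move source (None for non-moves and moves involving memory).
fn step(live: &mut HashSet<usize>, defs: &[usize], uses: &[usize],
        mov_src: Option<usize>, mut add_edge: impl FnMut(usize, usize)) {
    for &d in defs {
        for &e in live.iter() {
            if e != d && Some(e) != mov_src {
                add_edge(d, e); // D interferes with E
            }
        }
    }
    for &d in defs { live.remove(&d); } // kill definitions
    for &u in uses { live.insert(u); }  // gen uses
}
```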
......@@ -443,7 +443,7 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
}
// Liveness Analysis
let n_insts = cf.mc.number_of_insts();
let n_insts = cf.mc().number_of_insts();
let mut live_in : Vec<Vec<MuID>> = vec![vec![]; n_insts];
let mut live_out : Vec<Vec<MuID>> = vec![vec![]; n_insts];
let mut work_list : LinkedList<usize> = LinkedList::new();
......@@ -453,12 +453,12 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
for i in 0..n_insts {
let ref mut in_set = live_in[i];
for reg_id in cf.mc.get_inst_reg_defines(i) {
for reg_id in cf.mc().get_inst_reg_defines(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
for reg_id in cf.mc.get_inst_reg_uses(i) {
for reg_id in cf.mc().get_inst_reg_uses(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
......@@ -479,14 +479,14 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
let ref mut out_set = live_out[n];
// out = union(in[succ]) for all succs
for succ in cf.mc.get_succs(n) {
for succ in cf.mc().get_succs(n) {
trace!("add successor's livein {:?} to #{}", &live_in[*succ], n);
vec_utils::add_all(out_set, &live_in[*succ]);
}
// in = use(i.e. live_in) + (out - def)
let mut diff = out_set.clone();
for def in cf.mc.get_inst_reg_defines(n) {
for def in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(&mut diff, *def);
trace!("removing def: {}", *def);
trace!("diff = {:?}", diff);
......@@ -498,7 +498,7 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
trace!("in = (use) {:?}", in_set);
if vec_utils::add_all(in_set, &diff) {
for p in cf.mc.get_preds(n) {
for p in cf.mc().get_preds(n) {
work_list.push_front(*p);
}
}
......@@ -520,9 +520,9 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
let ref mut live = live_out[n];
let src : Option<MuID> = {
if cf.mc.is_move(n) {
let src = cf.mc.get_inst_reg_uses(n);
let dst = cf.mc.get_inst_reg_defines(n);
if cf.mc().is_move(n) {
let src = cf.mc().get_inst_reg_uses(n);
let dst = cf.mc().get_inst_reg_defines(n);
// src may be an immediate number
// but dest is definitely a register
......@@ -542,7 +542,7 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
}
};
for d in cf.mc.get_inst_reg_defines(n) {
for d in cf.mc().get_inst_reg_defines(n) {
for t in live.iter() {
if src.is_none() || (src.is_some() && *t != src.unwrap()) {
let from = ig.get_node(*d);
......@@ -560,11 +560,11 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
}
}
for d in cf.mc.get_inst_reg_defines(n) {
for d in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(live, *d);
}
for u in cf.mc.get_inst_reg_uses(n) {
for u in cf.mc().get_inst_reg_uses(n) {
live.push(*u);
}
}
......
......@@ -28,7 +28,7 @@ impl RegisterAllocation {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
cf.mc.trace_mc();
cf.mc().trace_mc();
// initialize machine registers for the function context
init_machine_regs_for_func(&mut func.context);
......@@ -56,13 +56,13 @@ impl RegisterAllocation {
let machine_reg = coloring.ig.get_color_of(alias).unwrap();
trace!("replacing {} with {}", temp, machine_reg);
cf.mc.replace_reg(temp, machine_reg);
cf.mc_mut().replace_reg(temp, machine_reg);
cf.temps.insert(temp, machine_reg);
}
}
cf.mc.trace_mc();
cf.mc().trace_mc();
true
}
......
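Applying the coloring is a plain substitution pass: each temporary is rewritten to the machine register its interference-graph node was colored with, and the mapping is remembered in cf.temps, which the peephole pass above consults. Condensed form of the loop (the iteration source is hypothetical):

```rust
for (temp, machine_reg) in assignments {
    cf.mc_mut().replace_reg(temp, machine_reg); // rewrite defs/uses in place
    cf.temps.insert(temp, machine_reg);         // remember for later passes
}
```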
use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use utils::LinkedHashMap;
use std::collections::HashMap;
use utils::POINTER_SIZE;
use vm::VM;
type SlotID = usize;
......@@ -10,48 +12,58 @@ type SlotID = usize;
// |---------------
// | return address
// | old RBP <- RBP
// | func ID
// | callee saved
// | spilled
// |---------------
// | alloca area
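Offsets are negative and relative to %rbp, growing downwards as slots are allocated. A worked picture with 8-byte words (illustrative; the real bookkeeping lives in alloc_slot, which this hunk does not show):

```rust
// Illustrative rbp-relative layout after the prologue:
//   [rbp + 8]  return address (pushed by `call`)
//   [rbp + 0]  old RBP        (pushed by the prologue)
//   [rbp - 8]  first allocated slot (callee-saved / spill)
//   [rbp - 16] second allocated slot, and so on.
// Each slot is placed at the running offset, which then moves down:
fn place_slot(cur_offset: &mut isize, slot_size: isize) -> isize {
    let at = *cur_offset;     // this slot's rbp-relative offset
    *cur_offset -= slot_size; // next slot goes further down
    at
}
```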
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
#[derive(RustcEncodable, RustcDecodable)]
pub struct Frame {
cur_slot_id: SlotID,
cur_offset: isize, // offset to rbp
allocated: LinkedHashMap<SlotID, FrameSlot>,
allocated: HashMap<SlotID, FrameSlot>,
}
impl Frame {
pub fn new() -> Frame {
Frame {
cur_slot_id: 0,
cur_offset: -POINTER_SIZE * 2, // reserve for old RBP and func ID
allocated: LinkedHashMap::new()
cur_offset: - (POINTER_SIZE as isize * 1), // reserve for old RBP
allocated: HashMap::new()
}
}
pub fn alloc_slot_for_callee_saved_reg(&mut self, reg: P<Value>, vm: &VM) -> P<Value> {
let slot = {
let ret = FrameSlot {
id: cur_slot_id,
offset: cur_offset,
value: reg.clone()
};
cur_slot_id += 1;
offset -= vm.get_type_size(reg.id());
ret
};
slot.make_memory_op(vm)
let slot = self.alloc_slot(&reg, vm);
slot.make_memory_op(reg.ty.clone(), vm)
}
pub fn alloc_slot_for_spilling(&mut self, reg: P<Value>, vm: &VM) -> P<Value> {
let slot = self.alloc_slot(&reg, vm);
slot.make_memory_op(reg.ty.clone(), vm)
}