use ast::ptr::P;
use ast::ir::*;

use runtime::ValueLocation;

use compiler::machine_code::MachineCode;
use compiler::backend::x86_64::ASMCodeGen;

pub trait CodeGenerator {
qinsoon's avatar
qinsoon committed
9
    fn start_code(&mut self, func_name: MuName) -> ValueLocation;
10
    fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode + Sync + Send>, ValueLocation);
11 12 13 14

    // generate unnamed sequence of linear code (no branch)
    fn start_code_sequence(&mut self);
    fn finish_code_sequence(&mut self) -> Box<MachineCode + Sync + Send>;
15 16 17
    
    fn print_cur_code(&self);
    
qinsoon's avatar
qinsoon committed
18
    fn start_block(&mut self, block_name: MuName);
qinsoon's avatar
qinsoon committed
19
    fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
qinsoon's avatar
qinsoon committed
20 21 22
    fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>);
    fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>);
    fn end_block(&mut self, block_name: MuName);
23
    
24 25
    fn emit_nop(&mut self, bytes: usize);
    
26
    fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
27
    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
28 29
    fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
    
30
    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32);
31
    fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
qinsoon's avatar
qinsoon committed
32
    fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
33
    fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>); // store
34
    fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);
qinsoon's avatar
qinsoon committed
35
    
36 37
    fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    
38
    fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
39 40
    fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    
qinsoon's avatar
qinsoon committed
41 42
    fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
43
    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);
qinsoon's avatar
qinsoon committed
44 45 46
    
    fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
47
    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32);
qinsoon's avatar
qinsoon committed
48
    
49 50 51
    fn emit_mul_r64(&mut self, src: &P<Value>);
    fn emit_mul_mem64(&mut self, src: &P<Value>);
    
52 53 54 55 56 57 58 59 60 61 62
    fn emit_jmp(&mut self, dest: MuName);
    fn emit_je(&mut self, dest: MuName);
    fn emit_jne(&mut self, dest: MuName);
    fn emit_ja(&mut self, dest: MuName);
    fn emit_jae(&mut self, dest: MuName);
    fn emit_jb(&mut self, dest: MuName);
    fn emit_jbe(&mut self, dest: MuName);
    fn emit_jg(&mut self, dest: MuName);
    fn emit_jge(&mut self, dest: MuName);
    fn emit_jl(&mut self, dest: MuName);
    fn emit_jle(&mut self, dest: MuName);
qinsoon's avatar
qinsoon committed
63
    
qinsoon's avatar
qinsoon committed
64 65 66
    fn emit_call_near_rel32(&mut self, callsite: String, func: MuName) -> ValueLocation;
    fn emit_call_near_r64(&mut self, callsite: String, func: &P<Value>) -> ValueLocation;
    fn emit_call_near_mem64(&mut self, callsite: String, func: &P<Value>) -> ValueLocation;
67
    
qinsoon's avatar
qinsoon committed
68
    fn emit_ret(&mut self);
69
    
70
    fn emit_push_r64(&mut self, src: &P<Value>);
71
    fn emit_push_imm32(&mut self, src: i32);
72
    fn emit_pop_r64(&mut self, dest: &P<Value>);
73
}

use std::collections::HashMap;
use compiler::machine_code::CompiledFunction;
use vm::VM;

/// Rewrites spilled registers in the machine code of `cf`.
///
/// For every instruction: each *use* of a spilled register gets a load from
/// its spill slot into a fresh temporary inserted before the instruction, and
/// each *define* gets a store back to the slot inserted after it; the
/// instruction itself is patched to use the temporary instead of the original
/// register.
///
/// `spills` maps a spilled register's `MuID` to the memory operand of its
/// spill slot. Fresh temporaries are created through `func`/`vm`.
///
/// NOTE(review): the final copy/insertion of the recorded code sequences is
/// still `unimplemented!()`, so this function currently panics at the end.
#[cfg(feature = "aot")]
pub fn spill_rewrite(
    spills: &HashMap<MuID, P<Value>>,
    func: &mut MuFunctionVersion,
    cf: &mut CompiledFunction,
    vm: &VM)
{
    // record code and their insertion point, so we can do the copy/insertion all at once
    let mut spill_code_before: HashMap<usize, Vec<Box<MachineCode>>> = HashMap::new();
    let mut spill_code_after: HashMap<usize, Vec<Box<MachineCode>>> = HashMap::new();

    // iterate through all instructions
    for i in 0..cf.mc().number_of_insts() {
        // find use of any register that gets spilled
        {
            let reg_uses = cf.mc().get_inst_reg_uses(i).to_vec();
            for reg in reg_uses {
                // a register used here is spilled
                // (single lookup instead of contains_key + get().unwrap())
                if let Some(spill_mem) = spills.get(&reg) {
                    // generate a fresh temporary with the spilled value's type
                    let temp_ty = func.context.get_value(reg).unwrap().ty().clone();
                    let temp = func.new_ssa(vm.next_id(), temp_ty).clone_value();

                    // generate a load: temp <- [spill_mem]
                    let code = {
                        let mut codegen = ASMCodeGen::new();
                        codegen.start_code_sequence();
                        codegen.emit_mov_r64_mem64(&temp, spill_mem);

                        codegen.finish_code_sequence()
                    };
                    // record that this load will be inserted at i
                    // (entry API avoids the double hash lookup)
                    spill_code_before.entry(i).or_insert_with(Vec::new).push(code);

                    // replace register reg with temp
                    cf.mc_mut().replace_reg_for_inst(reg, temp.id(), i);
                }
            }
        }

        // find define of any register that gets spilled
        {
            let reg_defines = cf.mc().get_inst_reg_defines(i).to_vec();
            for reg in reg_defines {
                if let Some(spill_mem) = spills.get(&reg) {
                    let temp_ty = func.context.get_value(reg).unwrap().ty().clone();
                    let temp = func.new_ssa(vm.next_id(), temp_ty).clone_value();

                    // generate a store: [spill_mem] <- temp
                    let code = {
                        let mut codegen = ASMCodeGen::new();
                        codegen.start_code_sequence();
                        codegen.emit_mov_mem64_r64(spill_mem, &temp);

                        codegen.finish_code_sequence()
                    };

                    // record that this store will be inserted after i
                    spill_code_after.entry(i).or_insert_with(Vec::new).push(code);

                    cf.mc_mut().replace_reg_for_inst(reg, temp.id(), i);
                }
            }
        }
    }

    // copy and insert the code
    unimplemented!()
}