inst_sel.rs 36.3 KB
Newer Older
1
use ast::ir::*;
2
use ast::ptr::*;
qinsoon's avatar
qinsoon committed
3
use ast::inst::Instruction;
4 5
use ast::inst::Destination;
use ast::inst::DestArg;
qinsoon's avatar
qinsoon committed
6
use ast::inst::Instruction_;
7
use ast::inst::MemoryOrder;
8
use ast::op;
qinsoon's avatar
qinsoon committed
9
use ast::types;
qinsoon's avatar
qinsoon committed
10
use ast::types::*;
qinsoon's avatar
qinsoon committed
11
use vm::VM;
qinsoon's avatar
qinsoon committed
12
use vm::CompiledFunction;
13 14

use compiler::CompilerPass;
qinsoon's avatar
qinsoon committed
15 16 17
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
18

19 20
use std::collections::HashMap;

21
// Instruction selection pass for x86_64: walks the IR tree and drives a
// CodeGenerator backend to emit machine instructions.
pub struct InstructionSelection {
    // human-readable pass name, reported through the CompilerPass framework
    name: &'static str,
    
    // the code generator that receives the selected instructions
    // (trait object; constructed with ASMCodeGen in `new()`)
    backend: Box<CodeGenerator>
}

27 28 29 30 31 32 33 34 35 36 37 38 39 40
impl <'a> InstructionSelection {
    /// Creates the x86_64 instruction selection pass, backed by the
    /// assembly code generator.
    pub fn new() -> InstructionSelection {
        let backend = Box::new(ASMCodeGen::new());
        InstructionSelection {
            backend: backend,
            name: "Instruction Selection (x64)"
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumption breaks, we will need to re-emit the code
    #[allow(unused_variables)]
qinsoon's avatar
qinsoon committed
41
    fn instruction_select(&mut self, node: &'a P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) {
qinsoon's avatar
qinsoon committed
42 43 44
        trace!("instsel on node {}", node);
        
        match node.v {
45 46
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
qinsoon's avatar
qinsoon committed
47 48 49
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
50 51 52 53 54
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
55
                            }
56
                        };
57
                        
qinsoon's avatar
qinsoon committed
58
                        let ops = inst.ops.borrow();
59
                        
60 61
                        self.process_dest(&ops, fallthrough_dest, cur_func, vm);
                        self.process_dest(&ops, branch_dest, cur_func, vm);
qinsoon's avatar
qinsoon committed
62 63
                        
                        let branch_target = cur_func.content.as_ref().unwrap().get_block(branch_dest.target);
64 65 66
    
                        let ref cond = ops[cond];
                        
qinsoon's avatar
qinsoon committed
67 68
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_eq-branch2");
69
                            match self.emit_cmp_res(cond, cur_func, vm) {
qinsoon's avatar
qinsoon committed
70 71 72 73 74 75 76 77 78 79
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
qinsoon's avatar
qinsoon committed
80 81 82 83
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
84
                            
85
                            let cond_reg = self.emit_ireg(cond, cur_func, vm);
86
                            
qinsoon's avatar
qinsoon committed
87 88 89
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
qinsoon's avatar
qinsoon committed
90
                            self.backend.emit_je(branch_target);                            
qinsoon's avatar
qinsoon committed
91 92
                        } else {
                            unimplemented!();
93
                        }
94 95
                    },
                    
qinsoon's avatar
qinsoon committed
96 97
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.borrow();
98
                                            
99
                        self.process_dest(&ops, dest, cur_func, vm);
100
                        
qinsoon's avatar
qinsoon committed
101 102
                        let target = cur_func.content.as_ref().unwrap().get_block(dest.target);
                        
qinsoon's avatar
qinsoon committed
103
                        trace!("emit branch1");
104
                        // jmp
qinsoon's avatar
qinsoon committed
105
                        self.backend.emit_jmp(target);
106 107
                    },
                    
qinsoon's avatar
qinsoon committed
108 109
                    Instruction_::ExprCall{ref data, is_abort} => {
                        trace!("deal with pre-call convention");
110
                        
qinsoon's avatar
qinsoon committed
111
                        let ops = inst.ops.borrow();
112 113 114 115
                        let rets = inst.value.as_ref().unwrap();
                        let ref func = ops[data.func];
                        let ref func_sig = match func.v {
                            TreeNode_::Value(ref pv) => {
qinsoon's avatar
qinsoon committed
116 117 118 119
                                let ty : &MuType = &pv.ty;
                                match ty.v {
                                    MuType_::FuncRef(ref sig)
                                    | MuType_::UFuncPtr(ref sig) => sig,
120 121 122 123 124 125 126 127 128
                                    _ => panic!("expected funcref/ptr type")
                                }
                            },
                            _ => panic!("expected funcref/ptr type")
                        };
                        
                        debug_assert!(func_sig.ret_tys.len() == data.args.len());
                        debug_assert!(func_sig.arg_tys.len() == rets.len());
                                                
qinsoon's avatar
qinsoon committed
129
                        let mut gpr_arg_count = 0;
130
                        // TODO: let mut fpr_arg_count = 0;
131 132 133
                        for arg_index in data.args.iter() {
                            let ref arg = ops[*arg_index];
                            trace!("arg {}", arg);
qinsoon's avatar
qinsoon committed
134 135
                            
                            if self.match_ireg(arg) {
136
                                let arg = self.emit_ireg(arg, cur_func, vm);
qinsoon's avatar
qinsoon committed
137
                                
138 139 140 141 142 143 144
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
qinsoon's avatar
qinsoon committed
145 146 147
                            } else if self.match_iimm(arg) {
                                let arg = self.emit_get_iimm(arg);
                                
148 149 150 151 152 153 154
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
qinsoon's avatar
qinsoon committed
155 156
                            } else {
                                unimplemented!();
157
                            }
158 159
                        }
                        
160 161 162 163 164 165
                        // check direct call or indirect
                        if self.match_funcref_const(func) {
                            let target = self.emit_get_funcref_const(func);
                            
                            self.backend.emit_call_near_rel32(target);
                        } else if self.match_ireg(func) {
166
                            let target = self.emit_ireg(func, cur_func, vm);
167 168 169 170 171 172 173 174 175
                            
                            self.backend.emit_call_near_r64(&target);
                        } else if self.match_mem(func) {
                            let target = self.emit_mem(func);
                            
                            self.backend.emit_call_near_mem64(&target);
                        } else {
                            unimplemented!();
                        }
176
                        
qinsoon's avatar
qinsoon committed
177
                        // deal with ret vals
178
                        let mut gpr_ret_count = 0;
179
                        // TODO: let mut fpr_ret_count = 0;
180 181 182 183 184 185 186 187
                        for val in rets {
                            if val.is_int_reg() {
                                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                                    gpr_ret_count += 1;
                                } else {
                                    // get return value by stack
                                    unimplemented!();
188
                                }
189 190 191
                            } else {
                                // floating point register
                                unimplemented!();
192
                            }
193
                        }
194 195 196
                    },
                    
                    Instruction_::Return(_) => {
197
                        self.emit_common_epilogue(inst, cur_func, vm);
198
                        
qinsoon's avatar
qinsoon committed
199
                        self.backend.emit_ret();
200 201
                    },
                    
qinsoon's avatar
qinsoon committed
202 203 204
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.borrow();
                        
205 206
                        match op {
                            op::BinOp::Add => {
qinsoon's avatar
qinsoon committed
207 208 209
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
210 211
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
qinsoon's avatar
qinsoon committed
212 213 214 215 216 217 218 219 220
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
221
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
qinsoon's avatar
qinsoon committed
222 223 224 225 226 227 228 229 230 231 232 233 234
                                    let reg_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
235
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
qinsoon's avatar
qinsoon committed
236 237 238 239 240 241 242 243 244 245 246 247 248
                                    let reg_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
249 250
                            },
                            op::BinOp::Sub => {
qinsoon's avatar
qinsoon committed
251 252 253
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
254 255
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
qinsoon's avatar
qinsoon committed
256 257 258 259 260 261 262 263 264
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

265
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
266
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
qinsoon's avatar
qinsoon committed
267 268 269 270 271
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
272
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
qinsoon's avatar
qinsoon committed
273 274 275 276 277 278
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
279
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
280
                                    let mem_op2 = self.emit_mem(&ops[op2]);
qinsoon's avatar
qinsoon committed
281 282 283 284 285
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
286
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
qinsoon's avatar
qinsoon committed
287 288 289 290 291 292
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
293 294
                            },
                            op::BinOp::Mul => {
295 296 297 298
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
299
                                    let reg_op1 = self.emit_ireg(op1, cur_func, vm);
300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.emit_get_iimm(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
317
                                    let reg_op2 = self.emit_ireg(op2, cur_func, vm);
318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.emit_get_iimm(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
340 341 342
                            },
                            
                            _ => unimplemented!()
343 344
                        }
                    }
345
                    
346 347
                    // load on x64 generates mov inst (no matter what order is specified)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
348 349 350
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.borrow();
                        let ref loc_op = ops[mem_loc];
351 352 353 354 355 356 357 358 359
                        
                        // check order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didnt expect order {:?} with store inst", order)
                        }                        
360 361 362 363 364 365

                        let resolved_loc = self.emit_get_mem(loc_op, vm);                        
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov(GPR)
366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
                        let ops = inst.ops.borrow();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didnt expect order {:?} with store inst", order)
                            }
                        };
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, cur_func, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.emit_get_iimm(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
402 403 404 405 406
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
407 408 409 410 411 412
    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
qinsoon's avatar
qinsoon committed
413

414 415 416 417 418
            }
        }
    }
    
    #[allow(unused_variables)]
qinsoon's avatar
qinsoon committed
419
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, cur_func: &MuFunctionVersion, vm: &VM) {
420 421
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
422 423
            match dest_arg {
                &DestArg::Normal(op_index) => {
qinsoon's avatar
qinsoon committed
424
                    let ref arg = ops[op_index];
425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = cur_func.content.as_ref().unwrap().get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
444
                    self.emit_general_move(&arg, target_arg, cur_func, vm);
445 446 447 448
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
qinsoon's avatar
qinsoon committed
449 450
    }
    
451
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
452 453 454 455 456 457 458
        let block_name = "prologue";
        self.backend.start_block(block_name);
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name, &vec![]);
        self.backend.set_block_liveout(block_name, args);
459
        
460 461 462
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
qinsoon's avatar
qinsoon committed
463
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
464
        
465
        // push all callee-saved registers
466 467 468 469 470 471
        for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            // not pushing rbp (as we have done taht)
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_push_r64(&reg);
            }
472 473 474 475
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
476
        // TODO: let mut fpr_arg_count = 0;
477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    unimplemented!();
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                panic!("expect an arg value to be either int reg or fp reg");
            }
        }
492 493
        
        self.backend.end_block(block_name);
494 495
    }
    
qinsoon's avatar
qinsoon committed
496
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, cur_func: &MuFunctionVersion, vm: &VM) {
497 498
        // epilogue is not a block (its a few instruction inserted before return)
        // FIXME: this may change in the future
499
        
500
        // prepare return regs
501 502 503 504 505 506 507
        let ref ops = ret_inst.ops.borrow();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
508
        // TODO: let mut fpr_ret_count = 0;
509 510 511
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
512
                let reg_ret_val = self.emit_ireg(ret_val, cur_func, vm);
513 514 515 516 517 518 519 520 521 522 523
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.emit_get_iimm(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
524 525 526 527 528 529 530 531
        }        
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
532
        }
533 534 535
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
536 537
    }
    
qinsoon's avatar
qinsoon committed
538 539 540 541 542 543 544 545 546 547 548 549
    /// Returns true when the node is a CmpOp instruction, i.e. a comparison
    /// whose result can be fused into a following conditional branch.
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        if let TreeNode_::Instruction(ref inst) = op.v {
            match inst.v {
                Instruction_::CmpOp(_, _, _) => true,
                _ => false
            }
        } else {
            false
        }
    }
    
qinsoon's avatar
qinsoon committed
550
    /// Emits a CMP instruction for the given CmpOp node and returns the
    /// comparison operator, so the caller can pick the matching
    /// conditional jump/set. Panics if the node is not a CmpOp.
    /// Only integer comparisons are implemented.
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.borrow();                
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {                        
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                // reg-reg compare: select both operand
                                // sub-trees first, then emit cmp r64, r64
                                let reg_op1 = self.emit_ireg(op1, cur_func, vm);
                                let reg_op2 = self.emit_ireg(op2, cur_func, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                // reg-imm compare: emit cmp r64, imm32
                                let reg_op1 = self.emit_ireg(op1, cur_func, vm);
                                let iimm_op2 = self.emit_get_iimm(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                // e.g. imm-reg / imm-imm operand shapes
                                unimplemented!()
                            }
                        } else {
                            // FP comparisons not yet implemented
                            unimplemented!()
                        }
                        
                        // return the operator so the caller can emit the
                        // matching conditional jump
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }    
    
    fn match_ireg(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    if types::is_scalar(&value.ty) {
                        true
                    } else {
                        false
                    }
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
    
qinsoon's avatar
qinsoon committed
614
    fn emit_ireg(&mut self, op: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) -> P<Value> {
qinsoon's avatar
qinsoon committed
615 616
        match op.v {
            TreeNode_::Instruction(_) => {
617
                self.instruction_select(op, cur_func, vm);
qinsoon's avatar
qinsoon committed
618 619 620 621 622
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
623 624 625
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
qinsoon's avatar
qinsoon committed
626 627
                    Value_::SSAVar(_) => {
                        pv.clone()
qinsoon's avatar
qinsoon committed
628
                    },
qinsoon's avatar
qinsoon committed
629 630 631 632 633
                }
            }
        }
    }
    
634
    #[allow(unused_variables)]
    // returns whether the node can be used as a floating-point register
    // operand; FP instruction selection is not implemented yet
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
qinsoon's avatar
qinsoon committed
639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
    /// Returns true when the node is an integer constant that fits in an
    /// x86 immediate operand.
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        if let TreeNode_::Value(ref pv) = op.v {
            x86_64::is_valid_x86_imm(pv)
        } else {
            false
        }
    }
    
    /// Extracts the concrete value of an integer-immediate node as u32;
    /// panics when the node is not an integer constant.
    fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> u32 {
        if let TreeNode_::Value(ref pv) = op.v {
            if let Value_::Constant(Constant::Int(val)) = pv.v {
                return val as u32;
            }
        }
        panic!("expected iimm")
    }
    
qinsoon's avatar
qinsoon committed
660
    /// Builds a memory-operand Value for the given node:
    /// - an SSA variable becomes a base-addressed location (the variable
    ///   is assumed to hold an address; its referent type is the operand type)
    /// - a global becomes a RIP-relative symbolic location (when not
    ///   running; resolving the address via the VM at runtime is TODO)
    /// - an existing memory value is passed through unchanged
    fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    // [pv + 0]: plain dereference of the variable's address
                    Value_::SSAVar(_) => P(Value{
                        id: vm.next_id(),
                        name: None,
                        ty: types::get_referent_ty(& pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(ref glob) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic: label(%rip), resolved at link time
                            P(Value{
                                id: vm.next_id(),
                                name: None,
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: glob.tag
                                })
                            })
                        }
                    },
                    // already a memory operand - use directly
                    Value_::Memory(_) => pv.clone(),
                    Value_::Constant(_) => unimplemented!()
                }
            }
            TreeNode_::Instruction(_) => unimplemented!()
        }
    }
    
700 701 702 703 704 705 706 707 708 709 710 711 712
    /// Returns true when the node is a constant function reference
    /// (either FuncRef or UFuncRef).
    fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
        if let TreeNode_::Value(ref pv) = op.v {
            match pv.v {
                Value_::Constant(Constant::FuncRef(_))
                | Value_::Constant(Constant::UFuncRef(_)) => true,
                _ => false
            }
        } else {
            false
        }
    }
    
qinsoon's avatar
qinsoon committed
713
    fn emit_get_funcref_const(&mut self, op: &P<TreeNode>) -> MuName {
714 715 716 717 718 719 720 721 722 723 724 725
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(tag))
                    | Value_::Constant(Constant::UFuncRef(tag)) => tag,
                    _ => panic!("expected a (u)funcref const")
                }
            },
            _ => panic!("expected a (u)funcref const")
        }
    }
    
726
    #[allow(unused_variables)]
    // returns whether the node can be used directly as a memory operand;
    // not implemented yet
    fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
731
    #[allow(unused_variables)]
    // produces a memory-operand Value for the node (counterpart of
    // match_mem); not implemented yet
    fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
        unimplemented!()
    }
    
qinsoon's avatar
qinsoon committed
736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755
    fn emit_get_result(&mut self, node: &P<TreeNode>) -> P<Value> {
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        panic!("expected ONE result from the node {}", node);
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    value.clone()
                } else {
                    panic!("expected result from the node {}", node);
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
        }
756 757
    }
    
qinsoon's avatar
qinsoon committed
758
    fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, cur_func: &MuFunctionVersion, vm: &VM) {
759 760 761 762
        let ref dst_ty = dest.ty;
        
        if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            if self.match_ireg(src) {
763
                let src_reg = self.emit_ireg(src, cur_func, vm);
764 765 766 767 768 769 770 771 772 773 774 775 776
                self.backend.emit_mov_r64_r64(dest, &src_reg);
            } else if self.match_iimm(src) {
                let src_imm = self.emit_get_iimm(src);
                self.backend.emit_mov_r64_imm32(dest, src_imm);
            } else {
                panic!("expected an int type op");
            }
        } else if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            unimplemented!()
        } else {
            panic!("unexpected type for move");
        } 
    }
777
}
778

779 780 781
// Drives instruction selection as a compiler pass: start_function opens the
// code buffer and emits the prologue, visit_function selects instructions
// block by block in trace order, and finish_function hands the machine code
// to the VM as a CompiledFunction.
impl CompilerPass for InstructionSelection {
    fn name(&self) -> &'static str {
        self.name
    }

    #[allow(unused_variables)]
    fn start_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        debug!("{}", self.name());
        
        self.backend.start_code(func.name.unwrap());
        
        // prologue (get arguments from entry block first)        
        let entry_block = func.content.as_ref().unwrap().get_entry_block();
        let ref args = entry_block.content.as_ref().unwrap().args;
        self.emit_common_prologue(args);
    }

    #[allow(unused_variables)]
    fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        // iterate blocks in trace order
        // (assumes block_trace was computed by an earlier pass - unwrap
        // panics otherwise)
        for block_id in func.block_trace.as_ref().unwrap() {
            let block = func.content.as_ref().unwrap().get_block(*block_id);
            let block_label = block.name.unwrap();
            
            self.backend.start_block(block_label);

            let block_content = block.content.as_ref().unwrap();
            
            // live in is args of the block
            self.backend.set_block_livein(block_label, &block_content.args);
            
            // live out is the union of all branch args of this block
            let live_out = block_content.get_out_arguments();
            self.backend.set_block_liveout(block_label, &live_out);

            // select instructions for each IR instruction in the block body
            for inst in block_content.body.iter() {
                self.instruction_select(inst, func, vm);
            }
            
            self.backend.end_block(block_label);
        }
    }
    
    #[allow(unused_variables)]
    fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        // dump the generated code (debugging aid)
        self.backend.print_cur_code();
        
        // finalize and register the machine code with the VM;
        // temps starts empty - register allocation fills it in later
        let mc = self.backend.finish_code();
        let compiled_func = CompiledFunction {
            func_ver_id: func.id,
            temps: HashMap::new(),
            mc: mc
        };
        
        vm.add_compiled_func(compiled_func);
    }
}