// inst_sel.rs
use ast::ir::*;
use ast::ptr::*;
use ast::inst::Instruction;
use ast::inst::Destination;
use ast::inst::DestArg;
use ast::inst::Instruction_;
use ast::inst::MemoryOrder;
use ast::op;
use ast::types;
use ast::types::*;
use vm::VM;
use vm::CompiledFunction;

use compiler::CompilerPass;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;

use std::collections::HashMap;

pub struct InstructionSelection {
    name: &'static str,
    backend: Box<CodeGenerator>
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new())
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64-bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions breaks, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
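                        // lay out the more probable destination as the fallthrough block
                        // and emit a conditional branch to the less probable one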
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
                        
                        let ops = inst.ops.borrow();
                        
                        self.process_dest(&ops, fallthrough_dest, cur_func, vm);
                        self.process_dest(&ops, branch_dest, cur_func, vm);
                        
                        let branch_target = cur_func.content.as_ref().unwrap().get_block(branch_dest.target);
                        
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_res-branch2");
                            match self.emit_cmp_res(cond, cur_func, vm) {
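                                // unsigned compares map to the carry-flag jumps (ja/jb
                                // family); signed compares to the sign/overflow-flag
                                // jumps (jg/jl family)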
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
                                
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, cur_func, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);
                        } else {
                            unimplemented!();
                        }
                    },
                    
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.borrow();
                        
                        self.process_dest(&ops, dest, cur_func, vm);
                        
                        let target = cur_func.content.as_ref().unwrap().get_block(dest.target);
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        trace!("deal with pre-call convention");
                        
                        let ops = inst.ops.borrow();
                        
                        let rets = inst.value.as_ref().unwrap();
                        let ref func = ops[data.func];
                        let ref func_sig = match func.v {
                            TreeNode_::Value(ref pv) => {
                                let ty : &MuType = &pv.ty;
                                match ty.v {
                                    MuType_::FuncRef(ref sig)
                                    | MuType_::UFuncPtr(ref sig) => sig,
                                    _ => panic!("expected funcref/ptr type")
                                }
                            },
                            _ => panic!("expected funcref/ptr type")
                        };
                        
                        debug_assert!(func_sig.arg_tys.len() == data.args.len());
                        debug_assert!(func_sig.ret_tys.len() == rets.len());
                                                
                        let mut gpr_arg_count = 0;
                        // TODO: let mut fpr_arg_count = 0;
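                        // ARGUMENT_GPRs is assumed to hold the System V AMD64 integer
                        // argument registers (rdi, rsi, rdx, rcx, r8, r9) in order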
                        for arg_index in data.args.iter() {
                            let ref arg = ops[*arg_index];
                            trace!("arg {}", arg);
                            
                            if self.match_ireg(arg) {
                                let arg = self.emit_ireg(arg, cur_func, vm);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else if self.match_iimm(arg) {
                                let arg = self.emit_get_iimm(arg);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else {
                                unimplemented!();
                            }
                        }
                        
                        // check direct call or indirect call
                        if self.match_funcref_const(func) {
                            let target_id = self.emit_get_funcref_const(func);
                            let funcs = vm.funcs().read().unwrap();
                            let target = funcs.get(&target_id).unwrap().read().unwrap();
                            
                            if vm.is_running() {
                                unimplemented!()
                            } else {
                                self.backend.emit_call_near_rel32(target.name().unwrap());
                            }
                        } else if self.match_ireg(func) {
                            let target = self.emit_ireg(func, cur_func, vm);
                            
                            self.backend.emit_call_near_r64(&target);
                        } else if self.match_mem(func) {
                            let target = self.emit_mem(func);
                            
                            self.backend.emit_call_near_mem64(&target);
                        } else {
                            unimplemented!();
                        }
                        
                        // deal with ret vals
                        let mut gpr_ret_count = 0;
                        // TODO: let mut fpr_ret_count = 0;
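                        // RETURN_GPRs is assumed to hold the System V AMD64 integer
                        // return registers (rax, then rdx)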
                        for val in rets {
                            if val.is_int_reg() {
                                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                                    gpr_ret_count += 1;
                                } else {
                                    // get return value from the stack
                                    unimplemented!();
                                }
                            } else {
                                // floating point register
                                unimplemented!();
                            }
                        }
                    },
                    
                    Instruction_::Return(_) => {
                        self.emit_common_epilogue(inst, cur_func, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.borrow();
                        
                        match op {
                            op::BinOp::Add => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], cur_func, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], cur_func, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Mul => {
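                                // one-operand MUL on x86-64 multiplies RAX by the operand
                                // and leaves the 128-bit product in RDX:RAX; only the low
                                // 64 bits (RAX) are kept as the result here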
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, cur_func, vm);
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.emit_get_iimm(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, cur_func, vm);
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.emit_get_iimm(op2);
                                    
                                    // put imm in a temporary
                                    // here we use the result reg as the temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
                            },
                            
                            _ => unimplemented!()
                        }
                    }
                    
                    // a load on x64 generates a mov instruction, no matter what memory
                    // order is specified (x86-64 loads already carry acquire semantics)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.borrow();
                        let ref loc_op = ops[mem_loc];
                        
                        // check memory order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didn't expect order {:?} with a load inst", order)
                        }
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov (GPR)
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
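                        // per the same mapping table as for loads: Relaxed and Release
                        // stores compile to a plain mov, while a SeqCst store needs extra
                        // fencing, hence the generate_plain_mov flag below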
                        let ops = inst.ops.borrow();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didnt expect order {:?} with store inst", order)
                            }
                        };
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, cur_func, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.emit_get_iimm(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
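                // nothing to emit for a bare value node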
            }
        }
    }
    
    #[allow(unused_variables)]
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, cur_func: &MuFunctionVersion, vm: &VM) {
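        // move each normal destination arg into the corresponding parameter of
        // the target block (Mu uses block arguments in place of phi nodes)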
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref arg = ops[op_index];
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = cur_func.content.as_ref().unwrap().get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
                    self.emit_general_move(&arg, target_arg, cur_func, vm);
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
    
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
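        // standard rbp-based frame: push rbp, copy rsp into rbp, save the
        // callee-saved registers, then move incoming arguments out of the
        // argument registers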
        let block_name = "prologue";
        self.backend.start_block(block_name);
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name, &vec![]);
        self.backend.set_block_liveout(block_name, args);
        
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
        
        // push all callee-saved registers
        for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            // not pushing rbp (as we have already done that)
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_push_r64(&reg);
            }
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
        // TODO: let mut fpr_arg_count = 0;
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    unimplemented!();
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                panic!("expect an arg value to be either int reg or fp reg");
            }
        }
        
        self.backend.end_block(block_name);
    }
    
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, cur_func: &MuFunctionVersion, vm: &VM) {
        // the epilogue is not a block (it is a few instructions inserted before a return)
        // FIXME: this may change in the future
        
        // prepare return regs
        let ref ops = ret_inst.ops.borrow();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
        // TODO: let mut fpr_ret_count = 0;
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
                let reg_ret_val = self.emit_ireg(ret_val, cur_func, vm);
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.emit_get_iimm(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
        }
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
        }
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
    }
    
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::CmpOp(_, _, _) => true,
                    _ => false
                }
            }
            TreeNode_::Value(_) => false
        }
    }
    
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.borrow();                
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {                        
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                let reg_op1 = self.emit_ireg(op1, cur_func, vm);
                                let reg_op2 = self.emit_ireg(op2, cur_func, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                let reg_op1 = self.emit_ireg(op1, cur_func, vm);
                                let iimm_op2 = self.emit_get_iimm(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            unimplemented!()
                        }
                        
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }    
    
    fn match_ireg(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    types::is_scalar(&value.ty)
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
    
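    // materialize an operand in an integer register: a nested instruction is
    // selected recursively and its result temporary is returned, while an SSA
    // variable is returned directly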
    fn emit_ireg(&mut self, op: &P<TreeNode>, cur_func: &MuFunctionVersion, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(_) => {
                self.instruction_select(op, cur_func, vm);
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
                    Value_::SSAVar(_) => {
                        pv.clone()
                    }
                }
            }
        }
    }
    
    #[allow(unused_variables)]
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
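    // a constant qualifies as an immediate operand only if it fits in 32 bits;
    // x86-64 ALU instructions sign-extend imm32 operands to 64 bits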
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
            _ => false
        }
    }
    
    fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> u32 {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::Int(val)) => {
                        val as u32
                    },
                    _ => panic!("expected iimm")
                }
            },
            _ => panic!("expected iimm")
        }
    }
    
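    // resolve an operand to a memory location: an SSA variable becomes a plain
    // base-register address, while a global (when not running) becomes a
    // rip-relative symbolic address left for the assembler/linker to resolve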
    fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => P(Value{
                        hdr: MuEntityHeader::unnamed(vm.next_id()),
                        ty: types::get_referent_ty(&pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(_) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic
                            P(Value{
                                hdr: MuEntityHeader::unnamed(vm.next_id()),
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: pv.name().unwrap()
                                })
                            })
                        }
                    },
                    Value_::Memory(_) => pv.clone(),
                    Value_::Constant(_) => unimplemented!()
                }
            }
            TreeNode_::Instruction(_) => unimplemented!()
        }
    }
    
    fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(_)) => true,
                    Value_::Constant(Constant::UFuncRef(_)) => true,
                    _ => false
                }
            },
            _ => false 
        }
    }
    
    fn emit_get_funcref_const(&mut self, op: &P<TreeNode>) -> MuID {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(id))
                    | Value_::Constant(Constant::UFuncRef(id)) => id,
                    _ => panic!("expected a (u)funcref const")
                }
            },
            _ => panic!("expected a (u)funcref const")
        }
    }
    
    #[allow(unused_variables)]
    fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    #[allow(unused_variables)]
    fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
        unimplemented!()
    }
    
    fn emit_get_result(&mut self, node: &P<TreeNode>) -> P<Value> {
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        panic!("expected ONE result from the node {}", node);
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    value.clone()
                } else {
                    panic!("expected result from the node {}", node);
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
        }
    }
    
    fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, cur_func: &MuFunctionVersion, vm: &VM) {
        let ref dst_ty = dest.ty;
        
        if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            if self.match_ireg(src) {
                let src_reg = self.emit_ireg(src, cur_func, vm);
                self.backend.emit_mov_r64_r64(dest, &src_reg);
            } else if self.match_iimm(src) {
                let src_imm = self.emit_get_iimm(src);
                self.backend.emit_mov_r64_imm32(dest, src_imm);
            } else {
                panic!("expected an int type op");
            }
        } else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            // scalar fp move
            unimplemented!()
        } else {
            panic!("unexpected type for move");
        }
    }
}

impl CompilerPass for InstructionSelection {
    fn name(&self) -> &'static str {
        self.name
    }

    #[allow(unused_variables)]
    fn start_function(&mut self, vm: &VM, func_ver: &mut MuFunctionVersion) {
        debug!("{}", self.name());
        
        let funcs = vm.funcs().read().unwrap();
        let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
        self.backend.start_code(func.name().unwrap());
        
        // prologue (get arguments from entry block first)
        let entry_block = func_ver.content.as_ref().unwrap().get_entry_block();
        let ref args = entry_block.content.as_ref().unwrap().args;
        self.emit_common_prologue(args);
    }

    #[allow(unused_variables)]
    fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        for block_id in func.block_trace.as_ref().unwrap() {
            let block = func.content.as_ref().unwrap().get_block(*block_id);
            let block_label = block.name().unwrap();
            
            self.backend.start_block(block_label);

            let block_content = block.content.as_ref().unwrap();
            
            // live-in is the args of the block
            self.backend.set_block_livein(block_label, &block_content.args);
            
            // live-out is the union of all branch args of this block
            let live_out = block_content.get_out_arguments();
            self.backend.set_block_liveout(block_label, &live_out);

            for inst in block_content.body.iter() {
                self.instruction_select(inst, func, vm);
            }
            
            self.backend.end_block(block_label);
        }
    }
    
    #[allow(unused_variables)]
    fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        self.backend.print_cur_code();
        
        let mc = self.backend.finish_code();
        let compiled_func = CompiledFunction {
            func_id: func.func_id,
            func_ver_id: func.id(),
            temps: HashMap::new(),
            mc: mc
        };
        
        vm.add_compiled_func(compiled_func);
    }
}