use ast::ir::*;
use ast::ptr::*;
use ast::inst::Instruction;
use ast::inst::Destination;
use ast::inst::DestArg;
use ast::inst::Instruction_;
use ast::inst::MemoryOrder;
use ast::op;
use ast::types;
use ast::types::*;
use vm::VM;
use vm::CompiledFunction;

use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint;

use compiler::CompilerPass;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;

use std::collections::HashMap;

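// Instruction selection pass for x86-64. It walks each function's blocks in
// trace order and lowers every Mu IR instruction tree into machine code through
// the CodeGenerator backend (an ASMCodeGen that emits assembly).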
pub struct InstructionSelection {
    name: &'static str,
    
    backend: Box<CodeGenerator>
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new())
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumption breaks, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
                        
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
                        self.process_dest(&ops, branch_dest, f_content, f_context, vm);
                        
                        let branch_target = f_content.get_block(branch_dest.target);
    
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_eq-branch2");
                            match self.emit_cmp_res(cond, f_content, f_context, vm) {
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);                            
                        } else {
                            unimplemented!();
                        }
                    },
                    
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, dest, f_content, f_context, vm);
                        
                        let target = f_content.get_block(dest.target);
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        trace!("deal with pre-call convention");
                        
                        let ops = inst.ops.read().unwrap();
                        let rets = inst.value.as_ref().unwrap();
                        let ref func = ops[data.func];
                        let ref func_sig = match func.v {
                            TreeNode_::Value(ref pv) => {
                                let ty : &MuType = &pv.ty;
                                match ty.v {
                                    MuType_::FuncRef(ref sig)
                                    | MuType_::UFuncPtr(ref sig) => sig,
                                    _ => panic!("expected funcref/ptr type")
                                }
                            },
                            _ => panic!("expected funcref/ptr type")
                        };
                        
                        debug_assert!(func_sig.arg_tys.len() == data.args.len());
                        debug_assert!(func_sig.ret_tys.len() == rets.len());
                                                
                        let mut gpr_arg_count = 0;
                        // TODO: let mut fpr_arg_count = 0;
                        for arg_index in data.args.iter() {
                            let ref arg = ops[*arg_index];
                            trace!("arg {}", arg);
                            
                            if self.match_ireg(arg) {
                                let arg = self.emit_ireg(arg, f_content, f_context, vm);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else if self.match_iimm(arg) {
                                let arg = self.emit_get_iimm(arg);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else {
                                unimplemented!();
                            }
                        }
                        
                        // check direct call or indirect
                        if self.match_funcref_const(func) {
                            let target_id = self.emit_get_funcref_const(func);
                            let funcs = vm.funcs().read().unwrap();
                            let target = funcs.get(&target_id).unwrap().read().unwrap();
                                                        
                            if vm.is_running() {
                                unimplemented!()
                            } else {
                                self.backend.emit_call_near_rel32(target.name().unwrap());
                            }
                        } else if self.match_ireg(func) {
                            let target = self.emit_ireg(func, f_content, f_context, vm);
                            
                            self.backend.emit_call_near_r64(&target);
                        } else if self.match_mem(func) {
                            let target = self.emit_mem(func);
                            
                            self.backend.emit_call_near_mem64(&target);
                        } else {
                            unimplemented!();
                        }
                        
                        // deal with ret vals
                        let mut gpr_ret_count = 0;
                        // TODO: let mut fpr_ret_count = 0;
                        for val in rets {
                            if val.is_int_reg() {
                                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                                    gpr_ret_count += 1;
                                } else {
                                    // get return value by stack
                                    unimplemented!();
                                }
                            } else {
                                // floating point register
                                unimplemented!();
                            }
                        }
                    },
                    
                    Instruction_::Return(_) => {
                        self.emit_common_epilogue(inst, f_content, f_context, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.read().unwrap();
                        
                        match op {
                            op::BinOp::Add => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
                                    
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Mul => {
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.emit_get_iimm(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.emit_get_iimm(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
                            },
                            
                            _ => unimplemented!()
                        }
                    }
                    
                    // load on x64 generates mov inst (no matter what order is specified)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        
                        // check order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didnt expect order {:?} with store inst", order)
                        }                        

                        let resolved_loc = self.emit_get_mem(loc_op, vm);                        
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov(GPR)
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didnt expect order {:?} with store inst", order)
                            }
                        };
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, f_content, f_context, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.emit_get_iimm(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::ThreadExit => {
                        // emit a call to swap_back_to_native_stack(sp_loc: Address)
                        
                        // get thread local and add offset to get sp_loc
                        let tl = self.emit_get_threadlocal(f_content, f_context, vm);
                        self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as u32);
                        
                        self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], f_content, f_context, vm);
                    }
    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
        
            }
        }
    }
    
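    // Returns the current thread-local (MuThread) pointer by calling the
    // GET_THREAD_LOCAL runtime entrypoint and taking its single return value.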
    fn emit_get_threadlocal (&mut self, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        let mut rets = self.emit_runtime_entry(&entrypoints::GET_THREAD_LOCAL, vec![], f_content, f_context, vm);
        
        rets.pop().unwrap()
    }
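    // emit_runtime_entry: calls a runtime entrypoint through the C calling
    // convention. Ahead of time the callee is the entrypoint's relocatable AOT
    // symbol; calling while the VM is running is not implemented yet.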
    
    fn emit_runtime_entry (&mut self, entry: &RuntimeEntrypoint, args: Vec<P<Value>>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> Vec<P<Value>> {
        let sig = entry.sig.clone();
        
        let entry_name = {
            if vm.is_running() {
                unimplemented!()
            } else {
                let ref entry_loc = entry.aot;
                
                match entry_loc {
                    &ValueLocation::Relocatable(_, ref name) => name.clone(),
                    _ => panic!("expecting a relocatable value")
                }
            }
        };
        
        self.emit_c_call(entry_name, sig, args, None, f_content, f_context, vm)
    }
    
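    // emit_c_call: passes integer arguments in ARGUMENT_GPRs (stack arguments are
    // not implemented), emits a near call, and moves integer results out of
    // RETURN_GPRs into the supplied or freshly created temporaries.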
    #[allow(unused_variables)]
    fn emit_c_call (&mut self, func_name: CName, sig: P<CFuncSig>, args: Vec<P<Value>>, rets: Option<Vec<P<Value>>>, 
        f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> Vec<P<Value>> 
    {
        let mut gpr_arg_count = 0;
        for arg in args.iter() {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    unimplemented!()
                }
            } else if arg.is_int_const() {
                if x86_64::is_valid_x86_imm(arg) {                
                    let int_const = arg.extract_int_const() as u32;
                    
                    if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                        self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], int_const);
                        gpr_arg_count += 1;
                    } else {
                        // use stack to pass argument
                        unimplemented!()
                    }
                } else {
                    // put the constant to memory
                    unimplemented!()
                }
            } else {
                // floating point
                unimplemented!()
            }
        }
        
        // make call
        if vm.is_running() {
            unimplemented!()
        } else {
            self.backend.emit_call_near_rel32(func_name);
        }
        
        // deal with ret vals
        let mut return_vals = vec![];
        
        let mut gpr_ret_count = 0;
        for ret_index in 0..sig.ret_tys.len() {
            let ref ty = sig.ret_tys[ret_index];
            
            let ret_val = match rets {
                Some(ref rets) => rets[ret_index].clone(),
                None => {
                    let tmp_node = f_context.make_temporary(vm.next_id(), ty.clone());
                    tmp_node.clone_value()
                }
            };
            
            if ret_val.is_int_reg() {
                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&ret_val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                    gpr_ret_count += 1;
                } else {
                    // get return value by stack
                    unimplemented!()
                }
            } else {
                // floating point register
                unimplemented!()
            }
            
            return_vals.push(ret_val);            
        }
        
        return_vals
    }
    
    #[allow(unused_variables)]
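    // For a branch destination, moves every argument passed to the target block
    // into the corresponding argument of that block.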
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref arg = ops[op_index];
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = f_content.get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
                    self.emit_general_move(&arg, target_arg, f_content, f_context, vm);
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
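    // emit_common_prologue: emits a separate "prologue" block that saves rbp and
    // the callee-saved registers, then moves incoming arguments out of the
    // argument registers (stack-passed arguments are not implemented).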
    
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
        let block_name = "prologue".to_string();
        self.backend.start_block(block_name.clone());
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name.clone(), &vec![]);
        self.backend.set_block_liveout(block_name.clone(), args);
        
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
        
        // push all callee-saved registers
        for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            // not pushing rbp (as we have already done that)
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_push_r64(&reg);
            }
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
        // TODO: let mut fpr_arg_count = 0;
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    unimplemented!();
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                panic!("expect an arg value to be either int reg or fp reg");
            }
        }
        
        self.backend.end_block(block_name);
    }
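    // emit_common_epilogue: emits the instructions preceding a return; return
    // values are moved into RETURN_GPRs, then callee-saved registers and rbp are
    // popped in reverse order.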
    
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        // the epilogue is not a block (it is a few instructions inserted before return)
        // FIXME: this may change in the future
        
        // prepare return regs
        let ref ops = ret_inst.ops.read().unwrap();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
        // TODO: let mut fpr_ret_count = 0;
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
                let reg_ret_val = self.emit_ireg(ret_val, f_content, f_context, vm);
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.emit_get_iimm(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
        }        
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
        }
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
    }
    
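    // True if the operand is a comparison (CmpOp) instruction; Branch2 uses this
    // to emit a cmp followed by the matching conditional jump.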
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::CmpOp(_, _, _) => true,
                    _ => false
                }
            }
            TreeNode_::Value(_) => false
        }
    }
    
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.read().unwrap();
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let iimm_op2 = self.emit_get_iimm(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            unimplemented!()
                        }
                        
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }    
    
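    // True if the operand can live in a general-purpose register: an int SSA
    // value, or an instruction with exactly one scalar result.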
    fn match_ireg(&mut self, op: &TreeNode) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    types::is_scalar(&value.ty)
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
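    // emit_ireg: materialises an integer operand into a register. Nested
    // instructions are selected recursively and their result temporary is
    // returned; SSA variables are used directly.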
    
    fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(_) => {
                self.instruction_select(op, f_content, f_context, vm);
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
                    Value_::SSAVar(_) => {
                        pv.clone()
                    },
                }
            }
        }
    }
    
    #[allow(unused_variables)]
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
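    // An operand matches as an integer immediate only if it is a constant that
    // fits in a 32-bit x86 immediate field.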
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
            _ => false
        }
    }
    
    fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> u32 {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::Int(val)) => {
                        val as u32
                    },
                    _ => panic!("expected iimm")
                }
            },
            _ => panic!("expected iimm")
        }
    }
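    // emit_get_mem: turns an operand into a memory operand. An SSA variable
    // becomes a base-register address, a global becomes a RIP-relative symbolic
    // location (when compiling ahead of time), and an existing memory value is
    // reused as-is.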
    
    fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => P(Value{
                        hdr: MuEntityHeader::unnamed(vm.next_id()),
                        ty: types::get_referent_ty(&pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(_) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic
                            P(Value{
                                hdr: MuEntityHeader::unnamed(vm.next_id()),
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: pv.name().unwrap()
                                })
                            })
                        }
                    },
                    Value_::Memory(_) => pv.clone(),
                    Value_::Constant(_) => unimplemented!()
                }
            }
            TreeNode_::Instruction(_) => unimplemented!()
        }
    }
    
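    // True if the call target is a funcref/ufuncref constant, i.e. a statically
    // known callee that can be emitted as a direct near call.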
    fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(_)) => true,
                    Value_::Constant(Constant::UFuncRef(_)) => true,
                    _ => false
                }
            },
            _ => false 
        }
    }
    
    fn emit_get_funcref_const(&mut self, op: &P<TreeNode>) -> MuID {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(id))
                    | Value_::Constant(Constant::UFuncRef(id)) => id,
                    _ => panic!("expected a (u)funcref const")
                }
            },
            _ => panic!("expected a (u)funcref const")
        }
    }
    
    #[allow(unused_variables)]
    fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    #[allow(unused_variables)]
    fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
        unimplemented!()
    }
    
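    // Returns the single result value of a node; panics if the node produces no
    // result or more than one.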
    fn emit_get_result(&mut self, node: &TreeNode) -> P<Value> {
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        panic!("expected ONE result from the node {}", node);
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    value.clone()
                } else {
                    panic!("expected result from the node {}", node);
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
        }
    }
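    // emit_general_move: moves a source operand (register or immediate) into a
    // destination of scalar non-FP type; FP moves are not implemented yet.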
    
    fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        let ref dst_ty = dest.ty;
        
        if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            if self.match_ireg(src) {
                let src_reg = self.emit_ireg(src, f_content, f_context, vm);
                self.backend.emit_mov_r64_r64(dest, &src_reg);
            } else if self.match_iimm(src) {
                let src_imm = self.emit_get_iimm(src);
                self.backend.emit_mov_r64_imm32(dest, src_imm);
            } else {
                panic!("expected an int type op");
            }
        } else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            unimplemented!()
        } else {
            panic!("unexpected type for move");
        } 
    }
}

impl CompilerPass for InstructionSelection {
    fn name(&self) -> &'static str {
        self.name
    }

    #[allow(unused_variables)]
    fn start_function(&mut self, vm: &VM, func_ver: &mut MuFunctionVersion) {
        debug!("{}", self.name());
        
        let funcs = vm.funcs().read().unwrap();
        let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
        self.backend.start_code(func.name().unwrap());
        
        // prologue (get arguments from entry block first)
        let entry_block = func_ver.content.as_ref().unwrap().get_entry_block();
        let ref args = entry_block.content.as_ref().unwrap().args;
        self.emit_common_prologue(args);
    }

    #[allow(unused_variables)]
    fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        let f_content = func.content.as_ref().unwrap();
        
        for block_id in func.block_trace.as_ref().unwrap() {
            let block = f_content.get_block(*block_id);
            let block_label = block.name().unwrap();
            
            self.backend.start_block(block_label.clone());

            let block_content = block.content.as_ref().unwrap();
            
            // live in is args of the block
            self.backend.set_block_livein(block_label.clone(), &block_content.args);
            
            // live out is the union of all branch args of this block
            let live_out = block_content.get_out_arguments();
            self.backend.set_block_liveout(block_label.clone(), &live_out);

            for inst in block_content.body.iter() {
                self.instruction_select(&inst, f_content, &mut func.context, vm);
            }
            
            self.backend.end_block(block_label);
        }
    }
    
    #[allow(unused_variables)]
    fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
        self.backend.print_cur_code();
        
        let mc = self.backend.finish_code();
        let compiled_func = CompiledFunction {
            func_id: func.func_id,
            func_ver_id: func.id(),
            temps: HashMap::new(),
            mc: mc
        };
        
        vm.add_compiled_func(compiled_func);
    }
}