// inst_sel.rs
use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
use ast::op::OpCode;
use ast::types;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint;

use compiler::CompilerPass;
use compiler::backend;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
use compiler::machine_code::CompiledFunction;
use compiler::frame::Frame;

use std::collections::HashMap;

pub struct InstructionSelection {
    name: &'static str,
    backend: Box<CodeGenerator>,
    
    current_callsite_id: usize,
    current_frame: Option<Frame>,
    current_block: Option<MuName>,
    current_func_start: Option<ValueLocation>,
    // key: block id, val: callsite that names the block as exception block
    current_exn_callsites: HashMap<MuID, Vec<ValueLocation>>,
    // key: block id, val: block location
    current_exn_blocks: HashMap<MuID, ValueLocation>     
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new()),
            
            current_callsite_id: 0,
            current_frame: None,
            current_block: None,
            current_func_start: None,
            // key: block id, val: callsite that names the block as exception block
            current_exn_callsites: HashMap::new(), 
            current_exn_blocks: HashMap::new()
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions breaks, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
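                        // e.g. with true_prob = 0.9 the likely true destination becomes the
                        // fall-through path, so we only emit a conditional jump for the
                        // unlikely false destination (taken when cond is false)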
                        
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
                        self.process_dest(&ops, branch_dest, f_content, f_context, vm);
                        
                        let branch_target = f_content.get_block(branch_dest.target).name().unwrap();
    
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_eq-branch2");
                            match self.emit_cmp_res(cond, f_content, f_context, vm) {
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);                            
                        } else {
                            unimplemented!();
                        }
                    },
                    
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.read().unwrap();
                                            
                        self.process_dest(&ops, dest, f_content, f_context, vm);
                        
                        let target = f_content.get_block(dest.target).name().unwrap();
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        if is_abort {
                            unimplemented!()
                        }
                        
                        self.emit_mu_call(
                            inst, // inst: &Instruction,
                            data, // calldata: &CallData,
                            None, // resumption: Option<&ResumptionData>,
                            node, // cur_node: &TreeNode, 
                            f_content, f_context, vm);                         
                    },
                    
                    Instruction_::Call{ref data, ref resume} => {
                        self.emit_mu_call(
                            inst, 
                            data, 
                            Some(resume), 
                            node, 
                            f_content, f_context, vm);
                    }
                    
                    Instruction_::Return(_) => {
                        self.emit_common_epilogue(inst, f_content, f_context, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.read().unwrap();
                        
                        match op {
                            op::BinOp::Add => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Mul => {
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.node_iimm_to_i32(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.node_iimm_to_i32(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
                            },
                            
                            _ => unimplemented!()
                        }
                    }
                    
                    // load on x64 generates mov inst (no matter what order is specified)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        
                        // check order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didn't expect order {:?} with load inst", order)
                        }                        
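                        // all four orders above map to a plain mov load on x86-64; per the
                        // C++11 mappings cited above, x86 loads already carry acquire semantics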

                        let resolved_loc = self.node_mem_to_value(loc_op, vm);
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov(GPR)
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didn't expect order {:?} with store inst", order)
                            }
                        };
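                        // per the C++11 mappings cited above, Relaxed and Release stores are
                        // a plain mov on x86-64, while a SeqCst store needs xchg (or
                        // mov + mfence), which is not implemented here yet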
                        
                        let resolved_loc = self.node_mem_to_value(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, f_content, f_context, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.node_iimm_to_i32(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::GetIRef(op_index) => {
                        let ops = inst.ops.read().unwrap();
                        
                        let ref op = ops[op_index];
                        let res_tmp = self.emit_get_result(node);
                        
                        let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
                        if hdr_size == 0 {
                            self.emit_general_move(&op, &res_tmp, f_content, f_context, vm);
                        } else {
                            self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
                        }
                    }
                    
                    Instruction_::ThreadExit => {
                        // emit a call to swap_back_to_native_stack(sp_loc: Address)
                        
                        // get thread local and add offset to get sp_loc
                        let tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
                        self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as i32);
                        
                        self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], None, Some(node), f_content, f_context, vm);
                    }
                    
                    Instruction_::New(ref ty) => {
                        let ty_info = vm.get_backend_type_info(ty.id());
                        let ty_size = ty_info.size;
                        let ty_align = ty_info.alignment;
                        
                        if ty_size > mm::LARGE_OBJECT_THRESHOLD {
                            // emit large object allocation
                            unimplemented!()
                        } else {
                            // emit immix allocation fast path
                            
                            // ASM: %tl = get_thread_local()
                            let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
                            
                            // ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
                            let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
                            let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
                            
                            // alignup cursor (cursor + align - 1 & !(align - 1))
                            // ASM: lea align-1(%cursor) -> %start
                            let align = ty_info.alignment as i32;
                            let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
                            // ASM: and %start, !(align-1) -> %start
                            self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
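                            // e.g. align = 8, cursor = 0x1007:
                            //   lea  7(%cursor) -> %start = 0x100e
                            //   and  %start, !7 -> %start = 0x1008 (aligned up)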
                            
                            // bump cursor
                            // ASM: lea size(%start) -> %end
                            let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
                            
                            // check with limit
                            // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
                            let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
                            let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
                            self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
                            
                            // branch to slow path if end > limit
                            // ASM: jl alloc_slow
                            let slowpath = format!("{}_allocslow", node.id());
                            self.backend.emit_jl(slowpath.clone());
                            
                            // update cursor
                            // ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
                            self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
                            
                            // put start as result
                            // ASM: mov %start -> %result
                            let tmp_res = self.emit_get_result(node);
                            self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
                            
                            // ASM jmp alloc_end
                            let allocend = format!("{}_allocend", node.id());
                            self.backend.emit_jmp(allocend.clone());
                            
                            // finishing current block
                            self.backend.end_block(self.current_block.as_ref().unwrap().clone());
                            
                            // alloc_slow: 
                            // call alloc_slow(size, align) -> %ret
                            // new block (no livein)
                            self.current_block = Some(slowpath.clone());
                            self.backend.start_block(slowpath.clone());
                            self.backend.set_block_livein(slowpath.clone(), &vec![]); 

                            // arg1: allocator address                            
                            let allocator_offset = *thread::ALLOCATOR_OFFSET;
                            let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
                            // arg2: size                            
                            let const_size = self.make_value_int_const(ty_size as u64, vm);
                            // arg3: align
                            let const_align = self.make_value_int_const(ty_align as u64, vm);
                            
                            let rets = self.emit_runtime_entry(
                                &entrypoints::ALLOC_SLOW,
                                vec![tmp_allocator, const_size, const_align],
                                Some(vec![
                                    tmp_res.clone()
                                ]),
                                Some(node), f_content, f_context, vm
                            );
                            
                            // end block (no liveout other than result)
                            self.backend.end_block(slowpath.clone());
                            self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
                            
                            // block: alloc_end
                            self.backend.start_block(allocend.clone());
                            self.current_block = Some(allocend.clone());
                        }
                    }
                    
                    Instruction_::Throw(op_index) => {
                        let ops = inst.ops.read().unwrap();
                        let ref exception_obj = ops[op_index];
                        
                        self.emit_runtime_entry(
                            &entrypoints::THROW_EXCEPTION, 
                            vec![exception_obj.clone_value()], 
                            None,
                            Some(node), f_content, f_context, vm);
                    }
    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
        
            }
        }
    }
    
    fn make_temporary(&mut self, f_context: &mut FunctionContext, ty: P<MuType>, vm: &VM) -> P<Value> {
        f_context.make_temporary(vm.next_id(), ty).clone_value()
    }
    
    fn make_memory_op_base_offset (&mut self, base: &P<Value>, offset: i32, ty: P<MuType>, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: ty.clone(),
            v: Value_::Memory(MemoryLocation::Address{
                base: base.clone(),
                offset: Some(self.make_value_int_const(offset as u64, vm)),
                index: None,
                scale: None
            })
        })
    }
    
    fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: UINT64_TYPE.clone(),
            v: Value_::Constant(Constant::Int(val))
        })
    } 
    
    fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, dest.ty.clone(), vm);
        
        self.backend.emit_mov_r64_mem64(dest, &mem);
    }
    
    fn emit_store_base_offset (&mut self, base: &P<Value>, offset: i32, src: &P<Value>, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, src.ty.clone(), vm);
        
        self.backend.emit_mov_mem64_r64(&mem, src);
    }
    
    fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, ADDRESS_TYPE.clone(), vm);
        
        self.backend.emit_lea_r64(dest, &mem);
    }
    
    fn emit_get_threadlocal (
        &mut self, 
        cur_node: Option<&TreeNode>,
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> P<Value> {
        let mut rets = self.emit_runtime_entry(&entrypoints::GET_THREAD_LOCAL, vec![], None, cur_node, f_content, f_context, vm);
        
        rets.pop().unwrap()
    }
    
    // rets: Option<Vec<P<Value>>>
    // if rets is Some, return values will be stored in the given temporaries;
    // otherwise temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_runtime_entry (
        &mut self, 
        entry: &RuntimeEntrypoint, 
        args: Vec<P<Value>>, 
        rets: Option<Vec<P<Value>>>,
        cur_node: Option<&TreeNode>, 
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> Vec<P<Value>> {
        let sig = entry.sig.clone();
        
        let entry_name = {
            if vm.is_running() {
                unimplemented!()
            } else {
                let ref entry_loc = entry.aot;
                
                match entry_loc {
                    &ValueLocation::Relocatable(_, ref name) => name.clone(),
                    _ => panic!("expecting a relocatable value")
                }
            }
        };
        
        self.emit_c_call(entry_name, sig, args, rets, cur_node, f_content, f_context, vm)
    }
    
    // returns the stack arg offset - we will need this to collapse stack after the call
    fn emit_precall_convention(
        &mut self,
        args: &Vec<P<Value>>, 
        vm: &VM) -> usize {
        // if we need to save caller-saved regs, this is where it would go
        // (since this is fastpath compile, we won't have them)
        
        // put args into registers if we can
        // in the meantime record args that do not fit in registers
        let mut stack_args : Vec<P<Value>> = vec![];        
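        // ARGUMENT_GPRs is expected to hold the System V AMD64 integer argument
        // registers in order (RDI, RSI, RDX, RCX, R8, R9); args beyond those
        // spill to the stack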
        let mut gpr_arg_count = 0;
        for arg in args.iter() {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    stack_args.push(arg.clone());
                }
            } else if arg.is_int_const() {
                if x86_64::is_valid_x86_imm(arg) {                
                    let int_const = arg.extract_int_const() as i32;
                    
                    if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                        self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], int_const);
                        gpr_arg_count += 1;
                    } else {
                        // use stack to pass argument
                        stack_args.push(arg.clone());
                    }
                } else {
                    // put the constant to memory
                    unimplemented!()
                }
            } else if arg.is_mem() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_mem64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    stack_args.push(arg.clone());
                }
            } else {
                // floating point
                unimplemented!()
            }
        }

        if !stack_args.is_empty() {
            // deal with stack arg, put them on stack
            // in reverse order, i.e. push the rightmost arg first to stack
            stack_args.reverse();

            // "The end of the input argument area shall be aligned on a 16
            // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
            // if we need to special align the args, we do it now
            // (then the args will be put to stack following their regular alignment)
            let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
            let (stack_arg_size, _, stack_arg_offsets) = backend::sequetial_layout(&stack_arg_tys, vm);
            let mut stack_arg_size_with_padding = stack_arg_size;
            if stack_arg_size % 16 == 0 {
                // do not need to adjust rsp
            } else if stack_arg_size % 8 == 0 {
                // adjust rsp by -8 (push a random padding value)
                self.backend.emit_push_imm32(0x7777);
                stack_arg_size_with_padding += 8;
            } else {
                panic!("expecting stack arguments to be at least 8-byte aligned, but it has size of {}", stack_arg_size);
            }
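            // e.g. two 8-byte stack args (16 bytes) need no padding, while three
            // (24 bytes) get one extra 8-byte push to keep the argument area
            // 16-byte aligned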

            // now, we just put all the args on the stack
            {
                let mut index = 0;
                for arg in stack_args {
                    self.emit_store_base_offset(&x86_64::RSP, - (stack_arg_offsets[index] as i32), &arg, vm);
                    index += 1;
                }

                self.backend.emit_add_r64_imm32(&x86_64::RSP, - (stack_arg_size as i32));
            }

            stack_arg_size_with_padding
        } else {
            0
        }
    }

    fn emit_postcall_convention(
        &mut self,
        sig: &P<CFuncSig>,
        rets: &Option<Vec<P<Value>>>,
        precall_stack_arg_size: usize,
        f_context: &mut FunctionContext,
        vm: &VM
    ) -> Vec<P<Value>> {
        // deal with ret vals
        let mut return_vals = vec![];

        let mut gpr_ret_count = 0;
        for ret_index in 0..sig.ret_tys.len() {
            let ref ty = sig.ret_tys[ret_index];

            let ret_val = match rets {
                &Some(ref rets) => rets[ret_index].clone(),
                &None => {
                    let tmp_node = f_context.make_temporary(vm.next_id(), ty.clone());
                    tmp_node.clone_value()
                }
            };

            if ret_val.is_int_reg() {
                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&ret_val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                    gpr_ret_count += 1;
                } else {
                    // get return value by stack
                    unimplemented!()
                }
            } else {
                // floating point register
                unimplemented!()
            }

            return_vals.push(ret_val);
        }

        // remove stack_args
        if precall_stack_arg_size != 0 {
            self.backend.emit_add_r64_imm32(&x86_64::RSP, precall_stack_arg_size as i32);
        }

        return_vals
    }
    
    #[allow(unused_variables)]
    // rets: Option<Vec<P<Value>>>
    // if rets is Some, return values will be stored in the given temporaries;
    // otherwise temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_c_call (
        &mut self, 
        func_name: CName, 
        sig: P<CFuncSig>, 
        args: Vec<P<Value>>, 
        rets: Option<Vec<P<Value>>>,
        cur_node: Option<&TreeNode>,
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> Vec<P<Value>> 
    {
        let stack_arg_size = self.emit_precall_convention(&args, vm);
        
        // make call
        if vm.is_running() {
            unimplemented!()
        } else {
            let callsite = self.new_callsite_label(cur_node);
            self.backend.emit_call_near_rel32(callsite, func_name);
            
            // record exception block (CCall may have an exception block)
            if cur_node.is_some() {
                let cur_node = cur_node.unwrap(); 
                if cur_node.op == OpCode::CCall {
                    unimplemented!()
                }
            }
        }
        
        self.emit_postcall_convention(&sig, &rets, stack_arg_size, f_context, vm)
    }
    
    fn emit_mu_call(
        &mut self,
        inst: &Instruction,
        calldata: &CallData,
        resumption: Option<&ResumptionData>,
        cur_node: &TreeNode, 
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) {
        trace!("deal with pre-call convention");
        
        let ops = inst.ops.read().unwrap();
        let ref func = ops[calldata.func];
        let ref func_sig = match func.v {
            TreeNode_::Value(ref pv) => {
                let ty : &MuType = &pv.ty;
                match ty.v {
                    MuType_::FuncRef(ref sig)
                    | MuType_::UFuncPtr(ref sig) => sig,
                    _ => panic!("expected funcref/ptr type")
                }
            },
            _ => panic!("expected funcref/ptr type")
        };
        
        debug_assert!(func_sig.arg_tys.len() == calldata.args.len());
        if cfg!(debug_assertions) {
            if inst.value.is_some() {
                assert!(func_sig.ret_tys.len() == inst.value.as_ref().unwrap().len());
            } else {
                assert!(func_sig.ret_tys.len() == 0, "expect call inst's value to match the function sig's return types. value: {:?}, ret tys: {:?}", inst.value, func_sig.ret_tys);
            }
        }

        // prepare args (they could be instructions, we need to emit inst and get value)
        let mut arg_values = vec![];
        for arg_index in calldata.args.iter() {
            let ref arg = ops[*arg_index];

            if self.match_ireg(arg) {
                let arg = self.emit_ireg(arg, f_content, f_context, vm);
                arg_values.push(arg);
            } else if self.match_iimm(arg) {
                let arg = self.node_iimm_to_value(arg);
                arg_values.push(arg);
            } else {
                unimplemented!();
            }
        }
        let stack_arg_size = self.emit_precall_convention(&arg_values, vm);
        
        trace!("generating call inst");
        // check direct call or indirect
        let callsite = {
            if self.match_funcref_const(func) {
                let target_id = self.node_funcref_const_to_id(func);
                let funcs = vm.funcs().read().unwrap();
                let target = funcs.get(&target_id).unwrap().read().unwrap();
                                            
                if vm.is_running() {
                    unimplemented!()
                } else {
                    let callsite = self.new_callsite_label(Some(cur_node));
                    self.backend.emit_call_near_rel32(callsite, target.name().unwrap())
                }
            } else if self.match_ireg(func) {
                let target = self.emit_ireg(func, f_content, f_context, vm);
                
                let callsite = self.new_callsite_label(Some(cur_node));
                self.backend.emit_call_near_r64(callsite, &target)
            } else if self.match_mem(func) {
                let target = self.emit_mem(func);
                
                let callsite = self.new_callsite_label(Some(cur_node));
                self.backend.emit_call_near_mem64(callsite, &target)
            } else {
                unimplemented!()
            }
        };
        
        // record exception branch
        if resumption.is_some() {
            let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
            let target_block = exn_dest.target;
            
            if self.current_exn_callsites.contains_key(&target_block) {
                let callsites = self.current_exn_callsites.get_mut(&target_block).unwrap();
                callsites.push(callsite);
            } else {
                self.current_exn_callsites.insert(target_block, vec![callsite]);
            } 
        }
        
        // deal with ret vals
        self.emit_postcall_convention(
            &func_sig, &inst.value,
            stack_arg_size, f_context, vm);
    }
    
    #[allow(unused_variables)]
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref arg = ops[op_index];
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = f_content.get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
                    self.emit_general_move(&arg, target_arg, f_content, f_context, vm);
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
    
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, vm: &VM) {
        let block_name = "prologue".to_string();
        self.backend.start_block(block_name.clone());
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name.clone(), &vec![]);
        self.backend.set_block_liveout(block_name.clone(), args);
        
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
        
        // push all callee-saved registers
        {
            let frame = self.current_frame.as_mut().unwrap();
            for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
                let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
                // not pushing rbp (as we have already done that)
                if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                    self.backend.emit_push_r64(&reg);
                    frame.alloc_slot_for_callee_saved_reg(reg.clone(), vm);
                }
            }
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
        // TODO: let mut fpr_arg_count = 0;
        // initial stack arg is at RBP+16
        //   arg           <- RBP + 16
        //   return addr
        //   old RBP       <- RBP
        let mut stack_arg_offset : i32 = 16;
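        // e.g. the first stack arg is loaded from [RBP + 16]; after unloading an
        // 8-byte arg, stack_arg_offset advances to 24 for the next one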
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
                    
                    // move stack_arg_offset by the size of 'arg'
                    let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
                    stack_arg_offset += arg_size as i32;
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                // args that are not fp or int (possibly struct/array/etc)
                unimplemented!();
            }
        }
        
        self.backend.end_block(block_name);
    }
    
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        // epilogue is not a block (it's a few instructions inserted before return)
        // FIXME: this may change in the future
        
        // prepare return regs
        let ref ops = ret_inst.ops.read().unwrap();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
        // TODO: let mut fpr_ret_count = 0;
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
                let reg_ret_val = self.emit_ireg(ret_val, f_content, f_context, vm);
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.node_iimm_to_i32(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
        }        
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
        }
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
    }
    
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::CmpOp(_, _, _) => true,
                    _ => false
                }
            }
            TreeNode_::Value(_) => false
        }
    }
    
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.read().unwrap();                
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {                        
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let iimm_op2 = self.node_iimm_to_i32(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            unimplemented!()
                        }
                        
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }    
    
    fn match_ireg(&mut self, op: &TreeNode) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    types::is_scalar(&value.ty)
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
    
    fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(_) => {
                self.instruction_select(op, f_content, f_context, vm);
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
                    Value_::SSAVar(_) => {
                        pv.clone()
                    },
                }
            }
        }
    }
    
    #[allow(unused_variables)]
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
            _ => false
        }
    }
    
    fn node_iimm_to_i32(&mut self, op: &P<TreeNode>) -> i32 {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::Int(val)) => {
                        val as i32
                    },
                    _ => panic!("expected iimm")
                }
            },
            _ => panic!("expected iimm")
        }
    }

    fn node_iimm_to_value(&mut self, op: &P<TreeNode>) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
            _ => panic!("expected iimm")
        }
    }
    
    fn node_mem_to_value(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => P(Value{
                        hdr: MuEntityHeader::unnamed(vm.next_id()),
                        ty: types::get_referent_ty(& pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(_) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic
                            P(Value{
                                hdr: MuEntityHeader::unnamed(vm.next_id()),
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: pv.name().unwrap()