use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
use ast::op::OpCode;
use ast::types;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint;

use compiler::CompilerPass;
use compiler::backend;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
use compiler::machine_code::CompiledFunction;
use compiler::frame::Frame;

use std::collections::HashMap;

pub struct InstructionSelection {
    name: &'static str,
    
    backend: Box<CodeGenerator>,
    
    current_callsite_id: usize,
    current_frame: Option<Frame>,
    current_block: Option<MuName>,
    current_func_start: Option<ValueLocation>,
    // key: block id, val: callsite that names the block as exception block
    current_exn_callsites: HashMap<MuID, Vec<ValueLocation>>,
    // key: block id, val: block location
    current_exn_blocks: HashMap<MuID, ValueLocation>
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new()),
            
            current_callsite_id: 0,
            current_frame: None,
            current_block: None,
            current_func_start: None,
            // key: block id, val: callsite that names the block as exception block
            current_exn_callsites: HashMap::new(), 
            current_exn_blocks: HashMap::new()
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions breaks, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
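                        // pick the likelier successor as the fallthrough block so the hot
                        // path continues without a taken jump; the colder successor gets
                        // the explicit conditional branch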
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
                        
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
                        self.process_dest(&ops, branch_dest, f_content, f_context, vm);
                        
                        let branch_target = f_content.get_block(branch_dest.target).name().unwrap();
    
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_eq-branch2");
                            match self.emit_cmp_res(cond, f_content, f_context, vm) {
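                                // map each comparison result to the matching x86 conditional
                                // jump: unsigned compares use ja/jb/jae/jbe, signed compares
                                // use jg/jl/jge/jle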
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);                            
                        } else {
                            unimplemented!();
                        }
                    },
                    
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, dest, f_content, f_context, vm);
                        
                        let target = f_content.get_block(dest.target).name().unwrap();
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        if is_abort {
                            unimplemented!()
                        }
                        
                        self.emit_mu_call(
                            inst, // inst: &Instruction,
                            data, // calldata: &CallData,
                            None, // resumption: Option<&ResumptionData>,
                            node, // cur_node: &TreeNode, 
                            f_content, f_context, vm);                         
                    },
                    
                    Instruction_::Call{ref data, ref resume} => {
                        self.emit_mu_call(
                            inst, 
                            data, 
                            Some(resume), 
                            node, 
                            f_content, f_context, vm);
                    }
                    
                    Instruction_::Return(_) => {
                        self.emit_common_epilogue(inst, f_content, f_context, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.read().unwrap();
                        
                        match op {
                            op::BinOp::Add => {
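                                // x86 integer ALU ops are two-address (dst = dst op src), so
                                // each pattern first moves op1 into the result temporary and
                                // then applies the operation with op2 on that temporary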
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Mul => {
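                                // x86 MUL is tied to rax: one operand is staged in rax, the
                                // low 64 bits of the product come back in rax, and the result
                                // is copied out to the result temporary at the end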
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.emit_get_iimm(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.emit_get_iimm(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
                            },
                            
                            _ => unimplemented!()
                        }
                    }
                    
                    // load on x64 generates mov inst (no matter what order is specified)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        
                        // check order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didnt expect order {:?} with store inst", order)
                        }                        
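                        // on x86-64 every aligned load already carries acquire semantics,
                        // so all of the orders accepted above lower to a plain mov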

                        let resolved_loc = self.emit_get_mem(loc_op, vm);                        
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov(GPR)
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didnt expect order {:?} with store inst", order)
                            }
                        };
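                        // x86-64 stores already have release semantics, so Relaxed and
                        // Release lower to a plain mov; a SeqCst store needs extra ordering
                        // (e.g. an xchg or a trailing fence), which is not implemented yet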
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, f_content, f_context, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.emit_get_iimm(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::GetIRef(op_index) => {
                        let ops = inst.ops.read().unwrap();
                        
                        let ref op = ops[op_index];
                        let res_tmp = self.emit_get_result(node);
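                        // an IRef points past the object header: with a zero-size header
                        // it is a plain move, otherwise compute base + header size via lea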
                        
                        let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
                        if hdr_size == 0 {
                            self.emit_general_move(&op, &res_tmp, f_content, f_context, vm);
                        } else {
                            self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
                        }
                    }
                    
                    Instruction_::ThreadExit => {
                        // emit a call to swap_back_to_native_stack(sp_loc: Address)
                        
                        // get thread local and add offset to get sp_loc
                        let tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
                        self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as i32);
                        
                        self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], None, Some(node), f_content, f_context, vm);
                    }
                    
                    Instruction_::New(ref ty) => {
                        let ty_info = vm.get_backend_type_info(ty.id());
                        let ty_size = ty_info.size;
                        let ty_align = ty_info.alignment;
                        
                        if ty_size > mm::LARGE_OBJECT_THRESHOLD {
                            // emit large object allocation
                            unimplemented!()
                        } else {
                            // emit immix allocation fast path
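                            // bump-pointer allocation: align the cursor up, advance it by
                            // the object size, and fall back to a runtime call when the new
                            // cursor would cross the thread-local limit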
                            
                            // ASM: %tl = get_thread_local()
                            let tmp_tl = self.emit_get_threadlocal(Some(node), f_content, f_context, vm);
                            
                            // ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
                            let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
                            let tmp_cursor = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
                            
                            // align up cursor: (cursor + align - 1) & !(align - 1)
                            // ASM: lea align-1(%cursor) -> %start
                            let align = ty_info.alignment as i32;
                            let tmp_start = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
                            // ASM: and %start, !(align-1) -> %start
                            self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
                            
                            // bump cursor
                            // ASM: lea size(%start) -> %end
                            let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
                            
                            // check with limit
                            // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
                            let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
                            let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
                            self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
                            
                            // branch to slow path if end > limit
                            // ASM: jl alloc_slow
                            let slowpath = format!("{}_allocslow", node.id());
                            self.backend.emit_jl(slowpath.clone());
                            
                            // update cursor
                            // ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
                            self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
                            
                            // put start as result
                            // ASM: mov %start -> %result
                            let tmp_res = self.emit_get_result(node);
                            self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
                            
                            // ASM: jmp alloc_end
                            let allocend = format!("{}_allocend", node.id());
                            self.backend.emit_jmp(allocend.clone());
                            
                            // finishing current block
                            self.backend.end_block(self.current_block.as_ref().unwrap().clone());
                            
                            // alloc_slow:
                            // call alloc_slow(size, align) -> %ret
                            // new block (no livein)
                            self.current_block = Some(slowpath.clone());
                            self.backend.start_block(slowpath.clone());
                            self.backend.set_block_livein(slowpath.clone(), &vec![]);
                            
                            // arg1: allocator address
                            let allocator_offset = *thread::ALLOCATOR_OFFSET;
                            let tmp_allocator = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
                            // arg2: size
                            let const_size = self.make_value_int_const(ty_size as u64, vm);
                            // arg3: align
                            let const_align = self.make_value_int_const(ty_align as u64, vm);
                            
                            let rets = self.emit_runtime_entry(
                                &entrypoints::ALLOC_SLOW,
                                vec![tmp_allocator, const_size, const_align],
                                Some(vec![
                                    tmp_res.clone()
                                ]),
                                Some(node), f_content, f_context, vm
                            );
                            
                            // end block (no liveout other than result)
                            self.backend.end_block(slowpath.clone());
                            self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
                            
                            // block: alloc_end
                            self.backend.start_block(allocend.clone());
                            self.current_block = Some(allocend.clone());
                        }
                    }
                    
                    Instruction_::Throw(op_index) => {
                        let ops = inst.ops.read().unwrap();
                        let ref exception_obj = ops[op_index];
                        
                        self.emit_runtime_entry(
                            &entrypoints::THROW_EXCEPTION, 
                            vec![exception_obj.clone_value()], 
                            None,
                            Some(node), f_content, f_context, vm);
                    }
                    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
                
            }
        }
    }
    
    fn make_temporary(&mut self, f_context: &mut FunctionContext, ty: P<MuType>, vm: &VM) -> P<Value> {
        f_context.make_temporary(vm.next_id(), ty).clone_value()
    }
    
    fn make_memory_op_base_offset (&mut self, base: &P<Value>, offset: i32, ty: P<MuType>, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: ty.clone(),
            v: Value_::Memory(MemoryLocation::Address{
                base: base.clone(),
                offset: Some(self.make_value_int_const(offset as u64, vm)),
                index: None,
                scale: None
            })
        })
    }
    
    fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: UINT64_TYPE.clone(),
            v: Value_::Constant(Constant::Int(val))
        })
    } 
    
    fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, dest.ty.clone(), vm);
        
        self.backend.emit_mov_r64_mem64(dest, &mem);
    }
    
    fn emit_store_base_offset (&mut self, base: &P<Value>, offset: i32, src: &P<Value>, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, src.ty.clone(), vm);
        
        self.backend.emit_mov_mem64_r64(&mem, src);
    }
    
    fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, ADDRESS_TYPE.clone(), vm);
        
        self.backend.emit_lea_r64(dest, &mem);
    }
    
    fn emit_get_threadlocal (
        &mut self, 
        cur_node: Option<&TreeNode>,
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> P<Value> {
        let mut rets = self.emit_runtime_entry(&entrypoints::GET_THREAD_LOCAL, vec![], None, cur_node, f_content, f_context, vm);
        
        rets.pop().unwrap()
    }
    
    // ret: Option<Vec<P<Value>>>
    // if ret is Some, return values will be stored in the given temporaries;
    // otherwise temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_runtime_entry (
        &mut self, 
        entry: &RuntimeEntrypoint, 
        args: Vec<P<Value>>, 
        rets: Option<Vec<P<Value>>>,
        cur_node: Option<&TreeNode>, 
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> Vec<P<Value>> {
        let sig = entry.sig.clone();
        
        let entry_name = {
            if vm.is_running() {
                unimplemented!()
            } else {
                let ref entry_loc = entry.aot;
                
                match entry_loc {
                    &ValueLocation::Relocatable(_, ref name) => name.clone(),
                    _ => panic!("expecting a relocatable value")
                }
            }
        };
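        
        // in AOT mode the entrypoint is only known by its relocatable symbol
        // name; emit_c_call below emits the call against that symbol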
        
        self.emit_c_call(entry_name, sig, args, rets, cur_node, f_content, f_context, vm)
    }
    
    // returns the stack arg offset - we will need this to collapse stack after the call
    fn emit_precall_convention(
        &mut self,
        args: &Vec<P<Value>>, 
        vm: &VM) -> i32 {
        // if we need to save caller-saved regs,
        // put it here (since this is fastpath compile, we won't have them)
        
        // put args into registers if we can
        // in the meantime record args that do not fit in registers
        let mut stack_args : Vec<P<Value>> = vec![];        
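        
        // integer arguments go into the argument GPRs in order (under the
        // System V AMD64 ABI: rdi, rsi, rdx, rcx, r8, r9); anything beyond
        // the sixth integer argument is passed on the stack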
        let mut gpr_arg_count = 0;
        for arg in args.iter() {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    stack_args.push(arg.clone());
                }
            } else if arg.is_int_const() {
                if x86_64::is_valid_x86_imm(arg) {
                    let int_const = arg.extract_int_const() as i32;
                    
                    if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                        self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], int_const);
                        gpr_arg_count += 1;
                    } else {
                        // use stack to pass argument
                        stack_args.push(arg.clone());
                    }
                } else {
                    // put the constant to memory
                    unimplemented!()
                }
            } else if arg.is_mem() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_mem64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    stack_args.push(arg.clone());
                }
            } else {
                // floating point
                unimplemented!()
            }
        }
        
        // deal with stack arg, put them on stack
        // in reverse order, i.e. push the rightmost arg first to stack
        stack_args.reverse();
        // "The end of the input argument area shall be aligned on a 16 
        // (32, if __m256 is passed on stack) byte boundary." - x86 ABI 
        // if we need to special align the args, we do it now
        // (then the args will be put to stack following their regular alignment)
        let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
        let (stack_arg_size, _, stack_arg_offsets) = backend::sequetial_layout(&stack_arg_tys, vm);
        if stack_arg_size % 16 == 0 {
            // do not need to adjust rsp
        } else if stack_arg_size % 8 == 0 {
            // adjust rsp by -8 (push a random padding value)
            self.backend.emit_push_imm32(0x7777);
        } else {
            panic!("expecting stack arguments to be at least 8-byte aligned, but it has size of {}", stack_arg_size); 
        }
        // now, we just put all the args on the stack
        {
            let mut index = 0;
            for arg in stack_args {
                self.emit_store_base_offset(&x86_64::RSP, - (stack_arg_offsets[index] as i32), &arg, vm);
                index += 1;
            }
            
            self.backend.emit_add_r64_imm32(&x86_64::RSP, - (stack_arg_size as i32));
        }
        
        - (stack_arg_size as i32)
    }
    
    #[allow(unused_variables)]
    // ret: Option<Vec<P<Value>>>
    // if ret is Some, return values will be stored in the given temporaries;
    // otherwise temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_c_call (
        &mut self, 
        func_name: CName, 
        sig: P<CFuncSig>, 
        args: Vec<P<Value>>, 
        rets: Option<Vec<P<Value>>>,
        cur_node: Option<&TreeNode>,
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) -> Vec<P<Value>> 
    {
        self.emit_precall_convention(&args, vm);
        
        // make call
        if vm.is_running() {
            unimplemented!()
        } else {
            let callsite = self.new_callsite_label(cur_node);
            self.backend.emit_call_near_rel32(callsite, func_name);
            
            // record exception block (CCall may have an exception block)
            if cur_node.is_some() {
                let cur_node = cur_node.unwrap(); 
                if cur_node.op == OpCode::CCall {
                    unimplemented!()
                }
            }
        }
        
        // deal with ret vals
        let mut return_vals = vec![];
        
        let mut gpr_ret_count = 0;
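        // integer return values come back in the return GPRs (rax, then rdx
        // under the System V AMD64 ABI); more than two would need the stack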
        for ret_index in 0..sig.ret_tys.len() {
            let ref ty = sig.ret_tys[ret_index];
            
            let ret_val = match rets {
                Some(ref rets) => rets[ret_index].clone(),
                None => {
                    let tmp_node = f_context.make_temporary(vm.next_id(), ty.clone());
                    tmp_node.clone_value()
                }
            };
            
            if ret_val.is_int_reg() {
                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&ret_val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                    gpr_ret_count += 1;
                } else {
                    // get return value by stack
                    unimplemented!()
                }
            } else {
                // floating point register
                unimplemented!()
            }
            
            return_vals.push(ret_val);            
        }
        
        return_vals
    }
    
    fn emit_mu_call(
        &mut self,
        inst: &Instruction,
        calldata: &CallData,
        resumption: Option<&ResumptionData>,
        cur_node: &TreeNode, 
        f_content: &FunctionContent, 
        f_context: &mut FunctionContext, 
        vm: &VM) {
        trace!("deal with pre-call convention");
        
        let ops = inst.ops.read().unwrap();
        let ref func = ops[calldata.func];
        let ref func_sig = match func.v {
            TreeNode_::Value(ref pv) => {
                let ty : &MuType = &pv.ty;
                match ty.v {
                    MuType_::FuncRef(ref sig)
                    | MuType_::UFuncPtr(ref sig) => sig,
                    _ => panic!("expected funcref/ptr type")
                }
            },
            _ => panic!("expected funcref/ptr type")
        };
        
        debug_assert!(func_sig.arg_tys.len() == calldata.args.len());
        
        if cfg!(debug_assertions) {
            if inst.value.is_some() {
                assert!(func_sig.ret_tys.len() == inst.value.as_ref().unwrap().len());
            } else {
                assert!(func_sig.ret_tys.len() == 0, "call inst has no value, so its signature should have no return types. value: {:?}, ret tys: {:?}", inst.value, func_sig.ret_tys);
            }
        }

        let arg_values = calldata.args.iter().map(|x| ops[*x].clone_value()).collect();
        self.emit_precall_convention(&arg_values, vm);
        
        trace!("generating call inst");
        // check direct call or indirect
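        // every call gets a fresh callsite label; if the call has a resumption,
        // the label is recorded against the exception target block below so the
        // exception table can be built later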
        let callsite = {
            if self.match_funcref_const(func) {
                let target_id = self.emit_get_funcref_const(func);
                let funcs = vm.funcs().read().unwrap();
                let target = funcs.get(&target_id).unwrap().read().unwrap();
                                            
                if vm.is_running() {
                    unimplemented!()
                } else {
                    let callsite = self.new_callsite_label(Some(cur_node));
                    self.backend.emit_call_near_rel32(callsite, target.name().unwrap())
                }
            } else if self.match_ireg(func) {
                let target = self.emit_ireg(func, f_content, f_context, vm);
                
                let callsite = self.new_callsite_label(Some(cur_node));
                self.backend.emit_call_near_r64(callsite, &target)
            } else if self.match_mem(func) {
                let target = self.emit_mem(func);
                
                let callsite = self.new_callsite_label(Some(cur_node));
                self.backend.emit_call_near_mem64(callsite, &target)
            } else {
                unimplemented!()
            }
        };
        
        // record exception branch
        if resumption.is_some() {
            let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
            let target_block = exn_dest.target;
            
            if self.current_exn_callsites.contains_key(&target_block) {
                let callsites = self.current_exn_callsites.get_mut(&target_block).unwrap();
                callsites.push(callsite);
            } else {
                let mut callsites = vec![];
                callsites.push(callsite);
                self.current_exn_callsites.insert(target_block, callsites);
            } 
        }
        
        // deal with ret vals
        if inst.value.is_some() {
            let rets = inst.value.as_ref().unwrap();
            trace!("deal with return values");
            let mut gpr_ret_count = 0;
            // TODO: let mut fpr_ret_count = 0;
            for val in rets {
                if val.is_int_reg() {
                    if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                        self.backend.emit_mov_r64_r64(&val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                        gpr_ret_count += 1;
                    } else {
                        // get return value by stack
                        unimplemented!();
                    }
                } else {
                    // floating point register
                    unimplemented!();
                }
            }
        } else {
            trace!("no return value");
        }

    }
    
    #[allow(unused_variables)]
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref arg = ops[op_index];
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = f_content.get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
                    self.emit_general_move(&arg, target_arg, f_content, f_context, vm);
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
    
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, vm: &VM) {
        let block_name = "prologue".to_string();
        self.backend.start_block(block_name.clone());
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name.clone(), &vec![]);
        self.backend.set_block_liveout(block_name.clone(), args);
        
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
        
        // push all callee-saved registers
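        // rbp is skipped below since it was already pushed as the frame pointer;
        // each saved register also gets a frame slot recorded, presumably so its
        // location can be found again when the frame is unwound or restored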
        {
            let frame = self.current_frame.as_mut().unwrap();
            for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
                let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
                // not pushing rbp (as we have done that already)
                if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                    self.backend.emit_push_r64(&reg);
                    frame.alloc_slot_for_callee_saved_reg(reg.clone(), vm);
                }
            }
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
        // TODO: let mut fpr_arg_count = 0;
        // initial stack arg is at RBP+16
        //   arg           <- RBP + 16
        //   return addr
        //   old RBP       <- RBP
        let mut stack_arg_offset : i32 = 16;
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
                    
                    // move stack_arg_offset by the size of 'arg'
                    let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
                    stack_arg_offset += arg_size as i32;
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                // args that are not fp or int (possibly struct/array/etc)
                unimplemented!();
            }
        }
        
        self.backend.end_block(block_name);
    }
    
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        // the epilogue is not a block (it's a few instructions inserted before return)
        // FIXME: this may change in the future
        
        // prepare return regs
        let ref ops = ret_inst.ops.read().unwrap();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
        // TODO: let mut fpr_ret_count = 0;
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
                let reg_ret_val = self.emit_ireg(ret_val, f_content, f_context, vm);
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.emit_get_iimm(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
        }        
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
        }
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
    }
    
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::CmpOp(_, _, _) => true,
                    _ => false
                }
            }
            TreeNode_::Value(_) => false
        }
    }
    
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.read().unwrap();
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let iimm_op2 = self.emit_get_iimm(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            unimplemented!()
                        }
                        
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }
    
    fn match_ireg(&mut self, op: &TreeNode) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    types::is_scalar(&value.ty)
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
    
1053
    fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(_) => {
                self.instruction_select(op, f_content, f_context, vm);
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
                    Value_::SSAVar(_) => {
                        pv.clone()
                    },
                }
            }
        }
    }
    
    #[allow(unused_variables)]
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
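    // returns true if the node is a value that can be encoded
    // as a 32-bit x86 immediate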
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
            _ => false
        }
    }
    
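    // extracts an integer constant from the node as a 32-bit immediate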
    fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> i32 {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::Int(val)) => {
                        val as i32
                    },
                    _ => panic!("expected iimm")
                }
            },
            _ => panic!("expected iimm")
        }
    }
    
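    // translates the node into a memory operand:
    // - an SSA variable becomes an address with the variable as base
    // - a global becomes a RIP-relative symbolic address when compiling
    //   ahead of time (resolving the address under a running VM is not
    //   implemented yet)
    // - an existing memory operand is returned as is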
    fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => P(Value{
                        hdr: MuEntityHeader::unnamed(vm.next_id()),
                        ty: types::get_referent_ty(&pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(_) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic
                            P(Value{
                                hdr: MuEntityHeader::unnamed(vm.next_id()),
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: pv.name().unwrap()
                                })
                            })
                        }
                    },
                    Value_::Memory(_) => pv.clone(),
                    Value_::Constant(_) => unimplemented!()
                }
            }
            TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, vm)
        }
    }
    
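    // translates an address-yielding instruction into a memory operand;
    // currently only GetIRef is handled, by offsetting the reference
    // past the object header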
    fn emit_get_mem_from_inst(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                let ref ops = inst.ops.read().unwrap();
                
                match inst.v {
                    Instruction_::GetIRef(op_index) => {
                        let ref op = ops[op_index];
                        
                        // an iref points past the object header, so offset the reference base by OBJECT_HEADER_SIZE
                        self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, ADDRESS_TYPE.clone(), vm)
                    }
                    _ => unimplemented!()
                }
            },
            _ => panic!("expecting a instruction that yields a memory address")
        }
    }
    
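    // returns true if the node is a constant function reference
    // (FuncRef or UFuncRef)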
    fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(_))
                    | Value_::Constant(Constant::UFuncRef(_)) => true,
                    _ => false
                }
            },
            _ => false 
        }
    }
    
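    // returns the MuID of the function referred to by a (u)funcref constant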
    fn emit_get_funcref_const(&mut self, op: &P<TreeNode>) -> MuID {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(id))
                    | Value_::Constant(Constant::UFuncRef(id)) => id,
                    _ => panic!("expected a (u)funcref const")
                }
            },
            _ => panic!("expected a (u)funcref const")
        }
    }
    
    #[allow(unused_variables)]
    fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    #[allow(unused_variables)]
    fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
        unimplemented!()
    }
    
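    // returns the value produced by the node: the single result of an
    // instruction, or the value itself; panics if an instruction has
    // zero or several results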
    fn emit_get_result(&mut self, node: &TreeNode) -> P<Value> {
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        panic!("expected ONE result from the node {}", node);
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    value.clone()
                } else {
                    panic!("expected result from the node {}", node);
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
        }
    }
    
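    // emits a move from a tree operand into the destination value,
    // dispatching on the destination type; scalar int moves are either
    // mov r64, r64 or mov r64, imm32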
    fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        let ref dst_ty = dest.ty;
        
        if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            if self.match_ireg(src) {
                let src_reg = self.emit_ireg(src, f_content, f_context, vm);
                self.backend.emit_mov_r64_r64(dest, &src_reg);
            } else if self.match_iimm(src) {
                let src_imm = self.emit_get_iimm(src);
                self.backend.emit_mov_r64_imm32(dest, src_imm);
            } else {
                panic!("expected an int type op");
            }