inst_sel.rs

use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
use ast::op::OpCode;
use ast::types;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint;

use compiler::CompilerPass;
use compiler::backend;
use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
use compiler::machine_code::CompiledFunction;
use compiler::frame::Frame;

use std::collections::HashMap;
use std::any::Any;

pub struct InstructionSelection {
    name: &'static str,
    backend: Box<CodeGenerator>,
    
    current_callsite_id: usize,
    current_frame: Option<Frame>,
    current_block: Option<MuName>,
    current_func_start: Option<ValueLocation>,
    // key: block id, val: callsite that names the block as exception block
    current_exn_callsites: HashMap<MuID, Vec<ValueLocation>>,
    // key: block id, val: block location
    current_exn_blocks: HashMap<MuID, ValueLocation>     
}

impl <'a> InstructionSelection {
    #[cfg(feature = "aot")]
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new()),
            
            current_callsite_id: 0,
            current_frame: None,
            current_block: None,
            current_func_start: None,
            // key: block id, val: callsite that names the block as exception block
            current_exn_callsites: HashMap::new(), 
            current_exn_blocks: HashMap::new()
        }
    }

    #[cfg(feature = "jit")]
    pub fn new() -> InstructionSelection {
        unimplemented!()
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64-bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions is broken, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        trace!("instsel on BRANCH2");
                        // if 'branch_if_true' == true,  emit the cjmp that matches the CmpOp (je  for EQ, jne for NE)
                        // if 'branch_if_true' == false, emit the opposite cjmp to the CmpOp  (jne for EQ, je  for NE)
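                        // the more probable successor becomes the fallthrough block, so the
                        // conditional jump (with the condition negated if needed) targets the less likely one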
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
                        
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
                        self.process_dest(&ops, branch_dest, f_content, f_context, vm);
                        
                        let branch_target = f_content.get_block(branch_dest.target).name().unwrap();
    
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_res-branch2");
                            match self.emit_cmp_res(cond, f_content, f_context, vm) {
                                op::CmpOp::EQ => {
                                    if branch_if_true {
                                        self.backend.emit_je(branch_target);
                                    } else {
                                        self.backend.emit_jne(branch_target);
                                    }
                                },
                                op::CmpOp::NE => {
                                    if branch_if_true {
                                        self.backend.emit_jne(branch_target);
                                    } else {
                                        self.backend.emit_je(branch_target);
                                    }
                                },
                                op::CmpOp::UGE => {
                                    if branch_if_true {
                                        self.backend.emit_jae(branch_target);
                                    } else {
                                        self.backend.emit_jb(branch_target);
                                    }
                                },
                                op::CmpOp::UGT => {
                                    if branch_if_true {
                                        self.backend.emit_ja(branch_target);
                                    } else {
                                        self.backend.emit_jbe(branch_target);
                                    }
                                },
                                op::CmpOp::ULE => {
                                    if branch_if_true {
                                        self.backend.emit_jbe(branch_target);
                                    } else {
                                        self.backend.emit_ja(branch_target);
                                    }
                                },
                                op::CmpOp::ULT => {
                                    if branch_if_true {
                                        self.backend.emit_jb(branch_target);
                                    } else {
                                        self.backend.emit_jae(branch_target);
                                    }
                                },
                                op::CmpOp::SGE => {
                                    if branch_if_true {
                                        self.backend.emit_jge(branch_target);
                                    } else {
                                        self.backend.emit_jl(branch_target);
                                    }
                                },
                                op::CmpOp::SGT => {
                                    if branch_if_true {
                                        self.backend.emit_jg(branch_target);
                                    } else {
                                        self.backend.emit_jle(branch_target);
                                    }
                                },
                                op::CmpOp::SLE => {
                                    if branch_if_true {
                                        self.backend.emit_jle(branch_target);
                                    } else {
                                        self.backend.emit_jg(branch_target);
                                    }
                                },
                                op::CmpOp::SLT => {
                                    if branch_if_true {
                                        self.backend.emit_jl(branch_target);
                                    } else {
                                        self.backend.emit_jge(branch_target);
                                    }
                                },

                                // floating point
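                                // note: the ordered (FO*) and unordered (FU*) variants map to the same
                                // conditional jump here, so NaN operands are not treated specially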

                                op::CmpOp::FOEQ | op::CmpOp::FUEQ => {
                                    if branch_if_true {
                                        self.backend.emit_je(branch_target);
                                    } else {
                                        self.backend.emit_jne(branch_target);
                                    }
                                },
                                op::CmpOp::FONE | op::CmpOp::FUNE => {
                                    if branch_if_true {
                                        self.backend.emit_jne(branch_target);
                                    } else {
                                        self.backend.emit_je(branch_target);
                                    }
                                },
                                op::CmpOp::FOGT | op::CmpOp::FUGT => {
                                    if branch_if_true {
                                        self.backend.emit_ja(branch_target);
                                    } else {
                                        self.backend.emit_jbe(branch_target);
                                    }
                                },
                                op::CmpOp::FOGE | op::CmpOp::FUGE => {
                                    if branch_if_true {
                                        self.backend.emit_jae(branch_target);
                                    } else {
                                        self.backend.emit_jb(branch_target);
                                    }
                                },
                                op::CmpOp::FOLT | op::CmpOp::FULT => {
                                    if branch_if_true {
                                        self.backend.emit_jb(branch_target);
                                    } else {
                                        self.backend.emit_jae(branch_target);
                                    }
                                },
                                op::CmpOp::FOLE | op::CmpOp::FULE => {
                                    if branch_if_true {
                                        self.backend.emit_jbe(branch_target);
                                    } else {
                                        self.backend.emit_ja(branch_target);
                                    }
                                },

                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_imm_r(1, &cond_reg);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);
                        } else {
                            unimplemented!();
                        }
                    },

                    Instruction_::Select{cond, true_val, false_val} => {
                        use ast::op::CmpOp::*;

                        trace!("instsel on SELECT");
                        let ops = inst.ops.read().unwrap();

                        let ref cond = ops[cond];
                        let ref true_val = ops[true_val];
                        let ref false_val = ops[false_val];

                        if self.match_ireg(true_val) {
                            // moving integers/pointers
                            let tmp_res   = self.get_result_value(node);

                            // generate compare
                            let cmpop = if self.match_cmp_res(cond) {
                                self.emit_cmp_res(cond, f_content, f_context, vm)
                            } else if self.match_ireg(cond) {
                                let tmp_cond = self.emit_ireg(cond, f_content, f_context, vm);
                                // emit: cmp cond_reg 1
                                self.backend.emit_cmp_imm_r(1, &tmp_cond);

                                EQ
                            } else {
                                panic!("expected ireg, found {}", cond)
                            };

                            // use cmov for 16/32/64-bit integers
                            // use jcc  for 8-bit
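                            // (cmovcc has no 8-bit register form on x86-64, so byte-sized results take the branch path)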
                            match tmp_res.ty.get_int_length() {
                                // cmov
                                Some(len) if len > 8 => {
                                    let tmp_true  = self.emit_ireg(true_val, f_content, f_context, vm);
                                    let tmp_false = self.emit_ireg(false_val, f_content, f_context, vm);

                                    // mov tmp_false -> tmp_res
                                    self.backend.emit_mov_r_r(&tmp_res, &tmp_false);
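                                    // the cmov below then overwrites the result with the true value
                                    // only when the compare condition holds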

                                    match cmpop {
                                        EQ  => self.backend.emit_cmove_r_r (&tmp_res, &tmp_true),
                                        NE  => self.backend.emit_cmovne_r_r(&tmp_res, &tmp_true),
                                        SGE => self.backend.emit_cmovge_r_r(&tmp_res, &tmp_true),
                                        SGT => self.backend.emit_cmovg_r_r (&tmp_res, &tmp_true),
                                        SLE => self.backend.emit_cmovle_r_r(&tmp_res, &tmp_true),
                                        SLT => self.backend.emit_cmovl_r_r (&tmp_res, &tmp_true),
                                        UGE => self.backend.emit_cmovae_r_r(&tmp_res, &tmp_true),
                                        UGT => self.backend.emit_cmova_r_r (&tmp_res, &tmp_true),
                                        ULE => self.backend.emit_cmovbe_r_r(&tmp_res, &tmp_true),
                                        ULT => self.backend.emit_cmovb_r_r (&tmp_res, &tmp_true),

                                        FOEQ | FUEQ => self.backend.emit_cmove_r_r (&tmp_res, &tmp_true),
                                        FONE | FUNE => self.backend.emit_cmovne_r_r(&tmp_res, &tmp_true),
                                        FOGT | FUGT => self.backend.emit_cmova_r_r (&tmp_res, &tmp_true),
                                        FOGE | FUGE => self.backend.emit_cmovae_r_r(&tmp_res, &tmp_true),
                                        FOLT | FULT => self.backend.emit_cmovb_r_r (&tmp_res, &tmp_true),
                                        FOLE | FULE => self.backend.emit_cmovbe_r_r(&tmp_res, &tmp_true),

                                        _ => unimplemented!()
                                    }
                                }
                                // jcc
                                _ => {
                                    let blk_true = format!("{}_select_true", node.id());
                                    let blk_end   = format!("{}_select_end", node.id());

                                    // jump to blk_true if true
                                    match cmpop {
                                        EQ  => self.backend.emit_je (blk_true.clone()),
                                        NE  => self.backend.emit_jne(blk_true.clone()),
                                        SGE => self.backend.emit_jge(blk_true.clone()),
                                        SGT => self.backend.emit_jg (blk_true.clone()),
                                        SLE => self.backend.emit_jle(blk_true.clone()),
                                        SLT => self.backend.emit_jl (blk_true.clone()),
                                        UGE => self.backend.emit_jae(blk_true.clone()),
                                        UGT => self.backend.emit_ja (blk_true.clone()),
                                        ULE => self.backend.emit_jbe(blk_true.clone()),
                                        ULT => self.backend.emit_jb (blk_true.clone()),

                                        FOEQ | FUEQ => self.backend.emit_je (blk_true.clone()),
                                        FONE | FUNE => self.backend.emit_jne(blk_true.clone()),
                                        FOGT | FUGT => self.backend.emit_ja (blk_true.clone()),
                                        FOGE | FUGE => self.backend.emit_jae(blk_true.clone()),
                                        FOLT | FULT => self.backend.emit_jb (blk_true.clone()),
                                        FOLE | FULE => self.backend.emit_jbe(blk_true.clone()),

                                        _ => unimplemented!()
                                    }

                                    // mov false result here
                                    self.emit_move_node_to_value(&tmp_res, &false_val, f_content, f_context, vm);

                                    // jmp to end
                                    self.backend.emit_jmp(blk_end.clone());

                                    // finishing current block
                                    let cur_block = self.current_block.as_ref().unwrap().clone();
                                    self.backend.end_block(cur_block.clone());

                                    // blk_true:
                                    self.current_block = Some(blk_true.clone());
                                    self.backend.start_block(blk_true.clone());
                                    // mov true value -> result
                                    self.emit_move_node_to_value(&tmp_res, &true_val, f_content, f_context, vm);

                                    self.backend.end_block(blk_true.clone());

                                    // blk_end:
                                    self.backend.start_block(blk_end.clone());
                                    self.current_block = Some(blk_end.clone());
                                }
                            }
                        } else {
                            // moving vectors, floating point values
                            unimplemented!()
                        }
                    },

                    Instruction_::CmpOp(op, op1, op2) => {
                        use ast::op::CmpOp::*;

                        trace!("instsel on CMPOP");
                        let ops = inst.ops.read().unwrap();
                        let ref op1 = ops[op1];
                        let ref op2 = ops[op2];

                        let tmp_res = self.get_result_value(node);

                        // cmov only takes 16/32/64-bit registers
                        // make res64, and set to zero
                        let tmp_res64 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                        self.backend.emit_xor_r_r(&tmp_res64, &tmp_res64);
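                        // the comparison result is materialised as 0/1: tmp_res64 starts at 0, the cmov
                        // below conditionally replaces it with 1, and the result is then narrowed into tmp_res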

                        // set tmp_1 to 1 (cmov does not allow an immediate or an 8-bit register as operand)
                        let tmp_1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
                        self.backend.emit_mov_r_imm(&tmp_1, 1);

                        // cmov 1 to result
                        match self.emit_cmp_res(node, f_content, f_context, vm) {
                            EQ  => self.backend.emit_cmove_r_r (&tmp_res64, &tmp_1),
                            NE  => self.backend.emit_cmovne_r_r(&tmp_res64, &tmp_1),
                            SGE => self.backend.emit_cmovge_r_r(&tmp_res64, &tmp_1),
                            SGT => self.backend.emit_cmovg_r_r (&tmp_res64, &tmp_1),
                            SLE => self.backend.emit_cmovle_r_r(&tmp_res64, &tmp_1),
                            SLT => self.backend.emit_cmovl_r_r (&tmp_res64, &tmp_1),
                            UGE => self.backend.emit_cmovae_r_r(&tmp_res64, &tmp_1),
                            UGT => self.backend.emit_cmova_r_r (&tmp_res64, &tmp_1),
                            ULE => self.backend.emit_cmovbe_r_r(&tmp_res64, &tmp_1),
                            ULT => self.backend.emit_cmovb_r_r (&tmp_res64, &tmp_1),

                            FOEQ | FUEQ => self.backend.emit_cmove_r_r (&tmp_res64, &tmp_1),
                            FONE | FUNE => self.backend.emit_cmovne_r_r(&tmp_res64, &tmp_1),
                            FOGT | FUGT => self.backend.emit_cmova_r_r (&tmp_res64, &tmp_1),
                            FOGE | FUGE => self.backend.emit_cmovae_r_r(&tmp_res64, &tmp_1),
                            FOLT | FULT => self.backend.emit_cmovb_r_r (&tmp_res64, &tmp_1),
                            FOLE | FULE => self.backend.emit_cmovbe_r_r(&tmp_res64, &tmp_1),

                            _ => unimplemented!()
                        }

                        // truncate tmp_res64 to tmp_res (probably u8)
                        self.backend.emit_mov_r_r(&tmp_res, &tmp_res64);
                    }

                    Instruction_::Branch1(ref dest) => {
                        trace!("instsel on BRANCH1");
                        let ops = inst.ops.read().unwrap();
                                            
                        self.process_dest(&ops, dest, f_content, f_context, vm);
                        
                        let target = f_content.get_block(dest.target).name().unwrap();
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },

                    Instruction_::Switch{cond, ref default, ref branches} => {
                        trace!("instsel on SWITCH");
                        let ops = inst.ops.read().unwrap();

                        let ref cond = ops[cond];

                        if self.match_ireg(cond) {
                            let tmp_cond = self.emit_ireg(cond, f_content, f_context, vm);
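                            // the switch is lowered as a linear chain of cmp/je tests, one per case,
                            // followed by an unconditional jump to the default block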

                            // emit each branch
                            for &(case_op_index, ref case_dest) in branches {
                                let ref case_op = ops[case_op_index];

                                // process dest
                                self.process_dest(&ops, case_dest, f_content, f_context, vm);

                                let target = f_content.get_block(case_dest.target).name().unwrap();

                                if self.match_iimm(case_op) {
                                    let imm = self.node_iimm_to_i32(case_op);

                                    // cmp case cond
                                    self.backend.emit_cmp_imm_r(imm, &tmp_cond);
                                    // je dest
                                    self.backend.emit_je(target);
                                } else if self.match_ireg(case_op) {
                                    let tmp_case_op = self.emit_ireg(case_op, f_content, f_context, vm);

                                    // cmp case cond
                                    self.backend.emit_cmp_r_r(&tmp_case_op, &tmp_cond);
                                    // je dest
                                    self.backend.emit_je(target);
                                } else {
                                    panic!("expecting case op to be either iimm or ireg: {}", case_op);
                                }
                            }

                            // emit default
                            self.process_dest(&ops, default, f_content, f_context, vm);
                            
                            let default_target = f_content.get_block(default.target).name().unwrap();
                            self.backend.emit_jmp(default_target);
                        } else {
                            panic!("expecting cond in switch to be ireg: {}", cond);
                        }
                    }
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        trace!("instsel on EXPRCALL");

                        if is_abort {
                            unimplemented!()
                        }
                        
                        self.emit_mu_call(
                            inst, // inst: &Instruction,
                            data, // calldata: &CallData,
                            None, // resumption: Option<&ResumptionData>,
                            node, // cur_node: &TreeNode, 
                            f_content, f_context, vm);
                    },
                    
                    Instruction_::Call{ref data, ref resume} => {
                        trace!("instsel on CALL");

                        self.emit_mu_call(
                            inst, 
                            data, 
                            Some(resume), 
                            node, 
                            f_content, f_context, vm);
                    },

                    Instruction_::ExprCCall{ref data, is_abort} => {
                        trace!("instsel on EXPRCCALL");

                        if is_abort {
                            unimplemented!()
                        }

                        self.emit_c_call_ir(inst, data, None, node, f_content, f_context, vm);
                    }

                    Instruction_::CCall{ref data, ref resume} => {
                        trace!("instsel on CCALL");

                        self.emit_c_call_ir(inst, data, Some(resume), node, f_content, f_context, vm);
                    }
                    
                    Instruction_::Return(_) => {
                        trace!("instsel on RETURN");

                        self.emit_common_epilogue(inst, f_content, f_context, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        trace!("instsel on BINOP");

                        let ops = inst.ops.read().unwrap();

                        let res_tmp = self.get_result_value(node);
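                        // add/sub/and/or/xor are lowered two-address style: op1 is first moved
                        // into the result temporary, then the operation is applied in place with op2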
                        
                        match op {
                            op::BinOp::Add => {
                                if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.node_iimm_to_i32(&ops[op2]);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r_imm(&res_tmp, reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_mem(&ops[op2], vm);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r_mem(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r_r(&res_tmp, &reg_op2);
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let imm_op2 = self.node_iimm_to_i32(&ops[op2]);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r_imm(&res_tmp, imm_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2], vm);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r_mem(&res_tmp, &mem_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r_r(&res_tmp, &reg_op2);
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::And => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_ireg(op1) && self.match_iimm(op2) {
                                    trace!("emit and-ireg-iimm");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let imm_op2 = self.node_iimm_to_i32(op2);

                                    // mov op1 -> res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // and op2, res -> res
                                    self.backend.emit_and_r_imm(&res_tmp, imm_op2);
                                } else if self.match_ireg(op1) && self.match_mem(op2) {
                                    trace!("emit and-ireg-mem");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(op2, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // and op2, res -> res
                                    self.backend.emit_and_r_mem(&res_tmp, &mem_op2);
                                } else if self.match_ireg(op1) && self.match_ireg(op2) {
                                    trace!("emit and-ireg-ireg");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // and op2, res -> res
                                    self.backend.emit_and_r_r(&res_tmp, &tmp_op2);
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Or => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_ireg(op1) && self.match_iimm(op2) {
                                    trace!("emit or-ireg-iimm");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let imm_op2 = self.node_iimm_to_i32(op2);

                                    // mov op1 -> res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // Or op2, res -> res
                                    self.backend.emit_or_r_imm(&res_tmp, imm_op2);
                                } else if self.match_ireg(op1) && self.match_mem(op2) {
                                    trace!("emit or-ireg-mem");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(op2, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // Or op2, res -> res
                                    self.backend.emit_or_r_mem(&res_tmp, &mem_op2);
                                } else if self.match_ireg(op1) && self.match_ireg(op2) {
                                    trace!("emit or-ireg-ireg");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // Or op2, res -> res
                                    self.backend.emit_or_r_r(&res_tmp, &tmp_op2);
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Xor => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_ireg(op1) && self.match_iimm(op2) {
                                    trace!("emit xor-ireg-iimm");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let imm_op2 = self.node_iimm_to_i32(op2);

                                    // mov op1 -> res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // xor op2, res -> res
                                    self.backend.emit_xor_r_imm(&res_tmp, imm_op2);
                                } else if self.match_ireg(op1) && self.match_mem(op2) {
                                    trace!("emit xor-ireg-mem");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(op2, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // xor op2, res -> res
                                    self.backend.emit_xor_r_mem(&res_tmp, &mem_op2);
                                } else if self.match_ireg(op1) && self.match_ireg(op2) {
                                    trace!("emit xor-ireg-ireg");

                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                    // mov op1, res
                                    self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    // xor op2, res -> res
                                    self.backend.emit_xor_r_r(&res_tmp, &tmp_op2);
                                } else {
                                    unimplemented!()
                                }
                            }
                            op::BinOp::Mul => {
                                // mov op1 -> rax
                                let op1 = &ops[op1];

                                let mreg_op1 = match op1.clone_value().ty.get_int_length() {
                                    Some(64) => x86_64::RAX.clone(),
                                    Some(32) => x86_64::EAX.clone(),
                                    Some(16) => x86_64::AX.clone(),
                                    Some(8)  => x86_64::AL.clone(),
                                    _ => unimplemented!()
                                };
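
                                // one-operand MUL always multiplies the accumulator (AL/AX/EAX/RAX)
                                // by its operand, so op1 is staged into the accumulator first and the
                                // low part of the product is read back from it afterwards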

                                if self.match_iimm(op1) {
                                    let imm_op1 = self.node_iimm_to_i32(op1);
                                    
                                    self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1, vm);
                                    
                                    self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
                                } else if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);

                                    self.backend.emit_mov_r_r(&mreg_op1, &reg_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2
                                let op2 = &ops[op2];
                                if self.match_iimm(op2) {
                                    let imm_op2 = self.node_iimm_to_i32(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    self.backend.emit_mov_r_imm(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2, vm);
                                    
                                    self.backend.emit_mul_mem(&mem_op2);
                                } else if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                    self.backend.emit_mul_r(&reg_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                match res_tmp.ty.get_int_length() {
                                    Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
                                    Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
                                    Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
                                    Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
                                    _ => unimplemented!()
                                }

                            },
                            op::BinOp::Udiv => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                self.emit_udiv(op1, op2, f_content, f_context, vm);

                                // mov rax -> result
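                                // (unsigned DIV leaves the quotient in RAX/EAX/AX/AL and the
                                //  remainder in RDX/EDX/DX/AH, depending on the operand width)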
                                match res_tmp.ty.get_int_length() {
                                    Some(64) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
                                    }
                                    Some(32) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
                                    }
                                    Some(16) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
                                    }
                                    Some(8)  => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
                                    }
                                    _ => unimplemented!()
                                }
                            },
                            op::BinOp::Sdiv => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                self.emit_idiv(op1, op2, f_content, f_context, vm);

                                // mov rax -> result
                                match res_tmp.ty.get_int_length() {
                                    Some(64) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
                                    }
                                    Some(32) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
                                    }
                                    Some(16) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
                                    }
                                    Some(8)  => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
                                    }
                                    _ => unimplemented!()
                                }
                            },
                            op::BinOp::Urem => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                self.emit_udiv(op1, op2, f_content, f_context, vm);

                                // mov rdx -> result
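                                // (the remainder is in RDX/EDX/DX for 64/32/16-bit operands,
                                //  and in AH for the 8-bit form, hence the special case below)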
                                match res_tmp.ty.get_int_length() {
                                    Some(64) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
                                    }
                                    Some(32) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
                                    }
                                    Some(16) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
                                    }
                                    Some(8)  => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
                                    }
                                    _ => unimplemented!()
                                }
                            },
                            op::BinOp::Srem => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                self.emit_idiv(op1, op2, f_content, f_context, vm);

                                // mov rdx -> result
                                match res_tmp.ty.get_int_length() {
                                    Some(64) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
                                    }
                                    Some(32) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
                                    }
                                    Some(16) => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
                                    }
                                    Some(8)  => {
                                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
                                    }
                                    _ => unimplemented!()
                                }
                            },

                            op::BinOp::Shl => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_mem(op1) {
                                    unimplemented!()
                                } else if self.match_ireg(op1) {
                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);

                                    if self.match_iimm(op2) {
                                        let imm_op2 = self.node_iimm_to_i32(op2) as i8;

                                        // shl op1, op2 -> op1
                                        self.backend.emit_shl_r_imm8(&tmp_op1, imm_op2);

                                        // mov op1 -> result
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    } else if self.match_ireg(op2) {
                                        let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                        // mov op2 -> cl
                                        self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
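                                        // (x86 shift instructions only take a variable count in CL)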

                                        // shl op1, cl -> op1
                                        self.backend.emit_shl_r_cl(&tmp_op1);

                                        // mov op1 -> result
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    } else {
                                        panic!("unexpected op2 (not ireg not iimm): {}", op2);
                                    }
                                } else {
                                    panic!("unexpected op1 (not ireg not mem): {}", op1);
                                }
                            },
                            op::BinOp::Lshr => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_mem(op1) {
                                    unimplemented!()
                                } else if self.match_ireg(op1) {
                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);

893
894
895
896
                                    if self.match_iimm(op2) {
                                        let imm_op2 = self.node_iimm_to_i32(op2) as i8;

                                        // shr op1, op2 -> op1
qinsoon's avatar
qinsoon committed
897
                                        self.backend.emit_shr_r_imm8(&tmp_op1, imm_op2);
898
899

                                        // mov op1 -> result
qinsoon's avatar
qinsoon committed
900
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
901
                                    } else if self.match_ireg(op2) {
qinsoon's avatar
qinsoon committed
902
903
                                        let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

qinsoon's avatar
qinsoon committed
904
905
                                        // mov op2 -> cl
                                        self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);
qinsoon's avatar
qinsoon committed
906
907

                                        // shr op1, cl -> op1
qinsoon's avatar
qinsoon committed
908
                                        self.backend.emit_shr_r_cl(&tmp_op1);
qinsoon's avatar
qinsoon committed
909
910

                                        // mov op1 -> result
qinsoon's avatar
qinsoon committed
911
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
qinsoon's avatar
qinsoon committed
912
913
914
                                    } else {
                                        panic!("unexpected op2 (not ireg not iimm): {}", op2);
                                    }
915
916
                                } else {
                                    panic!("unexpected op1 (not ireg not mem): {}", op1);
qinsoon's avatar
qinsoon committed
917
918
919
920
921
922
                                }
                            },
                            op::BinOp::Ashr => {
                                let op1 = &ops[op1];
                                let op2 = &ops[op2];

                                if self.match_mem(op1) {
                                    unimplemented!()
                                } else if self.match_ireg(op1) {
                                    let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);

                                    if self.match_iimm(op2) {
                                        let imm_op2 = self.node_iimm_to_i32(op2) as i8;

                                        // sar op1, op2 -> op1
                                        self.backend.emit_sar_r_imm8(&tmp_op1, imm_op2);

                                        // mov op1 -> result
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    } else if self.match_ireg(op2) {
                                        let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);

                                        // mov op2 -> cl
                                        self.backend.emit_mov_r_r(&x86_64::CL, &tmp_op2);

                                        // sar op1, cl -> op1
                                        self.backend.emit_sar_r_cl(&tmp_op1);

                                        // mov op1 -> result
                                        self.backend.emit_mov_r_r(&res_tmp, &tmp_op1);
                                    } else {
                                        panic!("unexpected op2 (not ireg not iimm): {}", op2);
                                    }
                                } else {
                                    panic!("unexpected op1 (not ireg not mem): {}", op1);
                                }
                            },

                            // floating point
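                            // SSE arithmetic is two-operand (dst = dst op src), so each arm below
                            // first copies op1 into the result register with movsd and then folds
                            // op2 into it, e.g. for FAdd: movsd res, op1; addsd res, op2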
                            op::BinOp::FAdd => {
                                if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-fpreg-mem");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2], vm);

                                    // mov op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_addsd_f64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
                                    trace!("emit add-fpreg-fpreg");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);

                                    // movsd op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_addsd_f64_f64(&res_tmp, &reg_op2);
                                } else {
                                    panic!("unexpected fadd: {}", node)
                                }
                            }

                            op::BinOp::FSub => {
                                if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-fpreg-mem");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2], vm);

                                    // mov op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_subsd_f64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
                                    trace!("emit sub-fpreg-fpreg");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);

                                    // movsd op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_subsd_f64_f64(&res_tmp, &reg_op2);
                                } else {
                                    panic!("unexpected fsub: {}", node)
                                }
                            }

                            op::BinOp::FMul => {
                                if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit mul-fpreg-mem");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2], vm);

                                    // mov op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // mul op2 res
                                    self.backend.emit_mulsd_f64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
                                    trace!("emit mul-fpreg-fpreg");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);

                                    // movsd op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // mul op2 res
                                    self.backend.emit_mulsd_f64_f64(&res_tmp, &reg_op2);
                                } else {
                                    panic!("unexpected fmul: {}", node)
                                }
                            }

                            op::BinOp::FDiv => {
                                if self.match_fpreg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit div-fpreg-mem");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2], vm);

                                    // mov op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // div op2 res
                                    self.backend.emit_divsd_f64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
                                    trace!("emit div-fpreg-fpreg");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);

                                    // movsd op1, res
                                    self.backend.emit_movsd_f64_f64(&res_tmp, &reg_op1);
                                    // div op2 res
                                    self.backend.emit_divsd_f64_f64(&res_tmp, &reg_op2);
                                } else {
                                    panic!("unexpected fdiv: {}", node)
                                }
                            }

                            op::BinOp::FRem => {
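                                // there is no SSE instruction for floating-point remainder, so this
                                // falls back to a runtime call (entrypoints::FREM), presumably a thin
                                // wrapper around fmod()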
                                if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
                                    trace!("emit frem-fpreg-fpreg");

                                    let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_fpreg(&ops[op2], f_content, f_context, vm);

                                    let reg_tmp = self.get_result_value(node);

                                    self.emit_runtime_entry(&entrypoints::FREM, vec![reg_op1.clone(), reg_op2.clone()], Some(vec![reg_tmp.clone()]), Some(node), f_content, f_context, vm);
                                } else {
                                    panic!("unexpected fdiv: {}", node)
                                }
                            }
                        }
                    }

                    Instruction_::ConvOp{operation, ref from_ty, ref to_ty, operand} => {
                        trace!("instsel on CONVOP");

                        let ops = inst.ops.read().unwrap();

                        let ref op = ops[operand];

                        match operation {
                            op::ConvOp::TRUNC => {
                                if self.match_ireg(op) {
                                    let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
                                    let tmp_res = self.get_result_value(node);

                                    // mov op -> result
                                    self.backend.emit_mov_r_r(&tmp_res, &tmp_op);
                                } else {
                                    panic!("unexpected op (expect ireg): {}", op);
                                }
                            }
                            op::ConvOp::ZEXT => {
                                if self.match_ireg(op) {
                                    let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
                                    let tmp_res = self.get_result_value(node);

                                    // movz op -> result
                                    let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
                                    let to_ty_size   = vm.get_backend_type_info(to_ty.id()).size;

                                    if from_ty_size != to_ty_size {
                                        if from_ty_size == 4 && to_ty_size == 8 {
                                            // zero extend from 32 bits to 64 bits is a mov instruction
                                            // x86 does not have movzlq (32 to 64)

                                            // tmp_op is int32, but tmp_res is int64
                                            // we want to force a 32-to-32 mov, so high bits of the destination will be zeroed
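                                            // (writing a 32-bit register on x86-64 implicitly zeroes
                                            // bits 32..63, e.g. `mov eax, ecx` clears the upper half
                                            // of rax, so no explicit movzx is needed for 32->64)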

                                            let tmp_res32 = unsafe {tmp_res.as_type(UINT32_TYPE.clone())};

                                            self.backend.emit_mov_r_r(&tmp_res32, &tmp_op);
                                        } else {
                                            self.backend.emit_movz_r_r(&tmp_res, &tmp_op);
                                        }
                                    } else {
                                        self.backend.emit_mov_r_r(&tmp_res, &tmp_op);
                                    }
                                } else {
                                    panic!("unexpected op (expect ireg): {}", op);
                                }
                            },
                            op::ConvOp::SEXT => {
                                if self.match_ireg(op) {
                                    let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
                                    let tmp_res = self.get_result_value(node);

                                    // movs op -> result
                                    let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
                                    let to_ty_size   = vm.get_backend_type_info(to_ty.id()).size;

                                    if from_ty_size != to_ty_size {
                                        self.backend.emit_movs_r_r(&tmp_res, &tmp_op);
                                    } else {