use ast::ir::*;
use ast::ptr::*;
use ast::inst::Instruction;
use ast::inst::Destination;
use ast::inst::DestArg;
use ast::inst::Instruction_;
use ast::inst::MemoryOrder;
use ast::op;
use ast::types;
use ast::types::*;
use vm::VM;
use vm::CompiledFunction;
use runtime;
use runtime::mm;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint;

use compiler::CompilerPass;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;

use std::collections::HashMap;

pub struct InstructionSelection {
    name: &'static str,
    
    backend: Box<CodeGenerator>,
    
    current_block: Option<MuName>
}

impl <'a> InstructionSelection {
    pub fn new() -> InstructionSelection {
        InstructionSelection{
            name: "Instruction Selection (x64)",
            backend: Box::new(ASMCodeGen::new()),
            current_block: None
        }
    }
    
    // in this pass, we assume that
    // 1. all temporaries will use 64bit registers
    // 2. we do not need to backup/restore caller-saved registers
    // 3. we need to backup/restore all the callee-saved registers
    // if any of these assumptions break, we will need to re-emit the code
    #[allow(unused_variables)]
    fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        trace!("instsel on node {}", node);
        
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
                        // move this to trace generation
                        // assert here
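                        // layout heuristic: the more probable successor becomes the
                        // fallthrough block, so the emitted conditional branch only targets
                        // the less likely destination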
                        let (fallthrough_dest, branch_dest, branch_if_true) = {
                            if true_prob > 0.5f32 {
                                (true_dest, false_dest, false)
                            } else {
                                (false_dest, true_dest, true)
                            }
                        };
                        
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
                        self.process_dest(&ops, branch_dest, f_content, f_context, vm);
                        
                        let branch_target = f_content.get_block(branch_dest.target).name().unwrap();
    
                        let ref cond = ops[cond];
                        
                        if self.match_cmp_res(cond) {
                            trace!("emit cmp_eq-branch2");
                            match self.emit_cmp_res(cond, f_content, f_context, vm) {
                                op::CmpOp::EQ => self.backend.emit_je(branch_target),
                                op::CmpOp::NE => self.backend.emit_jne(branch_target),
                                op::CmpOp::UGE => self.backend.emit_jae(branch_target),
                                op::CmpOp::UGT => self.backend.emit_ja(branch_target),
                                op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
                                op::CmpOp::ULT => self.backend.emit_jb(branch_target),
                                op::CmpOp::SGE => self.backend.emit_jge(branch_target),
                                op::CmpOp::SGT => self.backend.emit_jg(branch_target),
                                op::CmpOp::SLE => self.backend.emit_jle(branch_target),
                                op::CmpOp::SLT => self.backend.emit_jl(branch_target),
                                _ => unimplemented!()
                            }
                        } else if self.match_ireg(cond) {
                            trace!("emit ireg-branch2");
                            
                            let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
                            
                            // emit: cmp cond_reg 1
                            self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
                            // emit: je #branch_dest
                            self.backend.emit_je(branch_target);
                        } else {
                            unimplemented!();
                        }
                    },
                    
                    Instruction_::Branch1(ref dest) => {
                        let ops = inst.ops.read().unwrap();
                        
                        self.process_dest(&ops, dest, f_content, f_context, vm);
                        
                        let target = f_content.get_block(dest.target).name().unwrap();
                        
                        trace!("emit branch1");
                        // jmp
                        self.backend.emit_jmp(target);
                    },
                    
                    Instruction_::ExprCall{ref data, is_abort} => {
                        trace!("deal with pre-call convention");
                        
                        let ops = inst.ops.read().unwrap();
                        let rets = inst.value.as_ref().unwrap();
                        let ref func = ops[data.func];
                        let ref func_sig = match func.v {
                            TreeNode_::Value(ref pv) => {
                                let ty : &MuType = &pv.ty;
                                match ty.v {
                                    MuType_::FuncRef(ref sig)
                                    | MuType_::UFuncPtr(ref sig) => sig,
                                    _ => panic!("expected funcref/ptr type")
                                }
                            },
                            _ => panic!("expected funcref/ptr type")
                        };
                        
                        debug_assert!(func_sig.arg_tys.len() == data.args.len());
                        debug_assert!(func_sig.ret_tys.len() == rets.len());
                        
                        let mut gpr_arg_count = 0;
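                        // note: ARGUMENT_GPRs is assumed to list the integer argument
                        // registers of the System V AMD64 convention (rdi, rsi, rdx, rcx,
                        // r8, r9); arguments beyond those would be passed on the stack,
                        // which is not implemented yet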
                        // TODO: let mut fpr_arg_count = 0;
                        for arg_index in data.args.iter() {
                            let ref arg = ops[*arg_index];
                            trace!("arg {}", arg);
                            
                            if self.match_ireg(arg) {
                                let arg = self.emit_ireg(arg, f_content, f_context, vm);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else if self.match_iimm(arg) {
                                let arg = self.emit_get_iimm(arg);
                                
                                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                                    self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], arg);
                                    gpr_arg_count += 1;
                                } else {
                                    // use stack to pass argument
                                    unimplemented!();
                                }
                            } else {
                                unimplemented!();
                            }
                        }
                        
                        // check direct call or indirect
                        if self.match_funcref_const(func) {
                            let target_id = self.emit_get_funcref_const(func);
                            let funcs = vm.funcs().read().unwrap();
                            let target = funcs.get(&target_id).unwrap().read().unwrap();
                                                        
                            if vm.is_running() {
                                unimplemented!()
                            } else {
                                self.backend.emit_call_near_rel32(target.name().unwrap());
                            }
                        } else if self.match_ireg(func) {
                            let target = self.emit_ireg(func, f_content, f_context, vm);
                            
                            self.backend.emit_call_near_r64(&target);
                        } else if self.match_mem(func) {
                            let target = self.emit_mem(func);
                            
                            self.backend.emit_call_near_mem64(&target);
                        } else {
                            unimplemented!();
                        }
                        
                        // deal with ret vals
                        let mut gpr_ret_count = 0;
                        // TODO: let mut fpr_ret_count = 0;
                        for val in rets {
                            if val.is_int_reg() {
                                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                                    self.backend.emit_mov_r64_r64(&val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                                    gpr_ret_count += 1;
                                } else {
                                    // get return value by stack
                                    unimplemented!();
                                }
                            } else {
                                // floating point register
                                unimplemented!();
                            }
                        }
                    },
                    
                    Instruction_::Return(_) => {
                        self.emit_common_epilogue(inst, f_content, f_context, vm);
                        
                        self.backend.emit_ret();
                    },
                    
                    Instruction_::BinOp(op, op1, op2) => {
                        let ops = inst.ops.read().unwrap();
                        
                        match op {
                            op::BinOp::Add => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit add-ireg-imm");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2, res
                                    self.backend.emit_add_r64_imm32(&res_tmp, reg_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit add-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // add op2 res
                                    self.backend.emit_add_r64_mem64(&res_tmp, &reg_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Sub => {
                                if self.match_ireg(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-ireg-ireg");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let reg_op2 = self.emit_ireg(&ops[op2], f_content, f_context, vm);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r64_r64(&res_tmp, &reg_op2);
                                } else if self.match_ireg(&ops[op1]) && self.match_iimm(&ops[op2]) {
                                    trace!("emit sub-ireg-imm");

                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let imm_op2 = self.emit_get_iimm(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2, res
                                    self.backend.emit_sub_r64_imm32(&res_tmp, imm_op2);
                                } else if self.match_iimm(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit sub-imm-ireg");
                                    unimplemented!();
                                } else if self.match_ireg(&ops[op1]) && self.match_mem(&ops[op2]) {
                                    trace!("emit sub-ireg-mem");
                                    
                                    let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
                                    let mem_op2 = self.emit_mem(&ops[op2]);
                                    let res_tmp = self.emit_get_result(node);
                                    
                                    // mov op1, res
                                    self.backend.emit_mov_r64_r64(&res_tmp, &reg_op1);
                                    // sub op2 res
                                    self.backend.emit_sub_r64_mem64(&res_tmp, &mem_op2);
                                } else if self.match_mem(&ops[op1]) && self.match_ireg(&ops[op2]) {
                                    trace!("emit add-mem-ireg");
                                    unimplemented!();
                                } else {
                                    unimplemented!()
                                }
                            },
                            op::BinOp::Mul => {
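                                // x86 MUL implicitly multiplies by rax and writes the
                                // 128-bit product to rdx:rax; only the low half (rax) is
                                // kept as the result here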
                                // mov op1 -> rax
                                let rax = x86_64::RAX.clone();
                                let op1 = &ops[op1];
                                if self.match_ireg(op1) {
                                    let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                    
                                    self.backend.emit_mov_r64_r64(&rax, &reg_op1);
                                } else if self.match_iimm(op1) {
                                    let imm_op1 = self.emit_get_iimm(op1);
                                    
                                    self.backend.emit_mov_r64_imm32(&rax, imm_op1);
                                } else if self.match_mem(op1) {
                                    let mem_op1 = self.emit_mem(op1);
                                    
                                    self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mul op2 -> rax
                                let op2 = &ops[op2];
                                if self.match_ireg(op2) {
                                    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                    
                                    self.backend.emit_mul_r64(&reg_op2);
                                } else if self.match_iimm(op2) {
                                    let imm_op2 = self.emit_get_iimm(op2);
                                    
                                    // put imm in a temporary
                                    // here we use result reg as temporary
                                    let res_tmp = self.emit_get_result(node);
                                    self.backend.emit_mov_r64_imm32(&res_tmp, imm_op2);
                                    
                                    self.backend.emit_mul_r64(&res_tmp);
                                } else if self.match_mem(op2) {
                                    let mem_op2 = self.emit_mem(op2);
                                    
                                    self.backend.emit_mul_mem64(&mem_op2);
                                } else {
                                    unimplemented!();
                                }
                                
                                // mov rax -> result
                                let res_tmp = self.emit_get_result(node);
                                self.backend.emit_mov_r64_r64(&res_tmp, &rax);
                            },
                            
                            _ => unimplemented!()
                        }
                    }
                    
                    // load on x64 generates mov inst (no matter what order is specified)
                    // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
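                    // per that mapping, on x86-64 (a TSO machine) even a SeqCst load can be
                    // a plain mov; any required fencing is paid on the store side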
                    Instruction_::Load{is_ptr, order, mem_loc} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        
                        // check order
                        match order {
                            MemoryOrder::Relaxed 
                            | MemoryOrder::Consume 
                            | MemoryOrder::Acquire
                            | MemoryOrder::SeqCst => {},
                            _ => panic!("didnt expect order {:?} with store inst", order)
                        }                        

                        let resolved_loc = self.emit_get_mem(loc_op, vm);                        
                        let res_temp = self.emit_get_result(node);
                        
                        if self.match_ireg(node) {
                            // emit mov(GPR)
                            self.backend.emit_mov_r64_mem64(&res_temp, &resolved_loc);
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::Store{is_ptr, order, mem_loc, value} => {
                        let ops = inst.ops.read().unwrap();
                        let ref loc_op = ops[mem_loc];
                        let ref val_op = ops[value];
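                        // per the C/C++11-to-x86 mapping cited above, relaxed and release
                        // stores can be a plain mov on x86-64; only a SeqCst store needs a
                        // stronger sequence (e.g. xchg, or mov + mfence), hence the
                        // unimplemented!() fallback below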
                        
                        let generate_plain_mov : bool = {
                            match order {
                                MemoryOrder::Relaxed | MemoryOrder::Release => true,
                                MemoryOrder::SeqCst => false,
                                _ => panic!("didnt expect order {:?} with store inst", order)
                            }
                        };
                        
                        let resolved_loc = self.emit_get_mem(loc_op, vm);
                        
                        if self.match_ireg(val_op) {
                            let val = self.emit_ireg(val_op, f_content, f_context, vm);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_r64(&resolved_loc, &val);
                            } else {
                                unimplemented!()
                            }
                        } else if self.match_iimm(val_op) {
                            let val = self.emit_get_iimm(val_op);
                            if generate_plain_mov {
                                self.backend.emit_mov_mem64_imm32(&resolved_loc, val);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            // emit mov(FPR)
                            unimplemented!()
                        }
                    }
                    
                    Instruction_::GetIRef(op_index) => {
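                        // an iref is the object reference advanced past the object header;
                        // with a zero-sized header the two are identical and a plain move
                        // suffices, otherwise the header size is added below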
                        let ops = inst.ops.read().unwrap();
                        
                        let ref op = ops[op_index];
                        let res_tmp = self.emit_get_result(node);
                        
                        let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
                        if hdr_size == 0 {
                            self.emit_general_move(&op, &res_tmp, f_content, f_context, vm);
                        } else {
                            self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
                        }
                    }
                    
                    Instruction_::ThreadExit => {
                        // emit a call to swap_back_to_native_stack(sp_loc: Address)
                        
                        // get thread local and add offset to get sp_loc
                        let tl = self.emit_get_threadlocal(f_content, f_context, vm);
                        self.backend.emit_add_r64_imm32(&tl, *thread::NATIVE_SP_LOC_OFFSET as i32);
                        
                        self.emit_runtime_entry(&entrypoints::SWAP_BACK_TO_NATIVE_STACK, vec![tl.clone()], None, f_content, f_context, vm);
                    }
                    
                    Instruction_::New(ref ty) => {
                        let ty_info = vm.get_backend_type_info(ty.id());
                        let ty_size = ty_info.size;
                        let ty_align= ty_info.alignment;
                        
                        if ty_size > mm::LARGE_OBJECT_THRESHOLD {
                            // emit large object allocation
                            unimplemented!()
                        } else {
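                            // bump-pointer fast path sketch: align the cursor up, compute the
                            // new end, compare it against the limit, and fall back to a runtime
                            // call (alloc_slow) when the current block is exhausted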
                            // emit immix allocation fast path
                            
                            // ASM: %tl = get_thread_local()
                            let tmp_tl = self.emit_get_threadlocal(f_content, f_context, vm);
                            
                            // ASM: mov [%tl + allocator_offset + cursor_offset] -> %cursor
                            let cursor_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_CURSOR_OFFSET;
                            let tmp_cursor = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
                            self.emit_load_base_offset(&tmp_cursor, &tmp_tl, cursor_offset as i32, vm);
                            
                            // alignup cursor (cursor + align - 1 & !(align - 1))
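                            // e.g. cursor = 0x1003, align = 8: (0x1003 + 7) & !7 = 0x1008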
                            // ASM: lea align-1(%cursor) -> %start
                            let align = ty_info.alignment as i32;
                            let tmp_start = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_start, &tmp_cursor, align - 1, vm);
                            // ASM: and %start, !(align-1) -> %start
                            self.backend.emit_and_r64_imm32(&tmp_start, !(align - 1));
                            
                            // bump cursor
                            // ASM: lea size(%start) -> %end
                            let tmp_end = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_end, &tmp_start, ty_size as i32, vm);
                            
                            // check with limit
                            // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
                            let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
                            let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, runtime::ADDRESS_TYPE.clone(), vm);
                            self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
                            
                            // branch to slow path if end > limit
                            // ASM: jl alloc_slow
                            let slowpath = format!("{}_allocslow", node.id());
                            self.backend.emit_jl(slowpath.clone());
                            
                            // update cursor
                            // ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
                            self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
                            
                            // put start as result
                            // ASM: mov %start -> %result
                            let tmp_res = self.emit_get_result(node);
                            self.backend.emit_mov_r64_r64(&tmp_res, &tmp_start);
                            
                            // ASM jmp alloc_end
                            let allocend = format!("{}_allocend", node.id());
                            self.backend.emit_jmp(allocend.clone());
                            
                            // finishing current block
                            self.backend.end_block(self.current_block.as_ref().unwrap().clone());
                            
                            // alloc_slow: 
                            // call alloc_slow(size, align) -> %ret
                            // new block (no livein)
                            self.current_block = Some(slowpath.clone());
                            self.backend.start_block(slowpath.clone());
                            self.backend.set_block_livein(slowpath.clone(), &vec![]); 

                            // arg1: allocator address                            
                            let allocator_offset = *thread::ALLOCATOR_OFFSET;
                            let tmp_allocator = self.make_temporary(f_context, runtime::ADDRESS_TYPE.clone(), vm);
                            self.emit_lea_base_offset(&tmp_allocator, &tmp_tl, allocator_offset as i32, vm);
                            // arg2: size                            
                            let const_size = self.make_value_int_const(ty_size as u64, vm);
                            // arg3: align
                            let const_align= self.make_value_int_const(ty_align as u64, vm);
                            
                            let rets = self.emit_runtime_entry(
                                &entrypoints::ALLOC_SLOW,
                                vec![tmp_allocator, const_size, const_align],
                                Some(vec![
                                    tmp_res.clone()
                                ]),
                                f_content, f_context, vm
                            );
                            
                            // end block (no liveout other than result)
                            self.backend.end_block(slowpath.clone());
                            self.backend.set_block_liveout(slowpath.clone(), &vec![tmp_res.clone()]);
                            
                            // block: alloc_end
                            self.backend.start_block(allocend.clone());
                            self.current_block = Some(allocend.clone());
                        }
                    }
    
                    _ => unimplemented!()
                } // main switch
            },
            
            TreeNode_::Value(ref p) => {
        
            }
        }
    }
    
    fn make_temporary(&mut self, f_context: &mut FunctionContext, ty: P<MuType>, vm: &VM) -> P<Value> {
        f_context.make_temporary(vm.next_id(), ty).clone_value()
    }
    
    fn make_memory_op_base_offset (&mut self, base: &P<Value>, offset: i32, ty: P<MuType>, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: ty.clone(),
            v: Value_::Memory(MemoryLocation::Address{
                base: base.clone(),
                offset: Some(self.make_value_int_const(offset as u64, vm)),
                index: None,
                scale: None
            })
        })
    }
    
    fn make_value_int_const (&mut self, val: u64, vm: &VM) -> P<Value> {
        P(Value{
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            ty: runtime::UINT64_TYPE.clone(),
            v: Value_::Constant(Constant::Int(val))
        })
    } 
    
    fn emit_load_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, dest.ty.clone(), vm);
        
        self.backend.emit_mov_r64_mem64(dest, &mem);
    }
    
    fn emit_store_base_offset (&mut self, base: &P<Value>, offset: i32, src: &P<Value>, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, src.ty.clone(), vm);
        
        self.backend.emit_mov_mem64_r64(&mem, src);
    }
    
    fn emit_lea_base_offset (&mut self, dest: &P<Value>, base: &P<Value>, offset: i32, vm: &VM) {
        let mem = self.make_memory_op_base_offset(base, offset, runtime::ADDRESS_TYPE.clone(), vm);
        
        self.backend.emit_lea_r64(dest, &mem);
    }
    
    fn emit_get_threadlocal (&mut self, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        let mut rets = self.emit_runtime_entry(&entrypoints::GET_THREAD_LOCAL, vec![], None, f_content, f_context, vm);
        
        rets.pop().unwrap()
    }
    
    // rets: Option<Vec<P<Value>>>
    // if rets is Some, return values will be stored in the given temporaries,
    // otherwise new temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_runtime_entry (&mut self, entry: &RuntimeEntrypoint, args: Vec<P<Value>>, rets: Option<Vec<P<Value>>>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> Vec<P<Value>> {
        let sig = entry.sig.clone();
        
        let entry_name = {
            if vm.is_running() {
                unimplemented!()
            } else {
                let ref entry_loc = entry.aot;
                
                match entry_loc {
                    &ValueLocation::Relocatable(_, ref name) => name.clone(),
                    _ => panic!("expecting a relocatable value")
                }
            }
        };
        
        self.emit_c_call(entry_name, sig, args, rets, f_content, f_context, vm)
    }
    
    #[allow(unused_variables)]
    // rets: Option<Vec<P<Value>>>
    // if rets is Some, return values will be stored in the given temporaries,
    // otherwise new temporaries are created
    // always returns the result temporaries (given or created)
    fn emit_c_call (&mut self, func_name: CName, sig: P<CFuncSig>, args: Vec<P<Value>>, rets: Option<Vec<P<Value>>>, 
        f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> Vec<P<Value>> 
    {
        let mut gpr_arg_count = 0;
        for arg in args.iter() {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    unimplemented!()
                }
            } else if arg.is_int_const() {
                if x86_64::is_valid_x86_imm(arg) {                
                    let int_const = arg.extract_int_const() as i32;
                    
                    if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                        self.backend.emit_mov_r64_imm32(&x86_64::ARGUMENT_GPRs[gpr_arg_count], int_const);
                        gpr_arg_count += 1;
                    } else {
                        // use stack to pass argument
                        unimplemented!()
                    }
                } else {
                    // put the constant to memory
                    unimplemented!()
                }
            } else if arg.is_mem() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_mem64(&x86_64::ARGUMENT_GPRs[gpr_arg_count], &arg);
                    gpr_arg_count += 1;
                } else {
                    // use stack to pass argument
                    unimplemented!()
                }
            } else {
                // floating point
                unimplemented!()
            }
        }
        
        // make call
        if vm.is_running() {
            unimplemented!()
        } else {
            self.backend.emit_call_near_rel32(func_name);
        }
        
        // deal with ret vals
        let mut return_vals = vec![];
        
        let mut gpr_ret_count = 0;
        for ret_index in 0..sig.ret_tys.len() {
            let ref ty = sig.ret_tys[ret_index];
            
            let ret_val = match rets {
                Some(ref rets) => rets[ret_index].clone(),
                None => {
                    let tmp_node = f_context.make_temporary(vm.next_id(), ty.clone());
                    tmp_node.clone_value()
                }
            };
            
            if ret_val.is_int_reg() {
                if gpr_ret_count < x86_64::RETURN_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&ret_val, &x86_64::RETURN_GPRs[gpr_ret_count]);
                    gpr_ret_count += 1;
                } else {
                    // get return value by stack
                    unimplemented!()
                }
            } else {
                // floating point register
                unimplemented!()
            }
            
            return_vals.push(ret_val);            
        }
        
        return_vals
    }
    
    #[allow(unused_variables)]
    fn process_dest(&mut self, ops: &Vec<P<TreeNode>>, dest: &Destination, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        for i in 0..dest.args.len() {
            let ref dest_arg = dest.args[i];
            match dest_arg {
                &DestArg::Normal(op_index) => {
                    let ref arg = ops[op_index];
//                    match arg.op {
//                        OpCode::RegI64 
//                        | OpCode::RegFP
//                        | OpCode::IntImmI64
//                        | OpCode::FPImm => {
//                            // do nothing
//                        },
//                        _ => {
//                            trace!("nested: compute arg for branch");
//                            // nested: compute arg
//                            self.instruction_select(arg, cur_func);
//                            
//                            self.emit_get_result(arg);
//                        }
//                    }
//                    
                    let ref target_args = f_content.get_block(dest.target).content.as_ref().unwrap().args;
                    let ref target_arg = target_args[i];
                    
                    self.emit_general_move(&arg, target_arg, f_content, f_context, vm);
                },
                &DestArg::Freshbound(_) => unimplemented!()
            }
        }
    }
    
    fn emit_common_prologue(&mut self, args: &Vec<P<Value>>) {
        let block_name = "prologue".to_string();
        self.backend.start_block(block_name.clone());
        
        // no livein
        // liveout = entry block's args
        self.backend.set_block_livein(block_name.clone(), &vec![]);
        self.backend.set_block_liveout(block_name.clone(), args);
        
        // push rbp
        self.backend.emit_push_r64(&x86_64::RBP);
        // mov rsp -> rbp
        self.backend.emit_mov_r64_r64(&x86_64::RBP, &x86_64::RSP);
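        // at this point the frame is [return address][saved rbp], with rbp = rsp pointing
        // at the saved rbp; the callee-saved pushes below grow the frame downwards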
        
        // push all callee-saved registers
        for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            // not pushing rbp (as we have already done that)
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_push_r64(&reg);
            }
        }
        
        // unload arguments
        let mut gpr_arg_count = 0;
        // TODO: let mut fpr_arg_count = 0;
        for arg in args {
            if arg.is_int_reg() {
                if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
                    self.backend.emit_mov_r64_r64(&arg, &x86_64::ARGUMENT_GPRs[gpr_arg_count]);
                    gpr_arg_count += 1;
                } else {
                    // unload from stack
                    unimplemented!();
                }
            } else if arg.is_fp_reg() {
                unimplemented!();
            } else {
                panic!("expect an arg value to be either int reg or fp reg");
            }
        }
        
        self.backend.end_block(block_name);
    }
    
    fn emit_common_epilogue(&mut self, ret_inst: &Instruction, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
        // epilogue is not a block (it's a few instructions inserted before return)
        // FIXME: this may change in the future
        
        // prepare return regs
        let ref ops = ret_inst.ops.read().unwrap();
        let ret_val_indices = match ret_inst.v {
            Instruction_::Return(ref vals) => vals,
            _ => panic!("expected ret inst")
        };
        
        let mut gpr_ret_count = 0;
        // TODO: let mut fpr_ret_count = 0;
        for i in ret_val_indices {
            let ref ret_val = ops[*i];
            if self.match_ireg(ret_val) {
                let reg_ret_val = self.emit_ireg(ret_val, f_content, f_context, vm);
                
                self.backend.emit_mov_r64_r64(&x86_64::RETURN_GPRs[gpr_ret_count], &reg_ret_val);
                gpr_ret_count += 1;
            } else if self.match_iimm(ret_val) {
                let imm_ret_val = self.emit_get_iimm(ret_val);
                
                self.backend.emit_mov_r64_imm32(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
                gpr_ret_count += 1;
            } else {
                unimplemented!();
            }
        }        
        
        // pop all callee-saved registers - reverse order
        for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
            let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
            if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
                self.backend.emit_pop_r64(&reg);
            }
        }
        
        // pop rbp
        self.backend.emit_pop_r64(&x86_64::RBP);
    }
    
    fn match_cmp_res(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                match inst.v {
                    Instruction_::CmpOp(_, _, _) => true,
                    _ => false
                }
            }
            TreeNode_::Value(_) => false
        }
    }
    
    fn emit_cmp_res(&mut self, cond: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> op::CmpOp {
        match cond.v {
            TreeNode_::Instruction(ref inst) => {
                let ops = inst.ops.read().unwrap();                
                
                match inst.v {
                    Instruction_::CmpOp(op, op1, op2) => {
                        let op1 = &ops[op1];
                        let op2 = &ops[op2];
                        
                        if op::is_int_cmp(op) {                        
                            if self.match_ireg(op1) && self.match_ireg(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
                                
                                self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
                            } else if self.match_ireg(op1) && self.match_iimm(op2) {
                                let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
                                let iimm_op2 = self.emit_get_iimm(op2);
                                
                                self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
                            } else {
                                unimplemented!()
                            }
                        } else {
                            unimplemented!()
                        }
                        
                        op
                    }
                    
                    _ => panic!("expect cmp res to emit")
                }
            }
            _ => panic!("expect cmp res to emit")
        }
    }    
    
    fn match_ireg(&mut self, op: &TreeNode) -> bool {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        return false;
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    if types::is_scalar(&value.ty) {
                        true
                    } else {
                        false
                    }
                } else {
                    false
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.is_int_reg()
            }
        }
    }
    
    fn emit_ireg(&mut self, op: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(_) => {
                self.instruction_select(op, f_content, f_context, vm);
                
                self.emit_get_result(op)
            },
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(_)
                    | Value_::Global(_)
                    | Value_::Memory(_) => panic!("expected ireg"),
                    Value_::SSAVar(_) => {
                        pv.clone()
                    },
                }
            }
        }
    }
    
    #[allow(unused_variables)]
    fn match_fpreg(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    fn match_iimm(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) if x86_64::is_valid_x86_imm(pv) => true,
            _ => false
        }
    }
    
    fn emit_get_iimm(&mut self, op: &P<TreeNode>) -> i32 {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::Int(val)) => {
                        val as i32
                    },
                    _ => panic!("expected iimm")
                }
            },
            _ => panic!("expected iimm")
        }
    }
    
    fn emit_get_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::SSAVar(_) => P(Value{
                        hdr: MuEntityHeader::unnamed(vm.next_id()),
                        ty: types::get_referent_ty(& pv.ty).unwrap(),
                        v: Value_::Memory(MemoryLocation::Address{
                            base: pv.clone(),
                            offset: None,
                            index: None,
                            scale: None
                        })
                    }),
                    Value_::Global(_) => {
                        if vm.is_running() {
                            // get address from vm
                            unimplemented!()
                        } else {
                            // symbolic
                            P(Value{
                                hdr: MuEntityHeader::unnamed(vm.next_id()),
                                ty: types::get_referent_ty(&pv.ty).unwrap(),
                                v: Value_::Memory(MemoryLocation::Symbolic{
                                    base: Some(x86_64::RIP.clone()),
                                    label: pv.name().unwrap()
                                })
                            })
                        }
                    },
                    Value_::Memory(_) => pv.clone(),
                    Value_::Constant(_) => unimplemented!()
                }
            }
            TreeNode_::Instruction(_) => self.emit_get_mem_from_inst(op, vm)
        }
    }
    
    fn emit_get_mem_from_inst(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
        match op.v {
            TreeNode_::Instruction(ref inst) => {
                let ref ops = inst.ops.read().unwrap();
                
                match inst.v {
                    Instruction_::GetIRef(op_index) => {
                        let ref op = ops[op_index];
                        
                        self.make_memory_op_base_offset(&op.clone_value(), mm::objectmodel::OBJECT_HEADER_SIZE as i32, runtime::ADDRESS_TYPE.clone(), vm) 
                    }
                    _ => unimplemented!()
                }
            },
            _ => panic!("expecting a instruction that yields a memory address")
        }
    }
    
    fn match_funcref_const(&mut self, op: &P<TreeNode>) -> bool {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(_)) => true,
                    Value_::Constant(Constant::UFuncRef(_)) => true,
                    _ => false
                }
            },
            _ => false 
        }
    }
    
    fn emit_get_funcref_const(&mut self, op: &P<TreeNode>) -> MuID {
        match op.v {
            TreeNode_::Value(ref pv) => {
                match pv.v {
                    Value_::Constant(Constant::FuncRef(id))
                    | Value_::Constant(Constant::UFuncRef(id)) => id,
                    _ => panic!("expected a (u)funcref const")
                }
            },
            _ => panic!("expected a (u)funcref const")
        }
    }
    
    #[allow(unused_variables)]
    fn match_mem(&mut self, op: &P<TreeNode>) -> bool {
        unimplemented!()
    }
    
    #[allow(unused_variables)]
    fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
        unimplemented!()
    }
    
    fn emit_get_result(&mut self, node: &TreeNode) -> P<Value> {
        match node.v {
            TreeNode_::Instruction(ref inst) => {
                if inst.value.is_some() {
                    if inst.value.as_ref().unwrap().len() > 1 {
                        panic!("expected ONE result from the node {}", node);
                    }
                    
                    let ref value = inst.value.as_ref().unwrap()[0];
                    
                    value.clone()
                } else {
                    panic!("expected result from the node {}", node);
                }
            }
            
            TreeNode_::Value(ref pv) => {
                pv.clone()
            }
        }
1065 1066
    }
    
1067
    fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
1068 1069 1070 1071
        let ref dst_ty = dest.ty;
        
        if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            if self.match_ireg(src) {
1072
                let src_reg = self.emit_ireg(src, f_content, f_context, vm);
1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
                self.backend.emit_mov_r64_r64(dest, &src_reg);
            } else if self.match_iimm(src) {
                let src_imm = self.emit_get_iimm(src);
                self.backend.emit_mov_r64_imm32(dest, src_imm);
            } else {
                panic!("expected an int type op");
            }
        } else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
            unimplemented!()
        } else {
            panic!("unexpected type for move");
        } 
    }
1086
}
1087

1088 1089 1090
impl CompilerPass for InstructionSelection {
    fn name(&self) -> &'static str {
        self.name
1091
    }
1092

1093
    #[allow(unused_variables)]
qinsoon's avatar
qinsoon committed
1094
    fn start_function(&mut self, vm: &VM, func_ver: &mut MuFunctionVersion) {
1095
        debug!("{}", self.name());
1096
        
qinsoon's avatar
qinsoon committed
1097
        let funcs = vm.funcs().read().unwrap();
qinsoon's avatar
qinsoon committed
1098
        let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();
qinsoon's avatar
qinsoon committed
1099
        self.backend.start_code(func.name().unwrap());
1100 1101
        
        // prologue (get arguments from entry block first)        
qinsoon's avatar
qinsoon committed
1102
        let entry_block = func_ver.content.as_ref().unwrap().get_entry_block();
1103 1104
        let ref args = entry_block.content.as_ref().unwrap().args;
        self.emit_common_prologue(args);
1105 1106 1107
    }

    #[allow(unused_variables)]
qinsoon's avatar
qinsoon committed
1108
    fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
1109 1110
        let f_content = func.content.as_ref().unwrap();
        
qinsoon's avatar
qinsoon committed
1111
        for block_id in func.block_trace.as_ref().unwrap() {
1112
            let block = f_content.get_block(*block_id);
1113
            let block_label = block.name().unwrap();
1114
            
1115
            self.backend.start_block(block_label.clone());
1116
            self.current_block = Some(block_label.clone());
1117

1118
            let block_content = block.content.as_ref().unwrap();
1119 1120
            
            // live in is args of the block
1121
            self.backend.set_block_livein(block_label.clone(), &block_content.args);
1122 1123 1124
            
            // live out is the union of all branch args of this block
            let live_out = block_content.get_out_arguments();
1125

1126
            for inst in block_content.body.iter() {
1127
                self.instruction_select(&inst, f_content, &mut func.context, vm);
1128
            }
1129
            
1130 1131 1132 1133 1134 1135 1136 1137
            // we may start block a, and end with block b (instruction selection may create blocks)
            // we set liveout to current block 
            {
                let current_block = self.current_block.as_ref().unwrap();
                self.backend.set_block_liveout(current_block.clone(), &live_out);
                self.backend.end_block(current_block.clone());
            }            
            self.current_block = None;
1138 1139
        }
    }
1140 1141
    
    #[allow(unused_variables)]
qinsoon's avatar
qinsoon committed
1142
    fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
1143 1144
        self.backend.print_cur_code();
        
1145 1146
        let mc = self.backend.finish_code();
        let compiled_func = CompiledFunction {
qinsoon's avatar
qinsoon committed
1147
            func_id: func.func_id,
1148
            func_ver_id: func.id(),
1149
            temps: HashMap::new(),
1150 1151 1152
            mc: mc
        };
        
qinsoon's avatar
qinsoon committed
1153
        vm.add_compiled_func(compiled_func);
1154
    }
1155
}