// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// An instruction selection pass. Uses simple tree pattern matching.
pub mod inst_sel;
/// A register allocation pass. Graph coloring.
pub mod reg_alloc;
/// A peephole optimization pass after register allocation.
pub mod peephole_opt;
/// Code emission pass. May also emit a dot graph of the IR and the generated code.
pub mod code_emission;

use std;
use utils::ByteSize;
use utils::math::align_up;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, RefPattern, GCTYPE_INIT_ID};
use num::integer::lcm;

/// For ahead-of-time compilation (boot image making), the context file contains
/// a persisted VM, a persisted heap, and constants. This allows the VM to
/// resume execution in the same state as before persisting.
#[cfg(feature = "aot")]
pub const AOT_EMIT_CONTEXT_FILE: &'static str = "context.S";

pub const AOT_EMIT_SYM_TABLE_FILE: &'static str = "mu_sym_table.S";

// type aliases to make backend code more readable
pub type Reg<'a> = &'a P<Value>;
pub type Mem<'a> = &'a P<Value>;

// re-export some common backend functions to allow target-independent code

/// --- X86_64 backend ---
#[cfg(target_arch = "x86_64")]
#[path = "arch/x86_64/mod.rs"]
pub mod x86_64;

/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_aliased;
/// gets the color for a machine register (e.g. AH, AX and EAX all have the color RAX)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_usable_regs;
/// returns the RegGroup for a machine register
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_callee_saved;
/// number of callee saved registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::CALLEE_SAVED_COUNT;
/// gets the offset for a callee saved register (used for the exception table)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_callee_saved_offset;
/// gets the frame pointer for the previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_frame_pointer;
/// gets the return address for the current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_return_address;
/// sets the frame pointer for the previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_previous_frame_pointer;
/// sets the return address for the current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_return_address;
/// gets the stack pointer for the previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_stack_pointer;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_code;
/// emits context (persisted VM/heap/etc); should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::spill_rewrite;
/// machine registers used for passing general purpose arguments
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
/// machine registers used for passing floating point arguments
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::call_stack_size;

/// --- aarch64 backend ---
#[cfg(target_arch = "aarch64")]
#[path = "arch/aarch64/mod.rs"]
pub mod aarch64;

/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_aliased;
/// gets the color for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_usable_regs;
/// returns the RegGroup for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_callee_saved;
/// number of callee saved registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::CALLEE_SAVED_COUNT;
/// gets the offset for a callee saved register (used for the exception table)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_callee_saved_offset;
/// gets the frame pointer for the previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_frame_pointer;
/// gets the return address for the current frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_return_address;
/// gets the stack pointer for the previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_stack_pointer;
/// sets the frame pointer for the previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_previous_frame_pointer;
/// sets the return address for the current frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_return_address;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_code;
/// emits context (persisted VM/heap/etc); should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::spill_rewrite;
/// machine registers used for passing general purpose arguments
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
/// machine registers used for passing floating point arguments
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_FPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::call_stack_size;

use vm::VM;
use ast::types::*;
use ast::ptr::*;
use ast::ir::*;

/// BackendType describes the storage type info for a MuType, including
/// size, alignment, struct layout, array element padded size, and GC type.
///
/// We are compatible with the C ABI, so that Mu objects can be accessed from
/// native code without extra steps (though they need to be pinned first).
///
//  GCType is a temporary design; we will rewrite the GC (Issue#12)
#[derive(Clone, Debug)]
pub struct BackendType {
    pub size: ByteSize,
    pub alignment: ByteSize,
    /// struct layout of the type, None if this is not a struct/hybrid type
    pub struct_layout: Option<Vec<ByteSize>>,
    /// element size for hybrid/array type
    pub elem_size: Option<ByteSize>,
    /// GC type, containing information for GC (this is a temporary design)
    /// See Issue#12
    pub gc_type: P<GCType>
}

rodal_struct!(BackendType {
    size,
    alignment,
    struct_layout,
    elem_size,
    gc_type
});
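
// Illustrative note (not from the original source): on x86_64,
// `BackendType::resolve` maps a Mu `int<32>` to
// `BackendType { size: 4, alignment: 4, struct_layout: None, elem_size: None, .. }`,
// and a struct of (int<32>, int<64>) to size 16, alignment 8 and
// struct_layout `Some(vec![0, 8])` (see `resolve` and `layout_struct` below).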

impl BackendType {
    /// gets the field offset of a struct/hybrid type. Panics if this is not a struct/hybrid type
    pub fn get_field_offset(&self, index: usize) -> ByteSize {
        if self.struct_layout.is_some() {
            let layout = self.struct_layout.as_ref().unwrap();
            layout[index]
        } else {
            panic!("trying to get field offset on a non-struct type")
        }
    }
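
    // Example (illustrative): for a struct laid out at offsets [0, 8], such as
    // the (int<32>, int<64>) example above, `get_field_offset(1)` returns 8;
    // calling this on a non-struct/hybrid BackendType panics.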

    /// resolves a MuType to a BackendType
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn resolve(ty: &MuType, vm: &VM) -> BackendType {
        match ty.v {
            // integer
            MuType_::Int(size_in_bit) => {
                match size_in_bit {
                    1...8 => BackendType {
                        size: 1,
                        alignment: 1,
                        struct_layout: None,
                        elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                    },
                    9...16 => BackendType {
                        size: 2,
                        alignment: 2,
                        struct_layout: None,
                        elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
                    },
                    17...32 => BackendType {
                        size: 4,
                        alignment: 4,
                        struct_layout: None,
                        elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
                    },
                    33...64 => BackendType {
                        size: 8,
                        alignment: 8,
                        struct_layout: None,
                        elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
                    },
                    128 => BackendType {
                        size: 16,
                        alignment: 16,
                        struct_layout: None,
                        elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(16, 16))
                    },
                    _ => unimplemented!()
                }
            }
            // reference of any type
            MuType_::Ref(_) | MuType_::IRef(_) | MuType_::WeakRef(_) => BackendType {
                size: 8,
                alignment: 8,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // pointer/opaque ref
            MuType_::UPtr(_) |
            MuType_::UFuncPtr(_) |
            MuType_::FuncRef(_) |
            MuType_::ThreadRef |
            MuType_::StackRef => BackendType {
                size: 8,
                alignment: 8,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // tagref
            MuType_::Tagref64 => BackendType {
                size: 8,
                alignment: 8,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // floating point
            MuType_::Float => BackendType {
                size: 4,
                alignment: 4,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
            },
            MuType_::Double => BackendType {
                size: 8,
                alignment: 8,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // array
            MuType_::Array(ref ty, len) => {
                let ele_ty = vm.get_backend_type_info(ty.id());
                let elem_size = ele_ty.size;
                let size = ele_ty.size * len;
                let align = ele_ty.alignment;

                // According to the AMD64 SysV ABI Version 0.99.8, a
                // 'local or global array variable of at least 16 bytes ... always has
                // alignment of at least 16 bytes'. However, if we applied this rule,
                // it would break Mu's array rule. Hopefully C programs won't care
                // if we allocate a local or global that is incorrectly aligned
                // (a C function can't tell whether a pointer to an array that is
                // passed to it points to a local or a global, so this is unlikely
                // to break anything).

                BackendType {
                    size: size,
                    alignment: align,
                    struct_layout: None,
                    elem_size: Some(elem_size),
                    gc_type: mm::add_gc_type(GCType::new_fix(
                        GCTYPE_INIT_ID,
                        size,
                        align,
                        Some(RefPattern::Repeat {
                            pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
                            count: len
                        })
                    ))
                }
            }
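
            // Worked example (illustrative): array<int<32> 10> resolves to
            // size 40 (= 4 * 10), alignment 4 (the element's alignment) and
            // elem_size Some(4).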
            // struct
            MuType_::Struct(ref name) => {
                let read_lock = STRUCT_TAG_MAP.read().unwrap();
                let struc = read_lock.get(name).unwrap();
                let tys = struc.get_tys();

                trace!("layout struct: {}", struc);
                BackendType::layout_struct(tys, vm)
            }
            // hybrid
            // - align is the alignment of the most strictly aligned element
            //   (from all fix tys and the var ty)
            // - size is the size of the fixed part (the fix tys)
            // - layout is the layout of the fixed part (the fix tys)
            MuType_::Hybrid(ref name) => {
                let read_lock = HYBRID_TAG_MAP.read().unwrap();
                let hybrid = read_lock.get(name).unwrap();

                let fix_tys = hybrid.get_fix_tys();
                let var_ty = hybrid.get_var_ty();

                // treat fix_tys as struct
                let mut ret = BackendType::layout_struct(fix_tys, vm);

                // treat var_ty as array (getting its alignment)
                let var_ele_ty = vm.get_backend_type_info(var_ty.id());
                let var_size = var_ele_ty.size;
                let var_align = var_ele_ty.alignment;
                ret.elem_size = Some(var_size);

                ret.alignment = lcm(ret.alignment, var_align);
                ret.size = align_up(ret.size, ret.alignment);

                let mut gctype = ret.gc_type.as_ref().clone();
                gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
                gctype.var_size = Some(var_size);
                ret.gc_type = mm::add_gc_type(gctype);

                ret
            }
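
            // Worked example (illustrative): a hybrid with fixed part
            // (int<64>) and variable part int<32> gets alignment lcm(8, 4) = 8,
            // fixed size align_up(8, 8) = 8 and elem_size Some(4).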
            // void
            MuType_::Void => BackendType {
                size: 0,
                alignment: 1,
                struct_layout: None,
                elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(0, 1))
            },
            // vector
            MuType_::Vector(_, _) => unimplemented!()
        }
    }

    /// lays out struct fields
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendType {
        let mut offsets: Vec<ByteSize> = vec![];
        let mut cur: ByteSize = 0;
        let mut struct_align: ByteSize = 1;

        // for gc type
        let mut use_ref_offsets = true;
        let mut ref_offsets = vec![];
        let mut gc_types = vec![];

        for ty in tys.iter() {
            let ty_info = vm.get_backend_type_info(ty.id());
            trace!("examining field: {}, {:?}", ty, ty_info);

            let align = ty_info.alignment;
            struct_align = lcm(struct_align, align);
            cur = align_up(cur, align);
            offsets.push(cur);
            trace!("aligned to {}", cur);

            // for convenience, if the struct contains another struct/array,
            // we do not use a reference map
            if ty.is_aggregate() {
                use_ref_offsets = false;
            }

            // if this type is a reference type, we store its offset
            // (we may not use this ref map though)
            if ty.is_heap_reference() {
                ref_offsets.push(cur);
            }
            // always store its gc type (we may not use it either)
            gc_types.push(ty_info.gc_type.clone());

            cur += ty_info.size;
        }

        // if we need padding at the end
        let size = align_up(cur, struct_align);

        BackendType {
            size: size,
            alignment: struct_align,
            struct_layout: Some(offsets),
            elem_size: None,
            gc_type: mm::add_gc_type(GCType::new_fix(
                GCTYPE_INIT_ID,
                size,
                struct_align,
                Some(if use_ref_offsets {
                    RefPattern::Map {
                        offsets: ref_offsets,
                        size: size
                    }
                } else {
                    RefPattern::NestedType(gc_types)
                })
            ))
        }
    }

    /// sequentially lays out a number of Mu types as if they were fields in a struct.
    /// Returns a triple of (size, alignment, offsets of each type).
    /// (When dealing with the call convention, we use this function to lay out stack arguments.)
    pub fn sequential_layout(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, ByteSize, Vec<ByteSize>) {
        let ret = BackendType::layout_struct(tys, vm);

        (ret.size, ret.alignment, ret.struct_layout.unwrap())
    }
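
    // Worked example (illustrative): laying out stack arguments of types
    // (int<64>, int<32>) yields offsets [0, 8], alignment 8 (the lcm of the
    // field alignments) and size 16 (12 used bytes padded up to the alignment).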
}

use std::fmt;
impl fmt::Display for BackendType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{} bytes ({} bytes aligned), ",
            self.size,
            self.alignment
        ).unwrap();
        if self.struct_layout.is_some() {
            use utils::vec_utils;

            let layout = self.struct_layout.as_ref().unwrap();
            write!(f, "field offsets: ({})", vec_utils::as_str(layout)).unwrap();
        }

        Ok(())
    }
}
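
// Illustrative note: for the (int<32>, int<64>) struct example above, the
// Display output reads along the lines of
// `16 bytes (8 bytes aligned), field offsets: (0, 8)`
// (the exact offset formatting comes from `utils::vec_utils::as_str`).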

/// RegGroup describes the register class of a value
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum RegGroup {
    /// general purpose register
    GPR,
    /// requires two general purpose registers
    GPREX,
    /// floating point register
    FPR
}

rodal_enum!(RegGroup { GPR, GPREX, FPR });

impl RegGroup {
    /// gets the RegGroup for a MuType
    pub fn get_from_ty(ty: &P<MuType>) -> RegGroup {
        match ty.v {
            // for now, we only use 64-bit registers
            MuType_::Int(len) if len <= 64 => RegGroup::GPR,
            MuType_::Int(len) if len == 128 => RegGroup::GPREX,

            MuType_::Ref(_) |
            MuType_::IRef(_) |
            MuType_::WeakRef(_) |
            MuType_::UPtr(_) |
            MuType_::ThreadRef |
            MuType_::StackRef |
            MuType_::Tagref64 |
            MuType_::FuncRef(_) |
            MuType_::UFuncPtr(_) => RegGroup::GPR,

            MuType_::Float => RegGroup::FPR,
            MuType_::Double => RegGroup::FPR,

            _ => unimplemented!()
        }
    }

    /// gets the RegGroup for a Mu Value
    pub fn get_from_value(val: &P<Value>) -> RegGroup {
        RegGroup::get_from_ty(&val.ty)
    }
}
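
// Illustrative summary of the mapping above: int<64> and ref<T> map to
// RegGroup::GPR, int<128> to RegGroup::GPREX, float and double to RegGroup::FPR.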

/// makes a block name by joining the instruction name and a label as "inst:label"
fn make_block_name(inst: &MuName, label: &str) -> MuName {
    Arc::new(format!("{}:{}", inst, label))
}
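
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;

    // A minimal sketch (not part of the original source), assuming
    // MuName = Arc<String> as implied by make_block_name's body:
    // block names are the instruction name and the label joined by a colon.
    #[test]
    fn block_names_are_colon_joined() {
        let inst: MuName = Arc::new(String::from("foo.v1"));
        assert_eq!(*make_block_name(&inst, "end"), "foo.v1:end");
    }
}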