// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// An instruction selection pass. Uses simple tree pattern matching.
pub mod inst_sel;
/// A register allocation pass. Uses graph coloring.
pub mod reg_alloc;
/// A peephole optimization pass after register allocation.
pub mod peephole_opt;
/// Code emission pass. Also emits dot graphs for the IR and the generated code.
pub mod code_emission;

use std;
use utils::ByteSize;
use utils::math::align_up;
use runtime::mm;
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
use num::integer::lcm;

/// For ahead-of-time compilation (boot image building), this file contains a persisted VM,
/// a persisted heap, and constants. It allows the VM to resume execution in the same
/// state as before persisting.
#[cfg(feature = "aot")]
pub const AOT_EMIT_CONTEXT_FILE : &'static str = "context.S";

/// name for the prologue block (this is not the full name; the actual name is generated from this)
pub const PROLOGUE_BLOCK_NAME: &'static str = "prologue";
/// name for the epilogue block (this is not the full name; the actual name is generated from this)
pub const EPILOGUE_BLOCK_NAME: &'static str = "epilogue";

// type aliases to make backend code more readable
pub type Reg<'a> = &'a P<Value>;
pub type Mem<'a> = &'a P<Value>;

// re-export some common backend functions to allow target-independent code

/// --- X86_64 backend ---
#[cfg(target_arch = "x86_64")]
#[path = "arch/x86_64/mod.rs"]
pub mod x86_64;

/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_aliased;
/// gets color for a machine register (e.g. AH, AX, EAX all have the color of RAX)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_usable_regs;
/// returns RegGroup for a machine register
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_callee_saved;
/// number of callee saved registers
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::CALLEE_SAVED_COUNT;
/// gets offset for callee saved registers (used for exception table)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_callee_saved_offset;
/// gets frame pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_frame_pointer;
/// gets return address for current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_return_address;
/// sets frame pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_previous_frame_pointer;
/// sets return address for current frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_return_address;
/// gets stack pointer for previous frame
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_stack_pointer;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_code;
/// emits context (persisted VM/heap/etc); should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::spill_rewrite;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;

/// --- aarch64 backend ---
#[cfg(target_arch = "aarch64")]
#[path = "arch/aarch64/mod.rs"]
pub mod aarch64;

/// estimates how many machine instructions are needed for a Mu instruction
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::estimate_insts_for_ir;
/// initializes machine registers in the function context
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::init_machine_regs_for_func;
/// checks if two machine registers are aliases (the same register)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_aliased;
/// gets color for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_color_for_precolored;
/// returns the number of registers in a given RegGroup
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_usable_regs_in_group;
/// returns the number of all machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_all_regs;
/// returns a hashmap of all the machine registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_regs;
/// returns all usable registers (machine registers that can be assigned to temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_usable_regs;
/// returns RegGroup for a machine register
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::pick_group_for_reg;
/// checks if a register is callee saved
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_callee_saved;
/// number of callee saved registers
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::CALLEE_SAVED_COUNT;
/// gets offset for callee saved registers (used for exception table)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_callee_saved_offset;
/// gets frame pointer for previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_frame_pointer;
/// gets return address for current frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_return_address;
/// gets stack pointer for previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_stack_pointer;
/// sets frame pointer for previous frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_previous_frame_pointer;
/// sets return address for current frame
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_return_address;
/// emits code for a function version (the function needs to be compiled first)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_code;
/// emits context (persisted VM/heap/etc); should only be called after
/// finishing compilation for all functions
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context;
/// emits context with consideration of relocation info
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context_with_reloc;
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::spill_rewrite;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_FPRS;

use vm::VM;
use ast::types::*;
use ast::ptr::*;
use ast::ir::*;

/// BackendType describes storage type info for a MuType, including
/// size, alignment, struct layout, array element padded size, and GC type.
///
/// We are compatible with the C ABI, so that Mu objects can be accessed from
/// native code without extra steps (though they need to be pinned first)
///
//  GCType is a temporary design; we will rewrite the GC (Issue#12)
#[derive(Clone, Debug)]
pub struct BackendType {
    pub size: ByteSize,
    pub alignment: ByteSize,
    /// struct layout of the type, None if this is not a struct/hybrid type
    pub struct_layout: Option<Vec<ByteSize>>,
    /// element size for hybrid/array type
    pub elem_size: Option<ByteSize>,
    /// GC type, containing information for GC (this is a temporary design)
    /// See Issue#12
    pub gc_type: P<GCType>
}

rodal_struct!(BackendType{size, alignment, struct_layout, elem_size, gc_type});
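// Illustrative example (not part of the original source): resolving a Mu int<32>
// with BackendType::resolve below yields size: 4, alignment: 4,
// struct_layout: None, elem_size: None, and a 4-byte no-ref GC type.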

impl BackendType {
    /// gets the field offset of a struct/hybrid type. Panics if this is not a struct/hybrid type
    pub fn get_field_offset(&self, index: usize) -> ByteSize {
        if self.struct_layout.is_some() {
            let layout = self.struct_layout.as_ref().unwrap();
            layout[index]
        } else {
            panic!("trying to get field offset on a non-struct type")
        }
    }
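
    // Hypothetical usage sketch (vm and struct_ty are placeholder names): for a
    // struct of {int<64>, int<32>} on x86_64, the layout computed below gives
    //     vm.get_backend_type_info(struct_ty.id()).get_field_offset(1) == 8
    // since the second field is placed after the 8-byte first field.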

    /// resolves a MuType to a BackendType
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn resolve(ty: &MuType, vm: &VM) -> BackendType {
        match ty.v {
            // integer
            MuType_::Int(size_in_bit) => {
                match size_in_bit {
                    1 ... 8   => BackendType{
                        size: 1, alignment: 1, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                    },
                    9 ... 16  => BackendType{
                        size: 2, alignment: 2, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
                    },
                    17 ... 32 => BackendType{
                        size: 4, alignment: 4, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
                    },
                    33 ... 64 => BackendType{
                        size: 8, alignment: 8, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
                    },
                    128 => BackendType {
                        size: 16, alignment: 16, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(16, 16))
                    },
                    _ => unimplemented!()
                }
            },
            // reference of any type
            MuType_::Ref(_)
            | MuType_::IRef(_)
            | MuType_::WeakRef(_) => BackendType{
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // pointer/opaque ref
            MuType_::UPtr(_)
            | MuType_::UFuncPtr(_)
            | MuType_::FuncRef(_)
            | MuType_::ThreadRef
            | MuType_::StackRef => BackendType{
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // tagref
            MuType_::Tagref64 => BackendType {
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // floating point
            MuType_::Float => BackendType{
                size: 4, alignment: 4, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
            },
            MuType_::Double => BackendType {
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // array
            MuType_::Array(ref ty, len) => {
                let ele_ty = vm.get_backend_type_info(ty.id());
                let elem_size = ele_ty.size;
                let mut size = ele_ty.size*len;
                let mut align = ele_ty.alignment;

                if cfg!(target_arch = "x86_64") && size >= 16 {
                    // According to the AMD64 SysV ABI Version 0.99.8, a 'local or
                    // global array variable of at least 16 bytes ... always has
                    // alignment of at least 16 bytes'. An array may be allocated in
                    // different ways, and it is unclear which allocations count as
                    // local or global variables, so to be safe we assume the rule
                    // applies to all array allocations.
                    align = lcm(align, 16);
                    size = align_up(size, align);
                }
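                // Worked example (illustrative, not from the original source):
                // an array<int<64>, 3> has elem_size = 8, so size = 24 and align = 8;
                // since 24 >= 16 this becomes align = lcm(8, 16) = 16 and
                // size = align_up(24, 16) = 32.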

                BackendType{
                    size         : size,
                    alignment    : align,
                    struct_layout: None,
                    elem_size    : Some(elem_size),
                    gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID, size, align,
                                                                   Some(RefPattern::Repeat{
                                                                       pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
                                                                       count  : len
                                                                   })
                    ))
                }
            }
            // struct
            MuType_::Struct(ref name) => {
                let read_lock = STRUCT_TAG_MAP.read().unwrap();
                let struc = read_lock.get(name).unwrap();
                let tys = struc.get_tys();

                trace!("layout struct: {}", struc);
                BackendType::layout_struct(tys, vm)
            }
            // hybrid
            // - align is that of the most strictly aligned element (from all fix tys and the var ty)
            // - size is the size of the fix tys
            // - layout is the layout of the fix tys
            MuType_::Hybrid(ref name) => {
                let read_lock = HYBRID_TAG_MAP.read().unwrap();
                let hybrid = read_lock.get(name).unwrap();

                let fix_tys = hybrid.get_fix_tys();
                let var_ty  = hybrid.get_var_ty();

                // treat fix_tys as struct
                let mut ret = BackendType::layout_struct(fix_tys, vm);

                // treat var_ty as array (getting its alignment)
                let var_ele_ty = vm.get_backend_type_info(var_ty.id());
                let var_size = var_ele_ty.size;
                ret.elem_size = Some(var_size);

                let var_align = if cfg!(target_arch = "x86_64") {
                    // According to the AMD64 SysV ABI Version 0.99.8, 'a C99
                    // variable-length array variable always has alignment of at
                    // least 16 bytes'. Whether the var part of a hybrid counts as
                    // a variable-length array is unclear, so to be safe we assume
                    // the rule always applies to the hybrid's var part.
                    lcm(var_ele_ty.alignment, 16)
                } else {
                    var_ele_ty.alignment
                };

                ret.alignment = lcm(ret.alignment, var_align);
                ret.size = align_up(ret.size, ret.alignment);
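
                // Worked example (illustrative): a hybrid with fix part {int<64>}
                // and var part int<8> gets var_align = lcm(1, 16) = 16 on x86_64,
                // so alignment = lcm(8, 16) = 16 and size = align_up(8, 16) = 16.
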
                let mut gctype = ret.gc_type.as_ref().clone();
                gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
                gctype.var_size = Some(var_size);
                ret.gc_type = mm::add_gc_type(gctype);

                ret
            }
            // void
            MuType_::Void => BackendType{
                size: 0, alignment: 1, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(0, 1))
            },
            // vector
            MuType_::Vector(_, _) => unimplemented!()
        }
    }

    /// lays out struct fields
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendType {
        let mut offsets : Vec<ByteSize> = vec![];
        let mut cur : ByteSize = 0;
        let mut struct_align : ByteSize = 1;

        // for gc type
        let mut use_ref_offsets = true;
        let mut ref_offsets = vec![];
        let mut gc_types    = vec![];

        for ty in tys.iter() {
            let ty_info = vm.get_backend_type_info(ty.id());
            trace!("examining field: {}, {:?}", ty, ty_info);

            let align = ty_info.alignment;
            struct_align = lcm(struct_align, align);
            cur = align_up(cur, align);
            offsets.push(cur);
            trace!("aligned to {}", cur);

            // for convenience, if the struct contains other struct/array
            // we do not use reference map
            if ty.is_aggregate() {
                use_ref_offsets = false;
            }

            // if this type is reference type, we store its offsets
            // we may not use this ref map though
            if ty.is_heap_reference() {
                ref_offsets.push(cur);
            }
            // always store its gc type (we may not use it as well)
            gc_types.push(ty_info.gc_type.clone());

            cur += ty_info.size;
        }

        // if we need padding at the end
        let size = align_up(cur, struct_align);

        BackendType {
            size         : size,
            alignment    : struct_align,
            struct_layout: Some(offsets),
            elem_size    : None,
            gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
                                                           size,
                                                           struct_align,
                                                           Some(if use_ref_offsets {
                                                               RefPattern::Map {
                                                                   offsets: ref_offsets,
                                                                   size: size
                                                               }
                                                           } else {
                                                               RefPattern::NestedType(gc_types)
                                                           })))
        }
    }

    /// sequentially lays out a few Mu types as if they were fields in a struct.
    /// Returns a triple of (size, alignment, offsets of each type)
    /// (when dealing with calling conventions, we use this function to lay out stack arguments)
    pub fn sequential_layout(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, ByteSize, Vec<ByteSize>) {
        let ret = BackendType::layout_struct(tys, vm);

        (ret.size, ret.alignment, ret.struct_layout.unwrap())
    }
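
    // Hypothetical usage sketch (tys and vm are placeholders): for stack argument
    // types [int<64>, float, int<8>] this returns offsets [0, 8, 12], alignment 8,
    // and size 16 (13 bytes of fields padded up to the 8-byte alignment):
    //     let (size, align, offsets) = BackendType::sequential_layout(&tys, vm);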
}

use std::fmt;
impl fmt::Display for BackendType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} bytes ({} bytes aligned), ", self.size, self.alignment).unwrap();
        if self.struct_layout.is_some() {
            use utils::vec_utils;

            let layout = self.struct_layout.as_ref().unwrap();
            write!(f, "field offsets: ({})", vec_utils::as_str(layout)).unwrap();
        }

        Ok(())
    }
}

/// RegGroup describes a register class
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum RegGroup {
    /// general purpose register
    GPR,
    /// requires two general purpose registers
    GPREX,
    /// floating point register
    FPR
}

rodal_enum!(RegGroup{GPR, GPREX, FPR});

impl RegGroup {
    /// gets RegGroup from a MuType
    pub fn get_from_ty(ty: &P<MuType>) -> RegGroup {
        match ty.v {
            // for now, we only use 64-bit registers
            MuType_::Int(len) if len <= 64 => RegGroup::GPR,
            MuType_::Int(len) if len == 128 => RegGroup::GPREX,

            MuType_::Ref(_)
            | MuType_::IRef(_)
            | MuType_::WeakRef(_)
            | MuType_::UPtr(_)
            | MuType_::ThreadRef
            | MuType_::StackRef
            | MuType_::Tagref64
            | MuType_::FuncRef(_)
            | MuType_::UFuncPtr(_)         => RegGroup::GPR,

            MuType_::Float                 => RegGroup::FPR,
            MuType_::Double                => RegGroup::FPR,

            _ => unimplemented!()
        }
    }
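
    // Examples (illustrative): int<64> maps to GPR, int<128> to GPREX,
    // float and double map to FPR, and ref/iref/uptr types map to GPR.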

    /// gets RegGroup from a Mu Value
    pub fn get_from_value(val: &P<Value>) -> RegGroup {
        RegGroup::get_from_ty(&val.ty)
    }
}

/// generates a block name from the function version name, block id, and label
fn make_block_name(fv_name: &String, id: MuID, label: &str) -> MuName {
    format!("{}.#{}:{}", fv_name, id, label)
}
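
// Illustrative check of the format above (hypothetical values):
// make_block_name(&"foo".to_string(), 10, "entry") == "foo.#10:entry"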