WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO officer has suggested users to enable 2FA as soon as possible.
Currently 2.6% of users enabled 2FA.

mod.rs 20.1 KB
Newer Older
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
1
// Copyright 2017 The Australian National University
2
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
3 4 5
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
6
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
7
//     http://www.apache.org/licenses/LICENSE-2.0
8
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
9 10 11 12 13 14
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

qinsoon's avatar
qinsoon committed
15
/// An instruction selection pass. Uses simple tree pattern matching.
16
pub mod inst_sel;
qinsoon's avatar
qinsoon committed
17
/// A register allocation pass. Graph coloring.
18
pub mod reg_alloc;
qinsoon's avatar
qinsoon committed
19
/// A peephole optimization pass after register allocation.
20
pub mod peephole_opt;
qinsoon's avatar
qinsoon committed
21
/// Code emission pass. May as well emit dot graph for IR and generated code.
22
pub mod code_emission;
23

24
use std;
qinsoon's avatar
qinsoon committed
25
use utils::ByteSize;
26
use utils::math::align_up;
27
use runtime::mm;
qinsoon's avatar
qinsoon committed
28
use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
29
use num::integer::lcm;
qinsoon's avatar
qinsoon committed
30

qinsoon's avatar
qinsoon committed
31 32 33
/// for ahead-of-time compilation (boot image making), the file contains a persisted VM, a persisted
/// heap, constants. This allows the VM to resume execution with the same status as before persisting.
/// Emitted as an assembly file (.S) so it can be assembled/linked with the generated code.
#[cfg(feature = "aot")]
pub const AOT_EMIT_CONTEXT_FILE : &'static str = "context.S";
35

qinsoon's avatar
qinsoon committed
36
// type alias to make backend code more readable
/// a register operand (a reference to a Mu value)
pub type Reg<'a> = &'a P<Value>;
/// a memory operand (a reference to a Mu value)
pub type Mem<'a> = &'a P<Value>;

qinsoon's avatar
qinsoon committed
40
// re-export some common backend functions to allow target independent code
qinsoon's avatar
qinsoon committed
41

qinsoon's avatar
qinsoon committed
42
/// --- X86_64 backend ---
43
#[cfg(target_arch = "x86_64")]
44
#[path = "arch/x86_64/mod.rs"]
45
pub mod x86_64;
46

qinsoon's avatar
qinsoon committed
47
/// estimates how many machine instructions are needed for a Mu instruction
qinsoon's avatar
qinsoon committed
48 49
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::estimate_insts_for_ir;
qinsoon's avatar
qinsoon committed
50
/// initializes machine registers in the function context
qinsoon's avatar
qinsoon committed
51
#[cfg(target_arch = "x86_64")]
qinsoon's avatar
qinsoon committed
52
pub use compiler::backend::x86_64::init_machine_regs_for_func;
qinsoon's avatar
qinsoon committed
53
/// checks if two machine registers are alias (the same register)
54 55
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_aliased;
qinsoon's avatar
qinsoon committed
56
/// gets color for a machine register (e.g. AH, AX, EAX all have color of RAX)
qinsoon's avatar
qinsoon committed
57
#[cfg(target_arch = "x86_64")]
qinsoon's avatar
qinsoon committed
58
pub use compiler::backend::x86_64::get_color_for_precolored;
qinsoon's avatar
qinsoon committed
59
/// returns the number of registers in a given RegGroup
qinsoon's avatar
qinsoon committed
60
#[cfg(target_arch = "x86_64")]
61
pub use compiler::backend::x86_64::number_of_usable_regs_in_group;
qinsoon's avatar
qinsoon committed
62
/// returns the number of all machine registers
qinsoon's avatar
qinsoon committed
63
#[cfg(target_arch = "x86_64")]
qinsoon's avatar
qinsoon committed
64
pub use compiler::backend::x86_64::number_of_all_regs;
qinsoon's avatar
qinsoon committed
65
/// returns a hashmap of all the machine registers
qinsoon's avatar
qinsoon committed
66 67
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::all_regs;
qinsoon's avatar
qinsoon committed
68
/// returns all usable registers (machine registers that can be assigned to temporaries)
69
#[cfg(target_arch = "x86_64")]
70
pub use compiler::backend::x86_64::all_usable_regs;
qinsoon's avatar
qinsoon committed
71
/// returns RegGroup for a machine register
72
#[cfg(target_arch = "x86_64")]
73
pub use compiler::backend::x86_64::pick_group_for_reg;
qinsoon's avatar
qinsoon committed
74
/// checks if a register is callee saved
75 76
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::is_callee_saved;
qinsoon's avatar
qinsoon committed
77
/// number of callee saved registers
78
#[cfg(target_arch = "x86_64")]
79
pub use compiler::backend::x86_64::CALLEE_SAVED_COUNT;
qinsoon's avatar
qinsoon committed
80
/// gets offset for callee saved registers (used for exception table)
81 82
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_callee_saved_offset;
qinsoon's avatar
qinsoon committed
83
/// gets frame pointer for previous frame
84 85
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_frame_pointer;
qinsoon's avatar
qinsoon committed
86
/// gets return address for current frame
87 88
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_return_address;
qinsoon's avatar
qinsoon committed
89
/// sets frame pointer for previous frame
90 91
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_previous_frame_pointer;
qinsoon's avatar
qinsoon committed
92
/// sets return address for current frame
93 94
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::set_return_address;
qinsoon's avatar
qinsoon committed
95
/// gets stack pointer for previous frame
96 97
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::get_previous_stack_pointer;
qinsoon's avatar
qinsoon committed
98
/// emits code for a function version (the function needs to be compiled first)
99
#[cfg(target_arch = "x86_64")]
100
pub use compiler::backend::x86_64::emit_code;
qinsoon's avatar
qinsoon committed
101 102
/// emits context (persisted VM/heap/etc), should only be called after
/// finishing compilation for all functions
103 104
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
qinsoon's avatar
qinsoon committed
105
/// emits context with consideration of relocation info
106
#[cfg(target_arch = "x86_64")]
qinsoon's avatar
qinsoon committed
107
pub use compiler::backend::x86_64::emit_context_with_reloc;
qinsoon's avatar
qinsoon committed
108 109
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
qinsoon's avatar
qinsoon committed
110
#[cfg(target_arch = "x86_64")]
111
pub use compiler::backend::x86_64::spill_rewrite;
qinsoon's avatar
qinsoon committed
112 113 114 115
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;
qinsoon's avatar
qinsoon committed
116

qinsoon's avatar
qinsoon committed
117
/// --- aarch64 backend ---
118 119 120 121
#[cfg(target_arch = "aarch64")]
#[path = "arch/aarch64/mod.rs"]
pub mod aarch64;

qinsoon's avatar
qinsoon committed
122
/// estimates how many machine instructions are needed for a Mu instruction
123 124
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::estimate_insts_for_ir;
qinsoon's avatar
qinsoon committed
125
/// initializes machine registers in the function context
126 127
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::init_machine_regs_for_func;
qinsoon's avatar
qinsoon committed
128
/// checks if two machine registers are alias (the same register)
129 130
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_aliased;
qinsoon's avatar
qinsoon committed
131
/// gets color for a machine register
132 133
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_color_for_precolored;
qinsoon's avatar
qinsoon committed
134
/// returns the number of registers in a given RegGroup
135
#[cfg(target_arch = "aarch64")]
136
pub use compiler::backend::aarch64::number_of_usable_regs_in_group;
qinsoon's avatar
qinsoon committed
137
/// returns the number of all machine registers
138 139
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::number_of_all_regs;
qinsoon's avatar
qinsoon committed
140
/// returns a hashmap of all the machine registers
141 142
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_regs;
qinsoon's avatar
qinsoon committed
143
/// returns all usable registers (machine registers that can be assigned to temporaries)
144 145
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::all_usable_regs;
qinsoon's avatar
qinsoon committed
146
/// returns RegGroup for a machine register
147 148
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::pick_group_for_reg;
qinsoon's avatar
qinsoon committed
149
/// checks if a register is callee saved
150 151 152
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::is_callee_saved;
#[cfg(target_arch = "aarch64")]
153 154 155 156 157 158 159 160 161 162 163 164 165
pub use compiler::backend::aarch64::CALLEE_SAVED_COUNT ;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_callee_saved_offset;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_frame_pointer;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_return_address;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::get_previous_stack_pointer;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_previous_frame_pointer;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::set_return_address;
qinsoon's avatar
qinsoon committed
166
/// emits code for a function version (the function needs to be compiled first)
167
#[cfg(target_arch = "aarch64")]
168
pub use compiler::backend::aarch64::emit_code;
qinsoon's avatar
qinsoon committed
169 170
/// emits context (persisted VM/heap/etc), should only be called after
/// finishing compilation for all functions
171 172
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context;
qinsoon's avatar
qinsoon committed
173
/// emits context with consideration of relocation info
174 175
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::emit_context_with_reloc;
qinsoon's avatar
qinsoon committed
176 177
/// rewrites a compiled Mu function with given spilling info
/// (inserting load/store for spilled temporaries)
178 179
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::spill_rewrite;
qinsoon's avatar
qinsoon committed
180
#[cfg(target_arch = "aarch64")]
181
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
qinsoon's avatar
qinsoon committed
182
#[cfg(target_arch = "aarch64")]
183
pub use compiler::backend::aarch64::ARGUMENT_FPRS;
qinsoon's avatar
qinsoon committed
184

qinsoon's avatar
qinsoon committed
185
use vm::VM;
qinsoon's avatar
qinsoon committed
186 187
use ast::types::*;
use ast::ptr::*;
188
use ast::ir::*;
189

qinsoon's avatar
qinsoon committed
190 191 192 193 194 195 196
/// BackendType describes storage type info for a MuType, including
/// size, alignment, struct layout, array element padded size, GC type.
///
/// We are compatible with C ABI, so that Mu objects can be accessed from
/// native without extra steps (though they need to be pinned first)
///
//  GCType is a temporary design, we will rewrite GC (Issue#12)
#[derive(Clone, Debug)]
pub struct BackendType {
    /// size of the type in bytes
    pub size: ByteSize,
    /// alignment requirement of the type in bytes
    pub alignment: ByteSize,
    /// struct layout of the type, None if this is not a struct/hybrid type
    pub struct_layout: Option<Vec<ByteSize>>,
    /// element size for hybrid/array type
    pub elem_size: Option<ByteSize>,
    /// GC type, containing information for GC (this is a temporary design)
    /// See Issue#12
    pub gc_type: P<GCType>
}

rodal_struct!(BackendType{size, alignment, struct_layout, elem_size, gc_type});
qinsoon's avatar
qinsoon committed
211

qinsoon's avatar
qinsoon committed
212 213
impl BackendType {
    /// gets field offset of a struct/hybrid type. Panics if this is not struct/hybrid type
    /// (i.e. `struct_layout` is `None`).
    pub fn get_field_offset(&self, index: usize) -> ByteSize {
        if self.struct_layout.is_some() {
            let layout = self.struct_layout.as_ref().unwrap();
            layout[index]
        } else {
            panic!("trying to get field offset on a non-struct type")
        }
    }

    /// resolves a MuType to a BackendType, computing size, alignment,
    /// struct layout, element size and GC type information
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    pub fn resolve(ty: &MuType, vm: &VM) -> BackendType {
        match ty.v {
            // integer: bit width is rounded up to the next power-of-two byte size
            MuType_::Int(size_in_bit) => {
                match size_in_bit {
                    1 ... 8   => BackendType{
                        size: 1, alignment: 1, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                    },
                    9 ... 16  => BackendType{
                        size: 2, alignment: 2, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
                    },
                    17 ... 32 => BackendType{
                        size: 4, alignment: 4, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
                    },
                    33 ... 64 => BackendType{
                        size: 8, alignment: 8, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
                    },
                    128 => BackendType {
                        size: 16, alignment: 16, struct_layout: None, elem_size: None,
                        gc_type: mm::add_gc_type(GCType::new_noreftype(16, 16))
                    },
                    // other widths (e.g. 65-127 bits) are not supported
                    _ => unimplemented!()
                }
            },
            // reference of any type: pointer-sized, and traced by the GC
            MuType_::Ref(_)
            | MuType_::IRef(_)
            | MuType_::WeakRef(_) => BackendType{
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // pointer/opaque ref: pointer-sized, NOT traced by the GC
            MuType_::UPtr(_)
            | MuType_::UFuncPtr(_)
            | MuType_::FuncRef(_)
            | MuType_::ThreadRef
            | MuType_::StackRef => BackendType{
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // tagref: 64-bit tagged value that may hold a reference, so traced by the GC
            MuType_::Tagref64 => BackendType {
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_reftype())
            },
            // floating point
            MuType_::Float => BackendType{
                size: 4, alignment: 4, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
            },
            MuType_::Double => BackendType {
                size: 8, alignment: 8, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
            },
            // array: element layout repeated `len` times, aligned as the element
            MuType_::Array(ref ty, len) => {
                let ele_ty = vm.get_backend_type_info(ty.id());
                let elem_size = ele_ty.size;
                let size = ele_ty.size*len;
                let align = ele_ty.alignment;

                // According to the AMD64 SYSV ABI Version 0.99.8,
                // a 'local or global array variable of at least 16 bytes ... always has alignment of at least 16 bytes'
                // However, if we apply this rule, it will break Mu's array rule, hopefully C programs
                // won't care if we allocate a local or global which is incorrectly aligned
                // (A C function can't be sure a pointer to array that is passed to it is a local or global
                // so this is unlikely to break anything).

                BackendType{
                    size         : size,
                    alignment    : align,
                    struct_layout: None,
                    elem_size : Some(elem_size),
                    gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID, size, align,
                                                                   Some(RefPattern::Repeat{
                                                                       pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
                                                                       count  : len
                                                                   })
                    ))
                }
            }
            // struct: field-by-field C-compatible layout (see layout_struct)
            MuType_::Struct(ref name) => {
                let read_lock = STRUCT_TAG_MAP.read().unwrap();
                let struc = read_lock.get(name).unwrap();
                let tys = struc.get_tys();

                trace!("layout struct: {}", struc);
                BackendType::layout_struct(tys, vm)
            }
            // hybrid
            // - align is the most strict aligned element (from all fix tys and var ty)
            // - size is fixed tys size
            // - layout is fixed tys layout
            MuType_::Hybrid(ref name) => {
                let read_lock = HYBRID_TAG_MAP.read().unwrap();
                let hybrid = read_lock.get(name).unwrap();

                let fix_tys = hybrid.get_fix_tys();
                let var_ty  = hybrid.get_var_ty();

                // treat fix_tys as struct
                let mut ret = BackendType::layout_struct(fix_tys, vm);

                // treat var_ty as array (getting its alignment)
                let var_ele_ty = vm.get_backend_type_info(var_ty.id());
                let var_size = var_ele_ty.size;
                let var_align = var_ele_ty.alignment;
                ret.elem_size = Some(var_size);

                ret.alignment = lcm(ret.alignment, var_align);
                ret.size = align_up(ret.size, ret.alignment);
                // extend the fixed part's GC type with variable-part info
                let mut gctype = ret.gc_type.as_ref().clone();
                gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
                gctype.var_size = Some(var_size);
                ret.gc_type = mm::add_gc_type(gctype);

                ret
            }
            // void: zero-sized, byte-aligned
            MuType_::Void => BackendType{
                size: 0, alignment: 1, struct_layout: None, elem_size: None,
                gc_type: mm::add_gc_type(GCType::new_noreftype(0, 1))
            },
            // vector
            MuType_::Vector(_, _) => unimplemented!()
        }
    }

    /// layouts struct fields (C-compatible: each field is aligned to its own
    /// requirement; overall size is padded to the struct's alignment)
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendType {
        let mut offsets : Vec<ByteSize> = vec![];
        let mut cur : ByteSize = 0;
        let mut struct_align : ByteSize = 1;

        // for gc type
        let mut use_ref_offsets = true;
        let mut ref_offsets = vec![];
        let mut gc_types    = vec![];

        for ty in tys.iter() {
            let ty_info = vm.get_backend_type_info(ty.id());
            trace!("examining field: {}, {:?}", ty, ty_info);

            let align = ty_info.alignment;
            struct_align = lcm(struct_align, align);
            cur = align_up(cur, align);
            offsets.push(cur);
            trace!("aligned to {}", cur);

            // for convenience, if the struct contains other struct/array
            // we do not use reference map
            if ty.is_aggregate() {
                use_ref_offsets = false;
            }

            // if this type is reference type, we store its offsets
            // we may not use this ref map though
            if ty.is_heap_reference() {
                ref_offsets.push(cur);
            }
            // always store its gc type (we may not use it as well)
            gc_types.push(ty_info.gc_type.clone());

            cur += ty_info.size;
        }

        // if we need padding at the end
        let size = align_up(cur, struct_align);

        BackendType {
            size         : size,
            alignment    : struct_align,
            struct_layout: Some(offsets),
            elem_size: None,
            gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
                                                           size,
                                                           struct_align,
                                                           Some(if use_ref_offsets {
                                                               RefPattern::Map {
                                                                   offsets: ref_offsets,
                                                                   size: size
                                                               }
                                                           } else {
                                                               RefPattern::NestedType(gc_types)
                                                           })))
        }
    }

    /// sequentially layout a few Mu types as if they are fields in a struct.
    /// Returns a triple of (size, alignment, offsets of each type)
    /// (when dealing with call convention, we use this function to layout stack arguments)
    pub fn sequential_layout(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, ByteSize, Vec<ByteSize>) {
        let ret = BackendType::layout_struct(tys, vm);

        (ret.size, ret.alignment, ret.struct_layout.unwrap())
    }
}

qinsoon's avatar
qinsoon committed
429
use std::fmt;
qinsoon's avatar
qinsoon committed
430
impl fmt::Display for BackendType {
qinsoon's avatar
qinsoon committed
431 432 433 434 435 436 437 438 439 440 441 442 443
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} bytes ({} bytes aligned), ", self.size, self.alignment).unwrap();
        if self.struct_layout.is_some() {
            use utils::vec_utils;

            let layout = self.struct_layout.as_ref().unwrap();
            write!(f, "field offsets: ({})", vec_utils::as_str(layout)).unwrap();
        }

        Ok(())
    }
}

qinsoon's avatar
qinsoon committed
444
/// RegGroup describes register class
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum RegGroup {
    /// general purpose register
    GPR,
    /// requires two general purpose registers (used for 128-bit integers)
    GPREX,
    /// floating point register
    FPR
}

rodal_enum!(RegGroup{GPR, GPREX, FPR});
456 457

impl RegGroup {
qinsoon's avatar
qinsoon committed
458
    /// gets RegGroup from a MuType
459
    pub fn get_from_ty(ty: &P<MuType>) -> RegGroup {
460
        match ty.v {
qinsoon's avatar
qinsoon committed
461
            // for now, only use 64bits registers
462
            MuType_::Int(len) if len <= 64 => RegGroup::GPR,
463
            MuType_::Int(len) if len == 128=> RegGroup::GPREX,
464 465 466 467 468 469 470 471 472

            MuType_::Ref(_)
            | MuType_::IRef(_)
            | MuType_::WeakRef(_)
            | MuType_::UPtr(_)
            | MuType_::ThreadRef
            | MuType_::StackRef
            | MuType_::Tagref64
            | MuType_::FuncRef(_)
473
            | MuType_::UFuncPtr(_)         => RegGroup::GPR,
474

475 476
            MuType_::Float                 => RegGroup::FPR,
            MuType_::Double                => RegGroup::FPR,
477 478 479 480

            _ => unimplemented!()
        }
    }
481

qinsoon's avatar
qinsoon committed
482
    /// gets RegGroup from a Mu Value
483 484 485
    pub fn get_from_value(val: &P<Value>) -> RegGroup {
        RegGroup::get_from_ty(&val.ty)
    }
486
}
487

488 489
/// makes a fully qualified block name of the form `<fv_name>.#<id>:<label>`
/// from the function version name, the block's MuID and its label.
/// Takes `&str` instead of `&String` (existing `&String` call sites still
/// coerce via deref coercion).
fn make_block_name(fv_name: &str, id: MuID, label: &str) -> MuName {
    format!("{}.#{}:{}", fv_name, id, label)
}