Commit 29759a3c authored by qinsoon

finish refactoring/commenting ast crate

parent 16dde904
......@@ -742,7 +742,7 @@ impl Value {
pub fn is_int_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
if is_scalar(&self.ty) && !is_fp(&self.ty) {
if self.ty.is_scalar() && !self.ty.is_fp() {
true
} else {
false
......@@ -766,7 +766,7 @@ impl Value {
pub fn is_fp_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
if is_scalar(&self.ty) && is_fp(&self.ty) {
if self.ty.is_scalar() && self.ty.is_fp() {
true
} else {
false
......
......@@ -41,6 +41,7 @@ macro_rules! impl_mu_entity {
}
}
/// select between two values based on condition
macro_rules! select_value {
($cond: expr, $res1 : expr, $res2 : expr) => {
if $cond {
......
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The AST pointer
//!
//! Provides `P<T>`, a frozen owned smart pointer, as a replacement for `@T` in
//! the AST.
//!
//! # Motivations and benefits
//!
//! * **Identity**: sharing AST nodes is problematic for the various analysis
//! passes (e.g. one may be able to bypass the borrow checker with a shared
//! `ExprAddrOf` node taking a mutable borrow). The only reason `@T` in the
//! AST hasn't caused issues is because of inefficient folding passes which
//! would always deduplicate any such shared nodes. Even if the AST were to
//! switch to an arena, this would still hold, i.e. it couldn't use `&'a T`,
//! but rather a wrapper like `P<'a, T>`.
//!
//! * **Immutability**: `P<T>` disallows mutating its inner `T`, unlike `Box<T>`
//! (unless it contains an `Unsafe` interior, but that may be denied later).
//!   This mainly prevents mistakes, but can also enforce a kind of "purity".
//!
//! * **Efficiency**: folding can reuse allocation space for `P<T>` and `Vec<T>`,
//! the latter even when the input and output types differ (as it would be the
//! case with arenas or a GADT AST using type parameters to toggle features).
//!
//! * **Maintainability**: `P<T>` provides a fixed interface - `Deref`,
//! `and_then` and `map` - which can remain fully functional even if the
//! implementation changes (using a special thread-local heap, for example).
//! Moreover, a switch to, e.g. `P<'a, T>` would be easy and mostly automated.
//use std::fmt::{self, Display, Debug};
//use std::hash::{Hash, Hasher};
//use std::ops::Deref;
//use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use std::sync::Arc;
/// `P<T>` is an alias type for sharable Mu AST components (such as `Value`,
/// `MuFuncSig`, `MuType`, etc).
///
/// This design is similar to `P<T>` in the rustc compiler. However, instead of
/// using `Box<T>`, we use `Arc<T>` to encourage sharing.
pub type P<T> = Arc<T>;

/// Constructs a `P<T>` from a `T` value (wraps the value in an `Arc`).
#[allow(non_snake_case)]
#[inline]
pub fn P<T>(value: T) -> P<T> {
    Arc::new(value)
}
//impl<T: MuEntity> Deref for P<T> {
// type Target = T;
//
// fn deref<'a>(&'a self) -> &'a T {
// &*self.ptr
// }
//}
//
//impl<T: MuEntity> Clone for P<T> {
// fn clone(&self) -> P<T> {
// P {ptr: self.ptr.clone()}
// }
//}
//
//impl<T: MuEntity + PartialEq> PartialEq for P<T> {
// fn eq(&self, other: &P<T>) -> bool {
// **self == **other
// }
//}
//
//impl<T: MuEntity + Eq> Eq for P<T> {}
//
//impl<T: MuEntity + Debug> Debug for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Debug::fmt(&**self, f)
// }
//}
//impl<T: MuEntity + Display> Display for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Display::fmt(&**self, f)
// }
//}
//
//impl<T: MuEntity> fmt::Pointer for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// fmt::Pointer::fmt(&self.ptr, f)
// }
//}
//
//impl<T: MuEntity + Hash> Hash for P<T> {
// fn hash<H: Hasher>(&self, state: &mut H) {
// (**self).hash(state);
// }
//}
//impl<T: MuEntity> Encodable for P<T> {
// fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
// s.emit_usize(self.id())
// }
//}
}
\ No newline at end of file
......@@ -8,6 +8,7 @@ use std::fmt;
use utils::LinkedHashMap;
use std::sync::RwLock;
// some common types that the compiler may use internally
lazy_static! {
pub static ref ADDRESS_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::int(POINTER_SIZE * 8))
......@@ -63,6 +64,7 @@ lazy_static! {
];
}
/// clear struct/hybrid maps, called when creating new VM
pub fn init_types() {
{
let mut map_lock = STRUCT_TAG_MAP.write().unwrap();
......@@ -75,6 +77,7 @@ pub fn init_types() {
}
}
/// MuType represents a Mu type
#[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct MuType {
pub hdr: MuEntityHeader,
......@@ -82,6 +85,7 @@ pub struct MuType {
}
impl MuType {
/// creates a new Mu type
pub fn new(id: MuID, v: MuType_) -> MuType {
MuType {
hdr: MuEntityHeader::unnamed(id),
......@@ -89,6 +93,7 @@ impl MuType {
}
}
/// is this type struct type?
pub fn is_struct(&self) -> bool {
match self.v {
MuType_::Struct(_) => true,
......@@ -96,6 +101,7 @@ impl MuType {
}
}
/// is this type hybrid type?
pub fn is_hybrid(&self) -> bool {
match self.v {
MuType_::Hybrid(_) => true,
......@@ -103,6 +109,35 @@ impl MuType {
}
}
/// is this type a floating point type? (float/double)
pub fn is_fp(&self) -> bool {
    match self.v {
        MuType_::Float => true,
        MuType_::Double => true,
        _ => false
    }
}
/// is this type a scalar type?
/// (an int, a floating point, or any reference/pointer type — anything not aggregated)
pub fn is_scalar(&self) -> bool {
    match self.v {
        MuType_::Int(_) => true,
        MuType_::Float => true,
        MuType_::Double => true,
        MuType_::Ref(_) => true,
        MuType_::IRef(_) => true,
        MuType_::WeakRef(_) => true,
        MuType_::FuncRef(_) => true,
        MuType_::UFuncPtr(_) => true,
        MuType_::ThreadRef => true,
        MuType_::StackRef => true,
        MuType_::Tagref64 => true,
        MuType_::UPtr(_) => true,
        _ => false
    }
}
/// gets the tag of a struct/hybrid type, returns None if the type is not hybrid/struct
/// We use tag to resolve recursive types, and maintains a map between tag and struct types
pub fn get_struct_hybrid_tag(&self) -> Option<MuName> {
match self.v {
MuType_::Hybrid(ref name)
......@@ -111,6 +146,8 @@ impl MuType {
}
}
/// is this type a reference type?
/// (only reference type, which does not include iref, or other opaque reference types)
pub fn is_ref(&self) -> bool {
match self.v {
MuType_::Ref(_) => true,
......@@ -118,6 +155,17 @@ impl MuType {
}
}
/// is this type any reference type pointing to the heap? (including ref/iref/weakref)
pub fn is_heap_reference(&self) -> bool {
    match self.v {
        MuType_::Ref(_) => true,
        MuType_::IRef(_) => true,
        MuType_::WeakRef(_) => true,
        _ => false
    }
}
/// is this type an internal reference type?
pub fn is_iref(&self) -> bool {
match self.v {
MuType_::IRef(_) => true,
......@@ -125,6 +173,89 @@ impl MuType {
}
}
/// is this type an aggregated type? (consisted of other types)
pub fn is_aggregate(&self) -> bool {
    match self.v {
        MuType_::Struct(_) => true,
        MuType_::Hybrid(_) => true,
        MuType_::Array(_, _) => true,
        _ => false
    }
}
/// is this type a type traced by the garbage collector?
/// Note: An aggregated type is traced if any of its part is traced.
#[allow(dead_code)]
pub fn is_traced(&self) -> bool {
    match self.v {
        // reference-like and opaque runtime types are always traced
        MuType_::Ref(_)
        | MuType_::IRef(_)
        | MuType_::WeakRef(_)
        | MuType_::ThreadRef
        | MuType_::StackRef
        | MuType_::Tagref64 => true,
        // arrays/vectors are traced iff their element type is traced
        MuType_::Array(ref elem_ty, _)
        | MuType_::Vector(ref elem_ty, _) => elem_ty.is_traced(),
        // a hybrid is traced if its var part or any fix-part field is traced
        MuType_::Hybrid(ref tag) => {
            let map = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_ty = map.get(tag).unwrap();
            hybrid_ty.var_ty.is_traced()
                || hybrid_ty.fix_tys.iter().any(|ty| ty.is_traced())
        }
        // a struct is traced if any of its fields is traced
        MuType_::Struct(ref tag) => {
            let map = STRUCT_TAG_MAP.read().unwrap();
            let struct_ty = map.get(tag).unwrap();
            struct_ty.tys.iter().any(|ty| ty.is_traced())
        }
        _ => false
    }
}
/// is this type native safe?
/// Note: An aggregated type is native safe if all of its parts are native safe.
#[allow(dead_code)]
pub fn is_native_safe(&self) -> bool {
    match self.v {
        // plain values and raw pointers are native safe
        MuType_::Int(_)
        | MuType_::Float
        | MuType_::Double
        | MuType_::Void
        | MuType_::UPtr(_)
        | MuType_::UFuncPtr(_) => true,
        // arrays/vectors are native safe iff their element type is
        MuType_::Array(ref elem_ty, _)
        | MuType_::Vector(ref elem_ty, _) => elem_ty.is_native_safe(),
        // a hybrid is native safe iff its var part and every fix-part field are
        MuType_::Hybrid(ref tag) => {
            let map = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_ty = map.get(tag).unwrap();
            hybrid_ty.var_ty.is_native_safe()
                && hybrid_ty.fix_tys.iter().all(|ty| ty.is_native_safe())
        }
        // a struct is native safe iff every field is native safe
        MuType_::Struct(ref tag) => {
            let map = STRUCT_TAG_MAP.read().unwrap();
            let struct_ty = map.get(tag).unwrap();
            struct_ty.tys.iter().all(|ty| ty.is_native_safe())
        }
        _ => false
    }
}
/// gets the element type of an array type, returns None if the type is not an array type
pub fn get_elem_ty(&self) -> Option<P<MuType>> {
match self.v {
MuType_::Array(ref elem_ty, _) => Some(elem_ty.clone()),
......@@ -132,6 +263,7 @@ impl MuType {
}
}
/// gets a field's type of a struct type, returns None if the type is not a struct or hybrid type
pub fn get_field_ty(&self, index: usize) -> Option<P<MuType>> {
match self.v {
MuType_::Struct(ref tag) => {
......@@ -150,6 +282,7 @@ impl MuType {
}
}
/// gets the var part type of a hybrid type, returns None if the type is not a hybrid type
pub fn get_hybrid_varpart_ty(&self) -> Option<P<MuType>> {
match self.v {
MuType_::Hybrid(ref tag) => {
......@@ -162,7 +295,9 @@ impl MuType {
}
}
pub fn get_referenced_ty(&self) -> Option<P<MuType>> {
/// gets the referent type for Ref/IRef/WeakRef/UPtr, returns None if the type is
/// not any mentioned type.
pub fn get_referent_ty(&self) -> Option<P<MuType>> {
use types::MuType_::*;
match self.v {
Ref(ref ty)
......@@ -173,6 +308,8 @@ impl MuType {
}
}
/// gets the length (in bit) of a integer/pointer type (assume pointer types are always 64 bits)
// FIXME: should deprecate this function, and get the length from BackendType
pub fn get_int_length(&self) -> Option<usize> {
use types::MuType_::*;
match self.v {
......@@ -193,6 +330,8 @@ impl MuType {
pub type StructTag = MuName;
pub type HybridTag = MuName;
/// MuType_ is used for pattern matching for MuType
#[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub enum MuType_ {
/// int <length>
......@@ -376,12 +515,42 @@ impl MuType_ {
/// creates an unsafe pointer type to the specified referent type
pub fn uptr(referent: P<MuType>) -> MuType_ {
MuType_::UPtr(referent)
}
/// creates an array type of the specified element type and length
pub fn array(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Array(ty, len)
}
/// creates a void type
pub fn void() -> MuType_ {
MuType_::Void
}
/// creates a thread reference type
pub fn threadref() -> MuType_ {
MuType_::ThreadRef
}
/// creates a stack reference type
pub fn stackref() -> MuType_ {
MuType_::StackRef
}
/// creates a tagref64 type
pub fn tagref64() -> MuType_ {
MuType_::Tagref64
}
/// creates a vector type of the specified element type and length
pub fn vector(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Vector(ty, len)
}
/// creates a function reference type with the specified signature
pub fn funcref(sig: P<MuFuncSig>) -> MuType_ {
MuType_::FuncRef(sig)
}
/// creates an unsafe function pointer type with the specified signature
pub fn ufuncptr(sig: P<MuFuncSig>) -> MuType_ {
MuType_::UFuncPtr(sig)
}
/// creates an empty struct type with a tag (we can later put types into the struct)
/// This is used to create a recursive struct type, e.g. T = struct { ref<T> }
pub fn mustruct_empty(tag: MuName) -> MuType_ {
    // register a placeholder with no fields under the tag
    let placeholder = StructType_ { tys: Vec::new() };
    STRUCT_TAG_MAP.write().unwrap().insert(tag.clone(), placeholder);
    MuType_::Struct(tag)
}
/// puts types into an empty struct (created by mustruct_empty())
/// This method will clear existing types declared with the tag, and set struct to the specified types
/// This method panics if the tag does not exist
pub fn mustruct_put(tag: &MuName, mut list: Vec<P<MuType>>) {
let mut map_guard = STRUCT_TAG_MAP.write().unwrap();
......@@ -393,6 +562,7 @@ impl MuType_ {
None => panic!("call mustruct_empty() to create an empty struct before mustruct_put()")
}
}
/// creates a Mu struct with specified field types
pub fn mustruct(tag: StructTag, list: Vec<P<MuType>>) -> MuType_ {
let struct_ty_ = StructType_{tys: list};
......@@ -412,15 +582,18 @@ impl MuType_ {
MuType_::Struct(tag)
}
pub fn array(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Array(ty, len)
}
/// creates an empty hybrid type with a tag (we can later put types into the hybrid)
/// This is used to create a recursive hybrid type, e.g. T = hybrid { ref<T>, ... | ref<T> }
pub fn hybrid_empty(tag: HybridTag) -> MuType_ {
    // register a placeholder (no fix part, void var part) under the tag
    let placeholder = HybridType_ {
        fix_tys: Vec::new(),
        var_ty: VOID_TYPE.clone()
    };
    HYBRID_TAG_MAP.write().unwrap().insert(tag.clone(), placeholder);
    MuType_::Hybrid(tag)
}
/// puts types into an empty hybrid (created by muhybrid_empty())
/// This method will clear existing types declared with the tag, and set hybrid to the specified types
/// This method panics if the tag does not exist
pub fn hybrid_put(tag: &HybridTag, mut fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) {
let mut map_guard = HYBRID_TAG_MAP.write().unwrap();
......@@ -434,6 +607,7 @@ impl MuType_ {
None => panic!("call hybrid_empty() to create an empty struct before hybrid_put()")
}
}
/// creates a Mu hybrid with specified fix part and var part types
pub fn hybrid(tag: HybridTag, fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) -> MuType_ {
let hybrid_ty_ = HybridType_{fix_tys: fix_tys, var_ty: var_ty};
......@@ -452,175 +626,9 @@ impl MuType_ {
MuType_::Hybrid(tag)
}
pub fn void() -> MuType_ {
MuType_::Void
}
pub fn threadref() -> MuType_ {
MuType_::ThreadRef
}
pub fn stackref() -> MuType_ {
MuType_::StackRef
}
pub fn tagref64() -> MuType_ {
MuType_::Tagref64
}
pub fn vector(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Vector(ty, len)
}
pub fn funcref(sig: P<MuFuncSig>) -> MuType_ {
MuType_::FuncRef(sig)
}
pub fn ufuncptr(sig: P<MuFuncSig>) -> MuType_ {
MuType_::UFuncPtr(sig)
}
}
/// is a type floating-point type? (float/double)
pub fn is_fp(ty: &MuType) -> bool {
match ty.v {
MuType_::Float | MuType_::Double => true,
_ => false
}
}
/// is a type raw pointer? (uptr/ufuncptr)
pub fn is_ptr(ty: &MuType) -> bool {
match ty.v {
MuType_::UPtr(_) | MuType_::UFuncPtr(_) => true,
_ => false
}
}
/// is this type a reference type (located in heap)? (includes ref/iref/weakref)
pub fn is_reference(ty: &MuType) -> bool {
match ty.v {
MuType_::Ref(_)
| MuType_::IRef(_)
| MuType_::WeakRef(_) => true,
_ => false
}
}
/// is this an aggregated type (consisted of other types)? (struct/hybrid/array)
pub fn is_aggregate(ty: &MuType) -> bool {
match ty.v {
MuType_::Struct(_)
| MuType_::Hybrid(_)
| MuType_::Array(_, _) => true,
_ => false
}
}
/// is a type scalar type?
/// (an int, a floating point, or any reference/pointer type — anything not aggregated)
pub fn is_scalar(ty: &MuType) -> bool {
match ty.v {
MuType_::Int(_)
| MuType_::Float
| MuType_::Double
| MuType_::Ref(_)
| MuType_::IRef(_)
| MuType_::WeakRef(_)
| MuType_::FuncRef(_)
| MuType_::UFuncPtr(_)
| MuType_::ThreadRef
| MuType_::StackRef
| MuType_::Tagref64
| MuType_::UPtr(_) => true,
_ => false
}
}
/// is a type traced by the garbage collector?
/// Note: An aggregated type is traced if any of its part is traced.
pub fn is_traced(ty: &MuType) -> bool {
match ty.v {
MuType_::Ref(_) => true,
MuType_::IRef(_) => true,
MuType_::WeakRef(_) => true,
// arrays/vectors are traced iff their element type is traced
MuType_::Array(ref elem_ty, _)
| MuType_::Vector(ref elem_ty, _) => is_traced(elem_ty),
MuType_::ThreadRef
| MuType_::StackRef
| MuType_::Tagref64 => true,
// a hybrid is traced if its var part or any fix-part field is traced
// (fold over all fields; does not short-circuit)
MuType_::Hybrid(ref tag) => {
let map = HYBRID_TAG_MAP.read().unwrap();
let hybrid_ty = map.get(tag).unwrap();
let ref fix_tys = hybrid_ty.fix_tys;
let ref var_ty = hybrid_ty.var_ty;
is_traced(var_ty) ||
fix_tys.into_iter().map(|ty| is_traced(ty))
.fold(false, |ret, this| ret || this)
},
// a struct is traced if any of its fields is traced
MuType_::Struct(ref tag) => {
let map = STRUCT_TAG_MAP.read().unwrap();
let struct_ty = map.get(tag).unwrap();
let ref field_tys = struct_ty.tys;
field_tys.into_iter().map(|ty| is_traced(&ty))
.fold(false, |ret, this| ret || this)
},
_ => false
}
}
/// is a type native safe?
/// Note: An aggregated type is native safe if all of its parts are native safe.
pub fn is_native_safe(ty: &MuType) -> bool {
match ty.v {
MuType_::Int(_) => true,
MuType_::Float => true,
MuType_::Double => true,
MuType_::Void => true,
// arrays/vectors are native safe iff their element type is
MuType_::Array(ref elem_ty, _)
| MuType_::Vector(ref elem_ty, _) => is_native_safe(elem_ty),
MuType_::UPtr(_) => true,
MuType_::UFuncPtr(_) => true,
// a hybrid is native safe iff its var part and every fix-part field are
MuType_::Hybrid(ref tag) => {
let map = HYBRID_TAG_MAP.read().unwrap();
let hybrid_ty = map.get(tag).unwrap();
let ref fix_tys = hybrid_ty.fix_tys;
let ref var_ty = hybrid_ty.var_ty;
is_native_safe(var_ty) &&
fix_tys.into_iter().map(|ty| is_native_safe(&ty))
.fold(true, |ret, this| ret && this)
},
// a struct is native safe iff every field is native safe
MuType_::Struct(ref tag) => {
let map = STRUCT_TAG_MAP.read().unwrap();
let struct_ty = map.get(tag).unwrap();
let ref field_tys = struct_ty.tys;
field_tys.into_iter().map(|ty| is_native_safe(&ty))
.fold(true, |ret, this| ret && this)
},
_ => false
}
}
/// gets the referent type for Ref/IRef/WeakRef/UPtr,
/// returns None if the type is not any of those
pub fn get_referent_ty(ty: &MuType) -> Option<P<MuType>> {
match ty.v {
MuType_::Ref(ref referent)
| MuType_::IRef(ref referent)
| MuType_::WeakRef(ref referent)
| MuType_::UPtr(ref referent) => Some(referent.clone()),
_ => None
}
}
/// returns whether the expression `$e` matches the pattern `$p`
macro_rules! is_type (
($e:expr, $p:pat) => (
match $e {
$p => true,
_ => false
}
)
);
// NOTE(review): presumably the signature type used for native (C) functions —
// confirm against call sites
pub type CFuncSig = MuFuncSig;
/// MuFuncSig represents a Mu function signature
#[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct MuFuncSig {
pub hdr: MuEntityHeader,
......@@ -633,3 +641,5 @@ impl fmt::Display for MuFuncSig {
write!(f, "[{}] -> [{}]", vec_utils::as_str(&self.arg_tys), vec_utils::as_str(&self.ret_tys))
}
}
pub type CFuncSig = MuFuncSig;
\ No newline at end of file
......@@ -3425,7 +3425,7 @@ impl <'a> InstructionSelection {
// GETVARPARTIREF < T1 > opnd = opnd + offset_of(T1.var_part)
Instruction_::GetVarPartIRef{base, ..} => {
let struct_ty = match ops[base].clone_value().ty.get_referenced_ty() {
let struct_ty = match ops[base].clone_value().ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
......@@ -3435,13 +3435,13 @@ impl <'a> InstructionSelection {
// SHIFTIREF < T1 T2 > opnd offset = opnd + offset*size_of(T1)
Instruction_::ShiftIRef{base, offset, ..} => {
let element_type = ops[base].clone_value().ty.get_referenced_ty().unwrap();
let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
self.emit_shift_ref(&ops[base], &ops[offset], element_size, f_content, f_context, vm)
}
// GETELEMIREF <T1 T2> opnd index = opnd + index*element_size(T1)
Instruction_::GetElementIRef{base, index, ..} => {
let element_type = ops[base].clone_value().ty.get_referenced_ty().unwrap().get_elem_ty().unwrap();
let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap().get_elem_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
self.emit_shift_ref(&ops[base], &ops[index], element_size, f_content, f_context, vm)
......@@ -3599,7 +3599,7 @@ impl <'a> InstructionSelection {
fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
if !dst_ty.is_fp() && dst_ty.is_scalar() {
if match_node_int_imm(src) {
let src_imm = node_imm_to_u64(src);
if dest.is_int_reg() {
......@@ -3617,7 +3617,7 @@ impl <'a> InstructionSelection {
} else {
panic!("expected src: {}", src);
}
} else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
} else if dst_ty.is_fp() && dst_ty.is_scalar() {
if match_node_int_imm(src) {
if dst_ty.v == MuType_::Double {
let src_imm = node_imm_to_f64(src);
......@@ -3656,7 +3656,7 @@ impl <'a> InstructionSelection {
fn emit_move_value_to_value(&mut self, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let ref src_ty = src.ty;
if types::is_scalar(src_ty) && !types::is_fp(src_ty) {
if src_ty.is_scalar() && !src_ty.is_fp() {
// gpr mov
if dest.is_int_reg() && src.is_int_const() {
let imm = value_imm_to_u64(src);
......@@ -3671,7 +3671,7 @@ impl <'a> InstructionSelection {
} else {
panic!("unexpected gpr mov between {} -> {}", src, dest);
}
} else if types::is_scalar(src_ty) && types::is_fp(src_ty) {
} else if src_ty.is_scalar() && src_ty.is_fp() {
// fpr mov
if dest.is_fp_reg() && match_value_f32imm(src) {
let src = value_imm_to_f32(src);
......
......@@ -19,7 +19,6 @@ use utils::LinkedHashMap;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
use std::str;
use std::usize;
......@@ -3594,7 +3593,7 @@ pub fn spill_rewrite(
let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence();
if is_fp(&temp_ty) {
if temp_ty.is_fp() {
codegen.emit_spill_load_fpr(&temp, spill_mem);
} else {
codegen.emit_spill_load_gpr(&temp, spill_mem);
......@@ -3643,7 +3642,7 @@ pub fn spill_rewrite(
let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence();
if is_fp(&temp.ty) {
if temp.ty.is_fp() {
codegen.emit_spill_store_fpr(spill_mem, &temp);
} else {
codegen.emit_spill_store_gpr(spill_mem, &temp);
......
......@@ -3137,7 +3137,7 @@ impl <'a> InstructionSelection {
match pv.v {
Value_::SSAVar(_) => P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),