Commit e716d22f authored by qinsoon's avatar qinsoon

[wip] refactoring: types — wrap MuType_ in a MuType struct carrying an id and optional name; key VM type table by MuID

parent cee7d9be
......@@ -17,6 +17,12 @@ pub type Address = usize; // TODO: replace this with Address(usize)
pub type OpIndex = usize;
pub const MACHINE_ID_START : usize = 0;
pub const MACHINE_ID_END : usize = 100;
pub const INTERNAL_ID_START: usize = 101;
pub const INTERNAL_ID_END : usize = 200;
pub const USER_ID_START : usize = 201;
#[derive(Debug)]
pub struct MuFunction {
pub id: MuID,
......@@ -72,8 +78,6 @@ impl fmt::Display for MuFunctionVersion {
}
}
pub const RESERVED_NODE_IDS_FOR_MACHINE : usize = 100;
impl MuFunctionVersion {
pub fn new(id: MuID, func: MuID, sig: P<MuFuncSig>) -> MuFunctionVersion {
MuFunctionVersion{
......@@ -468,9 +472,9 @@ impl Value {
pub fn is_int_const(&self) -> bool {
match self.v {
Value_::Constant(_) => {
let ty : &MuType_ = &self.ty;
match ty {
&MuType_::Int(_) => true,
let ty : &MuType = &self.ty;
match ty.v {
MuType_::Int(_) => true,
_ => false
}
}
......
......@@ -56,59 +56,59 @@ pub enum OpCode {
pub fn pick_op_code_for_ssa(ty: &P<MuType>) -> OpCode {
use ast::types::MuType_::*;
let a : &MuType_ = ty;
match a {
let a : &MuType = ty;
match a.v {
// currently use i64 for all ints
&Int(_) => OpCode::RegI64,
Int(_) => OpCode::RegI64,
// currently do not differentiate float and double
&Float
| &Double => OpCode::RegFP,
Float
| Double => OpCode::RegFP,
// ref and pointer types use RegI64
&Ref(_)
| &IRef(_)
| &WeakRef(_)
| &UPtr(_)
| &ThreadRef
| &StackRef
| &Tagref64
| &FuncRef(_)
| &UFuncPtr(_) => OpCode::RegI64,
Ref(_)
| IRef(_)
| WeakRef(_)
| UPtr(_)
| ThreadRef
| StackRef
| Tagref64
| FuncRef(_)
| UFuncPtr(_) => OpCode::RegI64,
// we are not supposed to have these as SSA
&Struct(_)
| &Array(_, _)
| &Hybrid(_, _)
| &Void => panic!("Not expecting {} as SSA", ty),
Struct(_)
| Array(_, _)
| Hybrid(_, _)
| Void => panic!("Not expecting {} as SSA", ty),
// unimplemented
&Vector(_, _) => unimplemented!()
Vector(_, _) => unimplemented!()
}
}
pub fn pick_op_code_for_value(ty: &P<MuType>) -> OpCode {
use ast::types::MuType_::*;
let a : &MuType_ = ty;
match a {
let a : &MuType = ty;
match a.v {
// currently use i64 for all ints
&Int(_) => OpCode::IntImmI64,
Int(_) => OpCode::IntImmI64,
// currently do not differentiate float and double
&Float
| &Double => OpCode::FPImm,
Float
| Double => OpCode::FPImm,
// ref and pointer types use RegI64
&Ref(_)
| &IRef(_)
| &WeakRef(_)
| &UPtr(_)
| &ThreadRef
| &StackRef
| &Tagref64
| &FuncRef(_)
| &UFuncPtr(_) => OpCode::IntImmI64,
Ref(_)
| IRef(_)
| WeakRef(_)
| UPtr(_)
| ThreadRef
| StackRef
| Tagref64
| FuncRef(_)
| UFuncPtr(_) => OpCode::IntImmI64,
// we are not supposed to have these as SSA
&Struct(_)
| &Array(_, _)
| &Hybrid(_, _)
| &Void => unimplemented!(),
Struct(_)
| Array(_, _)
| Hybrid(_, _)
| Void => unimplemented!(),
// unimplemented
&Vector(_, _) => unimplemented!()
Vector(_, _) => unimplemented!()
}
}
......
......@@ -6,7 +6,22 @@ use std::fmt;
use std::collections::HashMap;
use std::sync::RwLock;
pub type MuType = MuType_;
/// A Mu type: a unique ID, an optional human-readable name, and the
/// structural description of the type itself (`MuType_`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct MuType {
// unique identifier for this type
pub id: MuID,
// optional name; None for anonymous/internal types
pub name: Option<MuName>,
// the structural variant (int, ref, struct, ...) this type wraps
pub v: MuType_
}
impl MuType {
    /// Builds an unnamed `MuType` from an id and a structural variant.
    /// The `name` field starts out as `None`.
    pub fn new(id: MuID, v: MuType_) -> MuType {
        MuType { id: id, name: None, v: v }
    }
}
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum MuType_ {
......@@ -57,6 +72,12 @@ pub enum MuType_ {
UFuncPtr (P<MuFuncSig>),
}
impl fmt::Display for MuType {
    /// Displays a type by delegating directly to its structural variant.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.v.fmt(f)
    }
}
impl fmt::Display for MuType_ {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
......@@ -88,7 +109,7 @@ lazy_static! {
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StructType_ {
tys: Vec<P<MuType_>>
tys: Vec<P<MuType>>
}
impl fmt::Display for StructType_ {
......@@ -111,7 +132,7 @@ impl StructType_ {
self.tys.append(&mut list);
}
pub fn get_tys(&self) -> &Vec<P<MuType_>> {
pub fn get_tys(&self) -> &Vec<P<MuType>> {
&self.tys
}
}
......@@ -126,19 +147,16 @@ impl MuType_ {
/// constructs a double-precision floating point type
pub fn double() -> MuType_ {
MuType_::Double
}
pub fn muref(referent: P<MuType_>) -> MuType_ {
pub fn muref(referent: P<MuType>) -> MuType_ {
MuType_::Ref(referent)
}
// NOTE(review): appears stale after the MuType/MuType_ split — `MuType_::Ref`
// now takes `P<MuType>`, but this wraps a bare `MuType_` and supplies no id;
// confirm whether this constructor should be removed or rebuilt via
// `MuType::new(...)`.
pub fn muref_void() -> MuType_ {
MuType_::Ref(P(MuType_::void()))
}
pub fn iref(referent: P<MuType_>) -> MuType_ {
pub fn iref(referent: P<MuType>) -> MuType_ {
MuType_::IRef(referent)
}
pub fn weakref(referent: P<MuType_>) -> MuType_ {
pub fn weakref(referent: P<MuType>) -> MuType_ {
MuType_::WeakRef(referent)
}
pub fn uptr(referent: P<MuType_>) -> MuType_ {
pub fn uptr(referent: P<MuType>) -> MuType_ {
MuType_::UPtr(referent)
}
pub fn mustruct_empty(tag: MuName) -> MuType_ {
......@@ -147,7 +165,7 @@ impl MuType_ {
MuType_::Struct(tag)
}
pub fn mustruct(tag: MuName, list: Vec<P<MuType_>>) -> MuType_ {
pub fn mustruct(tag: MuName, list: Vec<P<MuType>>) -> MuType_ {
let struct_ty_ = StructType_{tys: list};
// if there is an attempt to use a same tag for different struct,
......@@ -167,10 +185,10 @@ impl MuType_ {
MuType_::Struct(tag)
}
pub fn array(ty: P<MuType_>, len: usize) -> MuType_ {
pub fn array(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Array(ty, len)
}
pub fn hybrid(fix_tys: Vec<P<MuType_>>, var_ty: P<MuType_>) -> MuType_ {
pub fn hybrid(fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) -> MuType_ {
MuType_::Hybrid(fix_tys, var_ty)
}
pub fn void() -> MuType_ {
......@@ -185,7 +203,7 @@ impl MuType_ {
/// constructs a tagref64 type (64-bit tagged reference)
pub fn tagref64() -> MuType_ {
MuType_::Tagref64
}
pub fn vector(ty: P<MuType_>, len: usize) -> MuType_ {
pub fn vector(ty: P<MuType>, len: usize) -> MuType_ {
MuType_::Vector(ty, len)
}
pub fn funcref(sig: P<MuFuncSig>) -> MuType_ {
......@@ -198,7 +216,7 @@ impl MuType_ {
/// is a type floating-point type?
pub fn is_fp(ty: &MuType) -> bool {
match *ty {
match ty.v {
MuType_::Float | MuType_::Double => true,
_ => false
}
......@@ -206,7 +224,7 @@ pub fn is_fp(ty: &MuType) -> bool {
/// is a type raw pointer?
pub fn is_ptr(ty: &MuType) -> bool {
match *ty {
match ty.v {
MuType_::UPtr(_) | MuType_::UFuncPtr(_) => true,
_ => false
}
......@@ -214,7 +232,7 @@ pub fn is_ptr(ty: &MuType) -> bool {
/// is a type scalar type?
pub fn is_scalar(ty: &MuType) -> bool {
match *ty {
match ty.v {
MuType_::Int(_)
| MuType_::Float
| MuType_::Double
......@@ -234,7 +252,7 @@ pub fn is_scalar(ty: &MuType) -> bool {
/// is a type traced by the garbage collector?
/// Note: An aggregated type is traced if any of its part is traced.
pub fn is_traced(ty: &MuType) -> bool {
match *ty {
match ty.v {
MuType_::Ref(_) => true,
MuType_::IRef(_) => true,
MuType_::WeakRef(_) => true,
......@@ -263,7 +281,7 @@ pub fn is_traced(ty: &MuType) -> bool {
/// is a type native safe?
/// Note: An aggregated type is native safe if all of its parts are native safe.
pub fn is_native_safe(ty: &MuType) -> bool {
match *ty {
match ty.v {
MuType_::Int(_) => true,
MuType_::Float => true,
MuType_::Double => true,
......@@ -290,10 +308,10 @@ pub fn is_native_safe(ty: &MuType) -> bool {
}
pub fn get_referent_ty(ty: &MuType) -> Option<P<MuType>> {
match ty {
&MuType_::Ref(ref referent)
| &MuType_::IRef(ref referent)
| &MuType_::WeakRef(ref referent) => Some(referent.clone()),
match ty.v {
MuType_::Ref(ref referent)
| MuType_::IRef(ref referent)
| MuType_::WeakRef(ref referent) => Some(referent.clone()),
_ => None
}
}
......
......@@ -496,7 +496,7 @@ impl ASMCodeGen {
fn asm_reg_op(&self, op: &P<Value>) -> String {
let id = op.extract_ssa_id().unwrap();
if id < RESERVED_NODE_IDS_FOR_MACHINE {
if id < MACHINE_ID_END {
// machine reg
format!("%{}", op.name.unwrap())
} else {
......
......@@ -7,7 +7,7 @@ use ast::inst::Instruction_;
use ast::inst::MemoryOrder;
use ast::op;
use ast::types;
use ast::types::MuType_;
use ast::types::*;
use vm::VM;
use vm::CompiledFunction;
......@@ -113,10 +113,10 @@ impl <'a> InstructionSelection {
let ref func = ops[data.func];
let ref func_sig = match func.v {
TreeNode_::Value(ref pv) => {
let ty : &MuType_ = &pv.ty;
match ty {
&MuType_::FuncRef(ref sig)
| &MuType_::UFuncPtr(ref sig) => sig,
let ty : &MuType = &pv.ty;
match ty.v {
MuType_::FuncRef(ref sig)
| MuType_::UFuncPtr(ref sig) => sig,
_ => panic!("expected funcref/ptr type")
}
},
......
......@@ -39,8 +39,8 @@ macro_rules! FPR {
}
lazy_static! {
pub static ref GPR_TY : P<MuType> = P(MuType::int(64));
pub static ref FPR_TY : P<MuType> = P(MuType::double());
pub static ref GPR_TY : P<MuType> = P(MuType::new(INTERNAL_ID_START + 0, MuType_::int(64)));
pub static ref FPR_TY : P<MuType> = P(MuType::new(INTERNAL_ID_START + 1, MuType_::double()));
}
// put into several segments to avoid 'recursion limit reached' error
......
......@@ -43,9 +43,9 @@ use vm::VM;
use ast::types::*;
use ast::ptr::*;
pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
match ty {
match ty.v {
// integral
&MuType_::Int(size_in_bit) => {
MuType_::Int(size_in_bit) => {
match size_in_bit {
8 => BackendTypeInfo{size: 1, alignment: 1, struct_layout: None},
16 => BackendTypeInfo{size: 2, alignment: 2, struct_layout: None},
......@@ -55,26 +55,26 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
}
},
// pointer of any type
&MuType_::Ref(_)
| &MuType_::IRef(_)
| &MuType_::WeakRef(_)
| &MuType_::UPtr(_)
| &MuType_::FuncRef(_)
| &MuType_::UFuncPtr(_)
| &MuType_::Tagref64
| &MuType_::ThreadRef
| &MuType_::StackRef => BackendTypeInfo{size: 8, alignment: 8, struct_layout: None},
MuType_::Ref(_)
| MuType_::IRef(_)
| MuType_::WeakRef(_)
| MuType_::UPtr(_)
| MuType_::FuncRef(_)
| MuType_::UFuncPtr(_)
| MuType_::Tagref64
| MuType_::ThreadRef
| MuType_::StackRef => BackendTypeInfo{size: 8, alignment: 8, struct_layout: None},
// floating point
&MuType_::Float => BackendTypeInfo{size: 4, alignment: 4, struct_layout: None},
&MuType_::Double => BackendTypeInfo{size: 8, alignment: 8, struct_layout: None},
MuType_::Float => BackendTypeInfo{size: 4, alignment: 4, struct_layout: None},
MuType_::Double => BackendTypeInfo{size: 8, alignment: 8, struct_layout: None},
// array
&MuType_::Array(ref ty, len) => {
MuType_::Array(ref ty, len) => {
let ele_ty = vm.get_backend_type_info(ty);
BackendTypeInfo{size: ele_ty.size * len, alignment: ele_ty.alignment, struct_layout: None}
}
// struct
&MuType_::Struct(name) => {
MuType_::Struct(name) => {
let read_lock = STRUCT_TAG_MAP.read().unwrap();
let struc = read_lock.get(name).unwrap();
let tys = struc.get_tys();
......@@ -86,7 +86,7 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
// - align is the most strict aligned element (from all fix tys and var ty)
// - size is fixed tys size
// - layout is fixed tys layout
&MuType_::Hybrid(ref fix_tys, ref var_ty) => {
MuType_::Hybrid(ref fix_tys, ref var_ty) => {
// treat fix_tys as struct
let mut ret = layout_struct(fix_tys, vm);
......@@ -100,13 +100,13 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
ret
}
// void
&MuType_::Void => BackendTypeInfo{size: 0, alignment: 8, struct_layout: None},
MuType_::Void => BackendTypeInfo{size: 0, alignment: 8, struct_layout: None},
// vector
&MuType_::Vector(_, _) => unimplemented!()
MuType_::Vector(_, _) => unimplemented!()
}
}
fn layout_struct(tys: &Vec<P<MuType_>>, vm: &VM) -> BackendTypeInfo {
fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
let mut offsets : Vec<ByteSize> = vec![];
let mut cur : ByteSize = 0;
let mut struct_align : ByteSize = 0;
......
......@@ -254,7 +254,7 @@ impl InterferenceGraph {
}
pub fn is_machine_reg(reg: MuID) -> bool {
if reg < RESERVED_NODE_IDS_FOR_MACHINE {
if reg < MACHINE_ID_END {
true
} else {
false
......
......@@ -48,7 +48,7 @@ impl RegisterAllocation {
let temp = coloring.ig.get_temp_of(node);
// skip machine registers
if temp < RESERVED_NODE_IDS_FOR_MACHINE {
if temp < MACHINE_ID_END {
continue;
} else {
let alias = coloring.get_alias(node);
......
......@@ -444,4 +444,5 @@ macro_rules! mu_entity {
mu_entity!(MuFunction);
mu_entity!(MuFunctionVersion);
mu_entity!(Block);
mu_entity!(TreeNode);
\ No newline at end of file
mu_entity!(TreeNode);
mu_entity!(MuType);
\ No newline at end of file
......@@ -9,6 +9,7 @@ use compiler::backend;
use compiler::backend::BackendTypeInfo;
use vm::machine_code::CompiledFunction;
use vm::vm_options::VMOptions;
use vm::api::*;
use std::sync::RwLock;
use std::cell::RefCell;
......@@ -23,7 +24,7 @@ pub struct VM {
constants: RwLock<HashMap<MuName, P<Value>>>,
types: RwLock<HashMap<MuName, P<MuType>>>,
types: RwLock<HashMap<MuID, P<MuType>>>,
backend_type_info: RwLock<HashMap<P<MuType>, BackendTypeInfo>>,
globals: RwLock<HashMap<MuName, P<GlobalCell>>>,
......@@ -58,7 +59,7 @@ impl <'a> VM {
};
ret.is_running.store(false, Ordering::SeqCst);
ret.next_id.store(RESERVED_NODE_IDS_FOR_MACHINE, Ordering::SeqCst);
ret.next_id.store(USER_ID_START, Ordering::SeqCst);
let options = VMOptions::default();
gc::gc_init(options.immix_size, options.lo_size, options.n_gcthreads);
......@@ -97,16 +98,18 @@ impl <'a> VM {
P(Value{
id: id,
name: Some(global_name),
ty: P(MuType::iref(ty)),
ty: P(MuType::new(self.next_id(), MuType_::iref(ty))),
v: Value_::Global(global.clone())
})
}
pub fn declare_type(&self, type_name: MuName, ty: P<MuType>) -> P<MuType> {
pub fn declare_type(&self, id: MuID, ty: MuType_) -> P<MuType> {
let ty = P(MuType{id: id, name: None, v: ty});
let mut types = self.types.write().unwrap();
debug_assert!(!types.contains_key(type_name));
debug_assert!(!types.contains_key(&id));
types.insert(type_name, ty.clone());
types.insert(ty.id(), ty.clone());
ty
}
......
......@@ -33,8 +33,10 @@ pub fn sum() -> VM {
let vm = VM::new();
// .typedef @int_64 = int<64>
let type_def_int64 = vm.declare_type("int_64", P(MuType::int(64)));
let type_def_int1 = vm.declare_type("int_1", P(MuType::int(1)));
let mut type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
type_def_int64.set_name("int_64");
let mut type_def_int1 = vm.declare_type(vm.next_id(), MuType_::int(1));
type_def_int1.set_name("int_1");
// .const @int_64_0 <@int_64> = 0
// .const @int_64_1 <@int_64> = 1
......@@ -183,20 +185,28 @@ pub fn factorial() -> VM {
// .typedef @void = void
// .typedef @int_8 = int<8>
// .typedef @int_32 = int<32>
let type_def_int64 = vm.declare_type("int_64", P(MuType::int(64)));
let type_def_int1 = vm.declare_type("int_1", P(MuType::int(1)));
let type_def_float = vm.declare_type("float", P(MuType::float()));
let type_def_double = vm.declare_type("double", P(MuType::double()));
let type_def_void = vm.declare_type("void", P(MuType::void()));
let type_def_int8 = vm.declare_type("int8", P(MuType::int(8)));
let type_def_int32 = vm.declare_type("int32", P(MuType::int(32)));
let mut type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
type_def_int64.set_name("int_64");
let mut type_def_int1 = vm.declare_type(vm.next_id(), MuType_::int(1));
type_def_int1.set_name("int_1");
let mut type_def_float = vm.declare_type(vm.next_id(), MuType_::float());
type_def_float.set_name("float");
let mut type_def_double = vm.declare_type(vm.next_id(), MuType_::double());
type_def_double.set_name("double");
let mut type_def_void = vm.declare_type(vm.next_id(), MuType_::void());
type_def_void.set_name("void");
let mut type_def_int8 = vm.declare_type(vm.next_id(), MuType_::int(8));
type_def_int8.set_name("int8");
let mut type_def_int32 = vm.declare_type(vm.next_id(), MuType_::int(32));
type_def_int32.set_name("int32");
// .const @int_64_1 <@int_64> = 1
let const_def_int64_1 = vm.declare_const(vm.next_id(), "int64_1", type_def_int64.clone(), Constant::Int(1));
// .funcsig @fac_sig = (@int_64) -> (@int_64)
let fac_sig = vm.declare_func_sig("fac_sig", vec![type_def_int64.clone()], vec![type_def_int64.clone()]);
let type_def_funcref_fac = vm.declare_type("fac_sig", P(MuType::funcref(fac_sig.clone())));
let mut type_def_funcref_fac = vm.declare_type(vm.next_id(), MuType_::funcref(fac_sig.clone()));
type_def_funcref_fac.set_name("fac_sig");
// .funcdecl @fac <@fac_sig>
let func = MuFunction::new(vm.next_id(), fac_sig.clone());
......@@ -342,8 +352,10 @@ pub fn global_access() -> VM {
// .typedef @int64 = int<64>
// .typedef @iref_int64 = iref<int<64>>
let type_def_int64 = vm.declare_type("int64", P(MuType::int(64)));
let type_def_iref_int64 = vm.declare_type("iref_int64", P(MuType::iref(type_def_int64.clone())));
let mut type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
type_def_int64.set_name("int64");
let mut type_def_iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(type_def_int64.clone()));
type_def_iref_int64.set_name("iref_int64");
// .const @int_64_0 <@int_64> = 0
// .const @int_64_1 <@int_64> = 1
......
......@@ -19,57 +19,57 @@ macro_rules! println_type (
fn create_types() -> Vec<P<MuType>> {
let mut types = vec![];
let t0 = MuType::int(8);
let t0 = MuType::new(0, MuType_::int(8));
types.push(P(t0));
let t1 = MuType::float();
let t1 = MuType::new(1, MuType_::float());
types.push(P(t1));
let t2 = MuType::double();
let t2 = MuType::new(2, MuType_::double());
types.push(P(t2));
let t3 = MuType::muref(types[0].clone());
let t3 = MuType::new(3, MuType_::muref(types[0].clone()));
types.push(P(t3));
let t4 = MuType::iref(types[0].clone());
let t4 = MuType::new(4, MuType_::iref(types[0].clone()));
types.push(P(t4));
let t5 = MuType::weakref(types[0].clone());
let t5 = MuType::new(5, MuType_::weakref(types[0].clone()));
types.push(P(t5));
let t6 = MuType::uptr(types[0].clone());
let t6 = MuType::new(6, MuType_::uptr(types[0].clone()));
types.push(P(t6));
let t7 = MuType::mustruct("MyStructTag1", vec![types[0].clone(), types[1].clone()]);
let t7 = MuType::new(7, MuType_::mustruct("MyStructTag1", vec![types[0].clone(), types[1].clone()]));
types.push(P(t7));
let t8 = MuType::array(types[0].clone(), 5);
let t8 = MuType::new(8, MuType_::array(types[0].clone(), 5));
types.push(P(t8));
let t9 = MuType::hybrid(vec![types[7].clone(), types[1].clone()], types[0].clone());
let t9 = MuType::new(9, MuType_::hybrid(vec![types[7].clone(), types[1].clone()], types[0].clone()));
types.push(P(t9));
let t10 = MuType::void();
let t10 = MuType::new(10, MuType_::void());
types.push(P(t10));
let t11 = MuType::threadref();
let t11 = MuType::new(11, MuType_::threadref());
types.push(P(t11));
let t12 = MuType::stackref();
let t12 = MuType::new(12, MuType_::stackref());
types.push(P(t12));
let t13 = MuType::tagref64();
let t13 = MuType::new(13, MuType_::tagref64());
types.push(P(t13));
let t14 = MuType::vector(types[0].clone(), 5);
let t14 = MuType::new(14, MuType_::vector(types[0].clone(), 5));
types.push(P(t14));
let sig = P(MuFuncSig{ret_tys: vec![types[10].clone()], arg_tys: vec![types[0].clone(), types[0].clone()]});
let t15 = MuType::funcref(sig.clone());
let t15 = MuType::new(15, MuType_::funcref(sig.clone()));
types.push(P(t15));
let t16 = MuType::ufuncptr(sig.clone());
let t16 = MuType::new(16, MuType_::ufuncptr(sig.clone()));
types.push(P(t16));
types
......@@ -107,9 +107,9 @@ fn test_type_constructors() {
#[test]
fn test_cyclic_struct() {
// .typedef @cyclic_struct_ty = struct<ref<@cyclic_struct_ty> int<32>>
let ty = P(MuType::mustruct_empty("MyStructTag2"));
let ref_ty = P(MuType::muref(ty.clone()));
let i32_ty = P(MuType::int(32));
let ty = P(MuType::new(0, MuType_::mustruct_empty("MyStructTag2")));
let ref_ty = P(MuType::new(1, MuType_::muref(ty.clone())));
let i32_ty = P(MuType::new(2, MuType_::int(32)));
{
STRUCT_TAG_MAP.write().unwrap().
......@@ -133,17 +133,17 @@ fn test_is_traced() {
assert_eq!(is_traced(&types[5]), true);
assert_eq!(is_traced(&types[6]), false);
assert_eq!(is_traced(&types[7]), false);
let struct3 = MuType::mustruct("MyStructTag3", vec![types[3].clone(), types[0].clone()]);
let struct3 = MuType::new(100, MuType_::mustruct("MyStructTag3", vec![types[3].clone(), types[0].clone()]));
assert_eq!(is_traced(&struct3), true);
let struct4 = MuType::mustruct("MyStructTag4", vec![types[3].clone(), types[4].clone()]);
let struct4 = MuType::new(101, MuType_::mustruct("MyStructTag4", vec![types[3].clone(), types[4].clone()]));
assert_eq!(is_traced(&struct4), true);
assert_eq!(is_traced(&types[8]), false);
let ref_array = MuType::array(types[3].clone(), 5);
let ref_array = MuType::new(102, MuType_::array(types[3].clone(), 5));
assert_eq!(is_traced(&ref_array), true);
assert_eq!(is_traced(&types[9]), false);
let fix_ref_hybrid = MuType::hybrid(vec![types[3].clone(), types[0].clone()], types[0].clone());
let fix_ref_hybrid = MuType::new(103, MuType_::hybrid(vec![types[3].clone(), types[0].clone()], types[0].clone()));
assert_eq!(is_traced(&fix_ref_hybrid), true);
let var_ref_hybrid = MuType::hybrid(vec![types[0].clone(), types[1].clone()], types[3].clone());
let var_ref_hybrid = MuType::new(104, MuType_::hybrid(vec![types[0].clone(), types[1].clone()], types[3].clone()));
assert_eq!(is_traced(&var_ref_hybrid), true);
assert_eq!(is_traced(&types[10]), false);
assert_eq!(is_traced(&types[11]), true);
......@@ -166,17 +166,17 @@ fn test_is_native_safe() {
assert_eq!(is_native_safe(&types[5]), false);
assert_eq!(is_native_safe(&types[6]), true);
assert_eq!(is_native_safe(&types[7]), true);
let struct3 = MuType::mustruct("MyStructTag3", vec![types[3].clone(), types[0].clone()]);
let struct3 = MuType::new(100, MuType_::mustruct("MyStructTag3", vec![types[3].clone(), types[0].clone()]));
assert_eq!(is_native_safe(&struct3), false);
let struct4 = MuType::mustruct("MyStructTag4", vec![types[3].clone(), types[4].clone()]);
let struct4 = MuType::new(101, MuType_::mustruct("MyStructTag4", vec![types[3].clone(), types[4].clone()]));
assert_eq!(is_native_safe(&struct4), false);
assert_eq!(is_native_safe(&types[8]), true);
let ref_array = MuType::array(types[3].clone(), 5);