Commit 29759a3c authored by qinsoon

finish refactoring/commenting ast crate

parent 16dde904
Pipeline #526 passed in 76 minutes and 33 seconds
......@@ -742,7 +742,7 @@ impl Value {
pub fn is_int_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
-if is_scalar(&self.ty) && !is_fp(&self.ty) {
+if self.ty.is_scalar() && !self.ty.is_fp() {
true
} else {
false
......@@ -766,7 +766,7 @@ impl Value {
pub fn is_fp_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
-if is_scalar(&self.ty) && is_fp(&self.ty) {
+if self.ty.is_scalar() && self.ty.is_fp() {
true
} else {
false
......
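The two hunks above are typical of the whole commit: type predicates that were free functions in ast::types (is_scalar, is_fp, and friends) become methods on the type itself. A minimal sketch of that shape, using an illustrative enum rather than the crate's real MuType:
// Illustrative only; the real MuType wraps a MuType_ enum with many more variants.
enum Ty {
    Int(usize),
    Float,
    Double,
    Ref(Box<Ty>),
    Struct(Vec<Ty>),
}

impl Ty {
    // roughly: anything that fits in a single register, i.e. not an aggregate
    fn is_scalar(&self) -> bool {
        !matches!(self, Ty::Struct(_))
    }
    // floating-point scalar
    fn is_fp(&self) -> bool {
        matches!(self, Ty::Float | Ty::Double)
    }
}

fn main() {
    let t = Ty::Double;
    assert!(t.is_scalar() && t.is_fp()); // the check Value::is_fp_reg now performs
}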
......@@ -41,6 +41,7 @@ macro_rules! impl_mu_entity {
}
}
+/// select between two values based on condition
macro_rules! select_value {
($cond: expr, $res1 : expr, $res2 : expr) => {
if $cond {
......
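The rest of select_value! is collapsed above; presumably the visible if arm yields $res1 and the else arm yields $res2. A self-contained sketch under that assumption, with a made-up call site:
/// select between two values based on condition
macro_rules! select_value {
    ($cond:expr, $res1:expr, $res2:expr) => {
        if $cond {
            $res1
        } else {
            $res2
        }
    };
}

fn main() {
    // hypothetical usage, not taken from the crate
    let width = select_value!(cfg!(target_pointer_width = "64"), 64, 32);
    println!("pointer width: {}", width);
}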
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The AST pointer
//!
//! Provides `P<T>`, a frozen owned smart pointer, as a replacement for `@T` in
//! the AST.
//!
//! # Motivations and benefits
//!
//! * **Identity**: sharing AST nodes is problematic for the various analysis
//! passes (e.g. one may be able to bypass the borrow checker with a shared
//! `ExprAddrOf` node taking a mutable borrow). The only reason `@T` in the
//! AST hasn't caused issues is because of inefficient folding passes which
//! would always deduplicate any such shared nodes. Even if the AST were to
//! switch to an arena, this would still hold, i.e. it couldn't use `&'a T`,
//! but rather a wrapper like `P<'a, T>`.
//!
//! * **Immutability**: `P<T>` disallows mutating its inner `T`, unlike `Box<T>`
//! (unless it contains an `Unsafe` interior, but that may be denied later).
//! This mainly prevents mistakes, but can also enforce a kind of "purity".
//!
//! * **Efficiency**: folding can reuse allocation space for `P<T>` and `Vec<T>`,
//! the latter even when the input and output types differ (as it would be the
//! case with arenas or a GADT AST using type parameters to toggle features).
//!
//! * **Maintainability**: `P<T>` provides a fixed interface - `Deref`,
//! `and_then` and `map` - which can remain fully functional even if the
//! implementation changes (using a special thread-local heap, for example).
//! Moreover, a switch to, e.g. `P<'a, T>` would be easy and mostly automated.
//use std::fmt::{self, Display, Debug};
//use std::hash::{Hash, Hasher};
//use std::ops::Deref;
//use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use std::sync::Arc;
/// P<T> is an alias type for sharable Mu AST components (such as Value, MuFuncSig, MuType, etc.)
// This design is similar to P<T> in the rustc compiler.
// However, instead of using Box<T>, we use Arc<T> to encourage sharing.
pub type P<T> = Arc<T>;
//pub struct P<T: MuEntity> {
// ptr: Arc<T>
//}
#[allow(non_snake_case)]
/// Construct a `P<T>` from a `T` value.
pub fn P<T>(value: T) -> P<T> {
// P {ptr: Arc::new(value)}
Arc::new(value)
}
//impl<T: MuEntity> Deref for P<T> {
// type Target = T;
//
// fn deref<'a>(&'a self) -> &'a T {
// &*self.ptr
// }
//}
//
//impl<T: MuEntity> Clone for P<T> {
// fn clone(&self) -> P<T> {
// P {ptr: self.ptr.clone()}
// }
//}
//
//impl<T: MuEntity + PartialEq> PartialEq for P<T> {
// fn eq(&self, other: &P<T>) -> bool {
// **self == **other
// }
//}
//
//impl<T: MuEntity + Eq> Eq for P<T> {}
//
//impl<T: MuEntity + Debug> Debug for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Debug::fmt(&**self, f)
// }
//}
//impl<T: MuEntity + Display> Display for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Display::fmt(&**self, f)
// }
//}
//
//impl<T: MuEntity> fmt::Pointer for P<T> {
// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// fmt::Pointer::fmt(&self.ptr, f)
// }
//}
//
//impl<T: MuEntity + Hash> Hash for P<T> {
// fn hash<H: Hasher>(&self, state: &mut H) {
// (**self).hash(state);
// }
//}
//impl<T: MuEntity> Encodable for P<T> {
// fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
// s.emit_usize(self.id())
// }
//}
}
\ No newline at end of file
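Because P<T> is just an alias for Arc<T>, cloning a node handle only bumps a reference count and the pointee stays read-only through the handle, which is the sharing/immutability trade-off the module doc above argues for. A small usage sketch, with a plain Vec standing in for a Mu AST node:
use std::sync::Arc;

pub type P<T> = Arc<T>;

#[allow(non_snake_case)]
pub fn P<T>(value: T) -> P<T> {
    Arc::new(value)
}

fn main() {
    let a = P(vec![1, 2, 3]);      // wrap a node once
    let b = a.clone();             // cheap share: bumps the refcount, no deep copy
    assert!(Arc::ptr_eq(&a, &b));  // both handles point at the same allocation
    assert_eq!(*a, vec![1, 2, 3]); // Deref gives read-only access; no mutation through P<T>
}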
This diff is collapsed.
......@@ -3425,7 +3425,7 @@ impl <'a> InstructionSelection {
// GETVARPARTIREF < T1 > opnd = opnd + offset_of(T1.var_part)
Instruction_::GetVarPartIRef{base, ..} => {
-let struct_ty = match ops[base].clone_value().ty.get_referenced_ty() {
+let struct_ty = match ops[base].clone_value().ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
......@@ -3435,13 +3435,13 @@ impl <'a> InstructionSelection {
// SHIFTIREF < T1 T2 > opnd offset = opnd + offset*size_of(T1)
Instruction_::ShiftIRef{base, offset, ..} => {
-let element_type = ops[base].clone_value().ty.get_referenced_ty().unwrap();
+let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
self.emit_shift_ref(&ops[base], &ops[offset], element_size, f_content, f_context, vm)
}
// GETELEMIREF <T1 T2> opnd index = opnd + index*element_size(T1)
Instruction_::GetElementIRef{base, index, ..} => {
-let element_type = ops[base].clone_value().ty.get_referenced_ty().unwrap().get_elem_ty().unwrap();
+let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap().get_elem_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
self.emit_shift_ref(&ops[base], &ops[index], element_size, f_content, f_context, vm)
......@@ -3599,7 +3599,7 @@ impl <'a> InstructionSelection {
fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty;
-if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
+if !dst_ty.is_fp() && dst_ty.is_scalar() {
if match_node_int_imm(src) {
let src_imm = node_imm_to_u64(src);
if dest.is_int_reg() {
......@@ -3617,7 +3617,7 @@ impl <'a> InstructionSelection {
} else {
panic!("expected src: {}", src);
}
-} else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
+} else if dst_ty.is_fp() && dst_ty.is_scalar() {
if match_node_int_imm(src) {
if dst_ty.v == MuType_::Double {
let src_imm = node_imm_to_f64(src);
......@@ -3656,7 +3656,7 @@ impl <'a> InstructionSelection {
fn emit_move_value_to_value(&mut self, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
let ref src_ty = src.ty;
-if types::is_scalar(src_ty) && !types::is_fp(src_ty) {
+if src_ty.is_scalar() && !src_ty.is_fp() {
// gpr mov
if dest.is_int_reg() && src.is_int_const() {
let imm = value_imm_to_u64(src);
......@@ -3671,7 +3671,7 @@ impl <'a> InstructionSelection {
} else {
panic!("unexpected gpr mov between {} -> {}", src, dest);
}
-} else if types::is_scalar(src_ty) && types::is_fp(src_ty) {
+} else if src_ty.is_scalar() && src_ty.is_fp() {
// fpr mov
if dest.is_fp_reg() && match_value_f32imm(src) {
let src = value_imm_to_f32(src);
......
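The GETVARPARTIREF / SHIFTIREF / GETELEMIREF comments in the hunks above spell out the address arithmetic the instruction selector implements. Written as plain integer math (illustrative only; the real code emits machine instructions through emit_shift_ref and related helpers):
// SHIFTIREF and GETELEMIREF share this shape: opnd + offset * size_of(T1),
// with element_size taken from the backend type info of the referent type.
fn shift_iref(base: u64, offset: u64, element_size: u64) -> u64 {
    base + offset * element_size
}

// GETVARPARTIREF just adds a fixed offset: opnd + offset_of(T1.var_part).
fn get_var_part_iref(base: u64, var_part_offset: u64) -> u64 {
    base + var_part_offset
}

fn main() {
    // e.g. the 3rd element of an array of 8-byte elements starting at 0x1000
    assert_eq!(shift_iref(0x1000, 3, 8), 0x1018);
    assert_eq!(get_var_part_iref(0x1000, 16), 0x1010);
}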
......@@ -19,7 +19,6 @@ use utils::LinkedHashMap;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
-use std::str;
use std::usize;
......@@ -3594,7 +3593,7 @@ pub fn spill_rewrite(
let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence();
-if is_fp(&temp_ty) {
+if temp_ty.is_fp() {
codegen.emit_spill_load_fpr(&temp, spill_mem);
} else {
codegen.emit_spill_load_gpr(&temp, spill_mem);
......@@ -3643,7 +3642,7 @@ pub fn spill_rewrite(
let mut codegen = ASMCodeGen::new();
codegen.start_code_sequence();
-if is_fp(&temp.ty) {
+if temp.ty.is_fp() {
codegen.emit_spill_store_fpr(spill_mem, &temp);
} else {
codegen.emit_spill_store_gpr(spill_mem, &temp);
......
......@@ -3137,7 +3137,7 @@ impl <'a> InstructionSelection {
match pv.v {
Value_::SSAVar(_) => P(Value{
hdr: MuEntityHeader::unnamed(vm.next_id()),
-ty: types::get_referent_ty(& pv.ty).unwrap(),
+ty: pv.ty.get_referent_ty().unwrap(),
v: Value_::Memory(MemoryLocation::Address{
base: pv.clone(),
offset: None,
......@@ -3154,7 +3154,7 @@ impl <'a> InstructionSelection {
if cfg!(target_os = "macos") {
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
-ty: types::get_referent_ty(&pv.ty).unwrap(),
+ty: pv.ty.get_referent_ty().unwrap(),
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: pv.name().unwrap(),
......@@ -3179,7 +3179,7 @@ impl <'a> InstructionSelection {
let actual_loc = self.make_temporary(f_context, pv.ty.clone(), vm);
self.emit_move_value_to_value(&actual_loc, &got_loc);
-self.make_memory_op_base_offset(&actual_loc, 0, types::get_referent_ty(&pv.ty).unwrap(), vm)
+self.make_memory_op_base_offset(&actual_loc, 0, pv.ty.get_referent_ty().unwrap(), vm)
} else {
unimplemented!()
}
......@@ -3303,7 +3303,7 @@ impl <'a> InstructionSelection {
Instruction_::GetVarPartIRef{base, ..} => {
let ref base = ops[base];
-let struct_ty = match base.clone_value().ty.get_referenced_ty() {
+let struct_ty = match base.clone_value().ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
......@@ -3341,7 +3341,7 @@ impl <'a> InstructionSelection {
let ref offset = ops[offset];
let ref base_ty = base.clone_value().ty;
-let ele_ty = match base_ty.get_referenced_ty() {
+let ele_ty = match base_ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expected op in ShiftIRef of type IRef, found type: {}", base_ty)
};
......@@ -3434,7 +3434,7 @@ impl <'a> InstructionSelection {
let ref index = ops[index];
let ref iref_array_ty = base.clone_value().ty;
-let array_ty = match iref_array_ty.get_referenced_ty() {
+let array_ty = match iref_array_ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expected base in GetElemIRef to be type IRef, found {}", iref_array_ty)
};
......@@ -3623,7 +3623,7 @@ impl <'a> InstructionSelection {
fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty;
-if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
+if !dst_ty.is_fp() && dst_ty.is_scalar() {
if self.match_iimm(src) {
let (src_imm, src_len) = self.node_iimm_to_i32_with_len(src);
if dest.is_int_reg() {
......@@ -3639,7 +3639,7 @@ impl <'a> InstructionSelection {
} else {
panic!("expected src: {}", src);
}
-} else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
+if dst_ty.is_fp() && dst_ty.is_scalar() {
if self.match_fpreg(src) {
let src_reg = self.emit_fpreg(src, f_content, f_context, vm);
self.emit_move_value_to_value(dest, &src_reg)
......@@ -3654,7 +3654,7 @@ impl <'a> InstructionSelection {
fn emit_move_value_to_value(&mut self, dest: &P<Value>, src: &P<Value>) {
let ref src_ty = src.ty;
-if types::is_scalar(src_ty) && !types::is_fp(src_ty) {
+if src_ty.is_scalar() && !src_ty.is_fp() {
// gpr mov
if dest.is_int_reg() && src.is_int_reg() {
self.backend.emit_mov_r_r(dest, src);
......@@ -3671,7 +3671,7 @@ impl <'a> InstructionSelection {
} else {
panic!("unexpected gpr mov between {} -> {}", src, dest);
}
-} else if types::is_scalar(src_ty) && types::is_fp(src_ty) {
+} else if src_ty.is_scalar() && src_ty.is_fp() {
// fpr mov
if dest.is_fp_reg() && src.is_fp_reg() {
self.backend.emit_movsd_f64_f64(dest, src);
......
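emit_move_value_to_value in both backends chooses between a general-purpose and a floating-point register move purely from the two type predicates, which is why the refactor touches every branch. A stripped-down sketch of that dispatch (the enum and function here are hypothetical, not part of the backend):
#[derive(Debug, PartialEq)]
enum MoveKind { Gpr, Fpr, Unsupported }

// the two booleans stand in for src_ty.is_scalar() and src_ty.is_fp()
fn classify_move(is_scalar: bool, is_fp: bool) -> MoveKind {
    if is_scalar && !is_fp {
        MoveKind::Gpr          // gpr mov (emit_mov_r_r, or an immediate move)
    } else if is_scalar && is_fp {
        MoveKind::Fpr          // fpr mov (emit_movsd_f64_f64 for doubles)
    } else {
        MoveKind::Unsupported  // non-scalar moves are not shown in the hunks above
    }
}

fn main() {
    assert_eq!(classify_move(true, false), MoveKind::Gpr);
    assert_eq!(classify_move(true, true), MoveKind::Fpr);
}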
......@@ -3,7 +3,6 @@ pub mod reg_alloc;
pub mod peephole_opt;
pub mod code_emission;
-use ast::types;
use utils::ByteSize;
use utils::math::align_up;
use runtime::mm;
......@@ -253,13 +252,13 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
// for convenience, if the struct contains other struct/array
// we do not use reference map
-if types::is_aggregate(ty) {
+if ty.is_aggregate() {
use_ref_offsets = false;
}
// if this type is reference type, we store its offsets
// we may not use this ref map though
-if types::is_reference(ty) {
+if ty.is_heap_reference() {
ref_offsets.push(cur);
}
// always store its gc type (we may not use it as well)
......
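layout_struct walks the member types, tracking the current offset and recording the offsets of heap references so the GC knows where to trace. A simplified sketch of that bookkeeping under assumed sizes and alignments (the real function takes both from vm.get_backend_type_info and also records a GC type):
// Stand-in for utils::math::align_up.
fn align_up(x: usize, align: usize) -> usize {
    (x + align - 1) / align * align
}

// Hypothetical field description; sizes and alignments are supplied directly here.
struct Field { size: usize, align: usize, is_heap_ref: bool }

fn ref_offsets(fields: &[Field]) -> Vec<usize> {
    let mut offsets = vec![];
    let mut cur = 0;
    for f in fields {
        cur = align_up(cur, f.align);
        if f.is_heap_ref {
            // record where the GC needs to trace a reference
            offsets.push(cur);
        }
        cur += f.size;
    }
    offsets
}

fn main() {
    let fields = [
        Field { size: 4, align: 4, is_heap_ref: false }, // e.g. int<32>
        Field { size: 8, align: 8, is_heap_ref: true },  // e.g. ref<T>
    ];
    assert_eq!(ref_offsets(&fields), vec![8]);
}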
......@@ -69,7 +69,7 @@ pub fn allocate_hybrid(ty: P<MuType>, len: u64, backendtype: Box<BackendTypeInfo
}
pub fn allocate_global(iref_global: P<Value>, backendtype: Box<BackendTypeInfo>) -> ValueLocation {
-let referenced_type = match iref_global.ty.get_referenced_ty() {
+let referenced_type = match iref_global.ty.get_referent_ty() {
Some(ty) => ty,
None => panic!("expected global to be an iref type, found {}", iref_global.ty)
};
......
......@@ -2005,7 +2005,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
let op_ty = self.ensure_type_rec(tys[0]);
let op = self.get_treenode(fcb, args[0]);
-let referent_ty = match op_ty.get_referenced_ty() {
+let referent_ty = match op_ty.get_referent_ty() {
Some(ty) => ty,
_ => panic!("expected ty in PIN to be ref/iref, found {}", op_ty)
};
......
......@@ -686,7 +686,7 @@ impl <'a> VM {
global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
id: MuID, val: P<Value>
) {
-let backend_ty = self.get_backend_type_info(val.ty.get_referenced_ty().unwrap().id());
+let backend_ty = self.get_backend_type_info(val.ty.get_referent_ty().unwrap().id());
let loc = gc::allocate_global(val, backend_ty);
info!("allocate global #{} as {}", id, loc);
global_locs.insert(id, loc);
......@@ -1151,7 +1151,7 @@ impl <'a> VM {
match from_op.v {
APIHandleValue::Ref(_, addr) => {
assert!(to_ty.is_ref());
-let inner_ty = to_ty.get_referenced_ty().unwrap();
+let inner_ty = to_ty.get_referent_ty().unwrap();
self.new_handle(APIHandle {
id: handle_id,
......@@ -1160,7 +1160,7 @@ impl <'a> VM {
},
APIHandleValue::IRef(_, addr) => {
assert!(to_ty.is_iref());
-let inner_ty = to_ty.get_referenced_ty().unwrap();
+let inner_ty = to_ty.get_referent_ty().unwrap();
self.new_handle(APIHandle {
id: handle_id,
......@@ -1414,7 +1414,7 @@ impl <'a> VM {
let global_inner_ty = {
let global_lock = self.globals.read().unwrap();
-global_lock.get(&id).unwrap().ty.get_referenced_ty().unwrap()
+global_lock.get(&id).unwrap().ty.get_referent_ty().unwrap()
};
let handle_id = self.next_id();
......
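get_referenced_ty is renamed to get_referent_ty throughout the backend and the VM. Judging from the call sites and panic messages above, it returns the pointee type of a ref/iref/uptr and None for anything else; a simplified sketch with an illustrative enum:
// Illustrative only: a cut-down stand-in for MuType::get_referent_ty().
enum Ty {
    Int(usize),
    Ref(Box<Ty>),
    IRef(Box<Ty>),
    UPtr(Box<Ty>),
}

impl Ty {
    fn get_referent_ty(&self) -> Option<&Ty> {
        match self {
            Ty::Ref(t) | Ty::IRef(t) | Ty::UPtr(t) => Some(&**t),
            _ => None,
        }
    }
}

fn main() {
    let iref_int = Ty::IRef(Box::new(Ty::Int(64)));
    assert!(iref_int.get_referent_ty().is_some());
    assert!(Ty::Int(64).get_referent_ty().is_none());
}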
......@@ -126,65 +126,65 @@ fn test_cyclic_struct() {
fn test_is_traced() {
let types = create_types();
-assert_eq!(is_traced(&types[0]), false);
-assert_eq!(is_traced(&types[1]), false);
-assert_eq!(is_traced(&types[2]), false);
-assert_eq!(is_traced(&types[3]), true);
-assert_eq!(is_traced(&types[4]), true);
-assert_eq!(is_traced(&types[5]), true);
-assert_eq!(is_traced(&types[6]), false);
-assert_eq!(is_traced(&types[7]), false);
+assert_eq!(types[0].is_traced(), false);
+assert_eq!(types[1].is_traced(), false);
+assert_eq!(types[2].is_traced(), false);
+assert_eq!(types[3].is_traced(), true);
+assert_eq!(types[4].is_traced(), true);
+assert_eq!(types[5].is_traced(), true);
+assert_eq!(types[6].is_traced(), false);
+assert_eq!(types[7].is_traced(), false);
let struct3 = MuType::new(100, MuType_::mustruct("MyStructTag3".to_string(), vec![types[3].clone(), types[0].clone()]));
-assert_eq!(is_traced(&struct3), true);
+assert_eq!(struct3.is_traced(), true);
let struct4 = MuType::new(101, MuType_::mustruct("MyStructTag4".to_string(), vec![types[3].clone(), types[4].clone()]));
-assert_eq!(is_traced(&struct4), true);
-assert_eq!(is_traced(&types[8]), false);
+assert_eq!(struct4.is_traced(), true);
+assert_eq!(types[8].is_traced(), false);
let ref_array = MuType::new(102, MuType_::array(types[3].clone(), 5));
-assert_eq!(is_traced(&ref_array), true);
-assert_eq!(is_traced(&types[9]), false);
+assert_eq!(ref_array.is_traced(), true);
+assert_eq!(types[9].is_traced(), false);
let fix_ref_hybrid = MuType::new(103, MuType_::hybrid("FixRefHybrid".to_string(), vec![types[3].clone(), types[0].clone()], types[0].clone()));
-assert_eq!(is_traced(&fix_ref_hybrid), true);
+assert_eq!(fix_ref_hybrid.is_traced(), true);
let var_ref_hybrid = MuType::new(104, MuType_::hybrid("VarRefHybrid".to_string(), vec![types[0].clone(), types[1].clone()], types[3].clone()));
-assert_eq!(is_traced(&var_ref_hybrid), true);
-assert_eq!(is_traced(&types[10]), false);
-assert_eq!(is_traced(&types[11]), true);
-assert_eq!(is_traced(&types[12]), true);
-assert_eq!(is_traced(&types[13]), true);
-assert_eq!(is_traced(&types[14]), false);
-assert_eq!(is_traced(&types[15]), false);
-assert_eq!(is_traced(&types[16]), false);
+assert_eq!(var_ref_hybrid.is_traced(), true);
+assert_eq!(types[10].is_traced(), false);
+assert_eq!(types[11].is_traced(), true);
+assert_eq!(types[12].is_traced(), true);
+assert_eq!(types[13].is_traced(), true);
+assert_eq!(types[14].is_traced(), false);
+assert_eq!(types[15].is_traced(), false);
+assert_eq!(types[16].is_traced(), false);
}
#[test]
fn test_is_native_safe() {
let types = create_types();
-assert_eq!(is_native_safe(&types[0]), true);
-assert_eq!(is_native_safe(&types[1]), true);
-assert_eq!(is_native_safe(&types[2]), true);
-assert_eq!(is_native_safe(&types[3]), false);
-assert_eq!(is_native_safe(&types[4]), false);
-assert_eq!(is_native_safe(&types[5]), false);
-assert_eq!(is_native_safe(&types[6]), true);
-assert_eq!(is_native_safe(&types[7]), true);
+assert_eq!(types[0].is_native_safe(), true);
+assert_eq!(types[1].is_native_safe(), true);
+assert_eq!(types[2].is_native_safe(), true);
+assert_eq!(types[3].is_native_safe(), false);
+assert_eq!(types[4].is_native_safe(), false);
+assert_eq!(types[5].is_native_safe(), false);
+assert_eq!(types[6].is_native_safe(), true);
+assert_eq!(types[7].is_native_safe(), true);
let struct3 = MuType::new(100, MuType_::mustruct("MyStructTag3".to_string(), vec![types[3].clone(), types[0].clone()]));
-assert_eq!(is_native_safe(&struct3), false);
+assert_eq!(struct3.is_native_safe(), false);
let struct4 = MuType::new(101, MuType_::mustruct("MyStructTag4".to_string(), vec![types[3].clone(), types[4].clone()]));
-assert_eq!(is_native_safe(&struct4), false);
-assert_eq!(is_native_safe(&types[8]), true);
+assert_eq!(struct4.is_native_safe(), false);
+assert_eq!(types[8].is_native_safe(), true);
let ref_array = MuType::new(102, MuType_::array(types[3].clone(), 5));
-assert_eq!(is_native_safe(&ref_array), false);
-assert_eq!(is_native_safe(&types[9]), true);
+assert_eq!(ref_array.is_native_safe(), false);
+assert_eq!(types[9].is_native_safe(), true);
let fix_ref_hybrid = MuType::new(103, MuType_::hybrid("FixRefHybrid".to_string(), vec![types[3].clone(), types[0].clone()], types[0].clone()));
-assert_eq!(is_native_safe(&fix_ref_hybrid), false);
+assert_eq!(fix_ref_hybrid.is_native_safe(), false);
let var_ref_hybrid = MuType::new(104, MuType_::hybrid("VarRefHybrid".to_string(), vec![types[0].clone(), types[1].clone()], types[3].clone()));
-assert_eq!(is_native_safe(&var_ref_hybrid), false);
-assert_eq!(is_native_safe(&types[10]), true);
-assert_eq!(is_native_safe(&types[11]), false);
-assert_eq!(is_native_safe(&types[12]), false);
-assert_eq!(is_native_safe(&types[13]), false);
-assert_eq!(is_native_safe(&types[14]), true);
-assert_eq!(is_native_safe(&types[15]), false); // funcref is not native safe
+assert_eq!(var_ref_hybrid.is_native_safe(), false);
+assert_eq!(types[10].is_native_safe(), true);
+assert_eq!(types[11].is_native_safe(), false);
+assert_eq!(types[12].is_native_safe(), false);
+assert_eq!(types[13].is_native_safe(), false);
+assert_eq!(types[14].is_native_safe(), true);
+assert_eq!(types[15].is_native_safe(), false); // funcref is not native safe
// and not traced either
-assert_eq!(is_native_safe(&types[16]), true);
+assert_eq!(types[16].is_native_safe(), true);
}
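The tests encode what the two predicates mean: a type is traced if it can transitively contain heap references the GC must follow, and native safe if it is safe to hand to native code, with funcref being the case that is neither traced nor native safe. A cut-down recursive sketch of that idea (illustrative, not the crate's implementation):
// Illustrative only: a reduced type enum to show the shape of the two predicates.
enum Ty {
    Int(usize),
    Double,
    Ref(Box<Ty>),      // heap reference: traced, not native safe
    FuncRef,           // not traced, but still not native safe
    Struct(Vec<Ty>),
}

impl Ty {
    // does the GC need to trace through values of this type?
    fn is_traced(&self) -> bool {
        match self {
            Ty::Ref(_) => true,
            Ty::Struct(fields) => fields.iter().any(|f| f.is_traced()),
            _ => false,
        }
    }
    // can values of this type be handed to native code as-is?
    fn is_native_safe(&self) -> bool {
        match self {
            Ty::Ref(_) | Ty::FuncRef => false,
            Ty::Struct(fields) => fields.iter().all(|f| f.is_native_safe()),
            _ => true,
        }
    }
}

fn main() {
    let s = Ty::Struct(vec![Ty::Int(64), Ty::Ref(Box::new(Ty::Double))]);
    assert!(s.is_traced() && !s.is_native_safe());
    assert!(!Ty::FuncRef.is_traced() && !Ty::FuncRef.is_native_safe());
}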