Commit 327b3de6 authored by qinsoon

remove warnings (based on Rust 1.20)

parent 86e3722c
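Most of the hunks in this commit silence Rust 1.20's unused_mut lint: a binding that merely holds a mutable reference returned by get_mut, as_mut or iter_mut does not itself need to be declared mut, because all mutation happens through the reference. A minimal standalone sketch of the pattern (hypothetical names, not code from this repository):

use std::collections::HashMap;

fn main() {
    let mut uses: HashMap<u32, Vec<&str>> = HashMap::new();
    uses.insert(1, vec!["loc_a"]);
    {
        // Rust 1.20 warns unused_mut if this is written as
        // `let mut locs = uses.get_mut(&1).unwrap();` -- the binding is never
        // reassigned; all mutation goes through the &mut Vec it holds.
        let locs = uses.get_mut(&1).unwrap();
        locs.push("loc_b");
    }
    println!("{:?}", uses);
}

The same reasoning covers the lock and Option accessors further down (gccontext_guard.as_mut().unwrap(), gc_lock.as_mut().unwrap()): the guard stays mut where it is written through, but the borrowed reference binding does not.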
......@@ -1718,7 +1718,7 @@ impl ASMCodeGen {
// merge use vec
if uses.contains_key(&id1) {
- let mut locs = uses.get_mut(&id1).unwrap();
+ let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1.clone());
} else {
uses.insert(id1, vec![loc1]);
......@@ -1738,7 +1738,7 @@ impl ASMCodeGen {
let (mem, mut uses) = self.prepare_mem(op2, inst.len() + 1 + reg.len() + 1);
if uses.contains_key(&id) {
- let mut locs = uses.get_mut(&id).unwrap();
+ let locs = uses.get_mut(&id).unwrap();
vec_utils::add_unique(locs, loc.clone());
} else {
uses.insert(id, vec![loc.clone()]);
......@@ -1844,7 +1844,7 @@ impl ASMCodeGen {
let (reg, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + mem.len() + 1);
if uses.contains_key(&id1) {
- let mut locs = uses.get_mut(&id1).unwrap();
+ let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1.clone());
} else {
uses.insert(id1, vec![loc1.clone()]);
......@@ -2082,7 +2082,7 @@ impl ASMCodeGen {
// the register we used for the memory location is counted as 'use'
// use the vec from mem as 'use' (push use reg from src to it)
if uses.contains_key(&id1) {
- let mut locs = uses.get_mut(&id1).unwrap();
+ let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1);
} else {
uses.insert(id1, vec![loc1]);
......
......@@ -20,7 +20,6 @@ use ast::op::*;
use ast::types::*;
use vm::VM;
use runtime::mm;
- use runtime::mm::OBJECT_HEADER_SIZE;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
......
......@@ -128,7 +128,7 @@ impl PeepholeOptimization {
}
fn remove_unnecessary_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
- let mut mc = cf.mc_mut();
+ let mc = cf.mc_mut();
// if this is last instruction, return
if inst == mc.number_of_insts() - 1 {
......@@ -158,7 +158,7 @@ impl PeepholeOptimization {
}
fn remove_jump_to_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
- let mut mc = cf.mc_mut();
+ let mc = cf.mc_mut();
// the instruction that we may rewrite
let orig_inst = inst;
......
......@@ -424,7 +424,7 @@ impl<'a> GraphColoring<'a> {
) {
trace!(" add {:?} to movelist[{}]", mov, reg);
if movelist.contains_key(&reg) {
- let mut list = movelist.get_mut(&reg).unwrap();
+ let list = movelist.get_mut(&reg).unwrap();
list.insert(mov);
} else {
let mut list = LinkedHashSet::new();
......
......@@ -758,7 +758,7 @@ fn global_liveness_analysis(
// in <- use + (out - def)
{
- let mut inset = livein.get_mut(node).unwrap();
+ let inset = livein.get_mut(node).unwrap();
inset.clear();
......@@ -776,7 +776,7 @@ fn global_liveness_analysis(
// out[n] <- union(in[s] for every successor s of n)
{
- let mut outset = liveout.get_mut(node).unwrap();
+ let outset = liveout.get_mut(node).unwrap();
outset.clear();
for s in cfg_node.succ.iter() {
......
......@@ -194,7 +194,7 @@ pub fn validate_regalloc(
if visited.contains_key(&block) {
// if current block exists in visited, intersect with current
- let mut old = visited.get_mut(&block).unwrap();
+ let old = visited.get_mut(&block).unwrap();
let changed = old.intersect(&alive);
if changed {
......
......@@ -195,7 +195,7 @@ fn dfs(cur: MuID, stack: &mut Vec<MuID>, visited: &mut Vec<MuID>, func: &mut MuF
target: MuID,
prob: f32| {
if map.contains_key(&target) {
- let mut edge : &mut BlockEdge = map.get_mut(&target).unwrap();
+ let edge: &mut BlockEdge = map.get_mut(&target).unwrap();
edge.probability += prob;
} else {
map.insert(
......
......@@ -57,7 +57,7 @@ impl CompilerPass for GenMovPhi {
let mut new_blocks_to_insert: Vec<IntermediateBlockInfo> = vec![];
// first step - collects info on intermediate blocks
- for (blk_id, mut block) in f_content.blocks.iter_mut() {
+ for (blk_id, block) in f_content.blocks.iter_mut() {
trace!("block: {}", blk_id);
// old block content
......@@ -313,7 +313,7 @@ impl CompilerPass for GenMovPhi {
block_info.blk_name.clone()
));
- let mut target_block = f_content.get_block_mut(target_id);
+ let target_block = f_content.get_block_mut(target_id);
assert!(target_block.content.is_some());
// if target_block is an exception block,
......
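Several of the for-loop hunks above and below are the same unused_mut warning in pattern position: iter_mut() already yields mutable references, so writing "mut block" (or "mut m", "mut t") in the binding pattern is redundant under Rust 1.20. A small hypothetical illustration, not taken from the repository:

use std::collections::HashMap;

fn main() {
    let mut blocks: HashMap<u32, Vec<i32>> = HashMap::new();
    blocks.insert(1, vec![10]);
    // iter_mut() yields (&u32, &mut Vec<i32>); the value binding is already a
    // mutable reference, so marking it `mut block` would trigger unused_mut.
    for (blk_id, block) in blocks.iter_mut() {
        block.push(*blk_id as i32);
    }
    println!("{:?}", blocks);
}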
......@@ -48,6 +48,7 @@ impl CompilerPass for InjectRuntime {
self
}
+ #[allow(unused_variables)]
fn finish_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
debug!("after inject runtime: ");
......@@ -61,7 +62,7 @@ impl CompilerPass for InjectRuntime {
let mut new_blocks = vec![];
- for (_, mut block) in blocks.into_iter() {
+ for (_, block) in blocks.into_iter() {
// get all the instructions of this block, so we can iterate through them
let body_copy = block.content.as_ref().unwrap().body.clone();
......
......@@ -158,7 +158,7 @@ impl Inlining {
let call_edges = func.get_static_call_edges();
- let mut f_content = func.content.as_mut().unwrap();
+ let f_content = func.content.as_mut().unwrap();
let ref mut f_context = func.context;
let mut new_blocks: Vec<Block> = vec![];
......
......@@ -88,7 +88,7 @@ impl CompilerPass for RetSink {
// rewrite existing RET instruction to a BRANCH
// use RET values as BRANCH's goto values
let mut has_ret: bool = false;
- for (blk_id, mut block) in f_content.blocks.iter_mut() {
+ for (blk_id, block) in f_content.blocks.iter_mut() {
trace!("block: {}", blk_id);
// old block content
......
......@@ -304,7 +304,7 @@ fn branch_adjustment(func: &mut MuFunctionVersion, vm: &VM) {
let mut f_content = func.content.take().unwrap();
let mut new_blocks: Vec<Block> = vec![];
- for (blk_id, mut block) in f_content.blocks.iter_mut() {
+ for (blk_id, block) in f_content.blocks.iter_mut() {
trace_if!(LOG_TRACE_SCHEDULE, "block: {} #{}", block, blk_id);
let next_block_in_trace: Option<usize> = {
......
......@@ -47,7 +47,7 @@ pub fn init(n_gcthreads: usize) {
pub fn trigger_gc() {
trace!("Triggering GC...");
- for mut m in MUTATORS.write().unwrap().iter_mut() {
+ for m in MUTATORS.write().unwrap().iter_mut() {
if m.is_some() {
m.as_mut().unwrap().set_take_yield(true);
}
......@@ -193,7 +193,7 @@ pub fn sync_barrier(mutator: &mut Mutator) {
// mutators will resume
CONTROLLER.store(NO_CONTROLLER, Ordering::SeqCst);
- for mut t in MUTATORS.write().unwrap().iter_mut() {
+ for t in MUTATORS.write().unwrap().iter_mut() {
if t.is_some() {
let t_mut = t.as_mut().unwrap();
t_mut.set_take_yield(false);
......@@ -243,7 +243,7 @@ fn gc() {
// each space prepares for GC
{
let mut gccontext_guard = MY_GC.write().unwrap();
- let mut gccontext = gccontext_guard.as_mut().unwrap();
+ let gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.prepare_for_gc();
gccontext.immix_normal.prepare_for_gc();
gccontext.lo.prepare_for_gc();
......@@ -272,7 +272,7 @@ fn gc() {
// sweep
{
let mut gccontext_guard = MY_GC.write().unwrap();
- let mut gccontext = gccontext_guard.as_mut().unwrap();
+ let gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.sweep();
gccontext.immix_normal.sweep();
......@@ -333,8 +333,6 @@ pub fn start_trace(work_stack: &mut Vec<ObjectReference>) {
#[allow(unused_variables)]
fn start_steal_trace(stealer: Stealer<ObjectReference>, job_sender: mpsc::Sender<ObjectReference>) {
- use objectmodel;
let mut local_queue = vec![];
loop {
......
......@@ -82,11 +82,9 @@ extern crate field_offset;
use common::objectdump;
use common::ptr::*;
use heap::*;
use heap::immix::*;
use heap::freelist::*;
use utils::*;
use objectmodel::sidemap::*;
use std::sync::Arc;
use std::sync::RwLock;
......@@ -209,7 +207,7 @@ pub extern "C" fn gc_destroy() {
if gc_lock.is_some() {
{
- let mut gc = gc_lock.as_mut().unwrap();
+ let gc = gc_lock.as_mut().unwrap();
gc.immix_tiny.destroy();
gc.immix_normal.destroy();
gc.lo.destroy();
......
......@@ -13,7 +13,6 @@
// limitations under the License.
use objectmodel::*;
use utils::*;
pub const IMMORTAL_OBJECT_HEADER_SIZE: ByteSize = 32;
......
......@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::atomic;
use utils::ByteSize;
pub mod sidemap;
......
......@@ -12,13 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Mutex;
use std::sync::RwLock;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::collections::HashMap;
use std::mem;
use utils::mem::*;
use utils::math;
use utils::*;
use objectmodel::sidemap::TypeID;
......@@ -66,7 +64,7 @@ impl GlobalTypeTable {
let meta_addr = mmap;
GLOBAL_TYPE_TABLE_META.store(meta_addr.as_usize(), Ordering::Relaxed);
- let mut meta: &mut GlobalTypeTable = unsafe { meta_addr.to_ref_mut() };
+ let meta: &mut GlobalTypeTable = unsafe { meta_addr.to_ref_mut() };
// actual table
let table_addr = Address::from_ptr(&meta.table as *const [ShortTypeEncode; N_TYPES]);
......@@ -114,8 +112,8 @@ impl GlobalTypeTable {
}
pub fn insert_small_entry(entry: ShortTypeEncode) -> TypeID {
- let mut meta = GlobalTypeTable::table_meta();
- let mut table = GlobalTypeTable::table();
+ let meta = GlobalTypeTable::table_meta();
+ let table = GlobalTypeTable::table();
if meta.small_entry_i < SMALL_ENTRY_CAP {
let id = meta.small_entry_i;
......@@ -128,8 +126,8 @@ impl GlobalTypeTable {
}
pub fn insert_large_entry(entry: ShortTypeEncode) -> TypeID {
- let mut meta = GlobalTypeTable::table_meta();
- let mut table = GlobalTypeTable::table();
+ let meta = GlobalTypeTable::table_meta();
+ let table = GlobalTypeTable::table();
if meta.large_entry_i < LARGE_ENTRY_CAP {
let id = meta.large_entry_i;
......@@ -142,8 +140,8 @@ impl GlobalTypeTable {
}
pub fn force_set_short_entry(index: TypeID, entry: ShortTypeEncode) {
- let mut meta = GlobalTypeTable::table_meta();
- let mut table = GlobalTypeTable::table();
+ let meta = GlobalTypeTable::table_meta();
+ let table = GlobalTypeTable::table();
table[index] = entry;
......@@ -175,7 +173,7 @@ impl GlobalTypeTable {
}
pub fn force_set_full_entry(index: TypeID, entry: FullTypeEncode) {
- let mut meta = GlobalTypeTable::table_meta();
+ let meta = GlobalTypeTable::table_meta();
let mut lock = meta.full_entries.write().unwrap();
assert!(!lock.contains_key(&index));
lock.insert(index, entry);
......
......@@ -66,12 +66,9 @@ impl ObjectEncode {
}
mod object_encoding {
use super::*;
- use std::mem;
#[test]
fn struct_size() {
println!("{:?}", mem::size_of::<ObjectEncode>());
println!("{:?}", size_of::<ObjectEncode>());
}
}
......
......@@ -13,7 +13,6 @@
// limitations under the License.
use objectmodel::*;
use utils::*;
use std;
use std::mem::transmute;
......@@ -196,9 +195,9 @@ impl fmt::Debug for ShortTypeEncode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ShortTypeEncode {{ ").unwrap();
write!(f, "fix_len: {:?}, fix_ty: ", self.fix_len).unwrap();
- self.fix_ty[0..self.fix_len as usize].fmt(f);
+ self.fix_ty[0..self.fix_len as usize].fmt(f).unwrap();
write!(f, ", var_len: {:?}, var_ty: ", self.var_len).unwrap();
- self.var_ty[0..self.var_len as usize].fmt(f);
+ self.var_ty[0..self.var_len as usize].fmt(f).unwrap();
write!(f, "}}")
}
}
......
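The Debug-formatting hunk above addresses a different lint: fmt() returns a fmt::Result, which is marked must_use, so discarding it produces an "unused result which must be used" warning; the commit appends .unwrap(). A self-contained sketch of the same fix, with made-up type names rather than the repository's ShortTypeEncode:

use std::fmt;

struct Demo {
    vals: [u8; 4],
}

impl fmt::Debug for Demo {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Demo {{ vals: ").unwrap();
        // Debug::fmt on the slice returns fmt::Result; ignoring it would
        // trigger the must_use warning, hence the explicit .unwrap().
        self.vals[0..2].fmt(f).unwrap();
        write!(f, " }}")
    }
}

fn main() {
    println!("{:?}", Demo { vals: [1, 2, 3, 4] });
}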
......@@ -124,7 +124,7 @@ pub extern "C" fn muentry_alloc_var_size(
full_tyid: TypeID
) -> ObjectReference {
debug_assert!(MuThread::has_current());
- let mut cur_thread = MuThread::current_mut();
+ let cur_thread = MuThread::current_mut();
let mutator: *mut Mutator = &mut cur_thread.allocator as *mut Mutator;
let size = check_hybrid_size(fix_size + var_size * var_len);
......@@ -142,7 +142,7 @@ pub extern "C" fn muentry_alloc_var_size(
let encode = gen_object_encode_internal(true, tyid, full_tyid, size, vm);
match encode {
- ObjectEncode::Tiny(enc) => unreachable!(),
+ ObjectEncode::Tiny(_) => unreachable!(),
ObjectEncode::Small(enc) => muentry_init_small_object(mutator, res, enc),
ObjectEncode::Medium(enc) => muentry_init_medium_object(mutator, res, enc),
ObjectEncode::Large(enc) => muentry_init_large_object(mutator, res, enc)
......@@ -214,14 +214,6 @@ pub fn allocate_hybrid(
backendtype: Box<BackendType>,
vm: &VM
) -> Address {
- let gc_type = {
- let gctype = backendtype.gc_type.as_ref().unwrap();
- vm.get_gc_type_id(gctype)
- };
- let gc_hybrid_full_type = match backendtype.gc_type_hybrid_full {
- Some(ref enc) => vm.get_gc_type_id(enc),
- None => 0
- };
let size = check_hybrid_size(backendtype.size + backendtype.elem_size.unwrap() * len);
let encode = gen_object_encode(&backendtype, size, vm);
......
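The allocate_hybrid hunk above removes locals (gc_type, gc_hybrid_full_type) that were computed but never read, which Rust 1.20 reports as unused variables. A trivial sketch of that category of fix (hypothetical functions, not repository code):

fn check_hybrid_size(size: usize) -> usize {
    size
}

fn allocate_hybrid_size(fix_size: usize, elem_size: usize, len: usize) -> usize {
    // An unused binding such as `let gc_type = ...;` here would warn under
    // Rust 1.20 (unused_variables); deleting the dead computation is the fix.
    check_hybrid_size(fix_size + elem_size * len)
}

fn main() {
    println!("{}", allocate_hybrid_size(16, 8, 4));
}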
......@@ -207,12 +207,12 @@ impl Address {
#[inline(always)]
pub unsafe fn to_ref<T>(&self) -> &'static T {
- unsafe { mem::transmute(self.0) }
+ mem::transmute(self.0)
}
#[inline(always)]
pub unsafe fn to_ref_mut<T>(&self) -> &'static mut T {
- unsafe { mem::transmute(self.0) }
+ mem::transmute(self.0)
}
/// converts the Address to a pointer-sized integer
......
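The last hunk drops redundant unsafe blocks: the body of an unsafe fn is already an unsafe context, so an inner unsafe { ... } triggers Rust 1.20's unused_unsafe warning. A minimal sketch of the same idea (this Address is a stand-in for illustration, not the repository's type):

use std::mem;

#[derive(Copy, Clone)]
struct Address(usize);

impl Address {
    pub unsafe fn to_ref<T>(&self) -> &'static T {
        // No inner `unsafe { ... }` needed: we are already inside an unsafe fn.
        mem::transmute(self.0)
    }
}

static X: u64 = 42;

fn main() {
    let addr = Address(&X as *const u64 as usize);
    let r: &u64 = unsafe { addr.to_ref() };
    println!("{}", r);
}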