Commit ef467bc0 authored by qinsoon

immix tiny mostly works

parent 12b9e253
......@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use utils::*;
mod bitmap;
mod address_bitmap;
mod address_map;
......@@ -21,3 +23,7 @@ pub mod objectdump;
pub use self::address_bitmap::AddressBitmap;
pub use self::address_map::AddressMap;
pub const SIZE_1KB: ByteSize = 1 << 10;
pub const SIZE_1MB: ByteSize = 1 << 20;
pub const SIZE_1GB: ByteSize = 1 << 30;
\ No newline at end of file
......@@ -38,21 +38,19 @@ pub struct FreeListSpace {
pub trace_map: Arc<AddressMap<u8>>,
#[allow(dead_code)]
mmap: memmap::Mmap,
mmap: memmap::MmapMut,
treadmill: Mutex<Treadmill>
}
impl FreeListSpace {
pub fn new(space_size: usize) -> FreeListSpace {
let anon_mmap: memmap::Mmap = match memmap::Mmap::anonymous(
space_size + SPACE_ALIGN,
memmap::Protection::ReadWrite
) {
Ok(m) => m,
Err(_) => panic!("failed to call mmap")
};
let start: Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
let mut anon_mmap: memmap::MmapMut =
match memmap::MmapMut::map_anon(space_size + SPACE_ALIGN) {
Ok(m) => m,
Err(_) => panic!("failed to call mmap")
};
let start: Address = Address::from_ptr::<u8>(anon_mmap.as_mut_ptr()).align_up(SPACE_ALIGN);
let end: Address = start + space_size;
let trace_map = AddressMap::new(start, end);
......
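For context, the aligned-start trick used here follows the usual pattern: over-allocate an anonymous mapping by the alignment, then round the returned pointer up. A minimal sketch, assuming the memmap crate (0.6+) and a power-of-two alignment; map_aligned and its return shape are illustrative, not this crate's API:

    // keep the MmapMut alive alongside the aligned start address it backs
    fn map_aligned(space_size: usize, align: usize) -> (memmap::MmapMut, usize) {
        let mut mmap = memmap::MmapMut::map_anon(space_size + align)
            .expect("failed to mmap anonymous memory");
        // round the raw pointer up to the next multiple of `align` (power of two)
        let raw = mmap.as_mut_ptr() as usize;
        let start = (raw + align - 1) & !(align - 1);
        (mmap, start)
    }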
......@@ -257,6 +257,14 @@ fn gc() {
atomic::Ordering::SeqCst
);
// each space prepares for GC
{
let mut gccontext_guard = MY_GC.write().unwrap();
let mut gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.prepare_for_gc();
gccontext.immix_normal.prepare_for_gc();
}
trace!("GC starts");
// creates root deque
......@@ -278,12 +286,12 @@ fn gc() {
// sweep
{
let gccontext_guard = MY_GC.read().unwrap();
let gccontext = gccontext_guard.as_ref().unwrap();
let mut gccontext_guard = MY_GC.write().unwrap();
let mut gccontext = gccontext_guard.as_mut().unwrap();
gccontext.immix_tiny.sweep();
gccontext.immix_normal.sweep();
gccontext.lo.sweep();
// gccontext.lo.sweep();
}
objectmodel::flip_mark_state();
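The prepare and sweep phases mutate the spaces, so they need the write side of the global context lock rather than the read side. A minimal sketch of that pattern, assuming a lazy_static RwLock<Option<...>> global like MY_GC; Space and GCCtx below are stand-ins, not types from this crate:

    use std::sync::RwLock;

    struct Space;
    impl Space {
        fn prepare_for_gc(&mut self) {}
        fn sweep(&mut self) {}
    }
    struct GCCtx { immix_tiny: Space, immix_normal: Space }

    fn prepare_all(global: &RwLock<Option<GCCtx>>) {
        // write lock: prepare_for_gc/sweep take &mut self
        let mut guard = global.write().unwrap();
        let ctx = guard.as_mut().unwrap();
        ctx.immix_tiny.prepare_for_gc();
        ctx.immix_normal.prepare_for_gc();
    }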
......@@ -389,7 +397,7 @@ pub fn steal_trace_object(
immix::mark_object_traced(obj);
let encode = unsafe {
ImmixBlock::get_type_map_slot_static(obj.to_address()).load::<TinyObjectEncode>()
ImmixSpace::get_type_byte_slot_static(obj.to_address()).load::<TinyObjectEncode>()
};
for i in 0..encode.n_fields() {
......@@ -408,7 +416,7 @@ pub fn steal_trace_object(
// get type encode
let (type_encode, type_size): (&TypeEncode, ByteOffset) = {
let type_slot = ImmixBlock::get_type_map_slot_static(obj.to_address());
let type_slot = ImmixSpace::get_type_byte_slot_static(obj.to_address());
let encode = unsafe { type_slot.load::<MediumObjectEncode>() };
let (type_id, type_size) = if encode.is_medium() {
(encode.type_id(), encode.size())
......
......@@ -13,6 +13,7 @@
// limitations under the License.
use utils::*;
use heap::*;
use std::mem::size_of;
mod immix_space;
......@@ -45,12 +46,7 @@ pub use self::immix_space::is_object_traced;
// | ...... |
// |__________________|
pub const IMMIX_SPACE_ALIGN: ByteSize = (1 << 34); // 16GB
pub const IMMIX_SPACE_LOWBITS_MASK: usize = !(IMMIX_SPACE_ALIGN - 1);
// preallocating 16 GB for immix space
pub const LOG_BYTES_PREALLOC_IMMIX_SPACE: usize = 34;
pub const BYTES_PREALLOC_IMMIX_SPACE: ByteSize = 1 << LOG_BYTES_PREALLOC_IMMIX_SPACE;
// 64KB Immix Block
pub const LOG_BYTES_IN_BLOCK: usize = 16;
......@@ -61,16 +57,17 @@ pub const LOG_BYTES_IN_LINE: usize = 8;
pub const BYTES_IN_LINE: ByteSize = (1 << LOG_BYTES_IN_LINE);
// 256K blocks per space
pub const BLOCKS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_BYTES_IN_BLOCK);
pub const BLOCKS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_BLOCK);
// 64M lines per space
pub const LINES_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_BYTES_IN_LINE);
pub const LINES_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_BYTES_IN_LINE);
// 2G words per space
pub const WORDS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_IMMIX_SPACE - LOG_POINTER_SIZE);
pub const WORDS_IN_SPACE: usize = 1 << (LOG_BYTES_PREALLOC_SPACE - LOG_POINTER_SIZE);
// 256 lines per block
pub const LINES_IN_BLOCK: usize = 1 << (LOG_BYTES_IN_BLOCK - LOG_BYTES_IN_LINE);
pub const LOG_LINES_IN_BLOCK: usize = LOG_BYTES_IN_BLOCK - LOG_BYTES_IN_LINE;
// 64KB space metadata (we do not need this much, but we use 64KB for alignment)
pub const BYTES_META_SPACE: ByteSize = BYTES_IN_BLOCk;
pub const BYTES_META_SPACE: ByteSize = BYTES_IN_BLOCK;
// 256KB block mark table (1 byte per block)
pub const BYTES_META_BLOCK_MARK_TABLE: ByteSize = BLOCKS_IN_SPACE;
// 64MB line mark table
......@@ -78,7 +75,7 @@ pub const BYTES_META_LINE_MARK_TABLE: ByteSize = LINES_IN_SPACE;
// 1GB GC byte table
pub const BYTES_META_GC_TABLE: ByteSize = WORDS_IN_SPACE >> 1;
// 1GB TYPE byte table
pub const BYTES_META_TYPE_TABLE: ByteSize = WORDS_IN_SPACE >> 2;
pub const BYTES_META_TYPE_TABLE: ByteSize = WORDS_IN_SPACE >> 1;
pub const OFFSET_META_BLOCK_MARK_TABLE: ByteOffset = BYTES_META_SPACE as ByteOffset;
pub const OFFSET_META_LINE_MARK_TABLE: ByteOffset =
......@@ -86,7 +83,9 @@ pub const OFFSET_META_LINE_MARK_TABLE: ByteOffset =
pub const OFFSET_META_GC_TABLE: ByteOffset =
OFFSET_META_LINE_MARK_TABLE + BYTES_META_LINE_MARK_TABLE as ByteOffset;
pub const OFFSET_META_TYPE_TABLE: ByteOffset =
OFFSET_META_META_GC_TABLE + BYTES_META_GC_TABLE as ByteOffset;
OFFSET_META_GC_TABLE + BYTES_META_GC_TABLE as ByteOffset;
pub const OFFSET_MEM_START: ByteOffset =
OFFSET_META_TYPE_TABLE + BYTES_META_TYPE_TABLE as ByteOffset;
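Putting these constants together (with LOG_BYTES_PREALLOC_SPACE = 34, i.e. a 16GB preallocation per space), the per-space metadata tables sit back to back ahead of the usable memory; the figures below simply restate the definitions above:

    BYTES_META_SPACE            = 64KB   -> OFFSET_META_BLOCK_MARK_TABLE = 64KB
    BYTES_META_BLOCK_MARK_TABLE = 256KB  -> OFFSET_META_LINE_MARK_TABLE  = 64KB + 256KB
    BYTES_META_LINE_MARK_TABLE  = 64MB   -> OFFSET_META_GC_TABLE         = 64KB + 256KB + 64MB
    BYTES_META_GC_TABLE         = 1GB    -> OFFSET_META_TYPE_TABLE       = 64KB + 256KB + 64MB + 1GB
    BYTES_META_TYPE_TABLE       = 1GB    -> OFFSET_MEM_START             = 64KB + 256KB + 64MB + 2GB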
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
......@@ -101,7 +100,7 @@ pub enum LineMark {
#[repr(u8)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum BlockMark {
Uninitialized,
Uninitialized = 0,
Usable,
Full
}
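The explicit `Uninitialized = 0` discriminant is presumably chosen so that zero-filled, freshly mmapped block-mark bytes decode as Uninitialized; a hypothetical sanity check:

    assert_eq!(BlockMark::Uninitialized as u8, 0);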
......@@ -38,11 +38,15 @@ lazy_static! {
AtomicUsize::new( (DEFAULT_HEAP_SIZE as f64 * LO_SPACE_RATIO) as usize );
}
pub const SPACE_ALIGN: ByteSize = (1 << 19); // 512K
// preallocating 16GB for each space
pub const LOG_BYTES_PREALLOC_SPACE: usize = 34;
pub const BYTES_PREALLOC_SPACE: ByteSize = 1 << LOG_BYTES_PREALLOC_SPACE;
pub const SPACE_ALIGN: ByteSize = BYTES_PREALLOC_SPACE; // 16GB
pub const SPACE_LOWBITS_MASK: usize = !(SPACE_ALIGN - 1);
#[repr(u8)]
#[derive(Copy, Clone)]
#[repr(u64)]
#[derive(Copy, Clone, Debug)]
pub enum SpaceDescriptor {
ImmixTiny,
ImmixNormal,
......
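Because each space is aligned to its own 16GB preallocation, the owning space (and hence its metadata tables) can be found from any address inside it with a single mask. A minimal sketch using the constants above; space_base is illustrative:

    fn space_base(addr: usize) -> usize {
        // clears the low 34 bits, yielding the 16GB-aligned space start
        addr & SPACE_LOWBITS_MASK
    }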
......@@ -154,15 +154,14 @@ pub extern "C" fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize,
let immix_tiny = ImmixSpace::new(SpaceDescriptor::ImmixTiny, immix_size >> 1);
trace!(" initializing normal immix space...");
let immix_normal = ImmixSpace::new(SpaceDescriptor::ImmixNormal, immix_size >> 1);
trace!(" initializing large object space...");
let lo_space = Arc::new(FreeListSpace::new(lo_size));
// trace!(" initializing large object space...");
// let lo_space = Arc::new(FreeListSpace::new(lo_size));
heap::gc::init(n_gcthreads);
*MY_GC.write().unwrap() = Some(GC {
immix_tiny,
immix_normal,
lo: lo_space,
gc_types: vec![],
roots: LinkedHashSet::new()
});
......@@ -281,15 +280,12 @@ pub extern "C" fn muentry_alloc_tiny(
size: usize,
align: usize
) -> ObjectReference {
let addr = unsafe { &mut *mutator }.tiny.alloc(size, align);
trace!(
"muentry_alloc_tiny(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
addr
);
unsafe { addr.to_object_reference() }
unsafe {
(&mut *mutator)
.tiny
.alloc(size, align)
.to_object_reference()
}
}
#[inline(always)]
......@@ -299,15 +295,12 @@ pub extern "C" fn muentry_alloc_normal(
size: usize,
align: usize
) -> ObjectReference {
let addr = unsafe { &mut *mutator }.normal.alloc(size, align);
trace!(
"muentry_alloc_normal(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
addr
);
unsafe { addr.to_object_reference() }
unsafe {
(&mut *mutator)
.normal
.alloc(size, align)
.to_object_reference()
}
}
/// allocates an object with slowpath in the immix space
......@@ -318,18 +311,7 @@ pub extern "C" fn muentry_alloc_tiny_slow(
size: usize,
align: usize
) -> Address {
let ret = unsafe { &mut *mutator }
.tiny
.try_alloc_from_local(size, align);
trace!(
"muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
ret
);
ret
unsafe { (&mut *mutator).tiny.try_alloc_from_local(size, align) }
}
/// allocates an object with slowpath in the immix space
......@@ -340,18 +322,7 @@ pub extern "C" fn muentry_alloc_normal_slow(
size: usize,
align: usize
) -> Address {
let ret = unsafe { &mut *mutator }
.normal
.try_alloc_from_local(size, align);
trace!(
"muentry_alloc_slow(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
ret
);
ret
unsafe { (&mut *mutator).normal.try_alloc_from_local(size, align) }
}
/// allocates an object in the freelist space (large object space)
......@@ -362,21 +333,22 @@ pub extern "C" fn muentry_alloc_large(
size: usize,
align: usize
) -> ObjectReference {
let ret = freelist::alloc_large(
size,
align,
unsafe { mutator.as_mut().unwrap() },
MY_GC.read().unwrap().as_ref().unwrap().lo.clone()
);
trace!(
"muentry_alloc_large(mutator: {:?}, size: {}, align: {}) = {}",
mutator,
size,
align,
ret
);
unsafe { ret.to_object_reference() }
// let ret = freelist::alloc_large(
// size,
// align,
// unsafe { mutator.as_mut().unwrap() },
// MY_GC.read().unwrap().as_ref().unwrap().lo.clone()
// );
// trace!(
// "muentry_alloc_large(mutator: {:?}, size: {}, align: {}) = {}",
// mutator,
// size,
// align,
// ret
// );
//
// unsafe { ret.to_object_reference() }
unimplemented!()
}
#[no_mangle]
......@@ -450,8 +422,9 @@ pub extern "C" fn muentry_init_hybrid(
/// forces gc to happen
/// (this is not a 'hint' - world will be stopped, and heap traversal will start)
#[no_mangle]
pub extern "C" fn force_gc() {
pub extern "C" fn force_gc(mutator: *mut Mutator) {
heap::gc::trigger_gc();
yieldpoint(mutator);
}
/// traces reachable objects and record them as a data structure
......@@ -465,7 +438,7 @@ pub extern "C" fn persist_heap(roots: Vec<Address>) -> objectdump::HeapDump {
struct GC {
immix_tiny: Raw<ImmixSpace>,
immix_normal: Raw<ImmixSpace>,
lo: Arc<FreeListSpace>,
// lo: Arc<FreeListSpace>,
gc_types: Vec<Arc<GCType>>,
roots: LinkedHashSet<ObjectReference>
}
......@@ -476,17 +449,7 @@ lazy_static! {
impl GC {
pub fn is_heap_object(&self, addr: Address) -> bool {
self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr) ||
self.lo.addr_in_space(addr)
}
}
impl fmt::Debug for GC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "GC\n").unwrap();
write!(f, "{}", self.immix_tiny).unwrap();
write!(f, "{}", self.immix_normal).unwrap();
write!(f, "{}", self.lo)
self.immix_tiny.addr_in_space(addr) || self.immix_normal.addr_in_space(addr)
}
}
......@@ -495,19 +458,18 @@ impl fmt::Debug for GC {
/// prints current GC context for debugging
#[no_mangle]
pub extern "C" fn print_gc_context() {
println!("{:?}", MY_GC.read().unwrap().as_ref().unwrap());
println!("GC CONTEXT UNKNOWN");
}
/// gets the tiny and normal immix spaces
#[no_mangle]
pub extern "C" fn get_spaces() -> (Raw<ImmixSpace>, Raw<ImmixSpace>, Arc<FreeListSpace>) {
pub extern "C" fn get_spaces() -> (Raw<ImmixSpace>, Raw<ImmixSpace>) {
let space_lock = MY_GC.read().unwrap();
let space = space_lock.as_ref().unwrap();
(
space.immix_tiny.clone(),
space.immix_normal.clone(),
space.lo.clone()
space.immix_normal.clone() // space.lo.clone()
)
}
......
......@@ -61,7 +61,7 @@ static global_type_table_meta: AtomicUsize = ATOMIC_USIZE_INIT;
/// save Mmap to keep the memory map alive
// it is okay to use lock here, as we won't actually access this field
lazy_static!{
static ref gtt_mmap: Mutex<Option<memmap::Mmap>> = Mutex::new(None);
static ref gtt_mmap: Mutex<Option<memmap::MmapMut>> = Mutex::new(None);
}
impl GlobalTypeTable {
......@@ -72,18 +72,15 @@ impl GlobalTypeTable {
let entry_size = mem::size_of::<TypeEncode>();
let metadata_size = math::align_up(mem::size_of::<GlobalTypeTable>(), entry_size);
let mmap = match memmap::Mmap::anonymous(
metadata_size + N_TYPES * entry_size,
memmap::Protection::ReadWrite
) {
let mut mmap = match memmap::MmapMut::map_anon(metadata_size + N_TYPES * entry_size) {
Ok(m) => m,
Err(_) => panic!("failed to mmap for global type table")
};
info!("Global Type Table allocated at {:?}", mmap.ptr());
info!("Global Type Table allocated at {:?}", mmap.as_mut_ptr());
// start address of metadata
let meta_addr = Address::from_ptr::<u8>(mmap.ptr());
let meta_addr = Address::from_ptr::<u8>(mmap.as_mut_ptr());
global_type_table_meta.store(meta_addr.as_usize(), Ordering::Relaxed);
// actual table
let table_addr = meta_addr + metadata_size;
......
......@@ -19,15 +19,11 @@ extern crate log;
use self::mu_gc::*;
use self::mu_gc::heap;
use self::mu_gc::heap::*;
use self::mu_gc::heap::immix::*;
use self::mu_gc::objectmodel::sidemap::*;
use self::mu_utils::*;
use std::sync::atomic::Ordering;
const OBJECT_SIZE: usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = 10000;
#[allow(dead_code)]
const SPACIOUS_SPACE_SIZE: usize = 500 << 20; // 500mb
#[allow(dead_code)]
......@@ -41,33 +37,97 @@ const IMMIX_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
const LO_SPACE_SIZE: usize = SPACIOUS_SPACE_SIZE;
#[test]
pub fn test_exhaust_alloc() {
pub fn test_tiny_immix_alloc() {
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = BYTES_IN_BLOCK / OBJECT_SIZE;
// we should see the slow paths get invoked exactly twice
start_logging_trace();
gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
let (tiny_space, _) = get_spaces();
let mutator = new_mutator();
println!(
"Trying to allocate {} objects of (size {}, align {}). ",
WORK_LOAD,
OBJECT_SIZE,
OBJECT_ALIGN
);
const ACTUAL_OBJECT_SIZE: usize = OBJECT_SIZE;
println!(
"This would take {} bytes of {} bytes heap",
WORK_LOAD * ACTUAL_OBJECT_SIZE,
heap::IMMIX_SPACE_SIZE.load(Ordering::SeqCst)
);
for _ in 0..WORK_LOAD {
yieldpoint(mutator);
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res, TinyObjectEncode::new(0u8));
}
assert_eq!(tiny_space.n_used_blocks(), 0);
gc_destoy();
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
assert_eq!(tiny_space.n_used_blocks(), 1);
}
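As a worked check of the assertions above: with 64KB blocks, BYTES_IN_BLOCK / OBJECT_SIZE = 65536 / 16 = 4096 allocations fill exactly one block, so after the loop the mutator is presumably still allocating into its current, not-yet-retired block and n_used_blocks() is 0; the 4097th allocation spills into a fresh block, retiring the first one and bringing the count to 1.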
#[test]
pub fn test_tiny_immix_gc() {
const OBJECT_SIZE: usize = 16;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD: usize = BYTES_IN_BLOCK / OBJECT_SIZE;
// we should see the slow paths get invoked exactly twice
start_logging_trace();
gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, true);
let (tiny_space, _) = get_spaces();
let mutator = new_mutator();
let tiny_header = TinyObjectEncode::new(0b0u8);
// doing one allocation
let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res, tiny_header);
// add the object to the roots and force a GC
add_to_root(res);
force_gc(mutator);
// one line should be alive, and the next line is conservatively kept alive
assert_eq!(tiny_space.last_gc_used_lines, 2);
// another allocation
let res2 = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
muentry_init_tiny_object(mutator, res2, tiny_header);
// remove the object from the roots and force a GC
remove_root(res);
force_gc(mutator);
// no line should be alive
assert_eq!(tiny_space.last_gc_used_lines, 0);
}
//#[test]
//pub fn test_exhaust_alloc2() {
// const OBJECT_SIZE: usize = 16;
// const OBJECT_ALIGN: usize = 8;
// const WORK_LOAD: usize = BYTES_IN_BLOCK * 2 / OBJECT_SIZE + 1;
// // we should see the slow paths get invoked exactly 3 times
//
// start_logging_trace();
// gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
// let mutator = new_mutator();
// for _ in 0..WORK_LOAD {
// yieldpoint(mutator);
// let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
// muentry_init_tiny_object(mutator, res, TinyObjectEncode::new(0u8));
// }
// gc_destoy();
//}
//
//#[test]
//pub fn test_exhaust_overflow_alloc() {
// const OBJECT_SIZE: usize = 512;
// const OBJECT_ALIGN: usize = 8;
// const WORK_LOAD: usize = BYTES_IN_BLOCK * 2 / OBJECT_SIZE;
//
// start_logging_trace();
// gc_init(IMMIX_SPACE_SIZE, LO_SPACE_SIZE, 8, false);
// let mutator = new_mutator();
// for _ in 0..WORK_LOAD {
// yieldpoint(mutator);
// let res = muentry_alloc_tiny(mutator, OBJECT_SIZE, OBJECT_ALIGN);
// muentry_init_tiny_object(mutator, res, TinyObjectEncode::new(0u8));
// }
//
// gc_destoy();
//}
//
//const LARGE_OBJECT_SIZE: usize = 256;
//
......
......@@ -272,6 +272,16 @@ mod addr_tests {
assert!(addr == aligned);
}
#[test]
fn test_large_align_up() {
let addr = Address(0x1034a9000);
let align = 1 << 34;
let aligned = addr.align_up(align);
println!("aligned = {}", aligned);
assert!(aligned.is_aligned_to(align));
}
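A minimal sketch of the usual power-of-two align-up computation that Address::align_up presumably performs, with the test's values worked through:

    fn align_up(addr: u64, align: u64) -> u64 {
        // `align` must be a power of two
        (addr + align - 1) & !(align - 1)
    }
    // align_up(0x1_034a_9000, 1 << 34) == 0x4_0000_0000, the next 16GB boundary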
#[test]
fn test_is_aligned() {
let addr = Address(0);
......
......@@ -17,7 +17,7 @@
//! # Examples
//!
//! ```
//! use utils::LinkedHashMap;
//! use mu_utils::LinkedHashMap;
//!
//! let mut map = LinkedHashMap::new();
//! map.insert(2, 20);
......@@ -44,39 +44,6 @@ use std::mem;
use std::ops::{Index, IndexMut};
use std::ptr;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_serialize() {
let a: LinkedHashMap<usize, usize> = {
let mut ret = LinkedHashMap::new();
ret.insert(0, 0);
ret.insert(1, 100);
ret.insert(2, 200);
ret.insert(3, 300);
ret.insert(4, 400);
ret
};
println!("a = {:?}", a);
let serialized = json::encode(&a).unwrap();
println!("json = {:?}", serialized);
let a_: LinkedHashMap<usize, usize> = json::decode(&serialized).unwrap();
println!("a_ = {:?}", a_);
let serialized2 = json::encode(&a_).unwrap();
println!("json = {:?}", serialized2);
let a__: LinkedHashMap<usize, usize> = json::decode(&serialized2).unwrap();
println!("a__ = {:?}", a__);
assert!(a_ == a__);
}
}
struct KeyRef<K> {
k: *const K
}
......@@ -227,7 +194,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
......@@ -293,7 +260,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
......@@ -319,7 +286,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
......@@ -346,7 +313,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
......@@ -378,7 +345,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(2, "a");
......@@ -412,7 +379,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map: LinkedHashMap<i32, &str> = LinkedHashMap::new();
/// let capacity = map.capacity();
/// ```
......@@ -427,7 +394,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
......@@ -457,7 +424,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
......@@ -481,7 +448,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
......@@ -511,7 +478,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// # Examples
///
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
......@@ -563,7 +530,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
///
/// # Examples
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
......@@ -594,7 +561,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
/// Iterator element type is `(&'a K, &'a mut V)`
/// # Examples
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
......@@ -628,7 +595,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
///
/// # Examples
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
......@@ -649,7 +616,7 @@ impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
///
/// # Examples
/// ```
/// use utils::LinkedHashMap;
/// use mu_utils::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
......