Commit 896af8f0 authored by qinsoon

use MAP_NORESERVE for mmapping a large chunk of memory

parent 9f4bc558
Pipeline #1305 failed with stages in 22 minutes and 22 seconds
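The change drops the memmap crate in favour of calling libc::mmap directly so that MAP_NORESERVE can be passed: the large preallocated region (twice the space size, for alignment) is only backed by physical memory as pages are touched, and no swap is reserved up front. Below is a minimal, self-contained sketch of that reservation pattern; the helper names (reserve_large, release_large) are illustrative and the Linux flag set is assumed — it is not the code from this commit.

extern crate libc;

use std::ptr;

// Sketch only: reserve a large anonymous region without committing swap,
// mirroring the mmap_large/munmap pair introduced in this commit.
fn reserve_large(size: usize) -> *mut u8 {
    // MAP_NORESERVE asks the kernel not to reserve swap for the mapping,
    // so over-mapping (e.g. size * 2 for alignment) stays cheap.
    let ret = unsafe {
        libc::mmap(
            ptr::null_mut(),
            size,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_NORESERVE,
            -1,
            0,
        )
    };
    assert!(ret != libc::MAP_FAILED, "failed to mmap {} bytes", size);
    ret as *mut u8
}

fn release_large(start: *mut u8, size: usize) {
    unsafe {
        libc::munmap(start as *mut libc::c_void, size);
    }
}

fn main() {
    let size = 1 << 30; // 1 GiB of address space
    let p = reserve_large(size);
    unsafe { *p = 42 }; // only the touched page gets committed
    release_large(p, size);
}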
......@@ -15,7 +15,7 @@
use common::ptr::*;
use heap::*;
use objectmodel::sidemap::*;
use utils::mem::memmap;
use utils::mem::*;
use utils::mem::memsec::memzero;
use std::sync::Mutex;
......@@ -51,8 +51,8 @@ pub struct FreelistSpace {
pub last_gc_used_pages: usize,
// 16 bytes
#[allow(dead_code)]
mmap: memmap::MmapMut,
mmap_start: Address,
mmap_size: ByteSize,
padding: [u64; (BYTES_IN_PAGE - 32 - 24 - 88 - 32) >> 3],
......@@ -91,7 +91,9 @@ impl Space for FreelistSpace {
true
}
fn destroy(&mut self) {}
fn destroy(&mut self) {
munmap(self.mmap_start, self.mmap_size);
}
fn prepare_for_gc(&mut self) {
// erase page mark
......@@ -169,18 +171,13 @@ impl Space for FreelistSpace {
impl FreelistSpace {
pub fn new(desc: SpaceDescriptor, space_size: ByteSize) -> Raw<FreelistSpace> {
let mut anon_mmap = match memmap::MmapMut::map_anon(
BYTES_PREALLOC_SPACE * 2 // for alignment
) {
Ok(m) => m,
Err(_) => panic!("failed to reserve address space for mmap")
};
let mmap_ptr = anon_mmap.as_mut_ptr();
trace!(" mmap ptr: {:?}", mmap_ptr);
let mmap_size = BYTES_PREALLOC_SPACE * 2;
let mmap_start = mmap_large(mmap_size);
trace!(" mmap ptr: {}", mmap_start);
let space_size = math::align_up(space_size, BYTES_IN_PAGE);
let meta_start = Address::from_ptr::<u8>(mmap_ptr).align_up(SPACE_ALIGN);
let meta_start = mmap_start.align_up(SPACE_ALIGN);
let mem_start = meta_start + BYTES_IN_PAGE +
mem::size_of::<LargeObjectEncode>() * PAGES_IN_SPACE +
mem::size_of::<PageMark>() * PAGES_IN_SPACE;
......@@ -216,10 +213,8 @@ impl FreelistSpace {
}
trace!(" initialized total/usable/used_nodes");
unsafe {
use std::ptr;
ptr::write(&mut space.mmap as *mut memmap::MmapMut, anon_mmap);
}
space.mmap_start = mmap_start;
space.mmap_size = mmap_size;
trace!(" store mmap");
debug_assert_eq!(Address::from_ptr(&space.mem as *const [u8; 0]), mem_start);
......
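Both spaces still map BYTES_PREALLOC_SPACE * 2 and then align the metadata start up to SPACE_ALIGN. The doubling works because aligning up wastes at most SPACE_ALIGN - 1 bytes, so an aligned region of the requested size always fits inside the mapping. A small sketch of that arithmetic, assuming the alignment does not exceed the preallocated size (the function names here are illustrative):

fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

// Returns the aligned region [start, end) carved out of a mapping that is
// twice as large as `size`; the assert shows why the 2x over-mapping is enough.
fn aligned_region(mmap_start: usize, size: usize, align: usize) -> (usize, usize) {
    debug_assert!(align <= size);
    let start = align_up(mmap_start, align);
    let end = start + size;
    debug_assert!(end <= mmap_start + size * 2);
    (start, end)
}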
This diff is collapsed.
......@@ -18,7 +18,7 @@ use heap::immix::*;
use heap::gc;
use objectmodel::*;
use utils::bit_utils;
use utils::mem::memmap;
use utils::mem::*;
use utils::mem::memsec;
use std::*;
......@@ -75,8 +75,8 @@ pub struct ImmixSpace {
pub last_gc_used_lines: usize,
// 16 bytes
#[allow(dead_code)]
mmap: memmap::MmapMut,
mmap_start: Address,
mmap_size: ByteSize,
// padding to space metadata takes 64KB
padding: [u64; ((BYTES_IN_BLOCK - 32 - 32 - 88 - 32 - 16) >> 3)],
......@@ -125,7 +125,9 @@ impl Space for ImmixSpace {
true
}
fn destroy(&mut self) {}
fn destroy(&mut self) {
munmap(self.mmap_start, self.mmap_size);
}
fn prepare_for_gc(&mut self) {
// erase lines marks
......@@ -301,16 +303,11 @@ impl RawMemoryMetadata for ImmixBlock {
impl ImmixSpace {
pub fn new(desc: SpaceDescriptor, space_size: ByteSize) -> Raw<ImmixSpace> {
// acquire memory through mmap
let mut anon_mmap: memmap::MmapMut = match memmap::MmapMut::map_anon(
BYTES_PREALLOC_SPACE * 2 // for alignment
) {
Ok(m) => m,
Err(_) => panic!("failed to reserve addresss pace for mmap")
};
let mmap_ptr = anon_mmap.as_mut_ptr();
trace!(" mmap ptr: {:?}", mmap_ptr);
let mmap_size = BYTES_PREALLOC_SPACE * 2;
let mmap_start = mmap_large(mmap_size);
trace!(" mmap ptr: {}", mmap_start);
let meta_start: Address = Address::from_ptr::<u8>(mmap_ptr).align_up(SPACE_ALIGN);
let meta_start: Address = mmap_start.align_up(SPACE_ALIGN);
let mem_start: Address = meta_start + OFFSET_MEM_START;
let mem_end: Address = mem_start + space_size;
trace!(" space metadata: {}", meta_start);
......@@ -346,10 +343,8 @@ impl ImmixSpace {
}
trace!(" initialized total/usable/used blocks");
unsafe {
use std::ptr;
ptr::write(&mut space.mmap as *mut memmap::MmapMut, anon_mmap);
}
space.mmap_start = mmap_start;
space.mmap_size = mmap_size;
trace!(" store mmap");
space.last_gc_used_lines = 0;
......
......@@ -17,9 +17,9 @@ use std::sync::RwLock;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::collections::HashMap;
use std::mem;
use utils::mem::memmap;
use utils::mem::*;
use utils::math;
use utils::Address;
use utils::*;
use objectmodel::sidemap::TypeID;
use objectmodel::sidemap::N_TYPES;
......@@ -39,7 +39,8 @@ pub struct GlobalTypeTable {
full_entries: RwLock<HashMap<usize, FullTypeEncode>>,
#[allow(dead_code)]
mmap: memmap::MmapMut,
mmap_start: Address,
mmap_size: ByteSize,
table: [ShortTypeEncode; N_TYPES]
}
......@@ -56,15 +57,13 @@ static GLOBAL_TYPE_TABLE_META: AtomicUsize = ATOMIC_USIZE_INIT;
impl GlobalTypeTable {
pub fn init() {
debug!("Init GlobalTypeTable...");
let mut mmap = match memmap::MmapMut::map_anon(mem::size_of::<GlobalTypeTable>()) {
Ok(m) => m,
Err(_) => panic!("failed to mmap for global type table")
};
let mmap_size = mem::size_of::<GlobalTypeTable>();
let mmap = mmap_large(mmap_size);
info!("Global Type Table allocated at {:?}", mmap.as_mut_ptr());
info!("Global Type Table allocated at {}", mmap);
// start address of metadata
let meta_addr = Address::from_ptr::<u8>(mmap.as_mut_ptr());
let meta_addr = mmap;
GLOBAL_TYPE_TABLE_META.store(meta_addr.as_usize(), Ordering::Relaxed);
let mut meta: &mut GlobalTypeTable = unsafe { meta_addr.to_ref_mut() };
......@@ -84,16 +83,20 @@ impl GlobalTypeTable {
RwLock::new(HashMap::new())
)
}
unsafe {
use std::ptr;
ptr::write(&mut meta.mmap as *mut memmap::MmapMut, mmap);
}
meta.mmap_start = mmap;
meta.mmap_size = mmap_size;
// save mmap
trace!("Global Type Table initialization done");
}
pub fn cleanup() {
// unmap the table
let mmap_start = GlobalTypeTable::table_meta().mmap_start;
let mmap_size = GlobalTypeTable::table_meta().mmap_size;
munmap(mmap_start, mmap_size);
// set pointers to zero
GLOBAL_TYPE_TABLE_PTR.store(0, Ordering::Relaxed);
GLOBAL_TYPE_TABLE_META.store(0, Ordering::Relaxed);
}
......
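GlobalTypeTable::cleanup reads mmap_start and mmap_size out of the table metadata before unmapping, which matters because that metadata lives inside the very mapping being released. A compact sketch of that ordering follows; the struct and field names are stand-ins, not the real table layout:

extern crate libc;

// Stand-in for metadata stored inside the mapping it describes.
struct MetaInMap {
    mmap_start: usize,
    mmap_size: usize,
}

unsafe fn cleanup(meta: *mut MetaInMap) {
    // Copy the bounds out first: once munmap runs, the metadata is gone too.
    let start = (*meta).mmap_start;
    let size = (*meta).mmap_size;
    libc::munmap(start as *mut libc::c_void, size);
}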
......@@ -21,6 +21,7 @@ authors = ["qinsoon <qinsoon@gmail.com>"]
crate-type = ["rlib"]
[dependencies]
libc="*"
memmap = "*"
memsec = "0.1.9"
byteorder = "*"
......
......@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate libc;
/// cross-platform mmap crate
pub extern crate memmap;
/// secured memory operations: memset, memzero, etc.
......@@ -20,6 +22,47 @@ pub extern crate memsec;
#[allow(unused_imports)] // import both endianness (we may not use big endian though)
use byteorder::{LittleEndian, BigEndian, ReadBytesExt, WriteBytesExt, ByteOrder};
use Address;
use ByteSize;
use std::ptr;
#[cfg(target_os = "macos")]
fn mmap_flags() -> libc::c_int {
libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_NORESERVE
}
#[cfg(target_os = "linux")]
fn mmap_flags() -> libc::c_int {
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_NORESERVE
}
pub fn mmap_large(size: ByteSize) -> Address {
use self::libc::*;
let ret = unsafe {
mmap(
ptr::null_mut(),
size as size_t,
PROT_READ | PROT_WRITE,
mmap_flags(),
-1,
0
)
};
if ret == MAP_FAILED {
panic!("failed to mmap {} bytes", size);
}
Address::from_mut_ptr(ret)
}
pub fn munmap(addr: Address, size: ByteSize) {
use self::libc::*;
unsafe {
munmap(addr.to_ptr_mut() as *mut c_void, size as size_t);
}
}
/// malloc's and zeroes the memory
pub unsafe fn malloc_zero(size: usize) -> *mut u8 {
use self::memsec;
......
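The munmap wrapper above discards libc's return value. Where an unmap failure should be surfaced rather than ignored, a checked variant along these lines is possible; this is a sketch built on the same libc call, not part of the commit:

extern crate libc;

use std::io;

// Like the wrapper above, but reports failure instead of swallowing it.
pub fn munmap_checked(start: *mut u8, size: usize) -> io::Result<()> {
    let ret = unsafe { libc::munmap(start as *mut libc::c_void, size) };
    if ret == 0 {
        Ok(())
    } else {
        // munmap sets errno on failure (e.g. an unaligned address or a bad range)
        Err(io::Error::last_os_error())
    }
}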