
Commit 896af8f0 authored by qinsoon

use MAP_NORESERVE for mmapping a large chunk of memory

parent 9f4bc558
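
This commit replaces the memmap crate with direct wrappers from utils::mem. Those wrappers are not part of the diff; below is a minimal sketch of what they plausibly look like on Linux, assuming the libc crate and raw pointers in place of the project's Address type. MAP_NORESERVE asks the kernel not to charge the whole reservation against swap/overcommit accounting, which is what makes reserving BYTES_PREALLOC_SPACE * 2 up front cheap.

extern crate libc;

use std::ptr;

// Hypothetical sketch of utils::mem::mmap_large: reserve a large anonymous
// mapping without committing backing store for all of it (MAP_NORESERVE).
pub fn mmap_large(size: usize) -> *mut u8 {
    unsafe {
        let ret = libc::mmap(
            ptr::null_mut(), // let the kernel pick an address
            size,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | libc::MAP_NORESERVE,
            -1, // anonymous mapping: no file descriptor
            0,
        );
        if ret == libc::MAP_FAILED {
            panic!("failed to reserve address space for mmap");
        }
        ret as *mut u8
    }
}

// Hypothetical counterpart used by destroy(): release the whole reservation.
pub fn munmap(start: *mut u8, size: usize) {
    unsafe {
        libc::munmap(start as *mut libc::c_void, size);
    }
}
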
@@ -15,7 +15,7 @@
use common::ptr::*;
use heap::*;
use objectmodel::sidemap::*;
-use utils::mem::memmap;
+use utils::mem::*;
use utils::mem::memsec::memzero;
use std::sync::Mutex;
@@ -51,8 +51,8 @@ pub struct FreelistSpace {
pub last_gc_used_pages: usize,
// 16 bytes
-#[allow(dead_code)]
-mmap: memmap::MmapMut,
+mmap_start: Address,
+mmap_size: ByteSize,
padding: [u64; (BYTES_IN_PAGE - 32 - 24 - 88 - 32) >> 3],
@@ -91,7 +91,9 @@ impl Space for FreelistSpace {
true
}
-fn destroy(&mut self) {}
+fn destroy(&mut self) {
+munmap(self.mmap_start, self.mmap_size);
+}
fn prepare_for_gc(&mut self) {
// erase page mark
@@ -169,18 +171,13 @@ impl Space for FreelistSpace {
impl FreelistSpace {
pub fn new(desc: SpaceDescriptor, space_size: ByteSize) -> Raw<FreelistSpace> {
-let mut anon_mmap = match memmap::MmapMut::map_anon(
-BYTES_PREALLOC_SPACE * 2 // for alignment
-) {
-Ok(m) => m,
-Err(_) => panic!("failed to reserve address space for mmap")
-};
-let mmap_ptr = anon_mmap.as_mut_ptr();
-trace!(" mmap ptr: {:?}", mmap_ptr);
+let mmap_size = BYTES_PREALLOC_SPACE * 2;
+let mmap_start = mmap_large(mmap_size);
+trace!(" mmap ptr: {}", mmap_start);
let space_size = math::align_up(space_size, BYTES_IN_PAGE);
-let meta_start = Address::from_ptr::<u8>(mmap_ptr).align_up(SPACE_ALIGN);
+let meta_start = mmap_start.align_up(SPACE_ALIGN);
let mem_start = meta_start + BYTES_IN_PAGE +
mem::size_of::<LargeObjectEncode>() * PAGES_IN_SPACE +
mem::size_of::<PageMark>() * PAGES_IN_SPACE;
@@ -216,10 +213,8 @@ impl FreelistSpace {
}
trace!(" initialized total/usable/used_nodes");
-unsafe {
-use std::ptr;
-ptr::write(&mut space.mmap as *mut memmap::MmapMut, anon_mmap);
-}
+space.mmap_start = mmap_start;
+space.mmap_size = mmap_size;
trace!(" store mmap");
debug_assert_eq!(Address::from_ptr(&space.mem as *const [u8; 0]), mem_start);
......
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use utils::Address;
use utils::mem::memmap;
use common::AddressMap;
use objectmodel;
use std::sync::Arc;
use std::fmt;
use std::sync::Mutex;
const SPACE_ALIGN: usize = 1 << 19;
const BLOCK_SIZE: usize = 1 << 12; // 4 KB
const TRACE_TREADMILL: bool = false;
#[repr(C)]
pub struct FreeListSpace {
start: Address,
end: Address,
pub alloc_map: Arc<AddressMap<u8>>,
pub trace_map: Arc<AddressMap<u8>>,
#[allow(dead_code)]
mmap: memmap::MmapMut,
treadmill: Mutex<Treadmill>
}
impl FreeListSpace {
pub fn new(space_size: usize) -> FreeListSpace {
let mut anon_mmap: memmap::MmapMut =
match memmap::MmapMut::map_anon(space_size + SPACE_ALIGN) {
Ok(m) => m,
Err(_) => panic!("failed to call mmap")
};
let start: Address = Address::from_ptr::<u8>(anon_mmap.as_mut_ptr()).align_up(SPACE_ALIGN);
let end: Address = start + space_size;
let trace_map = AddressMap::new(start, end);
let alloc_map = AddressMap::new(start, end);
if cfg!(debug_assertions) {
trace_map.init_all(0);
alloc_map.init_all(0);
}
let treadmill = Treadmill::new(start, end);
FreeListSpace {
start: start,
end: end,
alloc_map: Arc::new(alloc_map),
trace_map: Arc::new(trace_map),
mmap: anon_mmap,
treadmill: Mutex::new(treadmill)
}
}
pub fn alloc(&self, size: usize, align: usize) -> Address {
// every block is BLOCK_SIZE-aligned, so usually no extra alignment is needed
assert!(BLOCK_SIZE % align == 0);
let size = size + objectmodel::OBJECT_HEADER_SIZE;
// round the request up to whole blocks (ceiling division)
let blocks_needed = if size % BLOCK_SIZE == 0 {
size / BLOCK_SIZE
} else {
size / BLOCK_SIZE + 1
};
if TRACE_TREADMILL {
trace!("---before allocation---");
trace!("{}", self);
}
trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
let res = {
let mut treadmill = self.treadmill.lock().unwrap();
treadmill.alloc_blocks(blocks_needed)
};
if TRACE_TREADMILL {
trace!("---after allocation---");
trace!("{}", self);
}
if res.is_zero() {
res
} else {
res + (-objectmodel::OBJECT_HEADER_OFFSET)
}
}
#[inline(always)]
#[cfg(feature = "use-sidemap")]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
unimplemented!()
}
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
objectmodel::is_traced(unsafe { addr.to_object_reference() }, mark_state)
}
pub fn sweep(&self) {
trace!("going to sweep treadmill space");
if TRACE_TREADMILL {
trace!("{}", self);
}
let mut nodes_scanned = 0;
let mut free_nodes_scanned = 0;
let mut alive_nodes_scanned = 0;
let mut treadmill = self.treadmill.lock().unwrap();
{
let mark_state = objectmodel::load_mark_state();
let from = treadmill.from;
let to = treadmill.to;
let total_nodes = treadmill.spaces[from].len();
let mut i = 0;
while nodes_scanned < total_nodes {
trace!("scanning {}", treadmill.spaces[from][i]);
let addr = treadmill.spaces[from][i].payload;
nodes_scanned += 1;
let traced = self.is_traced(addr, mark_state);
if traced {
// this object is alive
alive_nodes_scanned += 1;
// move to tospace
let node = treadmill.spaces[from].remove(i);
treadmill.spaces[to].push(node);
trace!("is alive");
// do not increment i
} else {
free_nodes_scanned += 1;
i += 1;
}
}
// check if we have any free nodes
if free_nodes_scanned == 0 && treadmill.spaces[treadmill.to].len() == 0 {
println!("didnt free up any memory in treadmill space");
panic!("we ran out of memory in large object space")
}
}
// after the flip, allocation in the new from-space starts after the alive nodes
treadmill.from_space_next = alive_nodes_scanned;
// flip
if treadmill.from == 0 {
treadmill.from = 1;
treadmill.to = 0;
} else {
treadmill.from = 0;
treadmill.to = 1;
}
// sort from_space beyond from_space_next so that contiguous blocks sit
// next to each other (easier to allocate)
let from = treadmill.from;
let ref mut from_space = treadmill.spaces[from];
// we do not care about alive nodes in from-space;
// selection-sort the free tail by payload address
for start in alive_nodes_scanned..from_space.len() {
let first = {
let mut ret = start;
for i in start..from_space.len() {
if from_space[i].payload < from_space[ret].payload {
ret = i;
}
}
ret
};
if first != start {
let block = from_space.remove(first);
from_space.insert(start, block);
}
}
if cfg!(debug_assertions) {
debug!("---tread mill space---");
debug!("total nodes scanned: {}", nodes_scanned);
debug!("alive nodes scanned: {}", alive_nodes_scanned);
debug!("free nodes scanned: {}", free_nodes_scanned);
}
}
}
use heap::Space;
impl Space for FreeListSpace {
#[inline(always)]
fn start(&self) -> Address {
self.start
}
#[inline(always)]
fn end(&self) -> Address {
self.end
}
#[inline(always)]
fn is_valid_object(&self, addr: Address) -> bool {
true
}
}
unsafe impl Sync for FreeListSpace {}
unsafe impl Send for FreeListSpace {}
impl fmt::Display for FreeListSpace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FreeListSpace\n").unwrap();
write!(f, "range={:#X} ~ {:#X}\n", self.start, self.end).unwrap();
let treadmill: &Treadmill = &self.treadmill.lock().unwrap();
write!(f, "treadmill: {}", treadmill)
}
}
struct Treadmill {
from_space_next: usize, // next available node in from_space
from: usize,
to: usize,
spaces: [Vec<TreadmillNode>; 2]
}
impl Treadmill {
fn new(start: Address, end: Address) -> Treadmill {
let half_space = start + ((end - start) / 2);
let mut from_space = vec![];
let mut to_space = vec![];
let mut addr = start;
while addr < half_space {
from_space.push(TreadmillNode::new(addr));
addr = addr + BLOCK_SIZE;
}
while addr < end {
to_space.push(TreadmillNode::new(addr));
addr = addr + BLOCK_SIZE;
}
Treadmill {
from_space_next: 0,
from: 0,
to: 1,
spaces: [from_space, to_space]
}
}
fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
match self.find_contiguous_blocks(n_blocks) {
Some(start) => {
if TRACE_TREADMILL {
trace!(
"found contiguous {} blocks, starting from {}",
n_blocks,
start
);
}
let ref mut from_space = self.spaces[self.from];
// zero blocks
let return_address = from_space[start].payload;
Treadmill::zeroing_blocks(return_address, n_blocks);
if start != self.from_space_next {
// the contiguous blocks are not the next few at the cursor,
// so we need to move them there:
// take the allocated blocks out
let new_allocated = {
let mut ret = vec![];
for _ in 0..n_blocks {
let block = from_space.remove(start);
if TRACE_TREADMILL {
trace!("remove allocated block from from_space: {}", block);
trace!("from space: ");
for i in 0..from_space.len() {
trace!("{}", from_space[i]);
}
}
ret.push(block);
}
ret
};
// insert back and move the cursor
let mut cursor = self.from_space_next;
for block in new_allocated {
if TRACE_TREADMILL {
trace!("insert block {} to from_space at {}", block, cursor);
}
from_space.insert(cursor, block);
if TRACE_TREADMILL {
trace!("from space: ");
for i in 0..from_space.len() {
trace!("{}", from_space[i]);
}
}
cursor += 1;
}
self.from_space_next = cursor;
} else {
// just move cursor
self.from_space_next += n_blocks;
}
return_address
}
None => {
if TRACE_TREADMILL {
trace!("cannot find {} contiguous blocks", n_blocks);
}
unsafe { Address::zero() }
}
}
}
fn find_contiguous_blocks(&mut self, n_blocks: usize) -> Option<usize> {
// e.g. if we have 10 blocks and require 3, no contiguous run can start
// after index 7 (7 = 10 (total) - 3 (required); only 8, 9 would remain)
// Rust ranges are exclusive of the end, so we iterate over
// (total - required + 1) start positions
// we can always assume contiguous blocks are arranged next to each other,
// since we sort after each GC
// if we do not have enough blocks, there is no need to check contiguity
if self.from_space_next + n_blocks > self.spaces[self.from].len() {
return None;
}
for i in self.from_space_next..(self.spaces[self.from].len() - n_blocks + 1) {
if self.has_contiguous_blocks_starting_at(n_blocks, i) {
return Some(i);
}
}
None
}
fn has_contiguous_blocks_starting_at(&mut self, n_blocks: usize, start: usize) -> bool {
let ref from_space = self.spaces[self.from];
if start + n_blocks > from_space.len() {
// if we have fewer blocks than required, it is impossible to find required blocks
false
} else {
// we need to check if next n_blocks are contiguous
// e.g. with 10 blocks, to check for 3 contiguous blocks starting at #7,
// we check the pairs 7&8 and 8&9 (the cursor takes values 7 and 8)
let mut cursor = start;
while cursor < start + n_blocks - 1 {
if from_space[cursor].payload + BLOCK_SIZE != from_space[cursor + 1].payload {
return false;
}
cursor += 1;
}
true
}
}
fn zeroing_blocks(start: Address, n_blocks: usize) {
use utils::mem::memsec;
unsafe {
memsec::memzero(start.to_ptr_mut::<u8>(), BLOCK_SIZE * n_blocks);
}
}
}
impl fmt::Display for Treadmill {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "next: {}\n", self.from_space_next).unwrap();
write!(f, "from:").unwrap();
for i in 0..self.spaces[self.from].len() {
write!(f, "{}->", self.spaces[self.from][i]).unwrap();
}
write!(f, "\n").unwrap();
write!(f, "to:").unwrap();
for i in 0..self.spaces[self.to].len() {
write!(f, "{}->", self.spaces[self.to][i]).unwrap();
}
Ok(())
}
}
struct TreadmillNode {
payload: Address
}
impl TreadmillNode {
fn new(addr: Address) -> TreadmillNode {
TreadmillNode { payload: addr }
}
}
impl fmt::Display for TreadmillNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{}]", self.payload)
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::BLOCK_SIZE;
#[test]
fn test_new_treadmill_space() {
let space = FreeListSpace::new(BLOCK_SIZE * 10);
println!("{}", space);
}
#[test]
fn test_treadmill_alloc() {
let space = FreeListSpace::new(BLOCK_SIZE * 20);
for i in 0..10 {
let ret = space.alloc(BLOCK_SIZE / 2, 8);
println!("Allocation{}: {}", i, ret);
assert!(!ret.is_zero());
}
}
#[test]
fn test_treadmill_alloc_spanblock() {
let space = FreeListSpace::new(BLOCK_SIZE * 20);
for i in 0..5 {
let ret = space.alloc(BLOCK_SIZE + BLOCK_SIZE / 2, 8);
println!("Allocation{}: {}", i, ret);
assert!(!ret.is_zero());
}
}
#[test]
fn test_treadmill_sweep() {
let space = FreeListSpace::new(BLOCK_SIZE * 20);
for i in 0..5 {
let ret = space.alloc(BLOCK_SIZE + BLOCK_SIZE / 2, 8);
println!("Allocation{}: {}", i, ret);
assert!(!ret.is_zero());
}
// nothing was marked alive, so a sweep should reclaim every block
space.sweep();
}
}
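
For reference, the alloc path in the listing above rounds each request up to whole blocks and then steps the returned address past the object header. A standalone sketch of that block arithmetic, assuming a 4 KB block and an 8-byte header (the real sizes come from objectmodel):

const BLOCK_SIZE: usize = 1 << 12; // 4 KB, matching the constant above
const OBJECT_HEADER_SIZE: usize = 8; // assumed; the real value lives in objectmodel

// number of whole blocks needed for a payload of `size` bytes plus its header
fn blocks_needed(size: usize) -> usize {
    let total = size + OBJECT_HEADER_SIZE;
    (total + BLOCK_SIZE - 1) / BLOCK_SIZE // ceiling division
}

fn main() {
    assert_eq!(blocks_needed(BLOCK_SIZE / 2), 1); // half a block fits in one
    assert_eq!(blocks_needed(BLOCK_SIZE), 2); // the header pushes it over
    assert_eq!(blocks_needed(BLOCK_SIZE + BLOCK_SIZE / 2), 2);
}
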
@@ -18,7 +18,7 @@ use heap::immix::*;
use heap::gc;
use objectmodel::*;
use utils::bit_utils;
-use utils::mem::memmap;
+use utils::mem::*;
use utils::mem::memsec;
use std::*;
@@ -75,8 +75,8 @@ pub struct ImmixSpace {
pub last_gc_used_lines: usize,
// 16 bytes
-#[allow(dead_code)]
-mmap: memmap::MmapMut,
+mmap_start: Address,
+mmap_size: ByteSize,
// padding so that space metadata takes 64KB
padding: [u64; ((BYTES_IN_BLOCK - 32 - 32 - 88 - 32 - 16) >> 3)],
@@ -125,7 +125,9 @@ impl Space for ImmixSpace {
true
}
-fn destroy(&mut self) {}
+fn destroy(&mut self) {
+munmap(self.mmap_start, self.mmap_size);
+}
fn prepare_for_gc(&mut self) {
// erase line marks
@@ -301,16 +303,11 @@ impl RawMemoryMetadata for ImmixBlock {
impl ImmixSpace {
pub fn new(desc: SpaceDescriptor, space_size: ByteSize) -> Raw<ImmixSpace> {
// acquire memory through mmap
-let mut anon_mmap: memmap::MmapMut = match memmap::MmapMut::map_anon(
-BYTES_PREALLOC_SPACE * 2 // for alignment
-) {
-Ok(m) => m,
-Err(_) => panic!("failed to reserve address space for mmap")
-};
-let mmap_ptr = anon_mmap.as_mut_ptr();
-trace!(" mmap ptr: {:?}", mmap_ptr);
+let mmap_size = BYTES_PREALLOC_SPACE * 2;
+let mmap_start = mmap_large(mmap_size);
+trace!(" mmap ptr: {}", mmap_start);
-let meta_start: Address = Address::from_ptr::<u8>(mmap_ptr).align_up(SPACE_ALIGN);
+let meta_start: Address = mmap_start.align_up(SPACE_ALIGN);
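
Both FreelistSpace::new and ImmixSpace::new reserve BYTES_PREALLOC_SPACE * 2 so that a SPACE_ALIGN-aligned region is guaranteed to fit somewhere inside the reservation, wherever the kernel happens to place the mapping. A minimal sketch of that align-up arithmetic (the names and values here are illustrative):

// Align `addr` up to the next multiple of `align` (a power of two).
// Reserving twice the needed size guarantees the aligned region fits:
// at most (align - 1) bytes are wasted at the front.
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    let space_align = 1 << 19; // SPACE_ALIGN in the freelist space
    let mmap_start = 0x7f00_1234_5678usize; // wherever mmap happened to return
    let meta_start = align_up(mmap_start, space_align);
    assert_eq!(meta_start % space_align, 0);
    assert!(meta_start - mmap_start < space_align); // fits in the 2x reservation
    println!("{:#x} -> {:#x}", mmap_start, meta_start);
}
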