GitLab will be upgraded to version 12.10.14-ce.0 on 28 Sept 2020, from 2.00pm to 2.30pm (AEDT). During the upgrade, the GitLab and Mattermost services will be unavailable. If you have any concerns, please talk to us at N110 (b), CSIT building.

treadmill.rs 9.59 KB
Newer Older
qinsoon's avatar
qinsoon committed
1 2
#![allow(dead_code)]

3 4
extern crate doubly;

qinsoon's avatar
qinsoon committed
5 6
use utils::Address;
use utils::mem::memmap;
7 8
use common::AddressMap;

9 10
use objectmodel;

qinsoon's avatar
qinsoon committed
11
use std::sync::Arc;
12 13
use std::fmt;
use std::sync::Mutex;
qinsoon's avatar
qinsoon committed
14

15 16
use self::doubly::DoublyLinkedList;

qinsoon's avatar
qinsoon committed
17 18 19
const SPACE_ALIGN : usize = 1 << 19;
const BLOCK_SIZE  : usize = 1 << 12;    // 4kb

20 21
const TRACE_TREADMILL : bool = false;

qinsoon's avatar
qinsoon committed
22 23 24 25 26 27 28 29 30 31 32
#[repr(C)]
pub struct FreeListSpace {
    start : Address,
    end   : Address,

    pub alloc_map : Arc<AddressMap<u8>>,
    pub trace_map : Arc<AddressMap<u8>>,

    #[allow(dead_code)]
    mmap : memmap::Mmap,

33
    treadmill: Mutex<Treadmill>
qinsoon's avatar
qinsoon committed
34 35 36 37 38 39 40 41 42 43 44
}

impl FreeListSpace {
    pub fn new(space_size: usize) -> FreeListSpace {
        let anon_mmap : memmap::Mmap = match memmap::Mmap::anonymous(space_size + SPACE_ALIGN, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to call mmap")
        };
        let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
        let end   : Address = start.plus(space_size);

45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67
        let trace_map = AddressMap::new(start, end);
        let alloc_map = AddressMap::new(start, end);
        if cfg!(debug_assertions) {
            trace_map.init_all(0);
            alloc_map.init_all(0);
        }

        let treadmill = Treadmill::new(start, end);

        FreeListSpace {
            start: start,
            end: end,
            alloc_map: Arc::new(alloc_map),
            trace_map: Arc::new(trace_map),
            mmap: anon_mmap,
            treadmill: Mutex::new(treadmill)
        }
    }

    pub fn alloc(&self, size: usize, align: usize) -> Address {
        // every block is 'BLOCK_SIZE' aligned, usually we do not need to align
        assert!(BLOCK_SIZE % align == 0);

68 69
        let size = size + objectmodel::OBJECT_HEADER_SIZE;

70 71 72 73 74 75
        let blocks_needed = if size % BLOCK_SIZE == 0 {
            size / BLOCK_SIZE
        } else {
            size / BLOCK_SIZE + 1
        };

76 77 78
        if TRACE_TREADMILL {
            trace!("before allocation, space: {}", self);
        }
79

80
        trace!("requiring {} bytes ({} blocks)", size, blocks_needed);
81
        let res = {
82 83 84 85
            if blocks_needed > 1 {
                unimplemented!()
            }

86 87 88 89
            let mut treadmill = self.treadmill.lock().unwrap();
            treadmill.alloc_blocks(blocks_needed)
        };

90 91 92
        if TRACE_TREADMILL {
            trace!("after allocation, space: {}", self);
        }
93

94 95 96 97 98
        if res.is_zero() {
            res
        } else {
            res.offset(-objectmodel::OBJECT_HEADER_OFFSET)
        }
99 100
    }

101 102 103 104 105 106 107 108 109 110 111 112
    #[inline(always)]
    #[cfg(feature = "use-sidemap")]
    fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
        objectmodel::is_traced(self.trace_map(), self.start, unsafe { addr.to_object_reference() }, mark_state)
    }

    #[inline(always)]
    #[cfg(not(feature = "use-sidemap"))]
    fn is_traced(&self, addr: Address, mark_state: u8) -> bool {
        objectmodel::is_traced(unsafe{addr.to_object_reference()}, mark_state)
    }

113
    pub fn sweep(&self) {
114
        trace!("going to sweep treadmill space");
115 116 117 118 119 120 121
        if TRACE_TREADMILL {
            trace!("{}", self);
        }

        let mut nodes_scanned = 0;
        let mut free_nodes_scanned = 0;
        let mut alive_nodes_scanned = 0;
122

123
        let mut treadmill = self.treadmill.lock().unwrap();
124

125 126
        {
            let mark_state = objectmodel::load_mark_state();
127

128 129
            let from = treadmill.from;
            let to   = treadmill.to;
130

131 132 133 134 135
            let total_nodes = treadmill.spaces[from].len();
            let mut i = 0;
            while nodes_scanned < total_nodes {
                trace!("scanning {}", treadmill.spaces[from][i]);
                let addr = treadmill.spaces[from][i].payload;
136

137
                nodes_scanned += 1;
138

139 140 141
                let traced = self.is_traced(addr, mark_state);

                if traced {
142 143
                    // this object is alive
                    alive_nodes_scanned += 1;
144

145 146 147
                    // move to tospace
                    let node = treadmill.spaces[from].remove(i);
                    treadmill.spaces[to].push_front(node);
148

149
                    trace!("is alive");
150

151
                    // do not increment i
152
                } else {
153 154
                    free_nodes_scanned += 1;
                    i += 1;
155 156 157
                }
            }

158 159
            // check if we have any free nodes
            if free_nodes_scanned == 0 && treadmill.spaces[treadmill.to].len() == 0 {
160 161 162 163
                println!("didnt free up any memory in treadmill space");
                panic!("we ran out of memory in large object space")
            }
        }
164

165 166 167 168 169 170 171 172 173 174 175 176
        // next allocation in to_space will starts from alive_nodes_scanned
        treadmill.from_space_next = alive_nodes_scanned;

        // flip
        if treadmill.from == 0 {
            treadmill.from = 1;
            treadmill.to   = 0;
        } else {
            treadmill.from = 0;
            treadmill.to   = 1;
        }

177 178 179 180 181 182
        if cfg!(debug_assertions) {
            debug!("---tread mill space---");
            debug!("total nodes scanned: {}", nodes_scanned);
            debug!("alive nodes scanned: {}", alive_nodes_scanned);
            debug!("free  nodes scanned: {}", free_nodes_scanned);
        }
qinsoon's avatar
qinsoon committed
183 184 185
    }
}

186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
use heap::Space;

impl Space for FreeListSpace {
    #[inline(always)]
    fn start(&self) -> Address {
        self.start
    }

    #[inline(always)]
    fn end(&self) -> Address {
        self.end
    }

    #[inline(always)]
    fn alloc_map(&self) -> *mut u8 {
        self.alloc_map.ptr
    }

    #[inline(always)]
    fn trace_map(&self) -> *mut u8 {
        self.trace_map.ptr
    }
}

unsafe impl Sync for FreeListSpace {}
unsafe impl Send for FreeListSpace {}

impl fmt::Display for FreeListSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "FreeListSpace\n").unwrap();
        write!(f, "range={:#X} ~ {:#X}\n", self.start, self.end).unwrap();

        let treadmill : &Treadmill = &self.treadmill.lock().unwrap();
        write!(f, "treadmill: {}", treadmill)
    }
qinsoon's avatar
qinsoon committed
221 222
}

223
struct Treadmill{
224 225 226 227
    from_space_next : usize, // next available node in from_space
    from: usize,
    to  : usize,
    spaces      : [DoublyLinkedList<TreadmillNode>; 2]
228 229 230 231
}

impl Treadmill {
    fn new(start: Address, end: Address) -> Treadmill {
232
        let half_space = start.plus(end.diff(start) / 2);
qinsoon's avatar
qinsoon committed
233

234 235
        let mut from_space = DoublyLinkedList::new();
        let mut to_space   = DoublyLinkedList::new();
236

237 238 239 240 241 242
        let mut addr = start;

        while addr < half_space {
            from_space.push_back(TreadmillNode::new(addr));
            addr = addr.plus(BLOCK_SIZE);
        }
qinsoon's avatar
qinsoon committed
243 244

        while addr < end {
245
            to_space.push_back(TreadmillNode::new(addr));
246
            addr = addr.plus(BLOCK_SIZE);
qinsoon's avatar
qinsoon committed
247 248
        }

249
        Treadmill {
250 251 252 253
            from_space_next: 0,
            from: 0,
            to  : 1,
            spaces: [from_space, to_space]
254 255 256 257
        }
    }

    fn alloc_blocks(&mut self, n_blocks: usize) -> Address {
258 259
        let ref from_space = self.spaces[self.from];
        if self.from_space_next + n_blocks <= from_space.len() {
260 261 262 263 264 265 266 267 268 269
            // zero blocks
            for i in 0..n_blocks {
                let block_i = self.from_space_next + i;
                let block_start = from_space[block_i].payload;

                Treadmill::zeroing_block(block_start);
            }

            // return first block
            // FIXME: the blocks may not be contiguous!!! we cannot allocate multiple blocks
270 271
            let ret = from_space[self.from_space_next].payload;
            self.from_space_next += n_blocks;
272

273 274 275
            ret
        } else {
            unsafe {Address::zero()}
276
        }
qinsoon's avatar
qinsoon committed
277
    }
278 279 280 281 282 283 284 285

    fn zeroing_block(start: Address) {
        use utils::mem::memsec;

        unsafe {
            memsec::memzero(start.to_ptr_mut::<u8>(), BLOCK_SIZE);
        }
    }
qinsoon's avatar
qinsoon committed
286 287
}

288 289
impl fmt::Display for Treadmill {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
290
        write!(f, "next: {}\n", self.from_space_next).unwrap();
291 292 293 294
        write!(f, "from:").unwrap();
        for i in 0..self.spaces[self.from].len() {
            write!(f, "{}->", self.spaces[self.from][i]).unwrap();
        }
295
        write!(f, "\n").unwrap();
296 297 298
        write!(f, "to:").unwrap();
        for i in 0..self.spaces[self.to].len() {
            write!(f, "{}->", self.spaces[self.to][i]).unwrap();
299
        }
300
        write!(f, "\n")
301 302 303 304
    }
}

struct TreadmillNode {
305
    payload: Address
306 307 308
}

impl TreadmillNode {
309 310 311
    fn new(addr: Address) -> TreadmillNode {
        TreadmillNode {
            payload: addr
312 313
        }
    }
314
}
315

316

317 318 319
impl fmt::Display for TreadmillNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "[{}]", self.payload)
320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use super::BLOCK_SIZE;

    #[test]
    fn test_new_treadmill_space() {
        let space = FreeListSpace::new(BLOCK_SIZE * 10);

        println!("{}", space);
    }

    #[test]
    fn test_treadmill_alloc() {
qinsoon's avatar
qinsoon committed
337
        let space = FreeListSpace::new(BLOCK_SIZE * 10);
338 339

        for i in 0..10 {
340
            let ret = space.alloc(BLOCK_SIZE / 2, 8);
341 342 343 344 345
            println!("Allocation{}: {}", i, ret);
        }
    }

    #[test]
346
    #[ignore]
347
    fn test_treadmill_alloc_spanblock() {
qinsoon's avatar
qinsoon committed
348
        let space = FreeListSpace::new(BLOCK_SIZE * 10);
349 350 351 352 353 354 355 356 357

        for i in 0..5 {
            let ret = space.alloc(BLOCK_SIZE * 2, 8);
            println!("Allocation{}: {}", i, ret);
        }
    }

    #[test]
    fn test_treadmill_alloc_exhaust() {
qinsoon's avatar
qinsoon committed
358
        let space = FreeListSpace::new(BLOCK_SIZE * 10);
359 360

        for i in 0..20 {
361
            let ret = space.alloc(BLOCK_SIZE / 2, 8);
362 363 364
            println!("Allocation{}: {}", i, ret);
        }
    }
365
}