#![allow(dead_code)]

use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use vm::VM;
use runtime::RuntimeValue;
use runtime::gc;
use utils::ByteSize;
use utils::Address;
use utils::mem::memmap;
use utils::mem::memsec;

use std::sync::Arc;
use std::thread;
use std::thread::JoinHandle;

pub const STACK_SIZE : ByteSize = (4 << 20); // 4MB

#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4KB

impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

pub struct MuStack {
    pub hdr: MuEntityHeader,

    func_id: MuID,
    state: MuStackState,
    size: ByteSize,

    //    lo addr                                                      hi addr
    //     | overflow guard page | actual stack ................ | underflow guard page |
    //     |                     |                               |                      |
    // overflowGuard        lowerBound                      upperBound
    //                                                      underflowGuard

    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,

    // these frame pointers should only be used when the stack is not active
    sp : Address,
    bp : Address,
    ip : Address,

    exception_obj : Option<Address>,
    #[allow(dead_code)]
    mmap : memmap::Mmap
}

impl MuStack {
    pub fn new(id: MuID, func: &MuFunction) -> MuStack {
        // two guard pages bracket the usable stack
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;

        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };

        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));

        let overflow_guard  = mmap_start;
        let lower_bound     = mmap_start.plus(PAGE_SIZE);
        let upper_bound     = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;

        // revoke all access to the guard pages so that running off either
        // end of the stack faults immediately
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }

        MuStack {
            hdr: MuEntityHeader::unnamed(id),
            func_id: func.id(),
            // the new stack is ready to run once the entry function's
            // arguments are supplied
            state: MuStackState::Ready(func.sig.arg_tys.clone()),

            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            underflow_guard: underflow_guard,

            // a fresh stack is empty: sp/bp start at the high end, and
            // there is no meaningful instruction pointer yet
            sp: upper_bound,
            bp: upper_bound,
            ip: Address::zero(),

            exception_obj: None,

            mmap: anon_mmap
        }
    }
}
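// A minimal sanity check of the layout arithmetic in `MuStack::new()`. This
// is an illustrative sketch, not part of the original file: it assumes
// `ByteSize` is a plain `usize` alias, and it mimics the address computation
// with an arbitrary page-aligned base instead of a real mmap.
#[cfg(test)]
mod stack_layout_sanity {
    use super::{PAGE_SIZE, STACK_SIZE};

    #[test]
    fn guard_pages_bracket_the_stack() {
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;

        // pretend mmap returned this page-aligned base address
        let mmap_start: usize = 0x10_0000;
        assert_eq!(mmap_start % PAGE_SIZE, 0);

        let overflow_guard  = mmap_start;
        let lower_bound     = mmap_start + PAGE_SIZE;
        let upper_bound     = lower_bound + STACK_SIZE;
        let underflow_guard = upper_bound;

        // the usable region is exactly STACK_SIZE bytes...
        assert_eq!(upper_bound - lower_bound, STACK_SIZE);
        // ...and the two guard pages account for the rest of the mapping
        assert_eq!(underflow_guard + PAGE_SIZE, overflow_guard + total_size);
    }
}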
pub enum MuStackState {
    Ready(Vec<P<MuType>>), // ready to resume when values of the given types are supplied (can be empty)
    Active,
    Dead
}
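// Hypothetical convenience method, sketched here for illustration only (it
// is not part of the original API): a scheduler could use something like
// this to ask whether a stack can be (re)bound to a thread.
impl MuStackState {
    pub fn is_runnable(&self) -> bool {
        match *self {
            MuStackState::Ready(_) => true,
            MuStackState::Active | MuStackState::Dead => false
        }
    }
}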
pub struct MuThread {
    pub hdr: MuEntityHeader,

    // per-thread allocator handle for the garbage collector
    allocator: Box<gc::Mutator>,