Commit f3670528 authored by qinsoon

[wip] keep on thread impl

parent 6d46f0fb
@@ -18,4 +18,5 @@ simple_logger = "0.4.0"
 nalgebra = "0.8.2"
 linked-hash-map = "0.0.10"
 hprof = "0.1.3"
 memmap = "0.4.0"
+memsec = "0.1.9"
\ No newline at end of file
@@ -11,11 +11,11 @@ use std::default;
 use std::sync::RwLock;
 use std::cell::Cell;
 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use utils::Address;

 pub type WPID = usize;
 pub type MuID = usize;
 pub type MuName = &'static str;
-pub type Address = usize; // TODO: replace this with Address(usize)

 pub type OpIndex = usize;
@@ -633,8 +633,8 @@ impl fmt::Display for MemoryLocation {
 #[derive(Debug)] // Display, PartialEq
 pub struct MuEntityHeader {
-    id: MuID,
-    name: RwLock<Option<MuName>>
+    pub id: MuID,
+    pub name: RwLock<Option<MuName>>
 }

 impl MuEntityHeader {
...
 #[macro_use]
+pub mod ir;
 pub mod inst;
 pub mod types;
-pub mod ir;
 pub mod ir_semantics;
 pub mod ptr;
 pub mod op;
\ No newline at end of file
@@ -42,6 +42,9 @@ struct ASMCode {
     block_liveout: HashMap<MuName, Vec<MuID>>
 }

+unsafe impl Send for ASMCode {}
+unsafe impl Sync for ASMCode {}
+
 impl MachineCode for ASMCode {
     fn number_of_insts(&self) -> usize {
         self.code.len()
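
The two `unsafe impl` lines assert that `ASMCode` may be moved and shared across threads even though one of its fields keeps the compiler from deriving `Send`/`Sync` automatically. A minimal sketch of the same pattern with a hypothetical `CodeBuffer` type (not part of this commit):

```rust
use std::thread;

// Hypothetical stand-in for ASMCode: a raw pointer field suppresses the
// auto traits Send and Sync, so the type cannot cross threads by default.
struct CodeBuffer {
    bytes: *const u8,
    len: usize,
}

// Manual promise that moving/sharing the value across threads is sound
// (here: the pointed-to memory is static and never mutated).
unsafe impl Send for CodeBuffer {}
unsafe impl Sync for CodeBuffer {}

fn main() {
    static CODE: [u8; 4] = [0x90; 4]; // some machine-code bytes
    let buf = CodeBuffer { bytes: CODE.as_ptr(), len: CODE.len() };

    // Without the unsafe impls above, this move would fail to compile.
    thread::spawn(move || println!("{} bytes at {:p}", buf.len, buf.bytes))
        .join()
        .unwrap();
}
```
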
@@ -1087,10 +1090,10 @@ pub fn emit_code(fv: &mut MuFunctionVersion, vm: &VM) {
     use std::path;

     let funcs = vm.funcs().read().unwrap();
-    let func = funcs.get(&fv.func_id).unwrap().borrow();
+    let func = funcs.get(&fv.func_id).unwrap().read().unwrap();
     let compiled_funcs = vm.compiled_funcs().read().unwrap();
-    let cf = compiled_funcs.get(&fv.id()).unwrap().borrow();
+    let cf = compiled_funcs.get(&fv.id()).unwrap().read().unwrap();

     let code = cf.mc.emit();
...
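
The recurring change in this commit replaces the `RefCell` wrappers around the VM's function tables with `RwLock`, so `.borrow()`/`.borrow_mut()` become `.read().unwrap()`/`.write().unwrap()`. A rough sketch of the motivation, using a simplified `Func` type rather than the real `MuFunction`: `RefCell` is not `Sync`, so a table of `RefCell`s cannot be shared with the threads this commit introduces, whereas `RwLock` allows many concurrent readers or one writer.

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::thread;

// Simplified stand-in for MuFunction.
struct Func { name: String }

fn main() {
    // HashMap<id, RwLock<Func>> behind an outer RwLock, mirroring vm.funcs().
    let funcs: Arc<RwLock<HashMap<usize, RwLock<Func>>>> =
        Arc::new(RwLock::new(HashMap::new()));

    funcs.write().unwrap().insert(1, RwLock::new(Func { name: "main".to_string() }));

    let shared = funcs.clone();
    let t = thread::spawn(move || {
        // read lock on the table, then read lock on the entry
        let map = shared.read().unwrap();
        let f = map.get(&1).unwrap().read().unwrap();
        println!("compiling {}", f.name);
    });
    t.join().unwrap();
}
```
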
@@ -161,7 +161,7 @@ impl <'a> InstructionSelection {
     if self.match_funcref_const(func) {
         let target_id = self.emit_get_funcref_const(func);
         let funcs = vm.funcs().read().unwrap();
-        let target = funcs.get(&target_id).unwrap().borrow();
+        let target = funcs.get(&target_id).unwrap().read().unwrap();

         if vm.is_running() {
             unimplemented!()
@@ -790,7 +790,7 @@ impl CompilerPass for InstructionSelection {
     debug!("{}", self.name());

     let funcs = vm.funcs().read().unwrap();
-    let func = funcs.get(&func_ver.func_id).unwrap().borrow();
+    let func = funcs.get(&func_ver.func_id).unwrap().read().unwrap();

     self.backend.start_code(func.name().unwrap());

     // prologue (get arguments from entry block first)
...
@@ -57,7 +57,7 @@ impl CompilerPass for PeepholeOptimization {
     fn visit_function(&mut self, vm: &VM, func: &mut MuFunctionVersion) {
         let compiled_funcs = vm.compiled_funcs().read().unwrap();
-        let mut cf = compiled_funcs.get(&func.id()).unwrap().borrow_mut();
+        let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();

         for i in 0..cf.mc.number_of_insts() {
             self.remove_redundant_move(i, &mut cf);
...
@@ -25,7 +25,7 @@ impl RegisterAllocation {
     // returns true if we spill registers (which requires another instruction selection)
     fn coloring(&mut self, vm: &VM, func: &mut MuFunctionVersion) -> bool {
         let compiled_funcs = vm.compiled_funcs().read().unwrap();
-        let mut cf = compiled_funcs.get(&func.id()).unwrap().borrow_mut();
+        let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();

         cf.mc.trace_mc();
...
@@ -6,6 +6,7 @@ extern crate immix_rust as gc;
 #[macro_use]
 pub mod utils;
+#[macro_use]
 pub mod ast;
 pub mod vm;
 pub mod compiler;
...
-pub mod thread;
\ No newline at end of file
+pub extern crate immix_rust as gc;
+
+pub use gc::common::Address;
+pub use gc::common::ObjectReference;
+
+pub type Word = usize;
+
+pub mod thread;
+
+pub enum RuntimeValue {
+    Pointer(Address),
+    Value(Word)
+}
\ No newline at end of file
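
`RuntimeValue` distinguishes GC-visible pointers from plain word values when arguments are handed to a new stack. A hypothetical consumer might flatten both to a raw word like this (the `as_raw_word` helper and the simplified `Address` type are illustrative, not from this commit):

```rust
// Simplified mirrors of the types in this module, for illustration only.
type Word = usize;
#[derive(Clone, Copy)]
struct Address(usize);

enum RuntimeValue {
    Pointer(Address),
    Value(Word),
}

// Either variant ends up as one word in an argument slot on a MuStack;
// the Pointer case is the one the GC must still be told about.
fn as_raw_word(val: &RuntimeValue) -> Word {
    match val {
        RuntimeValue::Pointer(addr) => addr.0,
        RuntimeValue::Value(w) => *w,
    }
}

fn main() {
    let args = vec![RuntimeValue::Value(42), RuntimeValue::Pointer(Address(0x1000))];
    for a in &args {
        println!("raw word: {:#x}", as_raw_word(a));
    }
}
```
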
@@ -6,7 +6,3 @@ __thread void* mu_tls;
 void* init_thread_local(void* local) {
     return NULL;
 }
-
-void* aligned_mem(uint64_t size, uint64_t align) {
-
-}
-extern crate memmap;
+#![allow(dead_code)]

-use gc::Mutator;
 use ast::ir::*;
+use ast::ptr::*;
+use ast::types::*;
+use vm::VM;
+use runtime::RuntimeValue;
+use runtime::gc;
 use utils::ByteSize;
+use utils::Address;
+use utils::mem::memmap;
+use utils::mem::memsec;
+
+use std::sync::Arc;
+use std::thread;
+use std::thread::JoinHandle;

 pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

 #[cfg(target_arch = "x86_64")]
 pub const PAGE_SIZE : ByteSize = (4 << 10); // 4kb

+impl_mu_entity!(MuThread);
+impl_mu_entity!(MuStack);
+
 pub struct MuStack {
-    hdr: MuEntityHeader,
+    pub hdr: MuEntityHeader,
     func_id: MuID,
+    state: MuStackState,

     size: ByteSize,

     // lo addr                                                    hi addr
     // | overflow guard page | actual stack ..................... | underflow guard page |
     // |                     |                                    |                      |
@@ -26,6 +42,11 @@ pub struct MuStack {
     upper_bound    : Address,
     underflow_guard: Address,

+    // these frame pointers should only be used when the stack is not active
+    sp : Address,
+    bp : Address,
+    ip : Address,
+
     exception_obj  : Option<Address>,

     #[allow(dead_code)]
@@ -33,7 +54,7 @@ pub struct MuStack {
 }

 impl MuStack {
-    pub fn new(id: MuID, func_id: MuID) -> MuStack {
+    pub fn new(id: MuID, func: &MuFunction) -> MuStack {
         let total_size = PAGE_SIZE * 2 + STACK_SIZE;

         let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
@@ -41,15 +62,84 @@ impl MuStack {
             Err(_) => panic!("failed to mmap for a stack"),
         };

-        let overflow_guard = Address::from_ptr(anon_mmap.ptr());
+        let mmap_start = Address::from_ptr(anon_mmap.ptr());
+        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
+
+        let overflow_guard  = mmap_start;
+        let lower_bound     = mmap_start.plus(PAGE_SIZE);
+        let upper_bound     = lower_bound.plus(STACK_SIZE);
+        let underflow_guard = upper_bound;
+
+        unsafe {
+            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
+            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
+        }
+
-        unimplemented!()
+        debug!("creating stack {} with entry func {:?}", id, func);
+        debug!("overflow_guard : {}", overflow_guard);
+        debug!("lower_bound    : {}", lower_bound);
+        debug!("upper_bound    : {}", upper_bound);
+        debug!("underflow_guard: {}", underflow_guard);
+
+        MuStack {
+            hdr: MuEntityHeader::unnamed(id),
+            func_id: func.id(),
+            state: MuStackState::Ready(func.sig.arg_tys.clone()),
+
+            size: STACK_SIZE,
+            overflow_guard : overflow_guard,
+            lower_bound    : lower_bound,
+            upper_bound    : upper_bound,
+            underflow_guard: upper_bound,
+
+            sp: upper_bound,
+            bp: upper_bound,
+            ip: unsafe {Address::zero()},
+
+            exception_obj: None,
+
+            mmap: anon_mmap
+        }
     }
 }
+pub enum MuStackState {
+    Ready(Vec<P<MuType>>), // ready to resume when values of given types are supplied (can be empty)
+    Active,
+    Dead
+}
 pub struct MuThread {
-    hdr: MuEntityHeader,
-    allocator: Box<Mutator>,
-    user_tls: Address
+    pub hdr: MuEntityHeader,
+    allocator: Box<gc::Mutator>,
+    stack: Option<Box<MuStack>>,
+    user_tls: Option<Address>
 }
+
+impl MuThread {
+    pub fn new(id: MuID, allocator: Box<gc::Mutator>, stack: Box<MuStack>, user_tls: Option<Address>) -> MuThread {
+        MuThread {
+            hdr: MuEntityHeader::unnamed(id),
+            allocator: allocator,
+            stack: Some(stack),
+            user_tls: user_tls
+        }
+    }
+
+    pub fn launch(id: MuID, stack: Box<MuStack>, user_tls: Option<Address>, vals: Vec<RuntimeValue>, vm: &VM) -> JoinHandle<()> {
+        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
+            let mut muthread = Box::new(MuThread::new(id, gc::new_mutator(), stack, user_tls));
+
+            MuThread::thread_entry(muthread, vm);
+        }) {
+            Ok(handle) => handle,
+            Err(_) => panic!("failed to create a thread")
+        }
+    }
+
+    /// entry function for launching a mu thread
+    pub fn thread_entry(mu_thread: Box<MuThread>, vm: &VM) -> ! {
+
+    }
+}
\ No newline at end of file
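
`MuThread::launch` uses `std::thread::Builder` rather than `thread::spawn` so the OS thread can be named after the Mu thread ID and a failed spawn surfaces as a `Result` (handled here by panicking). A minimal sketch of the same pattern with a plain closure as the payload:

```rust
use std::thread;
use std::thread::JoinHandle;

fn launch(id: usize) -> JoinHandle<()> {
    match thread::Builder::new()
        .name(format!("Mu Thread #{}", id))
        .spawn(move || {
            // stand-in for MuThread::thread_entry
            println!("{} running", thread::current().name().unwrap());
        }) {
        Ok(handle) => handle,
        Err(e) => panic!("failed to create a thread: {}", e),
    }
}

fn main() {
    launch(1).join().unwrap();
}
```
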
+pub extern crate memmap;
+pub extern crate memsec;
\ No newline at end of file
 #![allow(dead_code)]

+extern crate immix_rust as gc;
+
+pub use gc::common::Address;
+pub use gc::common::ObjectReference;
+
 pub type ByteSize = usize;

+pub mod mem;
+
 mod linked_hashset;

 pub use utils::linked_hashset::LinkedHashSet;
 pub use utils::linked_hashset::LinkedHashMap;
-pub use runtime::Address;
-pub use runtime::ObjectReference;

 macro_rules! select_value {
     ($cond: expr, $res1 : expr, $res2 : expr) => {
         if $cond {
...
@@ -456,7 +456,7 @@ impl MuCtx {
         fv
     } else {
         let guard = ctx.internal.vm.funcs().read().unwrap();
-        let mut func = guard.get(&fid).unwrap().borrow_mut();
+        let mut func = guard.get(&fid).unwrap().write().unwrap();

         let fv = MuFunctionVersion::new(fv_id, fid, func.sig.clone());
         func.new_version(fv_id);
...
-extern crate immix_rust as gc;
-
 use std::collections::HashMap;

 use ast::ptr::P;
@@ -9,12 +7,16 @@ use compiler::backend;
 use compiler::backend::BackendTypeInfo;
 use vm::machine_code::CompiledFunction;
 use vm::vm_options::VMOptions;
+use runtime::gc;
 use runtime::thread::MuStack;
+use runtime::RuntimeValue;
 use runtime::thread::MuThread;
+use utils::Address;

 use std::sync::RwLock;
 use std::cell::RefCell;
 use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT, Ordering};
+use std::thread::JoinHandle;
 pub struct VM {
     next_id: AtomicUsize,
@@ -31,10 +33,12 @@ pub struct VM {
     func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>,
     // key: (func_id, func_ver_id)
-    func_vers: RwLock<HashMap<(MuID, MuID), RefCell<MuFunctionVersion>>>,
-    funcs: RwLock<HashMap<MuID, RefCell<MuFunction>>>,
-    compiled_funcs: RwLock<HashMap<MuID, RefCell<CompiledFunction>>>
+    func_vers: RwLock<HashMap<(MuID, MuID), RwLock<MuFunctionVersion>>>,
+    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>,
+    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>,
+
+    threads: RwLock<Vec<JoinHandle<()>>>
 }

 impl <'a> VM {
@@ -56,7 +60,9 @@ impl <'a> VM {
             func_sigs: RwLock::new(HashMap::new()),
             func_vers: RwLock::new(HashMap::new()),
             funcs: RwLock::new(HashMap::new()),
-            compiled_funcs: RwLock::new(HashMap::new())
+            compiled_funcs: RwLock::new(HashMap::new()),
+
+            threads: RwLock::new(vec!())
         };

         ret.is_running.store(false, Ordering::SeqCst);
@@ -148,7 +154,7 @@ impl <'a> VM {
     pub fn declare_func (&self, func: MuFunction) {
         info!("declare function {}", func);
         let mut funcs = self.funcs.write().unwrap();
-        funcs.insert(func.id(), RefCell::new(func));
+        funcs.insert(func.id(), RwLock::new(func));
     }

     pub fn define_func_version (&self, func_ver: MuFunctionVersion) {
@@ -157,17 +163,17 @@ impl <'a> VM {
         let func_ver_key = (func_ver.func_id, func_ver.id());
         {
             let mut func_vers = self.func_vers.write().unwrap();
-            func_vers.insert(func_ver_key, RefCell::new(func_ver));
+            func_vers.insert(func_ver_key, RwLock::new(func_ver));
         }

         // acquire a reference to the func_ver
         let func_vers = self.func_vers.read().unwrap();
-        let func_ver = func_vers.get(&func_ver_key).unwrap().borrow();
+        let func_ver = func_vers.get(&func_ver_key).unwrap().write().unwrap();

         // change current version to this (obsolete old versions)
         let funcs = self.funcs.read().unwrap();
         debug_assert!(funcs.contains_key(&func_ver.func_id)); // it should be declared before defining
-        let mut func = funcs.get(&func_ver.func_id).unwrap().borrow_mut();
+        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();

         func.new_version(func_ver.id());
@@ -179,7 +185,7 @@ impl <'a> VM {
         debug_assert!(self.funcs.read().unwrap().contains_key(&func.func_id));
         debug_assert!(self.func_vers.read().unwrap().contains_key(&(func.func_id, func.func_ver_id)));

-        self.compiled_funcs.write().unwrap().insert(func.func_ver_id, RefCell::new(func));
+        self.compiled_funcs.write().unwrap().insert(func.func_ver_id, RwLock::new(func));
     }

     pub fn get_backend_type_info(&self, tyid: MuID) -> P<BackendTypeInfo> {
@@ -214,15 +220,15 @@ impl <'a> VM {
         &self.globals
     }

-    pub fn funcs(&self) -> &RwLock<HashMap<MuID, RefCell<MuFunction>>> {
+    pub fn funcs(&self) -> &RwLock<HashMap<MuID, RwLock<MuFunction>>> {
         &self.funcs
     }

-    pub fn func_vers(&self) -> &RwLock<HashMap<(MuID, MuID), RefCell<MuFunctionVersion>>> {
+    pub fn func_vers(&self) -> &RwLock<HashMap<(MuID, MuID), RwLock<MuFunctionVersion>>> {
         &self.func_vers
     }

-    pub fn compiled_funcs(&self) -> &RwLock<HashMap<MuID, RefCell<CompiledFunction>>> {
+    pub fn compiled_funcs(&self) -> &RwLock<HashMap<MuID, RwLock<CompiledFunction>>> {
         &self.compiled_funcs
     }
@@ -235,6 +241,24 @@ impl <'a> VM {
     }

     pub fn new_stack(&self, func_id: MuID) -> Box<MuStack> {
-        Box::new(MuStack::new(self.next_id(), func_id))
+        let funcs = self.funcs.read().unwrap();
+        let func : &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();
+
+        Box::new(MuStack::new(self.next_id(), func))
+    }
+
+    pub fn new_thread_normal(&self, stack: Box<MuStack>, threadlocal: Address, vals: Vec<RuntimeValue>) {
+        let user_tls = {
+            if threadlocal.is_zero() {
+                None
+            } else {
+                Some(threadlocal)
+            }
+        };
+
+        // set up arguments on stack
+        unimplemented!();
+
+        MuThread::launch(self.next_id(), stack, user_tls, vals, self);
     }
 }
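
The new `threads` field stores the `JoinHandle` of every launched Mu thread, presumably so the VM can wait for them at shutdown. A sketch of that bookkeeping under a simplified `Vm` type (the `wait_for_all` helper is an assumption, not part of this commit):

```rust
use std::sync::RwLock;
use std::thread::{self, JoinHandle};

struct Vm {
    threads: RwLock<Vec<JoinHandle<()>>>,
}

impl Vm {
    // Record a handle returned by a launch, as new_thread_normal would.
    fn register(&self, handle: JoinHandle<()>) {
        self.threads.write().unwrap().push(handle);
    }

    // Hypothetical shutdown helper: drain the handles and join each thread.
    fn wait_for_all(&self) {
        let handles: Vec<_> = self.threads.write().unwrap().drain(..).collect();
        for h in handles {
            h.join().unwrap();
        }
    }
}

fn main() {
    let vm = Vm { threads: RwLock::new(vec![]) };
    vm.register(thread::spawn(|| println!("mu thread done")));
    vm.wait_for_all();
}
```
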