#![allow(dead_code)]

use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use vm::VM;
use runtime;
use runtime::ValueLocation;
use runtime::mm;

use utils::ByteSize;
use utils::Address;
use utils::Word;
use utils::mem::memmap;
use utils::mem::memsec;

use std::ptr;
use std::mem;
use std::thread;
use std::thread::JoinHandle;
use std::sync::Arc;
use std::fmt;

pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

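/// A Mu stack: an anonymously mmapped region of STACK_SIZE usable bytes with a
/// guard page at each end (see the layout sketch inside the struct). While the
/// stack is inactive, its saved context lives in `sp`/`bp`/`ip`.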
#[repr(C)]
pub struct MuStack {
    pub hdr: MuEntityHeader,

    // address, id
    func: Option<(ValueLocation, MuID)>,
    
    size: ByteSize,
    //    lo addr                                                    hi addr
    //     | overflow guard page | actual stack ..................... | underflow guard page|
    //     |                     |                                    |                     |
    // overflowGuard           lowerBound                           upperBound
    //                                                              underflowGuard    
    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,
    
    // these saved register values (sp/bp/ip) should only be used when the stack is not active
    sp : Address,
    bp : Address,
    ip : Address,
    
    state: MuStackState,
    #[allow(dead_code)]
    mmap           : Option<memmap::Mmap>
}

impl MuStack {
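    /// Creates a new stack that will run `func` (located at `func_addr`) when
    /// first swapped to. Maps PAGE_SIZE * 2 + STACK_SIZE bytes anonymously and
    /// mprotects the first and last page as overflow/underflow guards; the new
    /// stack starts empty (sp/bp at upper_bound) in the `Ready` state.
    ///
    /// Illustrative use (hypothetical bindings; `entry_loc` and `func` must come
    /// from the VM/compiler):
    ///
    /// ```ignore
    /// let stack = Box::new(MuStack::new(vm.next_id(), entry_loc, &func));
    /// ```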
    pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;
        
        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };
        
        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
        
        let overflow_guard = mmap_start;
        let lower_bound = mmap_start.plus(PAGE_SIZE);
        let upper_bound = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;
        
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }
        
        debug!("creating stack {} with entry func {:?}", id, func);
        debug!("overflow_guard : {}", overflow_guard);
        debug!("lower_bound    : {}", lower_bound);
        debug!("upper_bound    : {}", upper_bound);
        debug!("underflow_guard: {}", underflow_guard);
        
        MuStack {
            hdr: MuEntityHeader::unnamed(id),
            func: Some((func_addr, func.id())),
            
            state: MuStackState::Ready(func.sig.arg_tys.clone()),
            
            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            underflow_guard: underflow_guard,
            
            sp: upper_bound,
            bp: upper_bound,
            ip: unsafe {Address::zero()},
            
            mmap: Some(anon_mmap)
        }
    }
    
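    /// Lays out the given argument values on this (inactive) stack: one word per
    /// entry of ARGUMENT_FPRs is pushed first, then one word per entry of
    /// ARGUMENT_GPRs, with unused slots zero-filled, and `sp` is moved down
    /// accordingly. Presumably the stack-entry code in the `swap_stack` assembly
    /// pops these words into the corresponding argument registers before jumping
    /// to the stack's entry function (assumption; the trampoline is not in this file).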
    #[cfg(target_arch = "x86_64")]
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::x86_64;
        
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];
        
        for val in vals.iter() {
            let (reg_group, word) = val.load_value();
            
            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }
        
        let mut stack_ptr = self.sp;
        for i in 0..x86_64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        for i in 0..x86_64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;
        
        self.print_stack(Some(20));
    }
    
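    /// Debug-prints the stack contents word by word, from `upper_bound` down
    /// towards `lower_bound`, marking the slot that `sp` points at; stops after
    /// `n_entries` words if a limit is given.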
    pub fn print_stack(&self, n_entries: Option<usize>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        
        let mut cursor = self.upper_bound.sub(WORD_SIZE);
        let mut count = 0;
        
        debug!("0x{:x} | UPPER_BOUND", self.upper_bound);
        while cursor >= self.lower_bound {
            let val = unsafe{cursor.load::<Word>()};
            print!("0x{:x} | 0x{:x} ({})", cursor, val, val);
            
            if cursor == self.sp {
                print!(" <- SP");
            }
            
            debug!("");
            
            cursor = cursor.sub(WORD_SIZE);
            count += 1;
            
            if n_entries.is_some() && count > n_entries.unwrap() {
                debug!("...");
                break;
            }
        }
        
        debug!("0x{:x} | LOWER_BOUND", self.lower_bound);
    }
}

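/// Life-cycle state of a `MuStack`.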
pub enum MuStackState {
    Ready(Vec<P<MuType>>), // ready to resume when values of given types are supplied (can be empty)
    Active,
    Dead
}

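/// A Mu thread. The struct is #[repr(C)] and read by field offset (see the
/// *_OFFSET values below), so its layout must stay in sync with those offsets.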
#[repr(C)]
#[allow(improper_ctypes)]
// do not change the layout (unless the field offsets below are updated correspondingly)
pub struct MuThread {
    pub hdr: MuEntityHeader,
    pub allocator: mm::Mutator,
    pub stack: Option<Box<MuStack>>,
    
    pub native_sp_loc: Address,
    pub user_tls: Address, // can be zero

    pub exception_obj: Address,
    pub vm: Arc<VM>
}

// this depends on the layout of MuThread
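// each offset accumulates the sizes of the preceding fields (hdr, allocator,
// stack, native_sp_loc, user_tls), assuming #[repr(C)] introduces no padding
// between them. NATIVE_SP_LOC_OFFSET is used by mu_thread_launch below; the
// remaining offsets are presumably consumed by generated code.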
lazy_static! {
    pub static ref ALLOCATOR_OFFSET : usize = mem::size_of::<MuEntityHeader>();

    pub static ref NATIVE_SP_LOC_OFFSET : usize = *ALLOCATOR_OFFSET
                + mem::size_of::<mm::Mutator>()
                + mem::size_of::<Option<Box<MuStack>>>();

    pub static ref USER_TLS_OFFSET : usize = *NATIVE_SP_LOC_OFFSET + mem::size_of::<Address>();

    pub static ref EXCEPTION_OBJ_OFFSET : usize = *USER_TLS_OFFSET + mem::size_of::<Address>();
}

impl fmt::Display for MuThread {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "MuThread    @{:?}: {}\n", self as *const MuThread, self.hdr).unwrap();
        write!(f, "- header    @{:?}\n",       &self.hdr as *const MuEntityHeader).unwrap();
        write!(f, "- allocator @{:?}\n",       &self.allocator as *const mm::Mutator).unwrap();
        write!(f, "- stack     @{:?}: {}\n", &self.stack as *const Option<Box<MuStack>>, self.stack.is_some()).unwrap();
        write!(f, "- native sp @{:?}: {}\n", &self.native_sp_loc as *const Address, self.native_sp_loc).unwrap();
        write!(f, "- user_tls  @{:?}: {}\n", &self.user_tls as *const Address, self.user_tls).unwrap();
        write!(f, "- exc obj   @{:?}: {}\n", &self.exception_obj as *const Address, self.exception_obj).unwrap();

        Ok(())
    }
}

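// functions below are expected to be provided by the C/assembly support code in
// the `runtime` library; they manage the per-OS-thread pointer to the current MuThread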
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
}

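// stack-swapping primitives, expected to be implemented in assembly in the
// `swap_stack` library: swap_to_mu_stack saves the native stack pointer to
// old_sp_loc and starts executing entry on the stack whose sp is new_sp;
// muentry_swap_back_to_native_stack restores the saved native stack pointer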
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

impl MuThread {
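    /// Wraps an allocator, a stack and a user TLS address into a MuThread with
    /// the given id; native_sp_loc and exception_obj start out as zero.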
    pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> MuThread {
        MuThread {
            hdr: MuEntityHeader::unnamed(id),
            allocator: allocator,
            stack: Some(stack),
            native_sp_loc: unsafe {Address::zero()},
            user_tls: user_tls,
            vm: vm,
            exception_obj: unsafe {Address::zero()}
        }
    }

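    /// True if the current OS thread already has a MuThread attached to it
    /// (i.e. the runtime thread local is non-zero).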
    #[inline(always)]
    pub fn has_current() -> bool {
        ! unsafe {muentry_get_thread_local()}.is_zero()
    }
    
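    /// Returns the MuThread attached to the current OS thread; panics if there
    /// is none, so check `has_current()` first.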
    #[inline(always)]
    pub fn current() -> &'static MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr::<MuThread>().as_ref().unwrap()
        }
    }
    
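    /// Mutable counterpart of `current()`; panics if no MuThread is attached.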
    #[inline(always)]
    pub fn current_mut() -> &'static mut MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr_mut::<MuThread>().as_mut().unwrap()
        }
    }

    #[allow(unused_unsafe)]
    // exposed as unsafe because parts of this function are unsafe and it is not
    // always safe to call
    /// returns true if we have created MuThread on this call
    /// (false means we had MuThread for current thread before)
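    ///
    /// Illustrative call from native code (hypothetical binding `vm`; a zero
    /// `threadlocal` is acceptable since user_tls may be zero):
    /// ```ignore
    /// unsafe { MuThread::current_thread_as_mu_thread(Address::zero(), vm.clone()) };
    /// ```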
    pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
        use std::usize;

        if ! unsafe{muentry_get_thread_local()}.is_zero() {
            warn!("current thread already has a MuThread attached to it");
            return false;
        }

        // fake a stack for current thread
        let fake_mu_stack_for_cur = Box::new(MuStack {
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            func: None,

            state: MuStackState::Active,

            // we do not know anything about current stack
            // treat it as max size
            size: usize::MAX,
            overflow_guard: unsafe {Address::zero()},
            lower_bound: unsafe {Address::zero()},
            upper_bound: unsafe {Address::max()},
            underflow_guard: unsafe {Address::max()},

            // these will only be used when stack is not active (we still save to these fields)
            // their values do not matter now
            sp: unsafe {Address::zero()},
            bp: unsafe {Address::zero()},
            ip: unsafe {Address::zero()},

            // we are not responsible for keeping the memory alive
            mmap: None,
        });

        let fake_mu_thread = MuThread {
            hdr: MuEntityHeader::unnamed(vm.next_id()),

            // valid allocator and stack
            allocator: mm::new_mutator(),
            stack: Some(fake_mu_stack_for_cur),

            // we do not need native_sp_loc (we do not expect this thread to swap to a Mu stack)
            native_sp_loc: unsafe {Address::zero()},
            user_tls: threadlocal,

            vm: vm,
            exception_obj: unsafe {Address::zero()}
        };

        let ptr_fake_mu_thread : *mut MuThread = Box::into_raw(Box::new(fake_mu_thread));

        // set thread local
        unsafe {set_thread_local(ptr_fake_mu_thread)};

//        let addr = unsafe {muentry_get_thread_local()};
//        let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
//
//        unsafe {
//            fake_swap_mu_thread(sp_threadlocal_loc);
//        }

        true
    }

    /// turns the current Mu thread back into a normal native thread
    #[allow(unused_variables)]
    pub unsafe fn cleanup_current_mu_thread() {
        let mu_thread_addr = muentry_get_thread_local();

        if !mu_thread_addr.is_zero() {
            let mu_thread : *mut MuThread = mu_thread_addr.to_ptr_mut();
            mm::drop_mutator(&mut (*mu_thread).allocator as *mut mm::Mutator);

            let mu_thread : Box<MuThread> = Box::from_raw(mu_thread);

            // set thread local to zero
            set_thread_local(ptr::null_mut());

            // drop mu_thread here
        }
    }
    
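    /// Loads `vals` as the entry function's arguments onto `stack`, then
    /// launches a new Mu thread running that stack.
    ///
    /// Illustrative use (hypothetical bindings; the stack and the argument
    /// ValueLocations must be prepared by the VM):
    ///
    /// ```ignore
    /// let handle = MuThread::new_thread_normal(stack, unsafe { Address::zero() }, args, vm.clone());
    /// handle.join().unwrap();
    /// ```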
    pub fn new_thread_normal(mut stack: Box<MuStack>, threadlocal: Address, vals: Vec<ValueLocation>, vm: Arc<VM>) -> JoinHandle<()> {
        // set up arguments on stack
        stack.runtime_load_args(vals);
        
        MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
    }
    
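    /// Spawns an OS thread that attaches a freshly boxed MuThread (with a new
    /// mutator) as its thread local, then swaps onto the Mu stack and enters the
    /// stack's function. The OS thread finishes, and the JoinHandle resolves,
    /// once control swaps back to the native Rust stack.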
    #[no_mangle]
    #[allow(unused_variables)]
    pub extern fn mu_thread_launch(id: MuID, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> JoinHandle<()> {
        let new_sp = stack.sp;
        let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
        debug!("entry : 0x{:x}", entry);
        
        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
            let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
            
            // set thread local
            unsafe {set_thread_local(muthread)};
            
            let addr = unsafe {muentry_get_thread_local()};
            let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
            
            debug!("new sp: 0x{:x}", new_sp);
            debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
            
            unsafe {
                swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc); 
            }
            
            debug!("returned to Rust stack. Going to quit");
        }) {
            Ok(handle) => handle,
            Err(_) => panic!("failed to create a thread")
        }
    }
}

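/// Description of the primordial Mu thread: the id of the function it starts in
/// and the constant arguments passed to it.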
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct MuPrimordialThread {
    pub func_id: MuID,
    pub args: Vec<Constant>
}