// Copyright 2017 The Australian National University
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
//     http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
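
//! Runtime representation of Mu stacks and threads: allocating stacks with
//! guard pages, loading arguments onto a new stack, and swapping between
//! native (Rust) stacks and Mu stacks.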

#![allow(dead_code)]

use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use vm::VM;
use runtime;
use runtime::ValueLocation;
use runtime::mm;

use utils::ByteSize;
use utils::Address;
use utils::Word;
use utils::mem::memmap;
use utils::mem::memsec;

use std;
use std::ptr;
use std::thread;
use std::thread::JoinHandle;
use std::sync::Arc;
use std::fmt;

pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

#[cfg(target_arch = "aarch64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

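/// A Mu stack: a mmap-ed memory region with an overflow guard page below and an
/// underflow guard page above the usable stack, plus the sp/bp/ip values saved
/// while the stack is not active.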
#[repr(C)]
pub struct MuStack {
    pub hdr: MuEntityHeader,

    // address, id
    func: Option<(ValueLocation, MuID)>,
    
    size: ByteSize,
    //    lo addr                                                    hi addr
    //     | overflow guard page | actual stack ..................... | underflow guard page|
    //     |                     |                                    |                     |
    // overflowGuard           lowerBound                           upperBound
    //                                                              underflowGuard    
    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,
    
    // these pointers should only be used when the stack is not active
    sp : Address,
    bp : Address,
    ip : Address,
    
    state: MuStackState,
    #[allow(dead_code)]
    mmap           : Option<memmap::Mmap>
}

impl MuStack {
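    /// Creates a new stack whose entry function is `func` (located at `func_addr`).
    /// This mmaps `PAGE_SIZE * 2 + STACK_SIZE` bytes of anonymous memory, protects the
    /// first and last pages as overflow/underflow guards, and marks the stack as
    /// `Ready` to take the entry function's arguments.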
    pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;
        
        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };
        
        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
        
        let overflow_guard = mmap_start;
        let lower_bound = mmap_start.plus(PAGE_SIZE);
        let upper_bound = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;
        
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }
        
        debug!("creating stack {} with entry func {:?}", id, func);
        debug!("overflow_guard : {}", overflow_guard);
        debug!("lower_bound    : {}", lower_bound);
        debug!("upper_bound    : {}", upper_bound);
        debug!("underflow_guard: {}", underflow_guard);
        
        MuStack {
            hdr: MuEntityHeader::unnamed(id),
            func: Some((func_addr, func.id())),
            
            state: MuStackState::Ready(func.sig.arg_tys.clone()),
            
            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            underflow_guard: underflow_guard,
            
            sp: upper_bound,
            bp: upper_bound,
            ip: unsafe {Address::zero()},
            
            mmap: Some(anon_mmap)
        }
    }

    #[cfg(target_arch = "aarch64")]
    // TODO: What will happen if some things need to be loaded on the stack?
    // TODO: Should we save XR (X8, the indirect result location register)?
    // (NOTE: any changes here need to be reflected in swap_to_mu_stack)
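    // This pushes the argument values onto the (inactive) stack: one word per argument FPR,
    // then one word per argument GPR (unused slots are zeroed), so that swap_to_mu_stack can
    // pop them into the corresponding registers when the stack is first resumed.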
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::aarch64;

        let mut gpr_used = vec![];
        let mut fpr_used = vec![];

        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();

            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
                RegGroup::GPREX => unimplemented!(),
            }
        }

        let mut stack_ptr = self.sp;
        for i in 0..aarch64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        for i in 0..aarch64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;

        self.print_stack(Some(20));
    }

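    /// x86_64 version of `runtime_load_args`: writes one word per argument FPR and then
    /// one word per argument GPR onto the inactive stack (zero for unused slots) and moves
    /// `sp` down accordingly, so the stack-swapping code can populate the argument
    /// registers when the stack is first resumed.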
    #[cfg(target_arch = "x86_64")]
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::x86_64;
        
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];
        
        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();
            
            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::GPREX => unimplemented!(),
                RegGroup::FPR => fpr_used.push(word),
            }
        }
        
        let mut stack_ptr = self.sp;
        for i in 0..x86_64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        for i in 0..x86_64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;
        
        self.print_stack(Some(20));
    }
    
    pub fn print_stack(&self, n_entries: Option<usize>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        
        let mut cursor = self.upper_bound.sub(WORD_SIZE);
        let mut count = 0;
        
        debug!("0x{:x} | UPPER_BOUND", self.upper_bound);
        while cursor >= self.lower_bound {
            let val = unsafe{cursor.load::<Word>()};
            
            if cursor == self.sp {
                debug!("0x{:x} | 0x{:x} ({}) <- SP", cursor, val, val);
            } else {
                debug!("0x{:x} | 0x{:x} ({})", cursor, val, val);
            }
            
            cursor = cursor.sub(WORD_SIZE);
            count += 1;
            
            if n_entries.is_some() && count > n_entries.unwrap() {
                debug!("...");
                break;
            }
        }
        
        debug!("0x{:x} | LOWER_BOUND", self.lower_bound);
    }

    pub fn print_backtrace(&self) {

    }
}

pub enum MuStackState {
    Ready(Vec<P<MuType>>), // ready to resume when values of given types are supplied (can be empty)
    Active,
    Dead
}

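/// A Mu thread: its allocator (`mm::Mutator`), the stack it is running, the location
/// where the native stack pointer is saved while executing on a Mu stack, the user-level
/// thread local (may be zero), any pending exception object, and a reference to the VM.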
#[repr(C)]
pub struct MuThread {
    pub hdr: MuEntityHeader,
    pub allocator: mm::Mutator,
    pub stack: Option<Box<MuStack>>,
    
    pub native_sp_loc: Address,
    pub user_tls: Address, // can be zero

    pub exception_obj: Address,
    pub vm: Arc<VM>
}

// this depends on the layout of MuThread
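// (the offsets below are computed with offset_of! so that MuThread fields can be
// addressed relative to the thread-local pointer; e.g. NATIVE_SP_LOC_OFFSET is used in
// mu_thread_launch to find where to save the native stack pointer)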
lazy_static! {
    pub static ref ALLOCATOR_OFFSET     : usize = offset_of!(MuThread=>allocator).get_byte_offset();
    pub static ref NATIVE_SP_LOC_OFFSET : usize = offset_of!(MuThread=>native_sp_loc).get_byte_offset();
    pub static ref USER_TLS_OFFSET      : usize = offset_of!(MuThread=>user_tls).get_byte_offset();
    pub static ref EXCEPTION_OBJ_OFFSET : usize = offset_of!(MuThread=>exception_obj).get_byte_offset();
}

impl fmt::Display for MuThread {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "MuThread    @{:?}: {}\n", self as *const MuThread, self.hdr).unwrap();
        write!(f, "- header    @{:?}\n",       &self.hdr as *const MuEntityHeader).unwrap();
        write!(f, "- allocator @{:?}\n",       &self.allocator as *const mm::Mutator).unwrap();
        write!(f, "- stack     @{:?}: {}\n", &self.stack as *const Option<Box<MuStack>>, self.stack.is_some()).unwrap();
        write!(f, "- native sp @{:?}: {}\n", &self.native_sp_loc as *const Address, self.native_sp_loc).unwrap();
        write!(f, "- user_tls  @{:?}: {}\n", &self.user_tls as *const Address, self.user_tls).unwrap();
        write!(f, "- exc obj   @{:?}: {}\n", &self.exception_obj as *const Address, self.exception_obj).unwrap();

        Ok(())
    }
}

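// The functions below are not defined in Rust; they are resolved from the `swap_stack`
// and `runtime` libraries named in the #[link] attributes.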
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
#[allow(improper_ctypes)]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
    pub fn muentry_set_retval(val: u32);
}

#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
    pub fn muentry_set_retval(val: u32);
}

#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

impl MuThread {
    pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> MuThread {
        MuThread {
            hdr: MuEntityHeader::unnamed(id),
            allocator: allocator,
            stack: Some(stack),
            native_sp_loc: unsafe {Address::zero()},
            user_tls: user_tls,
            vm: vm,
            exception_obj: unsafe {Address::zero()}
        }
    }

    #[inline(always)]
    pub fn has_current() -> bool {
        ! unsafe {muentry_get_thread_local()}.is_zero()
    }
    
    #[inline(always)]
    pub fn current() -> &'static MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr::<MuThread>().as_ref().unwrap()
        }
    }
    
    #[inline(always)]
    pub fn current_mut() -> &'static mut MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr_mut::<MuThread>().as_mut().unwrap()
        }
    }

    #[allow(unused_unsafe)]
    // parts of the body are unsafe and we keep the explicit unsafe blocks (hence the allow above);
    // the function itself is exposed as unsafe because it is not always safe to call it
    /// Returns true if this call created a MuThread for the current thread
    /// (false means the current thread already had a MuThread).
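    ///
    /// This wraps the current native thread in a fake `MuStack`/`MuThread` pair (with a
    /// fresh allocator) and installs it as the thread local, so runtime code that expects
    /// a Mu thread can run on this thread.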
    pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
        use std::usize;

        // build exception table
        vm.build_exception_table();

        if ! unsafe{muentry_get_thread_local()}.is_zero() {
            warn!("current thread already has a MuThread as its thread local");
            return false;
        }

        // fake a stack for current thread
        let fake_mu_stack_for_cur = Box::new(MuStack {
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            func: None,

            state: MuStackState::Active,

            // we do not know anything about current stack
            // treat it as max size
            size: usize::MAX,
            overflow_guard: unsafe {Address::zero()},
            lower_bound: unsafe {Address::zero()},
            upper_bound: unsafe {Address::max()},
            underflow_guard: unsafe {Address::max()},

            // these will only be used when stack is not active (we still save to these fields)
            // their values do not matter now
            sp: unsafe {Address::zero()},
            bp: unsafe {Address::zero()},
            ip: unsafe {Address::zero()},

            // we are not responsible for keeping the memory alive
            mmap: None,
        });

        let fake_mu_thread = MuThread {
            hdr: MuEntityHeader::unnamed(vm.next_id()),

            // valid allocator and stack
            allocator: mm::new_mutator(),
            stack: Some(fake_mu_stack_for_cur),

            // we do not need native_sp_loc (we do not expect this thread to swap to a Mu stack)
            native_sp_loc: unsafe {Address::zero()},
            user_tls: threadlocal,

            vm: vm,
            exception_obj: unsafe {Address::zero()}
        };

        let ptr_fake_mu_thread : *mut MuThread = Box::into_raw(Box::new(fake_mu_thread));

        // set thread local
        unsafe {set_thread_local(ptr_fake_mu_thread)};

        true
    }

    /// Turns the current Mu thread back into a normal (native) thread.
    #[allow(unused_variables)]
    pub unsafe fn cleanup_current_mu_thread() {
        let mu_thread_addr = muentry_get_thread_local();

        if !mu_thread_addr.is_zero() {
            let mu_thread : *mut MuThread = mu_thread_addr.to_ptr_mut();
            mm::drop_mutator(&mut (*mu_thread).allocator as *mut mm::Mutator);

            let mu_thread : Box<MuThread> = Box::from_raw(mu_thread);

            // set thread local to zero
            set_thread_local(ptr::null_mut())

            // drop mu_thread here
        }
    }
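    /// Creates and starts a Mu thread that will execute `stack`'s entry function with the
    /// given argument values, using `threadlocal` as the user-level thread local.
    ///
    /// A hypothetical usage sketch (assumes a compiled `MuFunction` `func` and a
    /// `ValueLocation` `func_addr` for its code are already available; the names here are
    /// illustrative only):
    ///
    /// ```ignore
    /// let stack  = Box::new(MuStack::new(vm.next_id(), func_addr, &func));
    /// let handle = MuThread::new_thread_normal(stack, unsafe { Address::zero() }, vec![], vm.clone());
    /// handle.join().unwrap();
    /// ```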
    
    pub fn new_thread_normal(mut stack: Box<MuStack>, threadlocal: Address, vals: Vec<ValueLocation>, vm: Arc<VM>) -> JoinHandle<()> {
        // set up arguments on stack
        stack.runtime_load_args(vals);
        
        MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
    }
    
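    /// Launches a Mu thread: spawns an OS thread that builds a `MuThread` (with a fresh
    /// mutator), installs it as the thread local, and then swaps from the Rust stack onto
    /// the Mu stack at its entry function. Returns the `JoinHandle` of the spawned thread.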
    #[no_mangle]
    #[allow(unused_variables)]
    pub extern fn mu_thread_launch(id: MuID, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> JoinHandle<()> {
        let new_sp = stack.sp;
        let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
        debug!("entry : 0x{:x}", entry);
        
        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
            let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
            
            // set thread local
            unsafe {set_thread_local(muthread)};
            
            let addr = unsafe {muentry_get_thread_local()};
            let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
            
            debug!("new sp: 0x{:x}", new_sp);
            debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
            
            unsafe {
                swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc); 
            }
            
            debug!("returned to Rust stack. Going to quit");
        }) {
            Ok(handle) => handle,
            Err(_) => panic!("failed to create a thread")
        }
    }
}

rodal_struct!(MuPrimordialThread{func_id, args, has_const_args});
#[derive(Debug)]
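/// Description of the primordial Mu thread: the ID of its entry function and the argument
/// values to call it with (the `+N` comments record the field byte offsets; the struct's
/// layout is described to rodal via `rodal_struct!` above).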
pub struct MuPrimordialThread {
    pub func_id: MuID, // +0
    pub args: Vec<Constant>, // +8
    pub has_const_args: bool, // +32
}