WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO has recommended that all users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

thread.rs 16.1 KB
Newer Older
qinsoon's avatar
qinsoon committed
1
#![allow(dead_code)]
qinsoon's avatar
qinsoon committed
2
3

use ast::ir::*;
qinsoon's avatar
qinsoon committed
4
5
6
use ast::ptr::*;
use ast::types::*;
use vm::VM;
7
use runtime;
8
use runtime::ValueLocation;
9
use runtime::mm;
qinsoon's avatar
qinsoon committed
10

qinsoon's avatar
qinsoon committed
11
use utils::ByteSize;
qinsoon's avatar
qinsoon committed
12
use utils::Address;
qinsoon's avatar
qinsoon committed
13
use utils::Word;
qinsoon's avatar
qinsoon committed
14
15
16
use utils::mem::memmap;
use utils::mem::memsec;

qinsoon's avatar
qinsoon committed
17
use std::ptr;
qinsoon's avatar
qinsoon committed
18
19
use std::thread;
use std::thread::JoinHandle;
20
use std::sync::Arc;
21
use std::fmt;
qinsoon's avatar
qinsoon committed
22
23
24

pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

25
26
27
#[cfg(target_arch = "aarch64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

qinsoon's avatar
qinsoon committed
28
29
30
#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

qinsoon's avatar
qinsoon committed
31
32
33
impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

34
#[repr(C)]
qinsoon's avatar
qinsoon committed
35
pub struct MuStack {
qinsoon's avatar
qinsoon committed
36
    pub hdr: MuEntityHeader,
37
38
39

    // address, id
    func: Option<(ValueLocation, MuID)>,
qinsoon's avatar
qinsoon committed
40
    
qinsoon's avatar
qinsoon committed
41
    size: ByteSize,
qinsoon's avatar
qinsoon committed
42
43
44
45
46
47
48
49
50
51
    //    lo addr                                                    hi addr
    //     | overflow guard page | actual stack ..................... | underflow guard page|
    //     |                     |                                    |                     |
    // overflowGuard           lowerBound                           upperBound
    //                                                              underflowGuard    
    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,
    
qinsoon's avatar
qinsoon committed
52
53
54
55
56
    // this frame pointers should only be used when stack is not active
    sp : Address,
    bp : Address,
    ip : Address,
    
57
    state: MuStackState,
qinsoon's avatar
qinsoon committed
58
    #[allow(dead_code)]
59
    mmap           : Option<memmap::Mmap>
qinsoon's avatar
qinsoon committed
60
61
62
}

impl MuStack {
63
    pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
qinsoon's avatar
qinsoon committed
64
65
66
67
68
69
70
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;
        
        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };
        
qinsoon's avatar
qinsoon committed
71
72
73
74
75
76
77
78
79
80
81
82
        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
        
        let overflow_guard = mmap_start;
        let lower_bound = mmap_start.plus(PAGE_SIZE);
        let upper_bound = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;
        
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }
qinsoon's avatar
qinsoon committed
83
        
qinsoon's avatar
qinsoon committed
84
85
86
87
88
89
90
91
        debug!("creating stack {} with entry func {:?}", id, func);
        debug!("overflow_guard : {}", overflow_guard);
        debug!("lower_bound    : {}", lower_bound);
        debug!("upper_bound    : {}", upper_bound);
        debug!("underflow_guard: {}", underflow_guard);
        
        MuStack {
            hdr: MuEntityHeader::unnamed(id),
92
            func: Some((func_addr, func.id())),
qinsoon's avatar
qinsoon committed
93
94
95
96
97
98
99
100
101
102
103
104
105
            
            state: MuStackState::Ready(func.sig.arg_tys.clone()),
            
            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            underflow_guard: upper_bound,
            
            sp: upper_bound,
            bp: upper_bound,
            ip: unsafe {Address::zero()},
            
106
            mmap: Some(anon_mmap)
qinsoon's avatar
qinsoon committed
107
        }
qinsoon's avatar
qinsoon committed
108
    }
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131

    #[cfg(target_arch = "aarch64")]
    // TODO: Should we save XR (X8, the indirect locations result register)
    // (NOTE: Any changes to here need to be reflected in swap_to_mu_stack)
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::aarch64;

        let mut gpr_used = vec![];
        let mut fpr_used = vec![];

        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();

            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }

132
        // Ar these
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
        let mut stack_ptr = self.sp;
        for i in 0..aarch64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        for i in 0..aarch64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;

        self.print_stack(Some(20));
    }

168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
    #[cfg(target_arch = "x86_64")]
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::x86_64;
        
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];
        
        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();
            
            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }
        
        let mut stack_ptr = self.sp;
        for i in 0..x86_64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        for i in 0..x86_64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
qinsoon's avatar
qinsoon committed
216

217
218
        // save it back
        self.sp = stack_ptr;
219
220
221
222
223
224
225
226
227
228
229
        
        self.print_stack(Some(20));
    }
    
    pub fn print_stack(&self, n_entries: Option<usize>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        
        let mut cursor = self.upper_bound.sub(WORD_SIZE);
        let mut count = 0;
        
qinsoon's avatar
qinsoon committed
230
        debug!("0x{:x} | UPPER_BOUND", self.upper_bound);
231
232
233
234
        while cursor >= self.lower_bound {
            let val = unsafe{cursor.load::<Word>()};
            
            if cursor == self.sp {
qinsoon's avatar
qinsoon committed
235
236
237
                debug!("0x{:x} | 0x{:x} ({}) <- SP", cursor, val, val);
            } else {
                debug!("0x{:x} | 0x{:x} ({})", cursor, val, val);
238
239
240
241
242
243
            }
            
            cursor = cursor.sub(WORD_SIZE);
            count += 1;
            
            if n_entries.is_some() && count > n_entries.unwrap() {
qinsoon's avatar
qinsoon committed
244
                debug!("...");
245
246
247
248
                break;
            }
        }
        
qinsoon's avatar
qinsoon committed
249
        debug!("0x{:x} | LOWER_BOUND", self.lower_bound);
250
    }
qinsoon's avatar
qinsoon committed
251
252
253
254

    pub fn print_backtrace(&self) {

    }
qinsoon's avatar
qinsoon committed
255
256
}

qinsoon's avatar
qinsoon committed
257
258
259
260
261
262
/// Life-cycle state of a MuStack.
pub enum MuStackState {
    // ready to resume when values of given types are supplied (can be empty)
    Ready(Vec<P<MuType>>),
    // currently running
    Active,
    // finished; the stack is no longer usable
    Dead
}

263
#[repr(C)]
qinsoon's avatar
qinsoon committed
264
pub struct MuThread {
qinsoon's avatar
qinsoon committed
265
    pub hdr: MuEntityHeader,
266
    pub allocator: mm::Mutator,
qinsoon's avatar
qinsoon committed
267
    pub stack: Option<Box<MuStack>>,
qinsoon's avatar
qinsoon committed
268
    
269
270
271
272
273
    pub native_sp_loc: Address,
    pub user_tls: Address, // can be zero

    pub exception_obj: Address,
    pub vm: Arc<VM>
qinsoon's avatar
qinsoon committed
274
275
}

276
277
// this depends on the layout of MuThread
lazy_static! {
278
279
280
281
    pub static ref ALLOCATOR_OFFSET     : usize = offset_of!(MuThread=>allocator).get_byte_offset();
    pub static ref NATIVE_SP_LOC_OFFSET : usize = offset_of!(MuThread=>native_sp_loc).get_byte_offset();
    pub static ref USER_TLS_OFFSET      : usize = offset_of!(MuThread=>user_tls).get_byte_offset();
    pub static ref EXCEPTION_OBJ_OFFSET : usize = offset_of!(MuThread=>exception_obj).get_byte_offset();
282
283
284
285
286
287
288
289
290
291
292
293
294
295
}

impl fmt::Display for MuThread {
    /// Multi-line debug dump: one line per field with its address.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // propagate formatter errors with `?` instead of unwrap()ing each
        // write (a failing formatter must not panic inside Display)
        write!(f, "MuThread    @{:?}: {}\n", self as *const MuThread, self.hdr)?;
        write!(f, "- header    @{:?}\n",       &self.hdr as *const MuEntityHeader)?;
        write!(f, "- allocator @{:?}\n",       &self.allocator as *const mm::Mutator)?;
        write!(f, "- stack     @{:?}: {}\n", &self.stack as *const Option<Box<MuStack>>, self.stack.is_some())?;
        write!(f, "- native sp @{:?}: {}\n", &self.native_sp_loc as *const Address, self.native_sp_loc)?;
        write!(f, "- user_tls  @{:?}: {}\n", &self.user_tls as *const Address, self.user_tls)?;
        write!(f, "- exc obj   @{:?}: {}\n", &self.exception_obj as *const Address, self.exception_obj)?;

        Ok(())
    }
}

298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
// stack-swapping routines implemented in assembly (libswap_stack).
// NOTE(review): `get_current_frame_rbp`/`rsp` use x86 register names even in
// this aarch64 build — presumably they map to the equivalent aarch64
// registers; confirm against the assembly source.
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    // swap from the current native stack to a Mu stack, saving the native sp at old_sp_loc
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

// accessors for the runtime's thread-local MuThread pointer (libruntime);
// improper_ctypes allowed because *mut MuThread is passed across the C boundary
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
#[allow(improper_ctypes)]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
}

318
#[cfg(target_arch = "x86_64")]
Kunshan Wang's avatar
Kunshan Wang committed
319
#[cfg(any(target_os = "macos", target_os = "linux"))]
320
321
#[link(name = "runtime")]
extern "C" {
qinsoon's avatar
qinsoon committed
322
    pub fn set_thread_local(thread: *mut MuThread);
qinsoon's avatar
qinsoon committed
323
    pub fn muentry_get_thread_local() -> Address;
324
325
326
}

#[cfg(target_arch = "x86_64")]
Kunshan Wang's avatar
Kunshan Wang committed
327
#[cfg(any(target_os = "macos", target_os = "linux"))]
328
329
330
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
331
    fn fake_swap_mu_thread(old_sp_loc: Address);
qinsoon's avatar
qinsoon committed
332
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
qinsoon's avatar
qinsoon committed
333
    pub fn get_current_frame_rbp() -> Address;
qinsoon's avatar
qinsoon committed
334
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
335
336
}

qinsoon's avatar
qinsoon committed
337
impl MuThread {
338
    pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> MuThread {
qinsoon's avatar
qinsoon committed
339
340
341
342
        MuThread {
            hdr: MuEntityHeader::unnamed(id),
            allocator: allocator,
            stack: Some(stack),
343
            native_sp_loc: unsafe {Address::zero()},
344
345
346
            user_tls: user_tls,
            vm: vm,
            exception_obj: unsafe {Address::zero()}
qinsoon's avatar
qinsoon committed
347
348
        }
    }
qinsoon's avatar
qinsoon committed
349
350
351
352
353

    #[inline(always)]
    pub fn has_current() -> bool {
        ! unsafe {muentry_get_thread_local()}.is_zero()
    }
qinsoon's avatar
qinsoon committed
354
    
qinsoon's avatar
qinsoon committed
355
    #[inline(always)]
356
357
    pub fn current() -> &'static MuThread {
        unsafe{
qinsoon's avatar
qinsoon committed
358
            muentry_get_thread_local().to_ptr::<MuThread>().as_ref().unwrap()
qinsoon's avatar
qinsoon committed
359
360
361
        }
    }
    
qinsoon's avatar
qinsoon committed
362
    #[inline(always)]
363
364
    pub fn current_mut() -> &'static mut MuThread {
        unsafe{
qinsoon's avatar
qinsoon committed
365
            muentry_get_thread_local().to_ptr_mut::<MuThread>().as_mut().unwrap()
366
367
        }
    }
368

qinsoon's avatar
qinsoon committed
369
370
371
    #[allow(unused_unsafe)]
    // pieces of this function are not safe (we want to mark it unsafe)
    // this function is exposed as unsafe because it is not always safe to call it
372
373
374
    /// returns true if we have created MuThread on this call
    /// (false means we had MuThread for current thread before)
    pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
375
376
        use std::usize;

377
378
        if ! unsafe{muentry_get_thread_local()}.is_zero() {
            warn!("current thread has a thread local (has a muthread to it)");
379
            return false;
380
381
        }

382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
        // fake a stack for current thread
        let fake_mu_stack_for_cur = Box::new(MuStack {
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            func: None,

            state: MuStackState::Active,

            // we do not know anything about current stack
            // treat it as max size
            size: usize::MAX,
            overflow_guard: unsafe {Address::zero()},
            lower_bound: unsafe {Address::zero()},
            upper_bound: unsafe {Address::max()},
            underflow_guard: unsafe {Address::max()},

            // these will only be used when stack is not active (we still save to these fields)
            // their values do not matter now
            sp: unsafe {Address::zero()},
            bp: unsafe {Address::zero()},
            ip: unsafe {Address::zero()},

            // we are not responsible for keeping the memory alive
            mmap: None,
        });

        let fake_mu_thread = MuThread {
            hdr: MuEntityHeader::unnamed(vm.next_id()),

            // valid allocator and stack
            allocator: mm::new_mutator(),
            stack: Some(fake_mu_stack_for_cur),

            // we do not need native_sp_loc (we do not expect the thread to call
            native_sp_loc: unsafe {Address::zero()},
416
            user_tls: threadlocal,
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432

            vm: vm,
            exception_obj: unsafe {Address::zero()}
        };

        let ptr_fake_mu_thread : *mut MuThread = Box::into_raw(Box::new(fake_mu_thread));

        // set thread local
        unsafe {set_thread_local(ptr_fake_mu_thread)};

//        let addr = unsafe {muentry_get_thread_local()};
//        let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
//
//        unsafe {
//            fake_swap_mu_thread(sp_threadlocal_loc);
//        }
433
434

        true
435
    }
436
437

    /// turn this current mu thread back as normal thread
438
    #[allow(unused_variables)]
439
    pub unsafe fn cleanup_current_mu_thread() {
440
        let mu_thread_addr = muentry_get_thread_local();
441
442
443
444
445

        if !mu_thread_addr.is_zero() {
            let mu_thread : *mut MuThread = mu_thread_addr.to_ptr_mut();
            mm::drop_mutator(&mut (*mu_thread).allocator as *mut mm::Mutator);

446
            let mu_thread : Box<MuThread> = Box::from_raw(mu_thread);
447

qinsoon's avatar
qinsoon committed
448
449
450
            // set thread local to zero
            set_thread_local(ptr::null_mut())

451
452
453
            // drop mu_thread here
        }
    }
454
455
456
457
458
    
    pub fn new_thread_normal(mut stack: Box<MuStack>, threadlocal: Address, vals: Vec<ValueLocation>, vm: Arc<VM>) -> JoinHandle<()> {
        // set up arguments on stack
        stack.runtime_load_args(vals);
        
459
        MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
460
    }
461
    
462
    #[no_mangle]
qinsoon's avatar
qinsoon committed
463
    #[allow(unused_variables)]
464
    pub extern fn mu_thread_launch(id: MuID, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> JoinHandle<()> {
465
        let new_sp = stack.sp;
466
        let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
467
468
        debug!("entry : 0x{:x}", entry);
        
qinsoon's avatar
qinsoon committed
469
        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
470
            let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
471
472
            
            // set thread local
473
474
            unsafe {set_thread_local(muthread)};
            
qinsoon's avatar
qinsoon committed
475
            let addr = unsafe {muentry_get_thread_local()};
476
            let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
477
478
479
            
            debug!("new sp: 0x{:x}", new_sp);
            debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
480
            
481
482
483
            unsafe {
                swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc); 
            }
484
485
            
            debug!("returned to Rust stack. Going to quit");
qinsoon's avatar
qinsoon committed
486
487
488
489
490
        }) {
            Ok(handle) => handle,
            Err(_) => panic!("failed to create a thread")
        }
    }
491
492
}

qinsoon's avatar
qinsoon committed
493
#[derive(Debug, RustcEncodable, RustcDecodable)]
494
495
pub struct MuPrimordialThread {
    pub func_id: MuID,
qinsoon's avatar
qinsoon committed
496
497

    pub has_const_args: bool,
498
    pub args: Vec<Constant>
Kunshan Wang's avatar
Kunshan Wang committed
499
}