WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO has recommended that users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

thread.rs 16.1 KB
Newer Older
qinsoon's avatar
qinsoon committed
1
#![allow(dead_code)]
qinsoon's avatar
qinsoon committed
2
3

use ast::ir::*;
qinsoon's avatar
qinsoon committed
4
5
6
use ast::ptr::*;
use ast::types::*;
use vm::VM;
7
use runtime;
8
use runtime::ValueLocation;
9
use runtime::mm;
qinsoon's avatar
qinsoon committed
10

qinsoon's avatar
qinsoon committed
11
use utils::ByteSize;
qinsoon's avatar
qinsoon committed
12
use utils::Address;
qinsoon's avatar
qinsoon committed
13
use utils::Word;
qinsoon's avatar
qinsoon committed
14
15
16
use utils::mem::memmap;
use utils::mem::memsec;

qinsoon's avatar
qinsoon committed
17
use std::ptr;
qinsoon's avatar
qinsoon committed
18
19
use std::thread;
use std::thread::JoinHandle;
20
use std::sync::Arc;
21
use std::fmt;
qinsoon's avatar
qinsoon committed
22
23
24

/// Size of the usable portion of every Mu stack (guard pages not included).
pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

/// Size of each stack guard page (must match the OS page size for mprotect).
#[cfg(target_arch = "aarch64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

/// Size of each stack guard page (must match the OS page size for mprotect).
#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

// derive the MuEntity boilerplate (id/name accessors) for both runtime types
impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

34
/// A Mu stack: a contiguous mmap'd region with a no-access guard page at each
/// end, plus the saved machine state needed to resume execution on it.
///
/// NOTE(review): `#[repr(C)]` suggests the field layout is relied upon from
/// outside Rust (e.g. the swap-stack assembly) — confirm before reordering.
#[repr(C)]
pub struct MuStack {
    pub hdr: MuEntityHeader,

    // entry function for this stack: (address, id); None for a faked stack
    // that represents a pre-existing native thread stack
    func: Option<(ValueLocation, MuID)>,
    
    // usable stack size in bytes (excludes the two guard pages)
    size: ByteSize,
    //    lo addr                                                    hi addr
    //     | overflow guard page | actual stack ..................... | underflow guard page|
    //     |                     |                                    |                     |
    // overflowGuard           lowerBound                           upperBound
    //                                                              underflowGuard    
    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,
    
    // this frame pointers should only be used when stack is not active
    sp : Address,
    bp : Address,
    ip : Address,
    
    // life-cycle state; see MuStackState
    state: MuStackState,
    // keeps the anonymous mapping alive for the lifetime of the stack;
    // None when the stack memory is not owned by us (faked native stack)
    #[allow(dead_code)]
    mmap           : Option<memmap::Mmap>
}

impl MuStack {
    /// Allocates a new stack for the given function.
    ///
    /// Maps `PAGE_SIZE * 2 + STACK_SIZE` bytes anonymously, then revokes all
    /// access on the first and last page so running off either end of the
    /// stack faults immediately (see the diagram on the struct).
    ///
    /// Panics if the mmap fails.
    pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
        // two guard pages plus the usable stack
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;
        
        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };
        
        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
        
        // carve the mapping into guard page / stack / guard page
        let overflow_guard = mmap_start;
        let lower_bound = mmap_start.plus(PAGE_SIZE);
        let upper_bound = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;
        
        // make both guard pages inaccessible
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }
        
        debug!("creating stack {} with entry func {:?}", id, func);
        debug!("overflow_guard : {}", overflow_guard);
        debug!("lower_bound    : {}", lower_bound);
        debug!("upper_bound    : {}", upper_bound);
        debug!("underflow_guard: {}", underflow_guard);
        
        MuStack {
            hdr: MuEntityHeader::unnamed(id),
            func: Some((func_addr, func.id())),
            
            state: MuStackState::Ready(func.sig.arg_tys.clone()),
            
            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            // equal to the local `underflow_guard` by construction
            underflow_guard: upper_bound,
            
            // the stack grows downwards, so sp/bp start at the high end
            sp: upper_bound,
            bp: upper_bound,
            ip: unsafe {Address::zero()},
            
            mmap: Some(anon_mmap)
        }
    }

    #[cfg(target_arch = "aarch64")]
    // TODO: What will happen if some things need to be loaded on the stack?
    // TODO: Should we save XR (X8, the indirect locations result register)
    // (NOTE: Any changes to here need to be reflected in swap_to_mu_stack)
    /// Writes the given argument values onto this (not-yet-running) stack so
    /// that `swap_to_mu_stack` can pop them into the argument registers.
    ///
    /// One word is pushed per *argument register* (not per argument): unused
    /// registers get a zero slot so the pop sequence in the assembly stays a
    /// fixed length. FPR slots are pushed first, then GPR slots.
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::aarch64;

        // split the argument values by the register class they travel in
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];

        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();

            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }

        // push one slot per FPR argument register, zero-filling unused ones
        let mut stack_ptr = self.sp;
        for i in 0..aarch64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // then one slot per GPR argument register
        for i in 0..aarch64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };

            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;

        self.print_stack(Some(20));
    }

    #[cfg(target_arch = "x86_64")]
    /// x86_64 variant of `runtime_load_args` — same scheme as the aarch64
    /// version above, using the x86_64 argument register lists.
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::x86_64;
        
        // split the argument values by the register class they travel in
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];
        
        for i in 0..vals.len() {
            let ref val = vals[i];
            let (reg_group, word) = val.load_value();
            
            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }
        
        // push one slot per FPR argument register, zero-filling unused ones
        let mut stack_ptr = self.sp;
        for i in 0..x86_64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        // then one slot per GPR argument register
        for i in 0..x86_64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        // save it back
        self.sp = stack_ptr;
        
        self.print_stack(Some(20));
    }
    
    /// Dumps up to `n_entries` stack words (from `upper_bound` downwards) via
    /// `debug!`, marking the word the current SP points at.
    /// `None` means print the whole stack.
    pub fn print_stack(&self, n_entries: Option<usize>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        
        // start at the highest word of the usable stack and walk down
        let mut cursor = self.upper_bound.sub(WORD_SIZE);
        let mut count = 0;
        
        debug!("0x{:x} | UPPER_BOUND", self.upper_bound);
        while cursor >= self.lower_bound {
            let val = unsafe{cursor.load::<Word>()};
            
            if cursor == self.sp {
                debug!("0x{:x} | 0x{:x} ({}) <- SP", cursor, val, val);
            } else {
                debug!("0x{:x} | 0x{:x} ({})", cursor, val, val);
            }
            
            cursor = cursor.sub(WORD_SIZE);
            count += 1;
            
            if n_entries.is_some() && count > n_entries.unwrap() {
                debug!("...");
                break;
            }
        }
        
        debug!("0x{:x} | LOWER_BOUND", self.lower_bound);
    }

    /// Prints a backtrace of this stack. TODO: not implemented yet.
    pub fn print_backtrace(&self) {

    }
}

qinsoon's avatar
qinsoon committed
258
259
260
261
262
263
/// Life-cycle state of a MuStack.
pub enum MuStackState {
    Ready(Vec<P<MuType>>), // ready to resume when values of given types are supplied (can be empty)
    Active,                // currently being executed (or faking an existing native stack)
    Dead                   // no longer usable
}

264
/// Per-thread runtime state for a Mu thread.
///
/// NOTE: `#[repr(C)]` field order matters — the `*_OFFSET` statics below are
/// computed from this layout and presumably consumed by the runtime/assembly
/// via those byte offsets; do not reorder fields without checking them.
#[repr(C)]
pub struct MuThread {
    pub hdr: MuEntityHeader,
    // GC mutator context for this thread; dropped in cleanup_current_mu_thread
    pub allocator: mm::Mutator,
    // the Mu stack this thread runs on (None while detached)
    pub stack: Option<Box<MuStack>>,
    
    // where the native (Rust) SP is saved while executing on the Mu stack
    pub native_sp_loc: Address,
    pub user_tls: Address, // can be zero

    // currently-pending exception object, if any
    pub exception_obj: Address,
    pub vm: Arc<VM>
}

277
278
// this depends on the layout of MuThread
// Byte offsets of MuThread fields, used to address fields from generated
// code/assembly instead of going through Rust accessors.
lazy_static! {
    pub static ref ALLOCATOR_OFFSET     : usize = offset_of!(MuThread=>allocator).get_byte_offset();
    pub static ref NATIVE_SP_LOC_OFFSET : usize = offset_of!(MuThread=>native_sp_loc).get_byte_offset();
    pub static ref USER_TLS_OFFSET      : usize = offset_of!(MuThread=>user_tls).get_byte_offset();
    pub static ref EXCEPTION_OBJ_OFFSET : usize = offset_of!(MuThread=>exception_obj).get_byte_offset();
}

impl fmt::Display for MuThread {
    /// Multi-line dump of the thread's fields and their addresses.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Propagate formatting errors with `?` instead of `.unwrap()`:
        // panicking inside Display::fmt aborts formatting (e.g. when the
        // writer runs out of space) rather than reporting fmt::Error.
        write!(f, "MuThread    @{:?}: {}\n", self as *const MuThread, self.hdr)?;
        write!(f, "- header    @{:?}\n",       &self.hdr as *const MuEntityHeader)?;
        write!(f, "- allocator @{:?}\n",       &self.allocator as *const mm::Mutator)?;
        write!(f, "- stack     @{:?}: {}\n", &self.stack as *const Option<Box<MuStack>>, self.stack.is_some())?;
        write!(f, "- native sp @{:?}: {}\n", &self.native_sp_loc as *const Address, self.native_sp_loc)?;
        write!(f, "- user_tls  @{:?}: {}\n", &self.user_tls as *const Address, self.user_tls)?;
        write!(f, "- exc obj   @{:?}: {}\n", &self.exception_obj as *const Address, self.exception_obj)?;

        Ok(())
    }
}

299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
// Assembly routines (swap_stack library) for switching between the native
// Rust stack and a Mu stack — aarch64 build.
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

// Runtime-library accessors for the per-thread MuThread pointer — aarch64.
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
#[allow(improper_ctypes)]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
}

// Runtime-library accessors for the per-thread MuThread pointer — x86_64.
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
}

// Assembly routines (swap_stack library) for stack switching — x86_64 build.
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

qinsoon's avatar
qinsoon committed
338
impl MuThread {
    /// Creates a MuThread bound to the given stack. This only builds the
    /// struct — no OS thread is started; see `mu_thread_launch`.
    pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> MuThread {
        MuThread {
            hdr: MuEntityHeader::unnamed(id),
            allocator: allocator,
            stack: Some(stack),
            // filled in by the swap-stack code when the thread actually swaps
            native_sp_loc: unsafe {Address::zero()},
            user_tls: user_tls,
            vm: vm,
            exception_obj: unsafe {Address::zero()}
        }
    }
    
    /// Returns true if the current OS thread has a MuThread attached
    /// (its thread-local pointer is non-zero).
    #[inline(always)]
    pub fn has_current() -> bool {
        ! unsafe {muentry_get_thread_local()}.is_zero()
    }
    
    /// Returns the current MuThread.
    /// Panics if the current thread has none — check `has_current()` first.
    #[inline(always)]
    pub fn current() -> &'static MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr::<MuThread>().as_ref().unwrap()
        }
    }
    
    /// Mutable variant of `current()`; same panic behaviour.
    #[inline(always)]
    pub fn current_mut() -> &'static mut MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr_mut::<MuThread>().as_mut().unwrap()
        }
    }

    #[allow(unused_unsafe)]
    // pieces of this function are not safe (we want to mark it unsafe)
    // this function is exposed as unsafe because it is not always safe to call it
    /// Attaches a MuThread (with a faked MuStack describing the existing
    /// native stack) to the current OS thread and stores it in the
    /// thread-local. Pairs with `cleanup_current_mu_thread`.
    ///
    /// returns true if we have created MuThread on this call
    /// (false means we had MuThread for current thread before)
    pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
        use std::usize;

        // already attached? nothing to do
        if ! unsafe{muentry_get_thread_local()}.is_zero() {
            warn!("current thread has a thread local (has a muthread to it)");
            return false;
        }

        // fake a stack for current thread
        let fake_mu_stack_for_cur = Box::new(MuStack {
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            func: None,

            state: MuStackState::Active,

            // we do not know anything about current stack
            // treat it as max size
            size: usize::MAX,
            overflow_guard: unsafe {Address::zero()},
            lower_bound: unsafe {Address::zero()},
            upper_bound: unsafe {Address::max()},
            underflow_guard: unsafe {Address::max()},

            // these will only be used when stack is not active (we still save to these fields)
            // their values do not matter now
            sp: unsafe {Address::zero()},
            bp: unsafe {Address::zero()},
            ip: unsafe {Address::zero()},

            // we are not responsible for keeping the memory alive
            mmap: None,
        });

        let fake_mu_thread = MuThread {
            hdr: MuEntityHeader::unnamed(vm.next_id()),

            // valid allocator and stack
            allocator: mm::new_mutator(),
            stack: Some(fake_mu_stack_for_cur),

            // we do not need native_sp_loc (we do not expect this thread to
            // swap stacks)
            native_sp_loc: unsafe {Address::zero()},
            user_tls: threadlocal,

            vm: vm,
            exception_obj: unsafe {Address::zero()}
        };

        // into_raw: ownership is handed to the thread-local; reclaimed by
        // cleanup_current_mu_thread's Box::from_raw
        let ptr_fake_mu_thread : *mut MuThread = Box::into_raw(Box::new(fake_mu_thread));

        // set thread local
        unsafe {set_thread_local(ptr_fake_mu_thread)};

//        let addr = unsafe {muentry_get_thread_local()};
//        let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
//
//        unsafe {
//            fake_swap_mu_thread(sp_threadlocal_loc);
//        }

        true
    }

    /// turn this current mu thread back as normal thread
    /// (drops the allocator, frees the MuThread and zeroes the thread-local)
    #[allow(unused_variables)]
    pub unsafe fn cleanup_current_mu_thread() {
        let mu_thread_addr = muentry_get_thread_local();

        if !mu_thread_addr.is_zero() {
            let mu_thread : *mut MuThread = mu_thread_addr.to_ptr_mut();
            // release the GC mutator before reclaiming the MuThread itself
            mm::drop_mutator(&mut (*mu_thread).allocator as *mut mm::Mutator);

            // re-box the raw pointer so it is dropped at end of scope
            let mu_thread : Box<MuThread> = Box::from_raw(mu_thread);

            // set thread local to zero
            set_thread_local(ptr::null_mut())

            // drop mu_thread here
        }
    }
    
    /// Loads `vals` as arguments onto `stack`, then launches a new OS thread
    /// that starts executing the stack's entry function.
    pub fn new_thread_normal(mut stack: Box<MuStack>, threadlocal: Address, vals: Vec<ValueLocation>, vm: Arc<VM>) -> JoinHandle<()> {
        // set up arguments on stack
        stack.runtime_load_args(vals);
        
        MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
    }
    
    /// Spawns an OS thread, attaches a fresh MuThread to it via the
    /// thread-local, and swaps onto the Mu stack to run its entry function.
    ///
    /// Panics if the entry function cannot be resolved or the OS thread
    /// cannot be created.
    #[no_mangle]
    #[allow(unused_variables)]
    pub extern fn mu_thread_launch(id: MuID, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> JoinHandle<()> {
        let new_sp = stack.sp;
        // resolve the stack's entry function symbol to a machine address
        let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
        debug!("entry : 0x{:x}", entry);
        
        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
            // into_raw: the MuThread's lifetime is tied to the thread-local,
            // not to this closure frame
            let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
            
            // set thread local
            unsafe {set_thread_local(muthread)};
            
            // slot where the native SP gets saved during the stack swap
            let addr = unsafe {muentry_get_thread_local()};
            let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
            
            debug!("new sp: 0x{:x}", new_sp);
            debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
            
            // run the Mu code; returns when the Mu stack swaps back
            unsafe {
                swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc); 
            }
            
            debug!("returned to Rust stack. Going to quit");
        }) {
            Ok(handle) => handle,
            Err(_) => panic!("failed to create a thread")
        }
    }
}

qinsoon's avatar
qinsoon committed
494
/// Serializable description of the primordial Mu thread: the id of its entry
/// function and, optionally, baked-in constant arguments.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct MuPrimordialThread {
    pub func_id: MuID,

    // true when `args` carries constant arguments for the entry function
    pub has_const_args: bool,
    pub args: Vec<Constant>
}