#![allow(dead_code)]

use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use vm::VM;
use runtime;
use runtime::ValueLocation;
use runtime::mm;

use utils::ByteSize;
use utils::Address;
qinsoon's avatar
qinsoon committed
13
use utils::Word;
use utils::mem::memmap;
use utils::mem::memsec;

use std::mem;
use std::thread;
use std::thread::JoinHandle;
use std::sync::Arc;

pub const STACK_SIZE : ByteSize = (4 << 20); // 4mb

#[cfg(target_arch = "x86_64")]
pub const PAGE_SIZE  : ByteSize = (4 << 10); // 4kb

impl_mu_entity!(MuThread);
impl_mu_entity!(MuStack);

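// A Mu stack: a mmap'ed region with a guard page at each end, plus the saved
// sp/bp/ip context that is used while the stack is not running.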
#[repr(C)]
pub struct MuStack {
    pub hdr: MuEntityHeader,

    // address, id
    func: Option<(ValueLocation, MuID)>,
    
    size: ByteSize,
    //    lo addr                                                    hi addr
    //     | overflow guard page | actual stack ..................... | underflow guard page|
    //     |                     |                                    |                     |
    // overflowGuard           lowerBound                           upperBound
    //                                                              underflowGuard    
    overflow_guard : Address,
    lower_bound    : Address,
    upper_bound    : Address,
    underflow_guard: Address,
    
    // these pointers (sp/bp/ip) should only be used when the stack is not active
    sp : Address,
    bp : Address,
    ip : Address,
    
    state: MuStackState,
    #[allow(dead_code)]
    mmap           : Option<memmap::Mmap>
}

impl MuStack {
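    // Allocates a new stack for the given function: maps PAGE_SIZE * 2 + STACK_SIZE
    // bytes anonymously, then protects the first and last page as the
    // overflow/underflow guard pages.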
    pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
        let total_size = PAGE_SIZE * 2 + STACK_SIZE;
        
        let anon_mmap = match memmap::Mmap::anonymous(total_size, memmap::Protection::ReadWrite) {
            Ok(m) => m,
            Err(_) => panic!("failed to mmap for a stack"),
        };
        
        let mmap_start = Address::from_ptr(anon_mmap.ptr());
        debug_assert!(mmap_start.is_aligned_to(PAGE_SIZE));
        
        let overflow_guard = mmap_start;
        let lower_bound = mmap_start.plus(PAGE_SIZE);
        let upper_bound = lower_bound.plus(STACK_SIZE);
        let underflow_guard = upper_bound;
        
        unsafe {
            memsec::mprotect(overflow_guard.to_ptr_mut::<u8>(),  PAGE_SIZE, memsec::Prot::NoAccess);
            memsec::mprotect(underflow_guard.to_ptr_mut::<u8>(), PAGE_SIZE, memsec::Prot::NoAccess);
        }
        
        debug!("creating stack {} with entry func {:?}", id, func);
        debug!("overflow_guard : {}", overflow_guard);
        debug!("lower_bound    : {}", lower_bound);
        debug!("upper_bound    : {}", upper_bound);
        debug!("underflow_guard: {}", underflow_guard);
        
        MuStack {
            hdr: MuEntityHeader::unnamed(id),
            func: Some((func_addr, func.id())),
            
            state: MuStackState::Ready(func.sig.arg_tys.clone()),
            
            size: STACK_SIZE,
            overflow_guard: overflow_guard,
            lower_bound: lower_bound,
            upper_bound: upper_bound,
            underflow_guard: underflow_guard,
            
            sp: upper_bound,
            bp: upper_bound,
            ip: unsafe {Address::zero()},
            
            mmap: Some(anon_mmap)
        }
    }
    
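    // Lays out the initial argument values on this (not-yet-running) stack:
    // one word per argument FPR, then one word per argument GPR, with unused
    // slots zero-filled. The stack-swapping code presumably pops these into the
    // corresponding registers on entry (assumption, not verified here).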
    #[cfg(target_arch = "x86_64")]
    pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        use compiler::backend::RegGroup;
        use compiler::backend::x86_64;
        
        let mut gpr_used = vec![];
        let mut fpr_used = vec![];
        
        for val in vals.iter() {
            let (reg_group, word) = val.load_value();
            
            match reg_group {
                RegGroup::GPR => gpr_used.push(word),
                RegGroup::FPR => fpr_used.push(word),
            }
        }
        
        let mut stack_ptr = self.sp;
        for i in 0..x86_64::ARGUMENT_FPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < fpr_used.len() {
                    fpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }
        
        for i in 0..x86_64::ARGUMENT_GPRs.len() {
            stack_ptr = stack_ptr.sub(WORD_SIZE);
            let val = {
                if i < gpr_used.len() {
                    gpr_used[i]
                } else {
                    0 as Word
                }
            };
            
            debug!("store {} to {}", val, stack_ptr);
            unsafe {stack_ptr.store(val);}
        }

        // save it back
        self.sp = stack_ptr;
        
        self.print_stack(Some(20));
    }
    
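    // Debugging helper: dumps stack words from upper_bound downwards, marking
    // where SP points, and stops after `n_entries` words when a limit is given.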
    pub fn print_stack(&self, n_entries: Option<usize>) {
        use compiler::backend::Word;
        use compiler::backend::WORD_SIZE;
        
        let mut cursor = self.upper_bound.sub(WORD_SIZE);
        let mut count = 0;
        
        println!("0x{:x} | UPPER_BOUND", self.upper_bound); 
        while cursor >= self.lower_bound {
            let val = unsafe{cursor.load::<Word>()};
            print!("0x{:x} | 0x{:x} ({})", cursor, val, val);
            
            if cursor == self.sp {
                print!(" <- SP");
            }
            
            println!("");
            
            cursor = cursor.sub(WORD_SIZE);
            count += 1;
            
            if n_entries.is_some() && count > n_entries.unwrap() {
                println!("...");
                break;
            }
        }
        
        println!("0x{:x} | LOWER_BOUND", self.lower_bound); 
    }
}

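// The lifecycle states of a MuStack.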
pub enum MuStackState {
    Ready(Vec<P<MuType>>), // ready to resume when values of given types are supplied (can be empty)
    Active,
    Dead
}

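// A Mu thread: its allocator, the stack it is bound to, the location where the
// native SP is saved while Mu code is running, user thread-local storage, a
// reference to the VM, and a slot for a pending exception object.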
#[repr(C)]
#[allow(improper_ctypes)]
// do not change the layout (unless the field offsets computed below are changed correspondingly)
pub struct MuThread {
    pub hdr: MuEntityHeader,
    allocator: mm::Mutator,
    pub stack: Option<Box<MuStack>>,
    
    native_sp_loc: Address,
    user_tls: Address, // can be zero
    
    pub vm: Arc<VM>,
    pub exception_obj: Address
}

// offsets of MuThread fields; these depend on the #[repr(C)] layout of MuThread above
// and must be kept in sync with it
lazy_static! {
    pub static ref ALLOCATOR_OFFSET : usize = mem::size_of::<MuEntityHeader>();

    pub static ref NATIVE_SP_LOC_OFFSET : usize = *ALLOCATOR_OFFSET
                + mem::size_of::<mm::Mutator>()   // the allocator field is declared as mm::Mutator, not Box<mm::Mutator>
                + mem::size_of::<Option<Box<MuStack>>>();

    pub static ref USER_TLS_OFFSET : usize = *NATIVE_SP_LOC_OFFSET + mem::size_of::<Address>();
    
    pub static ref VM_OFFSET : usize = *USER_TLS_OFFSET + mem::size_of::<Address>();

    pub static ref EXCEPTION_OBJ_OFFSET : usize = *VM_OFFSET + mem::size_of::<Arc<VM>>();                
}

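// Native functions from the `runtime` library that manage the MuThread
// thread-local for the current OS thread.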
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "runtime")]
extern "C" {
    pub fn set_thread_local(thread: *mut MuThread);
    pub fn muentry_get_thread_local() -> Address;
}

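// Native stack-swapping primitives from the `swap_stack` library.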
#[cfg(target_arch = "x86_64")]
#[cfg(any(target_os = "macos", target_os = "linux"))]
#[link(name = "swap_stack")]
extern "C" {
    fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
    fn fake_swap_mu_thread(old_sp_loc: Address);
    fn muentry_swap_back_to_native_stack(sp_loc: Address);
    pub fn get_current_frame_rbp() -> Address;
    pub fn exception_restore(dest: Address, callee_saved: *const Word, rsp: Address) -> !;
}

impl MuThread {
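    // Creates a MuThread bound to the given stack; native_sp_loc and
    // exception_obj start out as zero addresses.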
    pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> MuThread {
        MuThread {
            hdr: MuEntityHeader::unnamed(id),
            allocator: allocator,
            stack: Some(stack),
            native_sp_loc: unsafe {Address::zero()},
            user_tls: user_tls,
            vm: vm,
            exception_obj: unsafe {Address::zero()}
        }
    }
    
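    // Returns a reference to the MuThread installed in this OS thread's
    // thread-local storage; panics (unwrap on a null pointer) if none has been
    // set. current_mut() below is the mutable counterpart.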
    #[inline(always)]
    pub fn current() -> &'static MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr::<MuThread>().as_ref().unwrap()
        }
    }
    
    #[inline(always)]
    pub fn current_mut() -> &'static mut MuThread {
        unsafe{
            muentry_get_thread_local().to_ptr_mut::<MuThread>().as_mut().unwrap()
        }
    }

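    // Registers the current (non-Mu, Rust-created) OS thread as a MuThread:
    // fakes a MuStack/MuThread pair for it and installs the MuThread as the
    // thread-local, so runtime code that relies on the thread-local can run here.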
    #[allow(unused_unsafe)]
    // parts of this function are unsafe (hence the #[allow(unused_unsafe)]), and
    // it is not always safe to call, so it is exposed as an unsafe fn
    pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) {
        use std::usize;

        // fake a stack for current thread
        let fake_mu_stack_for_cur = Box::new(MuStack {
            hdr: MuEntityHeader::unnamed(vm.next_id()),
            func: None,

            state: MuStackState::Active,

            // we do not know anything about current stack
            // treat it as max size
            size: usize::MAX,
            overflow_guard: unsafe {Address::zero()},
            lower_bound: unsafe {Address::zero()},
            upper_bound: unsafe {Address::max()},
            underflow_guard: unsafe {Address::max()},

            // these will only be used when stack is not active (we still save to these fields)
            // their values do not matter now
            sp: unsafe {Address::zero()},
            bp: unsafe {Address::zero()},
            ip: unsafe {Address::zero()},

            // we are not responsible for keeping the memory alive
            mmap: None,
        });

        let fake_mu_thread = MuThread {
            hdr: MuEntityHeader::unnamed(vm.next_id()),

            // valid allocator and stack
            allocator: mm::new_mutator(),
            stack: Some(fake_mu_stack_for_cur),

            // we do not need native_sp_loc (we do not expect this thread to swap to a Mu stack)
            native_sp_loc: unsafe {Address::zero()},
            user_tls: threadlocal,

            vm: vm,
            exception_obj: unsafe {Address::zero()}
        };

        let ptr_fake_mu_thread : *mut MuThread = Box::into_raw(Box::new(fake_mu_thread));

        // set thread local
        unsafe {set_thread_local(ptr_fake_mu_thread)};

//        let addr = unsafe {muentry_get_thread_local()};
//        let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
//
//        unsafe {
//            fake_swap_mu_thread(sp_threadlocal_loc);
//        }

        debug!("returned to Rust thread (fake muthread)");
    }
    
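    // Creates and starts a normal Mu thread: loads `vals` as arguments onto the
    // stack, then launches it on a new OS thread.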
    pub fn new_thread_normal(mut stack: Box<MuStack>, threadlocal: Address, vals: Vec<ValueLocation>, vm: Arc<VM>) -> JoinHandle<()> {
        // set up arguments on stack
        stack.runtime_load_args(vals);
        
        MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
    }
    
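    // Spawns an OS thread that installs a fresh MuThread as its thread-local,
    // then swaps onto the Mu stack (at stack.sp) and starts executing the
    // stack's entry function; control returns to the closure when the Mu code
    // swaps back to the native stack.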
    #[no_mangle]
    #[allow(unused_variables)]
    pub extern fn mu_thread_launch(id: MuID, stack: Box<MuStack>, user_tls: Address, vm: Arc<VM>) -> JoinHandle<()> {
        let new_sp = stack.sp;
        let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
        debug!("entry : 0x{:x}", entry);
        
        match thread::Builder::new().name(format!("Mu Thread #{}", id)).spawn(move || {
            let muthread : *mut MuThread = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
            
            // set thread local
            unsafe {set_thread_local(muthread)};
            
            let addr = unsafe {muentry_get_thread_local()};
            let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
            
            debug!("new sp: 0x{:x}", new_sp);
            debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
            
            unsafe {
                swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc); 
            }
            
            debug!("returned to Rust stack. Going to quit");
        }) {
            Ok(handle) => handle,
            Err(_) => panic!("failed to create a thread")
        }
    }
}

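// Description of the primordial Mu thread: the function it starts with and the
// constant arguments it is given (derives RustcEncodable/RustcDecodable so it
// can be serialized, presumably as part of a saved VM image).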
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct MuPrimordialThread {
    pub func_id: MuID,
    pub args: Vec<Constant>
}