// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use rodal;
use ast::ptr::*;
use ast::ir::*;
use ast::inst::*;
use ast::types;
use ast::types::*;
use compiler::{Compiler, CompilerPolicy};
use compiler::backend;
use compiler::backend::BackendType;
use compiler::machine_code::{CompiledFunction, CompiledCallsite};

use runtime::thread::*;
use runtime::*;
use utils::ByteSize;
use utils::BitSize;
use utils::Address;
use runtime::mm as gc;
use vm::handle::*;
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;

use log::LogLevel;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Mutex;
use std::sync::RwLockWriteGuard;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::thread::JoinHandle;
use std::collections::LinkedList;

use std;
use utils::bit_utils::{bits_ones, u64_asr};

/// The VM struct. This stores metadata for the currently running Zebu instance.
/// This struct gets persisted in the boot image, and when the boot image is loaded,
/// everything should be back to the same status as before persisting.
///
/// This struct is usually used as Arc<VM> so it can be shared among threads. The
/// Arc<VM> is stored in every thread local of a Mu thread, so that they can refer
/// to the VM easily.
///
/// We are using fine-grained locks on the VM to allow mutability on different fields in the VM.
/// Also we use two-level locks for some data structures such as MuFunction/
/// MuFunctionVersion/CompiledFunction so that we can mutate two
/// different functions/funcvers/etc at the same time.

//  FIXME: However, there are problems with fine-grained lock design,
//  and we will need to rethink. See Issue #2.
//  TODO: besides fields in VM, there are some 'globals' we need to persist
//  such as STRUCT_TAG_MAP, INTERNAL_ID and internal types from the ir crate. The point is
//  the ir crate should be independent and self-contained. But when persisting the 'world',
//  besides persisting the VM struct (containing most of the 'world'), we also need to
//  specifically persist those globals.
pub struct VM {
    // The comments are the offset into the struct
    // ---serialize---
    /// next MuID to assign
    next_id: AtomicUsize, // +0
    /// a map from MuID to MuName (for client to query)
    id_name_map: RwLock<HashMap<MuID, MuName>>, // +8
    /// a map from MuName to ID (for client to query)
    name_id_map: RwLock<HashMap<MuName, MuID>>, // +64
    /// types declared to the VM
    types: RwLock<HashMap<MuID, P<MuType>>>, // +120
    /// Ref types declared by 'make_strong_type', the key is the ID of the referent
    ref_types: RwLock<HashMap<MuID, P<MuType>>>,
    /// types that are resolved as BackendType
    backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>,
    /// constants declared to the VM
    constants: RwLock<HashMap<MuID, P<Value>>>,
    /// globals declared to the VM
    globals: RwLock<HashMap<MuID, P<Value>>>,
    /// function signatures declared
    func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>,
    /// functions declared to the VM
    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>,
    /// primordial function that is set to make the boot image
    primordial: RwLock<Option<PrimordialThreadInfo>>,

    /// current options for this VM
    pub vm_options: VMOptions, // +624

    // ---partially serialize---
    /// compiled functions
    /// (we are not persisting generated code with compiled function)
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728

    /// maps each function version to a map from each of its callsites
    /// to the name of the catch block
    callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784

    // ---do not serialize---
    /// global cell locations. We use this map to create handles for global cells,
    /// or dump globals into the boot image. (this map does not get persisted because
    /// the locations change between runs)
    global_locations: RwLock<HashMap<MuID, ValueLocation>>,
    func_vers: RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>>,

    /// all the funcrefs that clients want to store for AOT, and whose stores are pending.
    /// In the AOT scenario, when a client tries to store a funcref to the heap, the store
    /// happens before we have an actual address for the function, so we store a fake
    /// funcref; when generating the boot image, we fix it up with a relocatable symbol
    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,

    /// runtime callsite table for exception handling
    /// a map from callsite address to CompiledCallsite
    compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>, // 896

    /// Number of callsites in the callsite tables
    callsite_count: AtomicUsize,

    /// A list of all threads currently waiting to be joined
    pub pending_joins: Mutex<LinkedList<JoinHandle<()>>>
}
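
// Illustrative usage (a sketch, not part of this file's API surface): a client embedding
// Zebu typically creates the VM once and shares it behind an Arc, cloning that Arc into the
// thread-local state of each Mu thread so the threads can refer back to the VM:
//
//     let vm = Arc::new(VM::new());
//     let vm_for_mu_thread = vm.clone(); // stored in that Mu thread's local state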

unsafe impl rodal::Dump for VM {
    fn dump<D: ?Sized + rodal::Dumper>(&self, dumper: &mut D) {
        dumper.debug_record("VM", "dump");

        dumper.dump_object(&self.next_id);
        dumper.dump_object(&self.id_name_map);
        dumper.dump_object(&self.name_id_map);
        dumper.dump_object(&self.types);
        dumper.dump_object(&self.ref_types);
        dumper.dump_object(&self.backend_type_info);
        dumper.dump_object(&self.constants);
        dumper.dump_object(&self.globals);
        dumper.dump_object(&self.func_sigs);
        dumper.dump_object(&self.funcs);
        dumper.dump_object(&self.primordial);
        dumper.dump_object(&self.vm_options);
        dumper.dump_object(&self.compiled_funcs);
        dumper.dump_object(&self.callsite_table);

        // Dump empty maps so that we can safely read and modify them once loaded
        dumper.dump_padding(&self.global_locations);
        let global_locations = RwLock::new(rodal::EmptyHashMap::<MuID, ValueLocation>::new());
        dumper.dump_object_here(&global_locations);

        dumper.dump_padding(&self.func_vers);
        let func_vers = RwLock::new(
            rodal::EmptyHashMap::<MuID, RwLock<MuFunctionVersion>>::new()
        );
        dumper.dump_object_here(&func_vers);

        dumper.dump_padding(&self.aot_pending_funcref_store);
        let aot_pending_funcref_store =
            RwLock::new(rodal::EmptyHashMap::<Address, ValueLocation>::new());
        dumper.dump_object_here(&aot_pending_funcref_store);

        dumper.dump_padding(&self.compiled_callsite_table);
        let compiled_callsite_table =
            RwLock::new(rodal::EmptyHashMap::<Address, CompiledCallsite>::new());
        dumper.dump_object_here(&compiled_callsite_table);

        dumper.dump_object(&self.callsite_count);

        dumper.dump_padding(&self.pending_joins);
        let pending_joins = Mutex::new(rodal::EmptyLinkedList::<JoinHandle<()>>::new());
        dumper.dump_object_here(&pending_joins);
    }
}

/// a fake funcref to store for AOT when the client tries to store a funcref via the API
//  In the AOT scenario, when the client tries to store a funcref to the heap, the store
//  happens before we have an actual address for the function, so we store this fake
//  funcref and, when generating the boot image, fix it up with a relocatable symbol
const PENDING_FUNCREF: u64 = {
    use std::u64;
    u64::MAX
};

/// a macro to generate int8/16/32/64 from/to API calls
macro_rules! gen_handle_int {
    ($fn_from: ident, $fn_to: ident, $int_ty: ty) => {
        pub fn $fn_from (&self, num: $int_ty, len: BitSize) -> APIHandleResult {
            let handle_id = self.next_id();
            self.new_handle (APIHandle {
                id: handle_id,
                v: APIHandleValue::Int(num as u64, len)
            })
        }

        pub fn $fn_to (&self, handle: APIHandleArg) -> $int_ty {
            handle.v.as_int() as $int_ty
        }
    }
}
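
// Sketch of how the macro above is meant to be invoked inside the VM impl (the method names
// here are illustrative, not necessarily the ones the codebase actually declares):
//
//     gen_handle_int!(new_handle_int64, handle_to_uint64, u64);
//
// This would generate `new_handle_int64(&self, num: u64, len: BitSize) -> APIHandleResult`,
// which wraps the value as an APIHandleValue::Int, and `handle_to_uint64(&self, handle:
// APIHandleArg) -> u64`, which converts back via `as_int()`.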

impl Drop for VM {
    fn drop(&mut self) {
        self.destroy();
    }
}

impl<'a> VM {
    /// creates a VM with default options
    pub fn new() -> VM {
        VM::new_internal(VMOptions::default())
    }

    /// creates a VM with specified options
    pub fn new_with_opts(str: &str) -> VM {
        VM::new_internal(VMOptions::init(str))
    }

    /// internal function to create a VM with options
    #[cfg(not(feature = "sel4-rumprun"))]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT,
            pending_joins: Mutex::new(LinkedList::new())
        };

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }

    /// internal function to create a VM with options for sel4-rumprun
    /// default memory sizes are different from other platforms
    #[cfg(feature = "sel4-rumprun")]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let mut ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT,
            pending_joins: Mutex::new(LinkedList::new())
        };

        // currently, the default sizes don't work on the sel4-rumprun platform
        // this is due to memory allocation size limitations
        ret.vm_options.flag_gc_immixspace_size = 1 << 19;
        ret.vm_options.flag_gc_lospace_size = 1 << 19;

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }

    /// initializes runtime
    fn init_runtime(&self) {
        // init gc
        {
            let ref options = self.vm_options;
            gc::gc_init(
                options.flag_gc_immixspace_size,
                options.flag_gc_lospace_size,
                options.flag_gc_nthreads,
                !options.flag_gc_disable_collection
            );
        }
    }

    /// starts logging based on MuLogLevel flag
    fn start_logging(level: MuLogLevel) {
        use std::env;
        match level {
            MuLogLevel::None => {}
            MuLogLevel::Error => VM::start_logging_internal(LogLevel::Error),
            MuLogLevel::Warn => VM::start_logging_internal(LogLevel::Warn),
            MuLogLevel::Info => VM::start_logging_internal(LogLevel::Info),
            MuLogLevel::Debug => VM::start_logging_internal(LogLevel::Debug),
            MuLogLevel::Trace => VM::start_logging_internal(LogLevel::Trace),
            MuLogLevel::Env => {
                match env::var("MU_LOG_LEVEL") {
                    Ok(s) => VM::start_logging(MuLogLevel::from_string(s)),
                    _ => {} // Don't log
                }
            }
        }
    }
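
    // Usage note (a sketch, assuming a typical shell environment): starting the VM with
    // `MuLogLevel::Env` lets the level be chosen at run time through the MU_LOG_LEVEL
    // environment variable, e.g.
    //
    //     MU_LOG_LEVEL=trace ./my_zebu_client
    //
    // where `my_zebu_client` is a hypothetical binary embedding Zebu and the accepted level
    // strings are whatever `MuLogLevel::from_string` parses.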

    /// starts trace-level logging
    pub fn start_logging_trace() {
        VM::start_logging_internal(LogLevel::Trace)
    }

    /// starts logging based on MU_LOG_LEVEL environment variable
    pub fn start_logging_env() {
        VM::start_logging(MuLogLevel::Env)
    }

    /// starts logging based on Rust's LogLevel
    /// (this function actually initializes the logger and deals with errors)
    fn start_logging_internal(level: LogLevel) {
        use stderrlog;

        let verbose = match level {
            LogLevel::Error => 0,
            LogLevel::Warn => 1,
            LogLevel::Info => 2,
            LogLevel::Debug => 3,
            LogLevel::Trace => 4
        };

        match stderrlog::new().verbosity(verbose).init() {
            Ok(()) => { info!("logger initialized") }
            Err(e) => {
                error!(
                    "failed to init logger, probably already initialized: {:?}",
                    e
                )
            }
        }
    }

    /// cleans up the current VM
    fn destroy(&mut self) {
        gc::gc_destoy();
    }

    /// adds an exception callsite and catch block
    /// (later we will use this info to build an exception table for unwinding use)
    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();

        if table.contains_key(&fv) {
            table.get_mut(&fv).unwrap().push(callsite);
        } else {
            table.insert(fv, vec![callsite]);
        };
        // TODO: do we need a stronger ordering?
        self.callsite_count.fetch_add(1, Ordering::Relaxed);
    }

    /// resumes persisted VM. Ideally the VM should be back to the status when we started
    /// persisting it, except for a few fields that we do not want to persist.
    pub fn resume_vm(dumped_vm: *mut Arc<VM>) -> Arc<VM> {
        // load the vm back
        let vm = unsafe { rodal::load_asm_pointer_move(dumped_vm) };

        // initialize runtime
        vm.init_runtime();

        // construct exception table
        vm.build_callsite_table();

        // restore gc types
        {
            let type_info_guard = vm.backend_type_info.read().unwrap();
            let mut type_info_vec: Vec<Box<BackendType>> =
                type_info_guard.values().map(|x| x.clone()).collect();
            type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));

            let mut expect_id = 0;
            for ty_info in type_info_vec.iter() {
                use runtime::mm;

                let ref gc_type = ty_info.gc_type;

                if gc_type.id != expect_id {
                    debug_assert!(expect_id < gc_type.id);

                    while expect_id < gc_type.id {
                        use runtime::mm::common::gctype::GCType;

                        mm::add_gc_type(GCType::new_noreftype(0, 8));
                        expect_id += 1;
                    }
                }

                // now expect_id == gc_type.id
                debug_assert!(expect_id == gc_type.id);

                mm::add_gc_type(gc_type.as_ref().clone());
                expect_id += 1;
            }
        }
        // construct exception table
        vm.build_callsite_table();
        vm
    }
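
    // Illustrative call sequence (a sketch; the actual boot-image entry code lives elsewhere):
    // the generated boot image hands the persisted VM pointer back to `resume_vm`, e.g.
    //
    //     let vm: Arc<VM> = VM::resume_vm(persisted_vm_ptr);
    //
    // where `persisted_vm_ptr` is a hypothetical `*mut Arc<VM>` produced when the image was built.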

    /// builds a succinct exception table for fast query during exception unwinding
    /// We need this step because for AOT compilation, we do not know symbol addresses at
    /// compile time, and resolving symbol addresses during exception handling is expensive.
    /// Thus when the boot image gets executed, we first resolve symbols and store the results
    /// in another table for fast query.
    pub fn build_callsite_table(&self) {
        let callsite_table = self.callsite_table.read().unwrap();
        let compiled_funcs = self.compiled_funcs.read().unwrap();
        let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
        // TODO: Use a different ordering?
        compiled_callsite_table.reserve(self.callsite_count.load(Ordering::Relaxed));
        for (fv, callsite_list) in callsite_table.iter() {
            let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
            let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
            for callsite in callsite_list.iter() {
                compiled_callsite_table.insert(
                    resolve_symbol(callsite.name.clone()),
                    CompiledCallsite::new(
                        &callsite,
                        compiled_func.func_ver_id,
                        callee_saved_table.clone()
                    )
                );
            }
        }
    }
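
    // How the two tables relate (a sketch, assuming the unwinder knows the return address of
    // the callsite it is unwinding through): `callsite_table` is keyed by function-version ID
    // and holds the symbolic `Callsite`s recorded at compile time, while
    // `compiled_callsite_table` is keyed by the resolved address, so the exception-handling
    // path can do a single address lookup such as
    //
    //     let info = self.compiled_callsite_table.read().unwrap().get(&return_addr).cloned();
    //
    // without resolving any symbols while unwinding.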

    /// returns the next valid ID to use
    pub fn next_id(&self) -> MuID {
        // This only needs to be atomic, and does not need to be a synchronisation operation. The
        // only requirement for IDs is that all IDs obtained from `next_id()` are different. So
        // `Ordering::Relaxed` is sufficient.
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }

    /// are we doing AOT compilation? (feature = aot when building Zebu)
    pub fn is_doing_aot(&self) -> bool {
        return cfg!(feature = "aot");
    }

    /// are we doing JIT compilation? (feature = jit when building Zebu)
    pub fn is_doing_jit(&self) -> bool {
        return cfg!(feature = "jit");
    }

    /// informs the VM about a client-supplied name
    pub fn set_name(&self, entity: &MuEntity) {
        let id = entity.id();
        let name = entity.name();

        let mut map = self.id_name_map.write().unwrap();
        map.insert(id, name.clone());

        let mut map2 = self.name_id_map.write().unwrap();
        map2.insert(name, id);
    }

    /// returns the Mu ID for a client-supplied name
    /// This function should only be used by the client; the 'name' used internally may be
    /// slightly different due to removal of some special symbols in the MuName.
    /// See name_check() in ir.rs
    pub fn id_of(&self, name: &str) -> MuID {
        let map = self.name_id_map.read().unwrap();
        match map.get(&name.to_string()) {
            Some(id) => *id,
            None => panic!("cannot find id for name: {}", name)
        }
    }

    /// returns the client-supplied Mu name for a Mu ID
    /// This function should only be used by the client; the 'name' used internally may be
    /// slightly different due to removal of some special symbols in the MuName.
    /// See name_check() in ir.rs
    pub fn name_of(&self, id: MuID) -> MuName {
        let map = self.id_name_map.read().unwrap();
        map.get(&id).unwrap().clone()
    }

    /// declares a constant
    pub fn declare_const(&self, entity: MuEntityHeader, ty: P<MuType>, val: Constant) -> P<Value> {
        let ret = P(Value {
            hdr: entity,
            ty: ty,
            v: Value_::Constant(val)
        });

        let mut constants = self.constants.write().unwrap();
        self.declare_const_internal(&mut constants, ret.id(), ret.clone());

        ret
    }
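
    // Illustrative client-side sketch (assumes `vm: &VM`, an existing `int64_ty: P<MuType>`,
    // and that `Constant::Int` is the constant variant used for integer literals):
    //
    //     let forty_two = vm.declare_const(
    //         MuEntityHeader::unnamed(vm.next_id()),
    //         int64_ty.clone(),
    //         Constant::Int(42)
    //     );
    //
    // `forty_two` is then a P<Value> that later instructions can refer to by its ID.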

    /// adds a constant to the map (already acquired lock)
    fn declare_const_internal(
        &self,
        map: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
        val: P<Value>
    ) {
        debug_assert!(!map.contains_key(&id));

        info!("declare const #{} = {}", id, val);
        map.insert(id, val);
    }

    /// gets the constant P<Value> for a given Mu ID, panics if there is no constant with the ID
    pub fn get_const(&self, id: MuID) -> P<Value> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find const #{}", id)
        }
    }

    /// allocates memory for a constant that needs to be put in memory
    /// For AOT, we simply create a label for it, and let the code emitter allocate the memory
    #[cfg(feature = "aot")]
    pub fn allocate_const(&self, val: &P<Value>) -> ValueLocation {
        let id = val.id();
        let name = format!("CONST_{}_{}", id, val.name());

        ValueLocation::Relocatable(backend::RegGroup::GPR, Arc::new(name))
    }

    /// declares a global
    pub fn declare_global(&self, entity: MuEntityHeader, ty: P<MuType>) -> P<Value> {
        // create iref value for the global
        let global = P(Value {
            hdr: entity,
            ty: self.declare_type(
                MuEntityHeader::unnamed(self.next_id()),
                MuType_::iref(ty.clone())
            ),
            v: Value_::Global(ty)
        });

        let mut globals = self.globals.write().unwrap();
        let mut global_locs = self.global_locations.write().unwrap();
        self.declare_global_internal(&mut globals, &mut global_locs, global.id(), global.clone());

        global
    }

    /// adds the global to the map (already acquired lock), and allocates memory for it
    fn declare_global_internal(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        self.declare_global_internal_no_alloc(globals, id, val.clone());
        self.alloc_global(global_locs, id, val);
    }

    /// adds the global to the map (already acquired lock)
    /// when bulk declaring, we hold locks for everything and cannot resolve the backend type
    /// or do the allocation, so we add globals to the map first and allocate them later
    fn declare_global_internal_no_alloc(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
        val: P<Value>
    ) {
        debug_assert!(!globals.contains_key(&id));

        info!("declare global #{} = {}", id, val);
        globals.insert(id, val.clone());
    }

    /// allocates memory for a global cell
    fn alloc_global(
        &self,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        let backend_ty = self.get_backend_type_info(val.ty.get_referent_ty().unwrap().id());
        let loc = gc::allocate_global(val, backend_ty);
        trace!("allocate global #{} as {}", id, loc);
        global_locs.insert(id, loc);
    }

    /// declares a type
    pub fn declare_type(&self, entity: MuEntityHeader, ty: MuType_) -> P<MuType> {
        let ty = P(MuType { hdr: entity, v: ty });

        let mut types = self.types.write().unwrap();
        self.declare_type_internal(&mut types, ty.id(), ty.clone());

        ty
    }

    /// adds the type to the map (already acquired lock)
    fn declare_type_internal(
        &self,
        types: &mut RwLockWriteGuard<HashMap<MuID, P<MuType>>>,
        id: MuID,
        ty: P<MuType>
    ) {
        debug_assert!(!types.contains_key(&id));

        types.insert(id, ty.clone());
        info!("declare type #{} = {}", id, ty);

        // for struct/hybrid, also adds to struct/hybrid tag map
        if ty.is_struct() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let struct_map_guard = STRUCT_TAG_MAP.read().unwrap();
            let struct_inner = struct_map_guard.get(&tag).unwrap();
            trace!("  {}", struct_inner);
        } else if ty.is_hybrid() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let hybrid_map_guard = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_inner = hybrid_map_guard.get(&tag).unwrap();
            trace!("  {}", hybrid_inner);
        }
    }

    /// gets the type for a given Mu ID, panics if there is no type with the ID
    pub fn get_type(&self, id: MuID) -> P<MuType> {
        let type_lock = self.types.read().unwrap();
        match type_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find type #{}", id)
        }
    }

    /// declares a function signature
    pub fn declare_func_sig(
        &self,
        entity: MuEntityHeader,
        ret_tys: Vec<P<MuType>>,
        arg_tys: Vec<P<MuType>>
    ) -> P<MuFuncSig> {
        let ret = P(MuFuncSig {
            hdr: entity,
            ret_tys: ret_tys,
            arg_tys: arg_tys
        });

        let mut func_sigs = self.func_sigs.write().unwrap();
        self.declare_func_sig_internal(&mut func_sigs, ret.id(), ret.clone());

        ret
    }

    /// adds a function signature to the map (already acquired lock)
    fn declare_func_sig_internal(
        &self,
        sigs: &mut RwLockWriteGuard<HashMap<MuID, P<MuFuncSig>>>,
        id: MuID,
        sig: P<MuFuncSig>
    ) {
        debug_assert!(!sigs.contains_key(&id));

        info!("declare func sig #{} = {}", id, sig);
        sigs.insert(id, sig);
    }

    /// gets the function signature for a given ID, panics if there is no func sig with the ID
    pub fn get_func_sig(&self, id: MuID) -> P<MuFuncSig> {
        let func_sig_lock = self.func_sigs.read().unwrap();
        match func_sig_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find func sig #{}", id)
        }
    }

    /// declares a Mu function
    pub fn declare_func(&self, func: MuFunction) {
        let mut funcs = self.funcs.write().unwrap();

        self.declare_func_internal(&mut funcs, func.id(), func);
    }

    /// adds a Mu function to the map (already acquired lock)
    fn declare_func_internal(
        &self,
        funcs: &mut RwLockWriteGuard<HashMap<MuID, RwLock<MuFunction>>>,
        id: MuID,
        func: MuFunction
    ) {
        debug_assert!(!funcs.contains_key(&id));

        info!("declare func #{} = {}", id, func);
        funcs.insert(id, RwLock::new(func));
    }

    /// gets the function name for a function (by ID), panics if there is no function with the ID
    /// Note this name is the internal name, which is different from
    /// the client-supplied name from vm.name_of()
    pub fn get_name_for_func(&self, id: MuID) -> MuName {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().name(),
            None => panic!("cannot find name for Mu function #{}", id)
        }
    }

    /// gets the function signature for a function (by ID),
    /// panics if there is no function with the ID
    pub fn get_sig_for_func(&self, id: MuID) -> P<MuFuncSig> {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().sig.clone(),
            None => panic!("cannot find Mu function #{}", id)
        }
    }

    /// gets the current function version for a Mu function (by ID)
    /// returns None if the function does not exist, or no version is defined for the function
    pub fn get_cur_version_for_func(&self, fid: MuID) -> Option<MuID> {
        let funcs_guard = self.funcs.read().unwrap();
        match funcs_guard.get(&fid) {
            Some(rwlock_func) => {
                let func_guard = rwlock_func.read().unwrap();
                func_guard.cur_ver
            }
            None => None
        }
    }

    /// gets the address (as a ValueLocation) of a Mu function (by ID)
    pub fn get_address_for_func(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
        let func: &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();

        if self.is_doing_jit() {
            unimplemented!()
        } else {
            ValueLocation::Relocatable(backend::RegGroup::GPR, func.name())
        }
    }

    /// defines a function version
    pub fn define_func_version(&self, func_ver: MuFunctionVersion) {
        info!("define function version {}", func_ver);
        // add this funcver to map
        let func_ver_id = func_ver.id();
        {
            let mut func_vers = self.func_vers.write().unwrap();
            func_vers.insert(func_ver_id, RwLock::new(func_ver));
        }

        // acquire a reference to the func_ver
        let func_vers = self.func_vers.read().unwrap();
        let func_ver = func_vers.get(&func_ver_id).unwrap().write().unwrap();

        // change current version of the function to new version (obsolete old versions)
        let funcs = self.funcs.read().unwrap();
        // it should be declared before defining
        debug_assert!(funcs.contains_key(&func_ver.func_id));
        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();

        func.new_version(func_ver.id());

        if self.is_doing_jit() {
            // redefinition may happen, we need to check
            unimplemented!()
        }
    }

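    /// converts a possibly weak reference type to its strong counterpart:
    /// for a WeakRef<T>, this returns (and caches in `ref_types`, keyed by the referent's ID)
    /// a ref<T> with a fresh ID; any other type is returned unchanged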
    pub fn make_strong_type(&self, ty: P<MuType>) -> P<MuType> {
        match &ty.v {
            &MuType_::WeakRef(ref t) => {
                let res = self.ref_types
                    .read()
                    .unwrap()
                    .get(&t.id())
                    .map(|x| x.clone());
                match res {
                    Some(ty) => ty,
                    None => {
                        let ty = P(MuType::new(self.next_id(), MuType_::muref(t.clone())));
                        self.ref_types.write().unwrap().insert(t.id(), ty.clone());
                        ty
                    }
                }
            }
            _ => ty.clone()
        }
    }

    /// adds a new bundle into VM.
    /// This function will drain the contents of all arguments. Ideally, this function should
    /// happen atomically. e.g. The client should not see a new type added without also seeing
    /// a new function added.
    pub fn declare_many(
        &self,
        new_id_name_map: &mut HashMap<MuID, MuName>,
        new_types: &mut HashMap<MuID, P<MuType>>,
        new_func_sigs: &mut HashMap<MuID, P<MuFuncSig>>,
        new_constants: &mut HashMap<MuID, P<Value>>,
        new_globals: &mut HashMap<MuID, P<Value>>,
        new_funcs: &mut HashMap<MuID, Box<MuFunction>>,
        new_func_vers: &mut HashMap<MuID, Box<MuFunctionVersion>>,