GitLab will be upgraded on 30 Jan 2023 from 2.00 pm (AEDT) to 3.00 pm (AEDT). During the update, GitLab and Mattermost services will not be available. If you have any concerns with this, please talk to us at N110 (b) CSIT building.

vm.rs 66.6 KB
Newer Older
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
1
// Copyright 2017 The Australian National University
2
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
3
4
5
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
6
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
7
//     http://www.apache.org/licenses/LICENSE-2.0
8
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
9
10
11
12
13
14
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

qinsoon's avatar
qinsoon committed
15
16
use std::collections::HashMap;

17
use rodal;
qinsoon's avatar
qinsoon committed
18
use ast::ptr::*;
qinsoon's avatar
qinsoon committed
19
use ast::ir::*;
20
use ast::inst::*;
qinsoon's avatar
qinsoon committed
21
use ast::types;
qinsoon's avatar
qinsoon committed
22
use ast::types::*;
qinsoon's avatar
qinsoon committed
23
use compiler::{Compiler, CompilerPolicy};
qinsoon's avatar
qinsoon committed
24
use compiler::backend;
qinsoon's avatar
qinsoon committed
25
use compiler::backend::BackendType;
26
use compiler::machine_code::{CompiledFunction, CompiledCallsite};
27

28
use runtime::thread::*;
29
use runtime::*;
qinsoon's avatar
qinsoon committed
30
use utils::ByteSize;
31
use utils::BitSize;
32
use utils::Address;
33
use runtime::mm as gc;
34
use vm::handle::*;
qinsoon's avatar
qinsoon committed
35
36
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;
37

qinsoon's avatar
qinsoon committed
38
use log::LogLevel;
39
use std::sync::Arc;
qinsoon's avatar
qinsoon committed
40
use std::sync::RwLock;
41
use std::sync::RwLockWriteGuard;
42
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
43

44
45
use std;
use utils::bit_utils::{bits_ones, u64_asr};
qinsoon's avatar
qinsoon committed
46

qinsoon's avatar
qinsoon committed
47
48
49
/// The VM struct. This stores metadata for the currently running Zebu instance.
/// This struct gets persisted in the boot image, and when the boot image is loaded,
/// everything should be back to the same status as before persisting.
qinsoon's avatar
[wip]    
qinsoon committed
50
///
qinsoon's avatar
qinsoon committed
51
52
53
/// This struct is usually used as Arc<VM> so it can be shared among threads. The
/// Arc<VM> is stored in every thread local of a Mu thread, so that they can refer
/// to the VM easily.
qinsoon's avatar
[wip]    
qinsoon committed
54
///
qinsoon's avatar
qinsoon committed
55
56
57
58
/// We are using fine-grained lock on VM to allow mutability on different fields in VM.
/// Also we use two-level locks for some data structures such as MuFunction/
/// MuFunctionVersion/CompiledFunction so that we can mutate on two
/// different functions/funcvers/etc at the same time.
qinsoon's avatar
[wip]    
qinsoon committed
59
60
61
62

//  FIXME: However, there are problems with fine-grained lock design,
//  and we will need to rethink. See Issue #2.
//  TODO: besides fields in VM, there are some 'globals' we need to persist
qinsoon's avatar
qinsoon committed
63
64
65
66
//  such as STRUCT_TAG_MAP, INTERNAL_ID and internal types from ir crate. The point is
//  ir crate should be independent and self-contained. But when persisting the 'world',
//  besides persisting VM struct (containing most of the 'world'), we also need to
//  specifically persist those globals.
67
68
pub struct VM {
    // The comments are the offset into the struct
    // ---serialize---
    /// next MuID to assign
    next_id: AtomicUsize, // +0
    /// a map from MuID to MuName (for client to query)
    id_name_map: RwLock<HashMap<MuID, MuName>>, // +8
    /// a map from MuName to ID (for client to query)
    name_id_map: RwLock<HashMap<MuName, MuID>>, // +64
    /// types declared to the VM
    types: RwLock<HashMap<MuID, P<MuType>>>, // +120
    /// Ref types declared by 'make_strong_type', the key is the ID of the Referent
    ref_types: RwLock<HashMap<MuID, P<MuType>>>,
    /// types that are resolved as BackendType
    backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>,
    /// constants declared to the VM
    constants: RwLock<HashMap<MuID, P<Value>>>,
    /// globals declared to the VM
    globals: RwLock<HashMap<MuID, P<Value>>>,
    /// function signatures declared
    func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>,
    /// functions declared to the VM
    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>,
    /// primordial function that is set to make boot image
    primordial: RwLock<Option<PrimordialThreadInfo>>,

    /// current options for this VM
    pub vm_options: VMOptions, // +624

    // ---partially serialize---
    /// compiled functions
    /// (we are not persisting generated code with compiled function)
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728

    /// match each function version to a map, mapping each of its containing callsites
    /// to the name of the catch block
    callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784

    // ---do not serialize---
    /// global cell locations. We use this map to create handles for global cells,
    /// or dump globals into boot image. (this map does not get persisted because
    /// the location is changed in different runs)
    global_locations: RwLock<HashMap<MuID, ValueLocation>>,
    /// function versions defined to the VM (not persisted; rebuilt after load)
    func_vers: RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>>,

    /// all the funcref that clients want to store for AOT which are pending stores
    /// For AOT scenario, when client tries to store funcref to the heap, the store
    /// happens before we have an actual address for the function so we store a fake
    /// funcref and when generating boot image, we fix the funcref with a relocatable symbol
    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,

    /// runtime callsite table for exception handling
    /// a map from callsite address to CompiledCallsite
    compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>, // 896

    /// Number of callsites in the callsite tables
    callsite_count: AtomicUsize
}
qinsoon's avatar
vm.rs    
qinsoon committed
125

126
// Implements the rodal dumper for the VM: fields are dumped in the same order
// as they are declared in the VM struct above, so the two must be kept in sync.
unsafe impl rodal::Dump for VM {
    fn dump<D: ?Sized + rodal::Dumper>(&self, dumper: &mut D) {
        dumper.debug_record("VM", "dump");

        // ---serialize--- fields, dumped verbatim
        dumper.dump_object(&self.next_id);
        dumper.dump_object(&self.id_name_map);
        dumper.dump_object(&self.name_id_map);
        dumper.dump_object(&self.types);
        dumper.dump_object(&self.ref_types);
        dumper.dump_object(&self.backend_type_info);
        dumper.dump_object(&self.constants);
        dumper.dump_object(&self.globals);
        dumper.dump_object(&self.func_sigs);
        dumper.dump_object(&self.funcs);
        dumper.dump_object(&self.primordial);
        dumper.dump_object(&self.vm_options);
        dumper.dump_object(&self.compiled_funcs);
        dumper.dump_object(&self.callsite_table);

        // ---do not serialize--- fields: pad to the field's location, then
        // dump empty maps so that we can safely read and modify them once loaded
        dumper.dump_padding(&self.global_locations);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, ValueLocation>::new()
        ));

        dumper.dump_padding(&self.func_vers);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, RwLock<MuFunctionVersion>>::new()
        ));

        dumper.dump_padding(&self.aot_pending_funcref_store);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, ValueLocation>::new()
        ));

        // Dump an empty hashmap for the other hashmaps
        dumper.dump_padding(&self.compiled_callsite_table);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, CompiledCallsite>::new()
        ));
        dumper.dump_object(&self.callsite_count);
    }
}
169

qinsoon's avatar
[wip]    
qinsoon committed
170
171
172
173
/// a fake funcref to store for AOT when client tries to store a funcref via API
//  For AOT scenario, when client tries to store funcref to the heap, the store
//  happens before we have an actual address for the function so we store a fake
//  funcref and when generating boot image, we fix the funcref with a relocatable symbol
//  (all-ones is used as the sentinel value)
const PENDING_FUNCREF: u64 = std::u64::MAX;
178

qinsoon's avatar
[wip]    
qinsoon committed
179
/// a macro to generate int8/16/32/64 from/to API calls
/// `$fn_from` wraps a native integer into a fresh APIHandle (widened to u64);
/// `$fn_to` extracts the integer back out, truncating to `$int_ty`.
macro_rules! gen_handle_int {
    ($fn_from: ident, $fn_to: ident, $int_ty: ty) => {
        // creates a handle holding `num` as a `len`-bit integer
        pub fn $fn_from (&self, num: $int_ty, len: BitSize) -> APIHandleResult {
            let handle_id = self.next_id();
            self.new_handle (APIHandle {
                id: handle_id,
                v: APIHandleValue::Int(num as u64, len)
            })
        }

        // reads the integer payload back out of a handle (cast may truncate)
        pub fn $fn_to (&self, handle: APIHandleArg) -> $int_ty {
            handle.v.as_int() as $int_ty
        }
    }
}

196
impl<'a> VM {
qinsoon's avatar
[wip]    
qinsoon committed
197
    /// creates a VM with default options
    /// (see `VMOptions::default` for what those defaults are)
    pub fn new() -> VM {
        VM::new_internal(VMOptions::default())
    }

qinsoon's avatar
[wip]    
qinsoon committed
202
    /// creates a VM with specified options
qinsoon's avatar
qinsoon committed
203
204
205
206
    pub fn new_with_opts(str: &str) -> VM {
        VM::new_internal(VMOptions::init(str))
    }

qinsoon's avatar
[wip]    
qinsoon committed
207
    /// internal function to create a VM with options
    /// Logging is started first (so construction itself can log), then the empty
    /// VM is built, internal types are registered, the ID counter is bumped past
    /// the reserved range, and finally the runtime (GC) is initialized.
    #[cfg(not(feature = "sel4-rumprun"))]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT
        };

        // insert all internal types
        // (scoped so the write lock is released before we continue)
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }
253

254
255
256
257
258
    /// internal function to create a VM with options for sel4-rumprun
    /// default memory sizes are different from other platforms
    /// Mirrors the non-sel4 `new_internal` but shrinks the GC space sizes
    /// before initializing the runtime.
    #[cfg(feature = "sel4-rumprun")]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let mut ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            // FIX: `ref_types` was missing from this initializer (the non-sel4
            // variant initializes it); without it this cfg does not compile.
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT
        };

        // currently, the default sizes don't work on sel4-rumprun platform
        // this is due to memory allocation size limitations
        ret.vm_options.flag_gc_immixspace_size = 1 << 19;
        ret.vm_options.flag_gc_lospace_size = 1 << 19;

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }
qinsoon's avatar
qinsoon committed
305

qinsoon's avatar
[wip]    
qinsoon committed
306
    /// initializes runtime
307
    fn init_runtime(&self) {
qinsoon's avatar
qinsoon committed
308
309
310
        // init gc
        {
            let ref options = self.vm_options;
311
312
313
314
            gc::gc_init(
                options.flag_gc_immixspace_size,
                options.flag_gc_lospace_size,
                options.flag_gc_nthreads,
315
                !options.flag_gc_disable_collection
316
            );
qinsoon's avatar
qinsoon committed
317
318
319
        }
    }

qinsoon's avatar
[wip]    
qinsoon committed
320
    /// starts logging based on MuLogLevel flag
qinsoon's avatar
qinsoon committed
321
    fn start_logging(level: MuLogLevel) {
322
        use std::env;
qinsoon's avatar
qinsoon committed
323
        match level {
324
            MuLogLevel::None => {}
qinsoon's avatar
qinsoon committed
325
            MuLogLevel::Error => VM::start_logging_internal(LogLevel::Error),
326
327
            MuLogLevel::Warn => VM::start_logging_internal(LogLevel::Warn),
            MuLogLevel::Info => VM::start_logging_internal(LogLevel::Info),
qinsoon's avatar
qinsoon committed
328
329
            MuLogLevel::Debug => VM::start_logging_internal(LogLevel::Debug),
            MuLogLevel::Trace => VM::start_logging_internal(LogLevel::Trace),
330
331
332
333
334
            MuLogLevel::Env => {
                match env::var("MU_LOG_LEVEL") {
                    Ok(s) => VM::start_logging(MuLogLevel::from_string(s)),
                    _ => {} // Don't log
                }
335
            }
qinsoon's avatar
qinsoon committed
336
        }
qinsoon's avatar
qinsoon committed
337
338
    }

qinsoon's avatar
[wip]    
qinsoon committed
339
    /// starts trace-level logging (the most verbose level)
    pub fn start_logging_trace() {
        VM::start_logging_internal(LogLevel::Trace)
    }
qinsoon's avatar
[wip]    
qinsoon committed
343
344

    /// starts logging based on MU_LOG_LEVEL environment variable
    /// (delegates to `start_logging` with `MuLogLevel::Env`)
    pub fn start_logging_env() {
        VM::start_logging(MuLogLevel::Env)
    }
qinsoon's avatar
qinsoon committed
348

qinsoon's avatar
[wip]    
qinsoon committed
349
350
    /// starts logging based on Rust's LogLevel
    /// (this function actually initializes logger and deals with error)
qinsoon's avatar
qinsoon committed
351
    fn start_logging_internal(level: LogLevel) {
352
353
354
355
        use stderrlog;

        let verbose = match level {
            LogLevel::Error => 0,
356
357
            LogLevel::Warn => 1,
            LogLevel::Info => 2,
358
            LogLevel::Debug => 3,
359
            LogLevel::Trace => 4
360
        };
qinsoon's avatar
qinsoon committed
361

362
        match stderrlog::new().verbosity(verbose).init() {
363
            Ok(()) => { info!("logger initialized") }
364
365
366
367
368
369
            Err(e) => {
                error!(
                    "failed to init logger, probably already initialized: {:?}",
                    e
                )
            }
qinsoon's avatar
qinsoon committed
370
371
        }
    }
372

qinsoon's avatar
[wip]    
qinsoon committed
373
374
    /// adds an exception callsite and catch block
    /// (later we will use this info to build an exception table for unwinding use)
375
376
    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();
377
378

        if table.contains_key(&fv) {
379
            table.get_mut(&fv).unwrap().push(callsite);
380
        } else {
381
            table.insert(fv, vec![callsite]);
382
        };
383
384
        // TODO: do wee need a stronger ordering??
        self.callsite_count.fetch_add(1, Ordering::Relaxed);
385
386
    }

qinsoon's avatar
[wip]    
qinsoon committed
387
388
    /// resumes persisted VM. Ideally the VM should be back to the status when we start
    /// persisting it except a few fields that we do not want to persist.
389
    pub fn resume_vm(dumped_vm: *mut Arc<VM>) -> Arc<VM> {
qinsoon's avatar
[wip]    
qinsoon committed
390
        // load the vm back
391
        let vm = unsafe { rodal::load_asm_pointer_move(dumped_vm) };
qinsoon's avatar
[wip]    
qinsoon committed
392
393

        // initialize runtime
394
        vm.init_runtime();
qinsoon's avatar
qinsoon committed
395

qinsoon's avatar
[wip]    
qinsoon committed
396
        // construct exception table
397
        vm.build_callsite_table();
qinsoon's avatar
[wip]    
qinsoon committed
398

qinsoon's avatar
qinsoon committed
399
400
401
        // restore gc types
        {
            let type_info_guard = vm.backend_type_info.read().unwrap();
402
403
            let mut type_info_vec: Vec<Box<BackendType>> =
                type_info_guard.values().map(|x| x.clone()).collect();
qinsoon's avatar
qinsoon committed
404
405
406
407
408
409
410
411
412
413
414
415
416
417
            type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));

            let mut expect_id = 0;
            for ty_info in type_info_vec.iter() {
                use runtime::mm;

                let ref gc_type = ty_info.gc_type;

                if gc_type.id != expect_id {
                    debug_assert!(expect_id < gc_type.id);

                    while expect_id < gc_type.id {
                        use runtime::mm::common::gctype::GCType;

qinsoon's avatar
qinsoon committed
418
                        mm::add_gc_type(GCType::new_noreftype(0, 8));
qinsoon's avatar
qinsoon committed
419
420
421
422
423
424
425
426
427
428
429
                        expect_id += 1;
                    }
                }

                // now expect_id == gc_type.id
                debug_assert!(expect_id == gc_type.id);

                mm::add_gc_type(gc_type.as_ref().clone());
                expect_id += 1;
            }
        }
430
        // construct exception table
431
        vm.build_callsite_table();
432
433
        vm
    }
434

qinsoon's avatar
[wip]    
qinsoon committed
435
436
437
438
439
    /// builds a succinct exception table for fast query during exception unwinding
    /// We need this step because for AOT compilation, we do not know symbol address at compile,
    /// and resolving symbol address during exception handling is expensive. Thus when boot image
    /// gets executed, we first resolve symbols and store the results in another table for fast
    /// query.
    pub fn build_callsite_table(&self) {
        // lock order: callsite_table (read), compiled_funcs (read),
        // compiled_callsite_table (write)
        let callsite_table = self.callsite_table.read().unwrap();
        let compiled_funcs = self.compiled_funcs.read().unwrap();
        let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
        // TODO: Use a different ordering?
        // pre-size the table using the running callsite count
        compiled_callsite_table.reserve(self.callsite_count.load(Ordering::Relaxed));
        for (fv, callsite_list) in callsite_table.iter() {
            let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
            // callee-saved register info is shared by all callsites of this function
            let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
            for callsite in callsite_list.iter() {
                // key by the resolved runtime address of the callsite symbol
                compiled_callsite_table.insert(
                    resolve_symbol(callsite.name.clone()),
                    CompiledCallsite::new(
                        &callsite,
                        compiled_func.func_ver_id,
                        callee_saved_table.clone()
                    )
                );
            }
        }
    }
qinsoon's avatar
[wip]    
qinsoon committed
461
462

    /// returns a valid ID for use next (IDs are never reused)
    pub fn next_id(&self) -> MuID {
        // This only needs to be atomic, and does not need to be a synchronisation operation. The
        // only requirement for IDs is that all IDs obtained from `next_id()` are different. So
        // `Ordering::Relaxed` is sufficient.
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }
qinsoon's avatar
[wip]    
qinsoon committed
469

470
471
    /// are we doing AOT compilation? (feature = aot when building Zebu)
    pub fn is_doing_aot(&self) -> bool {
472
        return cfg!(feature = "aot");
473
    }
474
475
476

    /// are we doing JIT compilation? (feature = jit when building Zebu)
    pub fn is_doing_jit(&self) -> bool {
477
        return cfg!(feature = "jit");
qinsoon's avatar
qinsoon committed
478
    }
479
480

    /// informs VM about a client-supplied name
481
    pub fn set_name(&self, entity: &MuEntity) {
qinsoon's avatar
qinsoon committed
482
        let id = entity.id();
483
        let name = entity.name();
484

qinsoon's avatar
qinsoon committed
485
        let mut map = self.id_name_map.write().unwrap();
486
        map.insert(id, name.clone());
487

qinsoon's avatar
qinsoon committed
488
489
490
        let mut map2 = self.name_id_map.write().unwrap();
        map2.insert(name, id);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
491
492
493
494
495

    /// returns Mu ID for a client-supplied name
    /// This function should only used by client, 'name' used internally may be slightly different
    /// due to removal of some special symbols in the MuName. See name_check() in ir.rs
    /// Panics if the name was never registered.
    pub fn id_of(&self, name: &str) -> MuID {
        let guard = self.name_id_map.read().unwrap();
        *guard
            .get(name)
            .unwrap_or_else(|| panic!("cannot find id for name: {}", name))
    }
502

qinsoon's avatar
vm.rs    
qinsoon committed
503
504
505
    /// returns the client-supplied Mu name for Mu ID
    /// This function should only used by client, 'name' used internally may be slightly different
    /// due to removal of some special symbols in the MuName. See name_check() in ir.rs
qinsoon's avatar
qinsoon committed
506
507
    pub fn name_of(&self, id: MuID) -> MuName {
        let map = self.id_name_map.read().unwrap();
508
        map.get(&id).unwrap().clone()
qinsoon's avatar
qinsoon committed
509
    }
qinsoon's avatar
vm.rs    
qinsoon committed
510
511

    /// declares a constant with the given header and type, registers it with
    /// the VM, and returns the constant value
    pub fn declare_const(&self, entity: MuEntityHeader, ty: P<MuType>, val: Constant) -> P<Value> {
        let ret = P(Value {
            hdr: entity,
            ty: ty,
            v: Value_::Constant(val)
        });

        // take the write lock here; the internal helper expects it held
        let mut constants = self.constants.write().unwrap();
        self.declare_const_internal(&mut constants, ret.id(), ret.clone());

        ret
    }
524

qinsoon's avatar
vm.rs    
qinsoon committed
525
    /// adds a constant to the map (already acquired lock)
    /// `id` must not already be declared (checked in debug builds only)
    fn declare_const_internal(
        &self,
        map: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
        val: P<Value>
    ) {
        debug_assert!(!map.contains_key(&id));

        info!("declare const #{} = {}", id, val);
        map.insert(id, val);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
537
538

    /// gets the constant P<Value> for a given Mu ID, panics if there is no type with the ID
539
540
541
542
    pub fn get_const(&self, id: MuID) -> P<Value> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => ret.clone(),
543
            None => panic!("cannot find const #{}", id)
544
545
        }
    }
546

qinsoon's avatar
vm.rs    
qinsoon committed
547
548
    /// allocates memory for a constant that needs to be put in memory
    /// For AOT, we simply create a label for it, and let code emitter allocate the memory
    #[cfg(feature = "aot")]
    pub fn allocate_const(&self, val: P<Value>) -> ValueLocation {
        // label encodes both the constant's id and its name
        let label = format!("CONST_{}_{}", val.id(), val.name());
        ValueLocation::Relocatable(backend::RegGroup::GPR, label)
    }
qinsoon's avatar
vm.rs    
qinsoon committed
556
557

    /// declares a global of type `ty`, registers it and allocates its backing
    /// memory, and returns the global as an iref value
    pub fn declare_global(&self, entity: MuEntityHeader, ty: P<MuType>) -> P<Value> {
        // create iref value for the global
        // (this also declares a fresh unnamed iref<ty> type)
        let global = P(Value {
            hdr: entity,
            ty: self.declare_type(
                MuEntityHeader::unnamed(self.next_id()),
                MuType_::iref(ty.clone())
            ),
            v: Value_::Global(ty)
        });

        // lock order: globals before global_locations; the helper expects both held
        let mut globals = self.globals.write().unwrap();
        let mut global_locs = self.global_locations.write().unwrap();
        self.declare_global_internal(&mut globals, &mut global_locs, global.id(), global.clone());

        global
    }
575

qinsoon's avatar
vm.rs    
qinsoon committed
576
    /// adds the global to the map (already acquired lock), and allocates memory for it
    /// (composes `declare_global_internal_no_alloc` and `alloc_global`)
    fn declare_global_internal(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        self.declare_global_internal_no_alloc(globals, id, val.clone());
        self.alloc_global(global_locs, id, val);
    }

qinsoon's avatar
vm.rs    
qinsoon committed
588
    /// adds the global to the map (already acquired lock)
qinsoon's avatar
qinsoon committed
589
590
    /// when bulk declaring, we hold locks for everything, we cannot resolve backend type
    /// and do alloc so we add globals to the map, and then allocate them later
591
592
593
    fn declare_global_internal_no_alloc(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
594
        id: MuID,
595
        val: P<Value>
596
597
598
599
600
601
602
    ) {
        debug_assert!(!globals.contains_key(&id));

        info!("declare global #{} = {}", id, val);
        globals.insert(id, val.clone());
    }

qinsoon's avatar
vm.rs    
qinsoon committed
603
    /// allocates memory for a global cell and records its location
    /// (already acquired the global_locations lock)
    fn alloc_global(
        &self,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        // the global's type is iref<T>; allocate for the referent type T
        let backend_ty = self.get_backend_type_info(val.ty.get_referent_ty().unwrap().id());
        let loc = gc::allocate_global(val, backend_ty);
        trace!("allocate global #{} as {}", id, loc);
        global_locs.insert(id, loc);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
615
616

    /// declares a type, registers it with the VM, and returns it
    pub fn declare_type(&self, entity: MuEntityHeader, ty: MuType_) -> P<MuType> {
        let ty = P(MuType { hdr: entity, v: ty });

        // take the write lock here; the internal helper expects it held
        let mut types = self.types.write().unwrap();
        self.declare_type_internal(&mut types, ty.id(), ty.clone());

        ty
    }
625

qinsoon's avatar
vm.rs    
qinsoon committed
626
    /// adds the type to the map (already acquired lock)
    /// `id` must not already be declared (checked in debug builds only)
    fn declare_type_internal(
        &self,
        types: &mut RwLockWriteGuard<HashMap<MuID, P<MuType>>>,
        id: MuID,
        ty: P<MuType>
    ) {
        debug_assert!(!types.contains_key(&id));

        types.insert(id, ty.clone());
        info!("declare type #{} = {}", id, ty);

        // for struct/hybrid, the full definition lives in the corresponding
        // tag map (STRUCT_TAG_MAP / HYBRID_TAG_MAP); look it up and trace it
        if ty.is_struct() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let struct_map_guard = STRUCT_TAG_MAP.read().unwrap();
            let struct_inner = struct_map_guard.get(&tag).unwrap();
            trace!("  {}", struct_inner);
        } else if ty.is_hybrid() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let hybrid_map_guard = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_inner = hybrid_map_guard.get(&tag).unwrap();
            trace!("  {}", hybrid_inner);
        }
    }
qinsoon's avatar
vm.rs    
qinsoon committed
651
652

    /// gets the type for a given Mu ID, panics if there is no type with the ID
653
654
655
656
    pub fn get_type(&self, id: MuID) -> P<MuType> {
        let type_lock = self.types.read().unwrap();
        match type_lock.get(&id) {
            Some(ret) => ret.clone(),
657
            None => panic!("cannot find type #{}", id)
658
        }
659
    }
qinsoon's avatar
vm.rs    
qinsoon committed
660
661

    /// declares a function signature
662
663
664
665
    pub fn declare_func_sig(
        &self,
        entity: MuEntityHeader,
        ret_tys: Vec<P<MuType>>,
666
        arg_tys: Vec<P<MuType>>
667
668
669
670
    ) -> P<MuFuncSig> {
        let ret = P(MuFuncSig {
            hdr: entity,
            ret_tys: ret_tys,
671
            arg_tys: arg_tys
672
        });
673
674

        let mut func_sigs = self.func_sigs.write().unwrap();
675
        self.declare_func_sig_internal(&mut func_sigs, ret.id(), ret.clone());
676

qinsoon's avatar
qinsoon committed
677
678
        ret
    }
679

qinsoon's avatar
vm.rs    
qinsoon committed
680
    /// adds a function signature to the map (already acquired lock)
681
682
683
684
    fn declare_func_sig_internal(
        &self,
        sigs: &mut RwLockWriteGuard<HashMap<MuID, P<MuFuncSig>>>,
        id: MuID,
685
        sig: P<MuFuncSig>
686
    ) {
687
688
689
690
691
        debug_assert!(!sigs.contains_key(&id));

        info!("declare func sig #{} = {}", id, sig);
        sigs.insert(id, sig);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
692
693

    /// gets the function signature for a given ID, panics if there is no func sig with the ID
694
695
696
697
    pub fn get_func_sig(&self, id: MuID) -> P<MuFuncSig> {
        let func_sig_lock = self.func_sigs.read().unwrap();
        match func_sig_lock.get(&id) {
            Some(ret) => ret.clone(),
698
            None => panic!("cannot find func sig #{}", id)
699
700
        }
    }
qinsoon's avatar
vm.rs    
qinsoon committed
701
702

    /// declares a Mu function
703
    pub fn declare_func(&self, func: MuFunction) {
qinsoon's avatar
qinsoon committed
704
        let mut funcs = self.funcs.write().unwrap();
705
706
707
708

        self.declare_func_internal(&mut funcs, func.id(), func);
    }

qinsoon's avatar
vm.rs    
qinsoon committed
709
    /// adds a Mu function to the map (already acquired lock)
710
711
712
713
    fn declare_func_internal(
        &self,
        funcs: &mut RwLockWriteGuard<HashMap<MuID, RwLock<MuFunction>>>,
        id: MuID,
714
        func: MuFunction
715
    ) {
716
717
718
719
        debug_assert!(!funcs.contains_key(&id));

        info!("declare func #{} = {}", id, func);
        funcs.insert(id, RwLock::new(func));
720
    }
721

qinsoon's avatar
vm.rs    
qinsoon committed
722
723
724
725
    /// gets the function name for a function (by ID), panics if there is no function with the ID
    /// Note this name is the internal name, which is different than
    /// the client-supplied name from vm.name_of()
    pub fn get_name_for_func(&self, id: MuID) -> MuName {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().name(),
            // fixed: the panic format string was missing the `id` argument, so the
            // message printed a literal "#{}" instead of the ID (matches the style
            // of get_sig_for_func / get_type)
            None => panic!("cannot find name for Mu function #{}", id)
        }
    }
qinsoon's avatar
vm.rs    
qinsoon committed
732

qinsoon's avatar
qinsoon committed
733
734
    /// gets the function signature for a function (by ID),
    /// panics if there is no function with the ID
qinsoon's avatar
vm.rs    
qinsoon committed
735
    pub fn get_sig_for_func(&self, id: MuID) -> P<MuFuncSig> {
736
737
738
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().sig.clone(),
739
            None => panic!("cannot find Mu function #{}", id)
740
        }
qinsoon's avatar
vm.rs    
qinsoon committed
741
742
743
744
745
746
747
748
749
750
    }

    /// gets the current function version for a Mu function (by ID)
    /// returns None if the function does not exist, or no version is defined for the function
    pub fn get_cur_version_for_func(&self, fid: MuID) -> Option<MuID> {
        let funcs = self.funcs.read().unwrap();
        // missing function and function-without-version both map to None
        funcs
            .get(&fid)
            .and_then(|func| func.read().unwrap().cur_ver)
    }

    /// gets the address as ValueLocation of a Mu function (by ID)
    pub fn get_address_for_func(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
759
        let func: &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();
qinsoon's avatar
vm.rs    
qinsoon committed
760
761
762
763

        if self.is_doing_jit() {
            unimplemented!()
        } else {
764
            ValueLocation::Relocatable(backend::RegGroup::GPR, func.name())
qinsoon's avatar
vm.rs    
qinsoon committed
765
766
767
768
        }
    }

    /// defines a function version
769
    pub fn define_func_version(&self, func_ver: MuFunctionVersion) {
qinsoon's avatar
qinsoon committed
770
        info!("define function version {}", func_ver);
qinsoon's avatar
vm.rs    
qinsoon committed
771
        // add this funcver to map
qinsoon's avatar
qinsoon committed
772
        let func_ver_id = func_ver.id();
773
774
        {
            let mut func_vers = self.func_vers.write().unwrap();
qinsoon's avatar
qinsoon committed
775
            func_vers.insert(func_ver_id, RwLock::new(func_ver));
776
        }
777

778
779
        // acquire a reference to the func_ver
        let func_vers = self.func_vers.read().unwrap();
qinsoon's avatar
qinsoon committed
780
        let func_ver = func_vers.get(&func_ver_id).unwrap().write().unwrap();
781

qinsoon's avatar
vm.rs    
qinsoon committed
782
        // change current version of the function to new version (obsolete old versions)
783
        let funcs = self.funcs.read().unwrap();
qinsoon's avatar
qinsoon committed
784
785
        // it should be declared before defining
        debug_assert!(funcs.contains_key(&func_ver.func_id));
qinsoon's avatar
qinsoon committed
786
        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();
787

788
        func.new_version(func_ver.id());
qinsoon's avatar
vm.rs    
qinsoon committed
789
790
791
792
793

        if self.is_doing_jit() {
            // redefinition may happen, we need to check
            unimplemented!()
        }
794
    }
795

796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
    /// returns the strong variant (ref) of a weakref type, creating and caching
    /// one if it does not exist yet; any non-weakref type is returned unchanged
    pub fn make_strong_type(&self, ty: P<MuType>) -> P<MuType> {
        match &ty.v {
            &MuType_::WeakRef(ref t) => {
                // fixed: the old code checked under a read lock, dropped it, then
                // re-acquired a write lock to insert — two threads could race,
                // each miss the cache, and create two distinct strong types (and
                // burn two IDs) for the same referent. Hold the write lock across
                // the whole check-then-insert instead.
                let mut ref_types = self.ref_types.write().unwrap();
                ref_types
                    .entry(t.id())
                    .or_insert_with(|| {
                        P(MuType::new(self.next_id(), MuType_::muref(t.clone())))
                    })
                    .clone()
            }
            _ => ty.clone()
        }
    }

qinsoon's avatar
vm.rs    
qinsoon committed
817
818
819
820
    /// adds a new bundle into VM.
    /// This function will drain the contents of all arguments. Ideally, this function should
    /// happen atomically. e.g. The client should not see a new type added without also seeing
    /// a new function added.
821
822
823
824
825
826
827
828
829
    pub fn declare_many(
        &self,
        new_id_name_map: &mut HashMap<MuID, MuName>,
        new_types: &mut HashMap<MuID, P<MuType>>,
        new_func_sigs: &mut HashMap<MuID, P<MuFuncSig>>,
        new_constants: &mut HashMap<MuID, P<Value>>,
        new_globals: &mut HashMap<MuID, P<Value>>,
        new_funcs: &mut HashMap<MuID, Box<MuFunction>>,
        new_func_vers: &mut HashMap<MuID, Box<MuFunctionVersion>>,
830
        arc_vm: Arc<VM>
831
    ) {
832
833
        // Make sure other components, if ever acquiring multiple locks at the same time, acquire
        // them in this order, to prevent deadlock.
834
835
836
        {
            let mut id_name_map = self.id_name_map.write().unwrap();
            let mut name_id_map = self.name_id_map.write().unwrap();
837
838
839
840
841
842
            let mut types = self.types.write().unwrap();
            let mut constants = self.constants.write().unwrap();
            let mut globals = self.globals.write().unwrap();
            let mut func_sigs = self.func_sigs.write().unwrap();
            let mut funcs = self.funcs.write().unwrap();
            let mut func_vers = self.func_vers.write().unwrap();
843

844