GitLab will be upgraded on 30 Jan 2023 from 2.00 pm (AEDT) to 3.00 pm (AEDT). During the update, GitLab and Mattermost services will not be available. If you have any concerns with this, please talk to us at N110 (b) CSIT building.

vm.rs 63.2 KB
Newer Older
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
1
// Copyright 2017 The Australian National University
2
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
3
4
5
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
6
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
7
//     http://www.apache.org/licenses/LICENSE-2.0
8
//
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
9
10
11
12
13
14
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

qinsoon's avatar
qinsoon committed
15
16
use std::collections::HashMap;

17
use rodal;
qinsoon's avatar
qinsoon committed
18
use ast::ptr::*;
qinsoon's avatar
qinsoon committed
19
use ast::ir::*;
20
use ast::inst::*;
qinsoon's avatar
qinsoon committed
21
use ast::types;
qinsoon's avatar
qinsoon committed
22
use ast::types::*;
qinsoon's avatar
qinsoon committed
23
use compiler::{Compiler, CompilerPolicy};
qinsoon's avatar
qinsoon committed
24
use compiler::backend;
qinsoon's avatar
qinsoon committed
25
use compiler::backend::BackendType;
26
use compiler::machine_code::{CompiledFunction, CompiledCallsite};
27

28
use runtime::thread::*;
29
use runtime::*;
qinsoon's avatar
qinsoon committed
30
use utils::ByteSize;
31
use utils::BitSize;
32
use utils::Address;
33
use runtime::mm as gc;
34
use vm::handle::*;
qinsoon's avatar
qinsoon committed
35
36
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;
37

qinsoon's avatar
qinsoon committed
38
use log::LogLevel;
39
use std::sync::Arc;
qinsoon's avatar
qinsoon committed
40
use std::sync::RwLock;
41
use std::sync::RwLockWriteGuard;
42
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
43

44
45
use std;
use utils::bit_utils::{bits_ones, u64_asr};
qinsoon's avatar
qinsoon committed
46

qinsoon's avatar
qinsoon committed
47
48
49
/// The VM struct. This stores metadata for the currently running Zebu instance.
/// This struct gets persisted in the boot image, and when the boot image is loaded,
/// everything should be back to the same status as before persisting.
qinsoon's avatar
[wip]    
qinsoon committed
50
///
qinsoon's avatar
qinsoon committed
51
52
53
/// This struct is usually used as Arc<VM> so it can be shared among threads. The
/// Arc<VM> is stored in every thread local of a Mu thread, so that they can refer
/// to the VM easily.
qinsoon's avatar
[wip]    
qinsoon committed
54
///
qinsoon's avatar
qinsoon committed
55
56
57
58
/// We are using fine-grained lock on VM to allow mutability on different fields in VM.
/// Also we use two-level locks for some data structures such as MuFunction/
/// MuFunctionVersion/CompiledFunction so that we can mutate on two
/// different functions/funcvers/etc at the same time.
qinsoon's avatar
[wip]    
qinsoon committed
59
60
61
62

//  FIXME: However, there are problems with fine-grained lock design,
//  and we will need to rethink. See Issue #2.
//  TODO: besides fields in VM, there are some 'globals' we need to persist
qinsoon's avatar
qinsoon committed
63
64
65
66
//  such as STRUCT_TAG_MAP, INTERNAL_ID and internal types from ir crate. The point is
//  ir crate should be independent and self-contained. But when persisting the 'world',
//  besides persisting VM struct (containing most of the 'world'), we also need to
//  specifically persist those globals.
67
68
/// The VM struct: all of Zebu's persistent state.
/// NOTE: field declaration order matters — the rodal::Dump impl below dumps the
/// "---serialize---" fields in this exact order, and the `// +N` comments record
/// the byte offset of each field in the boot image layout. Do not reorder fields.
pub struct VM {
    // The comments are the offset into the struct
    // ---serialize---
    /// next MuID to assign
    next_id: AtomicUsize, // +0
    /// a map from MuID to MuName (for client to query)
    id_name_map: RwLock<HashMap<MuID, MuName>>, // +8
    /// a map from MuName to ID (for client to query)
    name_id_map: RwLock<HashMap<MuName, MuID>>, // +64
    /// types declared to the VM
    types: RwLock<HashMap<MuID, P<MuType>>>, // +120
    /// types that are resolved as BackendType
    backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>, // +176
    /// constants declared to the VM
    constants: RwLock<HashMap<MuID, P<Value>>>, // +232
    /// globals declared to the VM
    globals: RwLock<HashMap<MuID, P<Value>>>, // +288
    /// function signatures declared
    func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>, // +400
    /// functions declared to the VM
    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>, // +456
    /// primordial function that is set to make boot image
    pub primordial: RwLock<Option<PrimordialThreadInfo>>, // +568
    /// current options for this VM
    pub vm_options: VMOptions, // +624

    // ---partially serialize---
    /// compiled functions
    /// (we are not persisting generated code with compiled function)
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728

    /// match each functions version to a map, mapping each of it's containing callsites
    /// to the name of the catch block
    callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784

    // ---do not serialize---
    /// global cell locations. We use this map to create handles for global cells,
    /// or dump globals into boot image. (this map does not get persisted because
    /// the location is changed in different runs)
    pub global_locations: RwLock<HashMap<MuID, ValueLocation>>,
    // function versions (not persisted; the Dump impl writes an empty map in its place)
    func_vers: RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>>,

    /// all the funcref that clients want to store for AOT which are pending stores
    /// For AOT scenario, when client tries to store funcref to the heap, the store
    /// happens before we have an actual address for the function so we store a fake
    /// funcref and when generating boot image, we fix the funcref with a relocatable symbol
    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,

    /// runtime callsite table for exception handling
    /// a map from callsite address to CompiledCallsite
    pub compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>, // 896

    /// Number of callsites in the callsite tables
    pub callsite_count: AtomicUsize
}
qinsoon's avatar
vm.rs    
qinsoon committed
122

123
// SAFETY: rodal::Dump is an unsafe trait; this impl must dump every field of VM
// in declaration order so the serialized layout matches the struct layout.
unsafe impl rodal::Dump for VM {
    /// Persists the VM into the boot image. The "---serialize---" fields are
    /// dumped as-is; the "do not serialize" maps are replaced with empty maps
    /// (padding preserved) so they can be safely used after the image is loaded.
    fn dump<D: ?Sized + rodal::Dumper>(&self, dumper: &mut D) {
        dumper.debug_record("VM", "dump");

        // fields dumped in struct declaration order
        dumper.dump_object(&self.next_id);
        dumper.dump_object(&self.id_name_map);
        dumper.dump_object(&self.name_id_map);
        dumper.dump_object(&self.types);
        dumper.dump_object(&self.backend_type_info);
        dumper.dump_object(&self.constants);
        dumper.dump_object(&self.globals);
        dumper.dump_object(&self.func_sigs);
        dumper.dump_object(&self.funcs);
        dumper.dump_object(&self.primordial);
        dumper.dump_object(&self.vm_options);
        dumper.dump_object(&self.compiled_funcs);
        dumper.dump_object(&self.callsite_table);

        // Dump empty maps so that we can safely read and modify them once loaded
        dumper.dump_padding(&self.global_locations);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, ValueLocation>::new()
        ));

        dumper.dump_padding(&self.func_vers);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, RwLock<MuFunctionVersion>>::new()
        ));

        dumper.dump_padding(&self.aot_pending_funcref_store);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, ValueLocation>::new()
        ));

        // Dump an empty hashmap for the other hashmaps
        dumper.dump_padding(&self.compiled_callsite_table);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, CompiledCallsite>::new()
        ));
        dumper.dump_object(&self.callsite_count);
    }
}
165

qinsoon's avatar
[wip]    
qinsoon committed
166
167
168
169
/// a fake funcref to store for AOT when client tries to store a funcref via API
//  For AOT scenario, when client tries to store funcref to the heap, the store
//  happens before we have an actual address for the function so we store a fake
//  funcref and when generating boot image, we fix the funcref with a relocatable symbol
const PENDING_FUNCREF: u64 = std::u64::MAX;
174

qinsoon's avatar
[wip]    
qinsoon committed
175
/// a macro to generate int8/16/32/64 from/to API calls
/// `$from_fn` wraps a native integer into an API handle (widened to u64);
/// `$to_fn` extracts it back out, truncating to the native type.
macro_rules! gen_handle_int {
    ($from_fn: ident, $to_fn: ident, $int_ty: ty) => {
        pub fn $from_fn(&self, num: $int_ty, len: BitSize) -> APIHandleResult {
            let id = self.next_id();
            self.new_handle(APIHandle {
                id: id,
                v: APIHandleValue::Int(num as u64, len)
            })
        }

        pub fn $to_fn(&self, handle: APIHandleArg) -> $int_ty {
            handle.v.as_int() as $int_ty
        }
    }
}

192
impl<'a> VM {
qinsoon's avatar
[wip]    
qinsoon committed
193
    /// creates a VM with default options
qinsoon's avatar
qinsoon committed
194
    pub fn new() -> VM {
qinsoon's avatar
qinsoon committed
195
196
197
        VM::new_internal(VMOptions::default())
    }

qinsoon's avatar
[wip]    
qinsoon committed
198
    /// creates a VM with specified options
qinsoon's avatar
qinsoon committed
199
200
201
202
    pub fn new_with_opts(str: &str) -> VM {
        VM::new_internal(VMOptions::init(str))
    }

qinsoon's avatar
[wip]    
qinsoon committed
203
    /// internal function to create a VM with options
    /// Construction order matters: logging first (so construction can log),
    /// then the struct, then internal types, ID counter, type init, runtime init.
    fn new_internal(options: VMOptions) -> VM {
        // bring logging up before anything else
        VM::start_logging(options.flag_log_level);

        let ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT
        };

        // insert all internal types
        // (scoped so the write lock is released before we continue)
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        // (IDs below USER_ID_START are reserved for internal entities)
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }
qinsoon's avatar
qinsoon committed
247

qinsoon's avatar
[wip]    
qinsoon committed
248
    /// initializes runtime
249
    fn init_runtime(&self) {
qinsoon's avatar
qinsoon committed
250
251
252
        // init gc
        {
            let ref options = self.vm_options;
253
254
255
256
            gc::gc_init(
                options.flag_gc_immixspace_size,
                options.flag_gc_lospace_size,
                options.flag_gc_nthreads,
257
                !options.flag_gc_disable_collection
258
            );
qinsoon's avatar
qinsoon committed
259
260
261
        }
    }

qinsoon's avatar
[wip]    
qinsoon committed
262
    /// starts logging based on MuLogLevel flag
qinsoon's avatar
qinsoon committed
263
    fn start_logging(level: MuLogLevel) {
264
        use std::env;
qinsoon's avatar
qinsoon committed
265
        match level {
266
            MuLogLevel::None => {}
qinsoon's avatar
qinsoon committed
267
            MuLogLevel::Error => VM::start_logging_internal(LogLevel::Error),
268
269
            MuLogLevel::Warn => VM::start_logging_internal(LogLevel::Warn),
            MuLogLevel::Info => VM::start_logging_internal(LogLevel::Info),
qinsoon's avatar
qinsoon committed
270
271
            MuLogLevel::Debug => VM::start_logging_internal(LogLevel::Debug),
            MuLogLevel::Trace => VM::start_logging_internal(LogLevel::Trace),
272
273
274
275
276
            MuLogLevel::Env => {
                match env::var("MU_LOG_LEVEL") {
                    Ok(s) => VM::start_logging(MuLogLevel::from_string(s)),
                    _ => {} // Don't log
                }
277
            }
qinsoon's avatar
qinsoon committed
278
        }
qinsoon's avatar
qinsoon committed
279
280
    }

qinsoon's avatar
[wip]    
qinsoon committed
281
    /// starts trace-level logging
qinsoon's avatar
qinsoon committed
282
    pub fn start_logging_trace() {
qinsoon's avatar
qinsoon committed
283
284
        VM::start_logging_internal(LogLevel::Trace)
    }
qinsoon's avatar
[wip]    
qinsoon committed
285
286

    /// starts logging based on MU_LOG_LEVEL environment variable
287
288
289
    pub fn start_logging_env() {
        VM::start_logging(MuLogLevel::Env)
    }
qinsoon's avatar
qinsoon committed
290

qinsoon's avatar
[wip]    
qinsoon committed
291
292
    /// starts logging based on Rust's LogLevel
    /// (this function actually initializes logger and deals with error)
qinsoon's avatar
qinsoon committed
293
    fn start_logging_internal(level: LogLevel) {
294
295
296
297
        use stderrlog;

        let verbose = match level {
            LogLevel::Error => 0,
298
299
            LogLevel::Warn => 1,
            LogLevel::Info => 2,
300
            LogLevel::Debug => 3,
301
            LogLevel::Trace => 4
302
        };
qinsoon's avatar
qinsoon committed
303

304
        match stderrlog::new().verbosity(verbose).init() {
305
            Ok(()) => { info!("logger initialized") }
306
307
308
309
310
311
            Err(e) => {
                error!(
                    "failed to init logger, probably already initialized: {:?}",
                    e
                )
            }
qinsoon's avatar
qinsoon committed
312
313
        }
    }
314

qinsoon's avatar
[wip]    
qinsoon committed
315
316
    /// adds an exception callsite and catch block
    /// (later we will use this info to build an exception table for unwinding use)
317
318
    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();
319
320

        if table.contains_key(&fv) {
321
            table.get_mut(&fv).unwrap().push(callsite);
322
        } else {
323
            table.insert(fv, vec![callsite]);
324
        };
325
326
        // TODO: do wee need a stronger ordering??
        self.callsite_count.fetch_add(1, Ordering::Relaxed);
327
328
    }

qinsoon's avatar
[wip]    
qinsoon committed
329
330
    /// resumes persisted VM. Ideally the VM should be back to the status when we start
    /// persisting it except a few fields that we do not want to persist.
331
    pub fn resume_vm(dumped_vm: *mut Arc<VM>) -> Arc<VM> {
qinsoon's avatar
[wip]    
qinsoon committed
332
        // load the vm back
333
        let vm = unsafe { rodal::load_asm_pointer_move(dumped_vm) };
qinsoon's avatar
[wip]    
qinsoon committed
334
335

        // initialize runtime
336
        vm.init_runtime();
qinsoon's avatar
qinsoon committed
337

qinsoon's avatar
[wip]    
qinsoon committed
338
        // construct exception table
339
        vm.build_callsite_table();
qinsoon's avatar
[wip]    
qinsoon committed
340

qinsoon's avatar
qinsoon committed
341
342
343
        // restore gc types
        {
            let type_info_guard = vm.backend_type_info.read().unwrap();
344
345
            let mut type_info_vec: Vec<Box<BackendType>> =
                type_info_guard.values().map(|x| x.clone()).collect();
qinsoon's avatar
qinsoon committed
346
347
348
349
350
351
352
353
354
355
356
357
358
359
            type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));

            let mut expect_id = 0;
            for ty_info in type_info_vec.iter() {
                use runtime::mm;

                let ref gc_type = ty_info.gc_type;

                if gc_type.id != expect_id {
                    debug_assert!(expect_id < gc_type.id);

                    while expect_id < gc_type.id {
                        use runtime::mm::common::gctype::GCType;

qinsoon's avatar
qinsoon committed
360
                        mm::add_gc_type(GCType::new_noreftype(0, 8));
qinsoon's avatar
qinsoon committed
361
362
363
364
365
366
367
368
369
370
371
                        expect_id += 1;
                    }
                }

                // now expect_id == gc_type.id
                debug_assert!(expect_id == gc_type.id);

                mm::add_gc_type(gc_type.as_ref().clone());
                expect_id += 1;
            }
        }
372
        // construct exception table
373
        vm.build_callsite_table();
374
375
        vm
    }
376

qinsoon's avatar
[wip]    
qinsoon committed
377
378
379
380
381
    /// builds a succinct exception table for fast query during exception unwinding
    /// We need this step because for AOT compilation, we do not know symbol address at compile,
    /// and resolving symbol address during exception handling is expensive. Thus when boot image
    /// gets executed, we first resolve symbols and store the results in another table for fast
    /// query.
382
383
    pub fn build_callsite_table(&self) {
        let callsite_table = self.callsite_table.read().unwrap();
384
        let compiled_funcs = self.compiled_funcs.read().unwrap();
385
        let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
386
387
        // TODO: Use a different ordering?
        compiled_callsite_table.reserve(self.callsite_count.load(Ordering::Relaxed));
388
389
390
391
        for (fv, callsite_list) in callsite_table.iter() {
            let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
            let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
            for callsite in callsite_list.iter() {
392
393
394
395
396
                compiled_callsite_table.insert(
                    resolve_symbol(callsite.name.clone()),
                    CompiledCallsite::new(
                        &callsite,
                        compiled_func.func_ver_id,
397
398
                        callee_saved_table.clone()
                    )
399
                );
400
401
            }
        }
402
    }
qinsoon's avatar
[wip]    
qinsoon committed
403
404

    /// returns a valid ID for use next
405
    pub fn next_id(&self) -> MuID {
Kunshan Wang's avatar
Kunshan Wang committed
406
407
408
        // This only needs to be atomic, and does not need to be a synchronisation operation. The
        // only requirement for IDs is that all IDs obtained from `next_id()` are different. So
        // `Ordering::Relaxed` is sufficient.
409
        self.next_id.fetch_add(1, Ordering::Relaxed)
410
    }
qinsoon's avatar
[wip]    
qinsoon committed
411

412
413
    /// are we doing AOT compilation? (feature = aot when building Zebu)
    pub fn is_doing_aot(&self) -> bool {
414
        return cfg!(feature = "aot");
415
    }
416
417
418

    /// are we doing JIT compilation? (feature = jit when building Zebu)
    pub fn is_doing_jit(&self) -> bool {
419
        return cfg!(feature = "jit");
qinsoon's avatar
qinsoon committed
420
    }
421
422

    /// informs VM about a client-supplied name
423
    pub fn set_name(&self, entity: &MuEntity) {
qinsoon's avatar
qinsoon committed
424
        let id = entity.id();
425
        let name = entity.name();
426

qinsoon's avatar
qinsoon committed
427
        let mut map = self.id_name_map.write().unwrap();
428
        map.insert(id, name.clone());
429

qinsoon's avatar
qinsoon committed
430
431
432
        let mut map2 = self.name_id_map.write().unwrap();
        map2.insert(name, id);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
433
434
435
436
437

    /// returns Mu ID for a client-supplied name
    /// This function should only used by client, 'name' used internally may be slightly different
    /// due to removal of some special symbols in the MuName. See name_check() in ir.rs
    pub fn id_of(&self, name: &str) -> MuID {
        let map = self.name_id_map.read().unwrap();
        *map.get(name)
            .unwrap_or_else(|| panic!("cannot find id for name: {}", name))
    }
444

qinsoon's avatar
vm.rs    
qinsoon committed
445
446
447
    /// returns the client-supplied Mu name for Mu ID
    /// This function should only used by client, 'name' used internally may be slightly different
    /// due to removal of some special symbols in the MuName. See name_check() in ir.rs
qinsoon's avatar
qinsoon committed
448
449
    pub fn name_of(&self, id: MuID) -> MuName {
        let map = self.id_name_map.read().unwrap();
450
        map.get(&id).unwrap().clone()
qinsoon's avatar
qinsoon committed
451
    }
qinsoon's avatar
vm.rs    
qinsoon committed
452
453

    /// declares a constant
454
    pub fn declare_const(&self, entity: MuEntityHeader, ty: P<MuType>, val: Constant) -> P<Value> {
455
456
457
        let ret = P(Value {
            hdr: entity,
            ty: ty,
458
            v: Value_::Constant(val)
459
        });
460

qinsoon's avatar
vm.rs    
qinsoon committed
461
        let mut constants = self.constants.write().unwrap();
462
        self.declare_const_internal(&mut constants, ret.id(), ret.clone());
463

qinsoon's avatar
qinsoon committed
464
465
        ret
    }
466

qinsoon's avatar
vm.rs    
qinsoon committed
467
    /// adds a constant to the map (already acquired lock)
468
469
470
471
    fn declare_const_internal(
        &self,
        map: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
472
        val: P<Value>
473
    ) {
474
475
        debug_assert!(!map.contains_key(&id));

qinsoon's avatar
vm.rs    
qinsoon committed
476
        info!("declare const #{} = {}", id, val);
477
478
        map.insert(id, val);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
479
480

    /// gets the constant P<Value> for a given Mu ID, panics if there is no type with the ID
481
482
483
484
    pub fn get_const(&self, id: MuID) -> P<Value> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => ret.clone(),
485
            None => panic!("cannot find const #{}", id)
486
487
        }
    }
488

qinsoon's avatar
vm.rs    
qinsoon committed
489
490
    /// allocates memory for a constant that needs to be put in memory
    /// For AOT, we simply create a label for it, and let code emitter allocate the memory
    #[cfg(feature = "aot")]
    pub fn allocate_const(&self, val: P<Value>) -> ValueLocation {
        let label = format!("CONST_{}_{}", val.id(), val.name());
        ValueLocation::Relocatable(backend::RegGroup::GPR, label)
    }
qinsoon's avatar
vm.rs    
qinsoon committed
498
499

    /// declares a global
500
    pub fn declare_global(&self, entity: MuEntityHeader, ty: P<MuType>) -> P<Value> {
qinsoon's avatar
vm.rs    
qinsoon committed
501
        // create iref value for the global
502
        let global = P(Value {
503
            hdr: entity,
504
505
            ty: self.declare_type(
                MuEntityHeader::unnamed(self.next_id()),
506
                MuType_::iref(ty.clone())
507
            ),
508
            v: Value_::Global(ty)
qinsoon's avatar
qinsoon committed
509
        });
qinsoon's avatar
vm.rs    
qinsoon committed
510

qinsoon's avatar
qinsoon committed
511
        let mut globals = self.globals.write().unwrap();
512
        let mut global_locs = self.global_locations.write().unwrap();
513
        self.declare_global_internal(&mut globals, &mut global_locs, global.id(), global.clone());
514

qinsoon's avatar
qinsoon committed
515
        global
qinsoon's avatar
qinsoon committed
516
    }
517

qinsoon's avatar
vm.rs    
qinsoon committed
518
    /// adds the global to the map (already acquired lock), and allocates memory for it
519
520
521
522
    fn declare_global_internal(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
523
        id: MuID,
524
        val: P<Value>
525
526
527
528
529
    ) {
        self.declare_global_internal_no_alloc(globals, id, val.clone());
        self.alloc_global(global_locs, id, val);
    }

qinsoon's avatar
vm.rs    
qinsoon committed
530
531
532
    /// adds the global to the map (already acquired lock)
    /// when bulk declaring, we hold locks for everything, we cannot resolve backend type and do alloc
    /// so we add globals to the map, and then allocate them later
533
534
535
    fn declare_global_internal_no_alloc(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
536
        id: MuID,
537
        val: P<Value>
538
539
540
541
542
543
544
    ) {
        debug_assert!(!globals.contains_key(&id));

        info!("declare global #{} = {}", id, val);
        globals.insert(id, val.clone());
    }

qinsoon's avatar
vm.rs    
qinsoon committed
545
    /// allocates memory for a global cell
546
547
548
    fn alloc_global(
        &self,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
549
        id: MuID,
550
        val: P<Value>
551
    ) {
552
        let backend_ty = self.get_backend_type_info(val.ty.get_referent_ty().unwrap().id());
553
        let loc = gc::allocate_global(val, backend_ty);
qinsoon's avatar
vm.rs    
qinsoon committed
554
        trace!("allocate global #{} as {}", id, loc);
555
556
        global_locs.insert(id, loc);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
557
558

    /// declares a type
559
    pub fn declare_type(&self, entity: MuEntityHeader, ty: MuType_) -> P<MuType> {
560
        let ty = P(MuType { hdr: entity, v: ty });
561

qinsoon's avatar
vm.rs    
qinsoon committed
562
        let mut types = self.types.write().unwrap();
563
        self.declare_type_internal(&mut types, ty.id(), ty.clone());
564

qinsoon's avatar
qinsoon committed
565
566
        ty
    }
567

qinsoon's avatar
vm.rs    
qinsoon committed
568
    /// adds the type to the map (already acquired lock)
569
570
571
572
    fn declare_type_internal(
        &self,
        types: &mut RwLockWriteGuard<HashMap<MuID, P<MuType>>>,
        id: MuID,
573
        ty: P<MuType>
574
    ) {
575
576
577
        debug_assert!(!types.contains_key(&id));

        types.insert(id, ty.clone());
qinsoon's avatar
vm.rs    
qinsoon committed
578
        info!("declare type #{} = {}", id, ty);
qinsoon's avatar
qinsoon committed
579

qinsoon's avatar
vm.rs    
qinsoon committed
580
        // for struct/hybrid, also adds to struct/hybrid tag map
qinsoon's avatar
qinsoon committed
581
582
583
584
        if ty.is_struct() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let struct_map_guard = STRUCT_TAG_MAP.read().unwrap();
            let struct_inner = struct_map_guard.get(&tag).unwrap();
qinsoon's avatar
qinsoon committed
585
            trace!("  {}", struct_inner);
qinsoon's avatar
qinsoon committed
586
587
588
589
        } else if ty.is_hybrid() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let hybrid_map_guard = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_inner = hybrid_map_guard.get(&tag).unwrap();
qinsoon's avatar
qinsoon committed
590
            trace!("  {}", hybrid_inner);
qinsoon's avatar
qinsoon committed
591
        }
592
    }
qinsoon's avatar
vm.rs    
qinsoon committed
593
594

    /// gets the type for a given Mu ID, panics if there is no type with the ID
595
596
597
598
    pub fn get_type(&self, id: MuID) -> P<MuType> {
        let type_lock = self.types.read().unwrap();
        match type_lock.get(&id) {
            Some(ret) => ret.clone(),
599
            None => panic!("cannot find type #{}", id)
600
        }
601
    }
qinsoon's avatar
vm.rs    
qinsoon committed
602
603

    /// declares a function signature
604
605
606
607
    pub fn declare_func_sig(
        &self,
        entity: MuEntityHeader,
        ret_tys: Vec<P<MuType>>,
608
        arg_tys: Vec<P<MuType>>
609
610
611
612
    ) -> P<MuFuncSig> {
        let ret = P(MuFuncSig {
            hdr: entity,
            ret_tys: ret_tys,
613
            arg_tys: arg_tys
614
        });
615
616

        let mut func_sigs = self.func_sigs.write().unwrap();
617
        self.declare_func_sig_internal(&mut func_sigs, ret.id(), ret.clone());
618

qinsoon's avatar
qinsoon committed
619
620
        ret
    }
621

qinsoon's avatar
vm.rs    
qinsoon committed
622
    /// adds a function signature to the map (already acquired lock)
623
624
625
626
    fn declare_func_sig_internal(
        &self,
        sigs: &mut RwLockWriteGuard<HashMap<MuID, P<MuFuncSig>>>,
        id: MuID,
627
        sig: P<MuFuncSig>
628
    ) {
629
630
631
632
633
        debug_assert!(!sigs.contains_key(&id));

        info!("declare func sig #{} = {}", id, sig);
        sigs.insert(id, sig);
    }
qinsoon's avatar
vm.rs    
qinsoon committed
634
635

    /// gets the function signature for a given ID, panics if there is no func sig with the ID
636
637
638
639
    pub fn get_func_sig(&self, id: MuID) -> P<MuFuncSig> {
        let func_sig_lock = self.func_sigs.read().unwrap();
        match func_sig_lock.get(&id) {
            Some(ret) => ret.clone(),
640
            None => panic!("cannot find func sig #{}", id)
641
642
        }
    }
qinsoon's avatar
vm.rs    
qinsoon committed
643
644

    /// declares a Mu function
645
    pub fn declare_func(&self, func: MuFunction) {
qinsoon's avatar
qinsoon committed
646
        let mut funcs = self.funcs.write().unwrap();
647
648
649
650

        self.declare_func_internal(&mut funcs, func.id(), func);
    }

qinsoon's avatar
vm.rs    
qinsoon committed
651
    /// adds a Mu function to the map (already acquired lock)
652
653
654
655
    fn declare_func_internal(
        &self,
        funcs: &mut RwLockWriteGuard<HashMap<MuID, RwLock<MuFunction>>>,
        id: MuID,
656
        func: MuFunction
657
    ) {
658
659
660
661
        debug_assert!(!funcs.contains_key(&id));

        info!("declare func #{} = {}", id, func);
        funcs.insert(id, RwLock::new(func));
662
    }
663

qinsoon's avatar
vm.rs    
qinsoon committed
664
665
666
667
    /// gets the function name for a function (by ID), panics if there is no function with the ID
    /// Note this name is the internal name, which is different than
    /// the client-supplied name from vm.name_of()
    pub fn get_name_for_func(&self, id: MuID) -> MuName {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().name(),
            // Fix: the format string previously had no argument, so the panic
            // message printed a literal "{}" instead of the missing function ID.
            None => panic!("cannot find name for Mu function #{}", id)
        }
    }
qinsoon's avatar
vm.rs    
qinsoon committed
674
675
676

    /// Returns the signature of the declared Mu function with the given ID.
    /// Panics if the function has not been declared.
    pub fn get_sig_for_func(&self, id: MuID) -> P<MuFuncSig> {
        let guard = self.funcs.read().unwrap();
        guard
            .get(&id)
            .unwrap_or_else(|| panic!("cannot find Mu function #{}", id))
            .read()
            .unwrap()
            .sig
            .clone()
    }

    /// Returns the current version ID of the Mu function with the given ID.
    /// Yields None if the function does not exist, or if no version has been
    /// defined for it yet.
    pub fn get_cur_version_for_func(&self, fid: MuID) -> Option<MuID> {
        let guard = self.funcs.read().unwrap();
        guard
            .get(&fid)
            .and_then(|func| func.read().unwrap().cur_ver)
    }

    /// gets the address as ValueLocation of a Mu function (by ID)
    pub fn get_address_for_func(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
700
        let func: &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();
qinsoon's avatar
vm.rs    
qinsoon committed
701
702
703
704

        if self.is_doing_jit() {
            unimplemented!()
        } else {
705
            ValueLocation::Relocatable(backend::RegGroup::GPR, func.name())
qinsoon's avatar
vm.rs    
qinsoon committed
706
707
708
709
        }
    }

    /// defines a function version
710
    pub fn define_func_version(&self, func_ver: MuFunctionVersion) {
qinsoon's avatar
qinsoon committed
711
        info!("define function version {}", func_ver);
qinsoon's avatar
vm.rs    
qinsoon committed
712
        // add this funcver to map
qinsoon's avatar
qinsoon committed
713
        let func_ver_id = func_ver.id();
714
715
        {
            let mut func_vers = self.func_vers.write().unwrap();
qinsoon's avatar
qinsoon committed
716
            func_vers.insert(func_ver_id, RwLock::new(func_ver));
717
        }
718

719
720
        // acquire a reference to the func_ver
        let func_vers = self.func_vers.read().unwrap();
qinsoon's avatar
qinsoon committed
721
        let func_ver = func_vers.get(&func_ver_id).unwrap().write().unwrap();
722

qinsoon's avatar
vm.rs    
qinsoon committed
723
        // change current version of the function to new version (obsolete old versions)
724
        let funcs = self.funcs.read().unwrap();
qinsoon's avatar
qinsoon committed
725
        debug_assert!(funcs.contains_key(&func_ver.func_id)); // it should be declared before defining
qinsoon's avatar
qinsoon committed
726
        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();
727

728
        func.new_version(func_ver.id());
qinsoon's avatar
vm.rs    
qinsoon committed
729
730
731
732
733

        if self.is_doing_jit() {
            // redefinition may happen, we need to check
            unimplemented!()
        }
734
    }
735

qinsoon's avatar
vm.rs    
qinsoon committed
736
737
738
739
    /// adds a new bundle into VM.
    /// This function will drain the contents of all arguments. Ideally, this function should
    /// happen atomically. e.g. The client should not see a new type added without also seeing
    /// a new function added.
740
741
742
743
744
745
746
747
748
    pub fn declare_many(
        &self,
        new_id_name_map: &mut HashMap<MuID, MuName>,
        new_types: &mut HashMap<MuID, P<MuType>>,
        new_func_sigs: &mut HashMap<MuID, P<MuFuncSig>>,
        new_constants: &mut HashMap<MuID, P<Value>>,
        new_globals: &mut HashMap<MuID, P<Value>>,
        new_funcs: &mut HashMap<MuID, Box<MuFunction>>,
        new_func_vers: &mut HashMap<MuID, Box<MuFunctionVersion>>,
749
        arc_vm: Arc<VM>
750
    ) {
751
752
        // Make sure other components, if ever acquiring multiple locks at the same time, acquire
        // them in this order, to prevent deadlock.
753
754
755
        {
            let mut id_name_map = self.id_name_map.write().unwrap();
            let mut name_id_map = self.name_id_map.write().unwrap();
756
757
758
759
760
761
            let mut types = self.types.write().unwrap();
            let mut constants = self.constants.write().unwrap();
            let mut globals = self.globals.write().unwrap();
            let mut func_sigs = self.func_sigs.write().unwrap();
            let mut funcs = self.funcs.write().unwrap();
            let mut func_vers = self.func_vers.write().unwrap();
762

763
764
765
766
            for (id, name) in new_id_name_map.drain() {
                id_name_map.insert(id, name.clone());
                name_id_map.insert(name, id);
            }
767

768
769
770
            for (id, obj) in new_types.drain() {
                self.declare_type_internal(&mut types, id, obj);
            }
771

772
773
774
            for (id, obj) in new_constants.drain() {
                self.declare_const_internal(&mut constants, id, obj);
            }
775

776
777
778
779
            for (id, obj) in new_globals.drain() {
                // we bulk allocate later (since we are holding all the locks, we cannot find ty info)
                self.declare_global_internal_no_alloc(&mut globals, id, obj);
            }
780

781
782
783
            for (id, obj) in new_func_sigs.drain() {
                self.declare_func_sig_internal(&mut func_sigs, id, obj);
            }
784

785
786
787
            for (id, obj) in new_funcs.drain() {
                self.declare_func_internal(&mut funcs, id, *obj);
            }
788

789
790
791
            for (id, obj) in new_func_vers.drain() {
                let func_id = obj.func_id;
                func_vers.insert(id, RwLock::new(*obj));
792

793
794
795
796
                {
                    trace!("Adding funcver {} as a version of {}...", id, func_id);
                    let func = funcs.get_mut(&func_id).unwrap();
                    func.write().unwrap().new_version(id);
797
798
799
800
801
802
                    trace!(
                        "Added funcver {} as a version of {} {:?}.",
                        id,
                        func_id,
                        func
                    );
803
                }
804
805
806
            }
        }
        // Locks released here
807
808
809
810
811
812
813

        // allocate all the globals defined
        {
            let globals = self.globals.read().unwrap();
            let mut global_locs = self.global_locations.write().unwrap();

            // make sure current thread has allocator
814
815
            let created =
                unsafe { MuThread::current_thread_as_mu_thread(Address::zero(), arc_vm.clone()) };
816
817
818
819

            for (id, global) in globals.iter() {
                self.alloc_global(&mut global_locs, *id, global.clone());
            }
820
821

            if created {
822
                unsafe { MuThread::cleanup_current_mu_thread() };
823
            }
824
        }
825
    }
qinsoon's avatar
vm.rs    
qinsoon committed
826
827

    /// informs the VM of a newly compiled function (the function and funcver should already be declared before this call)
828
    pub fn add_compiled_func(&self, func: CompiledFunction) {
qinsoon's avatar
qinsoon committed
829
        debug_assert!(self.funcs.read().unwrap().contains_key(&func.func_id));
830
831
832
833
834
835
        debug_assert!(
            self.func_vers
                .read()
                .unwrap()
                .contains_key(&func.func_ver_id)
        );
qinsoon's avatar
qinsoon committed
836

837
838
839
840
        self.compiled_funcs