// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use rodal;
use ast::ptr::*;
use ast::ir::*;
use ast::inst::*;
use ast::types;
use ast::types::*;
use compiler::{Compiler, CompilerPolicy};
use compiler::backend;
use compiler::backend::BackendType;
use compiler::machine_code::{CompiledFunction, CompiledCallsite};

use runtime::thread::*;
use runtime::*;
use utils::ByteSize;
use utils::BitSize;
use utils::Address;
use runtime::mm as gc;
use vm::handle::*;
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;

use log::LogLevel;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Mutex;
use std::sync::RwLockWriteGuard;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use std::thread::JoinHandle;
use std::collections::LinkedList;
use std;
use utils::bit_utils::{bits_ones, u64_asr};

/// The VM struct. This stores metadata for the currently running Zebu instance.
/// This struct gets persisted in the boot image, and when the boot image is loaded,
/// everything should be back to the same status as before persisting.
///
/// This struct is usually used as Arc<VM> so it can be shared among threads. The
/// Arc<VM> is stored in every thread local of a Mu thread, so that they can refer
/// to the VM easily.
///
/// We are using fine-grained locks on the VM to allow mutability on different fields in VM.
/// Also we use two-level locks for some data structures such as MuFunction/
/// MuFunctionVersion/CompiledFunction so that we can mutate two
/// different functions/funcvers/etc. at the same time.

//  FIXME: However, there are problems with the fine-grained lock design,
//  and we will need to rethink it. See Issue #2.
//  TODO: besides fields in VM, there are some 'globals' we need to persist,
//  such as STRUCT_TAG_MAP, INTERNAL_ID and internal types from the ir crate. The point is
//  that the ir crate should be independent and self-contained. But when persisting the 'world',
//  besides persisting the VM struct (containing most of the 'world'), we also need to
//  specifically persist those globals.
pub struct VM {
    // The comments are the offset into the struct
    // ---serialize---
    /// next MuID to assign
    next_id: AtomicUsize, // +0
    /// a map from MuID to MuName (for client to query)
    id_name_map: RwLock<HashMap<MuID, MuName>>, // +8
    /// a map from MuName to ID (for client to query)
    name_id_map: RwLock<HashMap<MuName, MuID>>, // +64
    /// types declared to the VM
    types: RwLock<HashMap<MuID, P<MuType>>>, // +120
    /// Ref types declared by 'make_strong_type', the key is the ID of the referent
    ref_types: RwLock<HashMap<MuID, P<MuType>>>,
    /// types that are resolved as BackendType
    backend_type_info: RwLock<HashMap<MuID, Box<BackendType>>>,
    /// constants declared to the VM
    constants: RwLock<HashMap<MuID, P<Value>>>,
    /// globals declared to the VM
    globals: RwLock<HashMap<MuID, P<Value>>>,
    /// function signatures declared
    func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>,
    /// functions declared to the VM
    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>,
    /// primordial function that is set to make the boot image
    primordial: RwLock<Option<PrimordialThreadInfo>>,

    /// current options for this VM
    pub vm_options: VMOptions, // +624

    // ---partially serialize---
    /// compiled functions
    /// (we are not persisting generated code with compiled function)
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728

    /// maps each function version to a map from each of its callsites
    /// to the name of the catch block
    callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784

    // ---do not serialize---
    /// global cell locations. We use this map to create handles for global cells,
    /// or dump globals into the boot image. (this map does not get persisted because
    /// the locations change between runs)
    global_locations: RwLock<HashMap<MuID, ValueLocation>>,
    func_vers: RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>>,

    /// all the funcrefs that clients want to store for AOT which are pending stores.
    /// For the AOT scenario, when the client tries to store a funcref to the heap, the store
    /// happens before we have an actual address for the function, so we store a fake
    /// funcref; when generating the boot image, we fix the funcref with a relocatable symbol
    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,

    /// runtime callsite table for exception handling
    /// a map from callsite address to CompiledCallsite
    compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>, // 896

    /// number of callsites in the callsite tables
    callsite_count: AtomicUsize,
    /// a list of all threads currently waiting to be joined
    pub pending_joins: Mutex<LinkedList<JoinHandle<()>>>
}
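// Illustrative sketch (not part of the original source) of the two-level lock pattern described
// in the doc comment above: the outer RwLock guards the whole map, while the inner RwLock guards
// a single function, so two threads can mutate two different functions at the same time, e.g.:
//
//     let funcs = vm.funcs().read().unwrap();                      // outer lock: the map itself
//     let mut f = funcs.get(&func_id).unwrap().write().unwrap();   // inner lock: one MuFunction
//     // ... mutate `f` while other MuFunctions remain available to other threads ...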

unsafe impl rodal::Dump for VM {
    fn dump<D: ?Sized + rodal::Dumper>(&self, dumper: &mut D) {
        dumper.debug_record("VM", "dump");

        dumper.dump_object(&self.next_id);
        dumper.dump_object(&self.id_name_map);
        dumper.dump_object(&self.name_id_map);
        dumper.dump_object(&self.types);
        dumper.dump_object(&self.ref_types);
        dumper.dump_object(&self.backend_type_info);
        dumper.dump_object(&self.constants);
        dumper.dump_object(&self.globals);
        dumper.dump_object(&self.func_sigs);
        dumper.dump_object(&self.funcs);
        dumper.dump_object(&self.primordial);
        dumper.dump_object(&self.vm_options);
        dumper.dump_object(&self.compiled_funcs);
        dumper.dump_object(&self.callsite_table);

        // Dump empty maps so that we can safely read and modify them once loaded
        dumper.dump_padding(&self.global_locations);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, ValueLocation>::new()
        ));

        dumper.dump_padding(&self.func_vers);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<MuID, RwLock<MuFunctionVersion>>::new()
        ));

        dumper.dump_padding(&self.aot_pending_funcref_store);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, ValueLocation>::new()
        ));

        // Dump an empty hashmap for the other hashmaps
        dumper.dump_padding(&self.compiled_callsite_table);
        dumper.dump_object_here(&RwLock::new(
            rodal::EmptyHashMap::<Address, CompiledCallsite>::new()
        ));
        dumper.dump_object(&self.callsite_count);

        dumper.dump_padding(&self.pending_joins);
        dumper.dump_object_here(&Mutex::new(
            rodal::EmptyLinkedList::<JoinHandle<()>>::new()
        ));
    }
}

/// a fake funcref to store for AOT when the client tries to store a funcref via the API
//  For the AOT scenario, when the client tries to store a funcref to the heap, the store
//  happens before we have an actual address for the function, so we store a fake
//  funcref; when generating the boot image, we fix the funcref with a relocatable symbol
const PENDING_FUNCREF: u64 = {
    use std::u64;
    u64::MAX
};

/// a macro to generate int8/16/32/64 from/to API calls
macro_rules! gen_handle_int {
    ($fn_from: ident, $fn_to: ident, $int_ty: ty) => {
        pub fn $fn_from (&self, num: $int_ty, len: BitSize) -> APIHandleResult {
            let handle_id = self.next_id();
            self.new_handle (APIHandle {
                id: handle_id,
                v: APIHandleValue::Int(num as u64, len)
            })
        }

        pub fn $fn_to (&self, handle: APIHandleArg) -> $int_ty {
            handle.v.as_int() as $int_ty
        }
    }
}
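// Hypothetical invocation of the macro above (the method names here are made up for
// illustration; the concrete invocations live elsewhere in this impl/crate):
//
//     gen_handle_int!(new_handle_int8, handle_to_int8, i8);
//
// would generate a pair of methods on VM: `new_handle_int8(num: i8, len: BitSize)`, wrapping the
// value into an APIHandle as a 64-bit integer of the given length, and `handle_to_int8(handle)`,
// reading it back out via `as_int()`.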

impl<'a> VM {
    /// creates a VM with default options
    pub fn new() -> VM {
        VM::new_internal(VMOptions::default())
    }

    /// creates a VM with specified options
    pub fn new_with_opts(str: &str) -> VM {
        VM::new_internal(VMOptions::init(str))
    }

    /// internal function to create a VM with options
    #[cfg(not(feature = "sel4-rumprun"))]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT,
            pending_joins: Mutex::new(LinkedList::new()),
        };

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }

    /// internal function to create a VM with options for sel4-rumprun
    /// default memory sizes are different from other platforms
    #[cfg(feature = "sel4-rumprun")]
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let mut ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            // ref_types and pending_joins are required fields of VM and are initialized
            // here the same way as in the non-sel4-rumprun constructor
            ref_types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT,
            pending_joins: Mutex::new(LinkedList::new())
        };

        // currently, the default sizes don't work on the sel4-rumprun platform
        // this is due to memory allocation size limitations
        ret.vm_options.flag_gc_immixspace_size = 1 << 19;
        ret.vm_options.flag_gc_lospace_size = 1 << 19;

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        // starts allocating ID from USER_ID_START
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        // init runtime
        ret.init_runtime();

        ret
    }

    /// initializes runtime
    fn init_runtime(&self) {
        // init gc
        {
            let ref options = self.vm_options;
            gc::gc_init(
                options.flag_gc_immixspace_size,
                options.flag_gc_lospace_size,
                options.flag_gc_nthreads,
                !options.flag_gc_disable_collection
            );
        }
    }

    /// starts logging based on MuLogLevel flag
    fn start_logging(level: MuLogLevel) {
        use std::env;
        match level {
            MuLogLevel::None => {}
            MuLogLevel::Error => VM::start_logging_internal(LogLevel::Error),
            MuLogLevel::Warn => VM::start_logging_internal(LogLevel::Warn),
            MuLogLevel::Info => VM::start_logging_internal(LogLevel::Info),
            MuLogLevel::Debug => VM::start_logging_internal(LogLevel::Debug),
            MuLogLevel::Trace => VM::start_logging_internal(LogLevel::Trace),
            MuLogLevel::Env => {
                match env::var("MU_LOG_LEVEL") {
                    Ok(s) => VM::start_logging(MuLogLevel::from_string(s)),
                    _ => {} // Don't log
                }
            }
        }
    }
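    // Usage sketch for the `MuLogLevel::Env` arm above (shell side, not Rust): launching a
    // Zebu-embedding process with the environment variable set, e.g. `MU_LOG_LEVEL=trace`,
    // and then calling `VM::start_logging_env()` (defined below) picks the level up from the
    // environment; if the variable is unset, logging simply stays off.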

    /// starts trace-level logging
    pub fn start_logging_trace() {
        VM::start_logging_internal(LogLevel::Trace)
    }

    /// starts logging based on MU_LOG_LEVEL environment variable
    pub fn start_logging_env() {
        VM::start_logging(MuLogLevel::Env)
    }

    /// starts logging based on Rust's LogLevel
    /// (this function actually initializes the logger and deals with errors)
    fn start_logging_internal(level: LogLevel) {
        use stderrlog;

        let verbose = match level {
            LogLevel::Error => 0,
            LogLevel::Warn => 1,
            LogLevel::Info => 2,
            LogLevel::Debug => 3,
            LogLevel::Trace => 4
        };

        match stderrlog::new().verbosity(verbose).init() {
            Ok(()) => { info!("logger initialized") }
            Err(e) => {
                error!(
                    "failed to init logger, probably already initialized: {:?}",
                    e
                )
            }
        }
    }

    /// adds an exception callsite and catch block
    /// (later we will use this info to build an exception table for unwinding use)
    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();

        if table.contains_key(&fv) {
            table.get_mut(&fv).unwrap().push(callsite);
        } else {
            table.insert(fv, vec![callsite]);
        };
        // TODO: do we need a stronger ordering??
        self.callsite_count.fetch_add(1, Ordering::Relaxed);
    }

    /// resumes a persisted VM. Ideally the VM should be back to the status when we started
    /// persisting it, except a few fields that we do not want to persist.
    pub fn resume_vm(dumped_vm: *mut Arc<VM>) -> Arc<VM> {
        // load the vm back
        let vm = unsafe { rodal::load_asm_pointer_move(dumped_vm) };

        // initialize runtime
        vm.init_runtime();

        // construct exception table
        vm.build_callsite_table();

        // restore gc types
        {
            let type_info_guard = vm.backend_type_info.read().unwrap();
            let mut type_info_vec: Vec<Box<BackendType>> =
                type_info_guard.values().map(|x| x.clone()).collect();
            type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));

            let mut expect_id = 0;
            for ty_info in type_info_vec.iter() {
                use runtime::mm;

                let ref gc_type = ty_info.gc_type;

                if gc_type.id != expect_id {
                    debug_assert!(expect_id < gc_type.id);

                    while expect_id < gc_type.id {
                        use runtime::mm::common::gctype::GCType;

                        mm::add_gc_type(GCType::new_noreftype(0, 8));
                        expect_id += 1;
                    }
                }

                // now expect_id == gc_type.id
                debug_assert!(expect_id == gc_type.id);

                mm::add_gc_type(gc_type.as_ref().clone());
                expect_id += 1;
            }
        }
        // construct exception table
        vm.build_callsite_table();
        vm
    }

    /// builds a succinct exception table for fast query during exception unwinding
    /// We need this step because for AOT compilation, we do not know symbol addresses at compile
    /// time, and resolving symbol addresses during exception handling is expensive. Thus when the
    /// boot image gets executed, we first resolve symbols and store the results in another table
    /// for fast query.
    pub fn build_callsite_table(&self) {
        let callsite_table = self.callsite_table.read().unwrap();
        let compiled_funcs = self.compiled_funcs.read().unwrap();
        let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
        // TODO: Use a different ordering?
        compiled_callsite_table.reserve(self.callsite_count.load(Ordering::Relaxed));
        for (fv, callsite_list) in callsite_table.iter() {
            let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
            let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
            for callsite in callsite_list.iter() {
                compiled_callsite_table.insert(
                    resolve_symbol(callsite.name.clone()),
                    CompiledCallsite::new(
                        &callsite,
                        compiled_func.func_ver_id,
                        callee_saved_table.clone()
                    )
                );
            }
        }
    }
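    // Illustrative sketch (not code from the original file) of how the table built above can be
    // consulted during unwinding: take a return address found on the stack and look it up, e.g.
    //
    //     let table = vm.compiled_callsite_table().read().unwrap();
    //     if let Some(cs) = table.get(&return_addr) {
    //         // `cs` records the owning function version and its callee-saved register info,
    //         // which the runtime uses to locate the catch block and restore register state.
    //     }
    //
    // The actual unwinding logic lives in the runtime's exception handling code, not here.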

    /// returns the next valid ID to use
    pub fn next_id(&self) -> MuID {
        // This only needs to be atomic, and does not need to be a synchronisation operation. The
        // only requirement for IDs is that all IDs obtained from `next_id()` are different. So
        // `Ordering::Relaxed` is sufficient.
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }

    /// are we doing AOT compilation? (feature = aot when building Zebu)
    pub fn is_doing_aot(&self) -> bool {
        return cfg!(feature = "aot");
    }

    /// are we doing JIT compilation? (feature = jit when building Zebu)
    pub fn is_doing_jit(&self) -> bool {
        return cfg!(feature = "jit");
    }

    /// informs VM about a client-supplied name
    pub fn set_name(&self, entity: &MuEntity) {
        let id = entity.id();
        let name = entity.name();

        let mut map = self.id_name_map.write().unwrap();
        map.insert(id, name.clone());

        let mut map2 = self.name_id_map.write().unwrap();
        map2.insert(name, id);
    }

    /// returns the Mu ID for a client-supplied name
    /// This function should only be used by the client; the name used internally may be slightly
    /// different due to removal of some special symbols in the MuName. See name_check() in ir.rs
    pub fn id_of(&self, name: &str) -> MuID {
        let map = self.name_id_map.read().unwrap();
        match map.get(name) {
            Some(id) => *id,
            None => panic!("cannot find id for name: {}", name)
        }
    }

    /// returns the client-supplied Mu name for a Mu ID
    /// This function should only be used by the client; the name used internally may be slightly
    /// different due to removal of some special symbols in the MuName. See name_check() in ir.rs
    pub fn name_of(&self, id: MuID) -> MuName {
        let map = self.id_name_map.read().unwrap();
        map.get(&id).unwrap().clone()
    }
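    // Round-trip sketch for the two name maps above (identifiers are hypothetical):
    //
    //     vm.set_name(&entity);                      // records id <-> name in both maps
    //     let id = vm.id_of("some_client_name");     // name -> MuID
    //     let name = vm.name_of(id);                 // MuID -> MuName
    //
    // As noted above, these use the client-visible name, which may differ slightly from the
    // name used internally (see name_check() in ir.rs).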

    /// declares a constant
    pub fn declare_const(&self, entity: MuEntityHeader, ty: P<MuType>, val: Constant) -> P<Value> {
        let ret = P(Value {
            hdr: entity,
            ty: ty,
            v: Value_::Constant(val)
        });

        let mut constants = self.constants.write().unwrap();
        self.declare_const_internal(&mut constants, ret.id(), ret.clone());

        ret
    }
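    // Declaration sketch (hypothetical entities; the constructor names for MuType_ and Constant
    // are assumed from the ast crate, so treat this as illustrative only):
    //
    //     let ty = vm.declare_type(MuEntityHeader::unnamed(vm.next_id()), MuType_::int(64));
    //     let c  = vm.declare_const(MuEntityHeader::unnamed(vm.next_id()), ty.clone(),
    //                               Constant::Int(42));
    //     assert_eq!(vm.get_const(c.id()).id(), c.id());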

    /// adds a constant to the map (already acquired lock)
    fn declare_const_internal(
        &self,
        map: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
        val: P<Value>
    ) {
        debug_assert!(!map.contains_key(&id));

        info!("declare const #{} = {}", id, val);
        map.insert(id, val);
    }

    /// gets the constant P<Value> for a given Mu ID, panics if there is no constant with the ID
    pub fn get_const(&self, id: MuID) -> P<Value> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find const #{}", id)
        }
    }

    /// allocates memory for a constant that needs to be put in memory
    /// For AOT, we simply create a label for it, and let the code emitter allocate the memory
    #[cfg(feature = "aot")]
    pub fn allocate_const(&self, val: P<Value>) -> ValueLocation {
        let id = val.id();
        let name = format!("CONST_{}_{}", id, val.name());

        ValueLocation::Relocatable(backend::RegGroup::GPR, name)
    }

    /// declares a global
    pub fn declare_global(&self, entity: MuEntityHeader, ty: P<MuType>) -> P<Value> {
        // create iref value for the global
        let global = P(Value {
            hdr: entity,
            ty: self.declare_type(
                MuEntityHeader::unnamed(self.next_id()),
                MuType_::iref(ty.clone())
            ),
            v: Value_::Global(ty)
        });

        let mut globals = self.globals.write().unwrap();
        let mut global_locs = self.global_locations.write().unwrap();
        self.declare_global_internal(&mut globals, &mut global_locs, global.id(), global.clone());

        global
    }

    /// adds the global to the map (already acquired lock), and allocates memory for it
    fn declare_global_internal(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        self.declare_global_internal_no_alloc(globals, id, val.clone());
        self.alloc_global(global_locs, id, val);
    }

    /// adds the global to the map (already acquired lock)
    /// When bulk declaring, we hold locks for everything, so we cannot resolve the backend type
    /// and do the allocation; we just add globals to the map, and then allocate them later
    fn declare_global_internal_no_alloc(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID,
        val: P<Value>
    ) {
        debug_assert!(!globals.contains_key(&id));

        info!("declare global #{} = {}", id, val);
        globals.insert(id, val.clone());
    }

    /// allocates memory for a global cell
    fn alloc_global(
        &self,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID,
        val: P<Value>
    ) {
        let backend_ty = self.get_backend_type_info(val.ty.get_referent_ty().unwrap().id());
        let loc = gc::allocate_global(val, backend_ty);
        trace!("allocate global #{} as {}", id, loc);
        global_locs.insert(id, loc);
    }

    /// declares a type
    pub fn declare_type(&self, entity: MuEntityHeader, ty: MuType_) -> P<MuType> {
        let ty = P(MuType { hdr: entity, v: ty });

        let mut types = self.types.write().unwrap();
        self.declare_type_internal(&mut types, ty.id(), ty.clone());

        ty
    }

    /// adds the type to the map (already acquired lock)
    fn declare_type_internal(
        &self,
        types: &mut RwLockWriteGuard<HashMap<MuID, P<MuType>>>,
        id: MuID,
        ty: P<MuType>
    ) {
        debug_assert!(!types.contains_key(&id));

        types.insert(id, ty.clone());
        info!("declare type #{} = {}", id, ty);

        // for struct/hybrid, also adds to struct/hybrid tag map
        if ty.is_struct() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let struct_map_guard = STRUCT_TAG_MAP.read().unwrap();
            let struct_inner = struct_map_guard.get(&tag).unwrap();
            trace!("  {}", struct_inner);
        } else if ty.is_hybrid() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let hybrid_map_guard = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_inner = hybrid_map_guard.get(&tag).unwrap();
            trace!("  {}", hybrid_inner);
        }
    }

    /// gets the type for a given Mu ID, panics if there is no type with the ID
    pub fn get_type(&self, id: MuID) -> P<MuType> {
        let type_lock = self.types.read().unwrap();
        match type_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find type #{}", id)
        }
    }

    /// declares a function signature
    pub fn declare_func_sig(
        &self,
        entity: MuEntityHeader,
        ret_tys: Vec<P<MuType>>,
        arg_tys: Vec<P<MuType>>
    ) -> P<MuFuncSig> {
        let ret = P(MuFuncSig {
            hdr: entity,
            ret_tys: ret_tys,
            arg_tys: arg_tys
        });

        let mut func_sigs = self.func_sigs.write().unwrap();
        self.declare_func_sig_internal(&mut func_sigs, ret.id(), ret.clone());

        ret
    }

    /// adds a function signature to the map (already acquired lock)
    fn declare_func_sig_internal(
        &self,
        sigs: &mut RwLockWriteGuard<HashMap<MuID, P<MuFuncSig>>>,
        id: MuID,
        sig: P<MuFuncSig>
    ) {
        debug_assert!(!sigs.contains_key(&id));

        info!("declare func sig #{} = {}", id, sig);
        sigs.insert(id, sig);
    }

    /// gets the function signature for a given ID, panics if there is no func sig with the ID
    pub fn get_func_sig(&self, id: MuID) -> P<MuFuncSig> {
        let func_sig_lock = self.func_sigs.read().unwrap();
        match func_sig_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find func sig #{}", id)
        }
    }

    /// declares a Mu function
    pub fn declare_func(&self, func: MuFunction) {
        let mut funcs = self.funcs.write().unwrap();

        self.declare_func_internal(&mut funcs, func.id(), func);
    }

    /// adds a Mu function to the map (already acquired lock)
    fn declare_func_internal(
        &self,
        funcs: &mut RwLockWriteGuard<HashMap<MuID, RwLock<MuFunction>>>,
        id: MuID,
        func: MuFunction
    ) {
        debug_assert!(!funcs.contains_key(&id));

        info!("declare func #{} = {}", id, func);
        funcs.insert(id, RwLock::new(func));
    }

    /// gets the function name for a function (by ID), panics if there is no function with the ID
    /// Note this name is the internal name, which is different from
    /// the client-supplied name from vm.name_of()
    pub fn get_name_for_func(&self, id: MuID) -> MuName {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().name(),
            None => panic!("cannot find name for Mu function #{}", id)
        }
    }

    /// gets the function signature for a function (by ID),
    /// panics if there is no function with the ID
    pub fn get_sig_for_func(&self, id: MuID) -> P<MuFuncSig> {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().sig.clone(),
            None => panic!("cannot find Mu function #{}", id)
        }
    }

    /// gets the current function version for a Mu function (by ID)
    /// returns None if the function does not exist, or no version is defined for the function
    pub fn get_cur_version_for_func(&self, fid: MuID) -> Option<MuID> {
        let funcs_guard = self.funcs.read().unwrap();
        match funcs_guard.get(&fid) {
            Some(rwlock_func) => {
                let func_guard = rwlock_func.read().unwrap();
                func_guard.cur_ver
            }
            None => None
        }
    }

    /// gets the address as ValueLocation of a Mu function (by ID)
    pub fn get_address_for_func(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
        let func: &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();

        if self.is_doing_jit() {
            unimplemented!()
        } else {
            ValueLocation::Relocatable(backend::RegGroup::GPR, func.name())
        }
    }

    /// defines a function version
    pub fn define_func_version(&self, func_ver: MuFunctionVersion) {
        info!("define function version {}", func_ver);
        // add this funcver to map
        let func_ver_id = func_ver.id();
        {
            let mut func_vers = self.func_vers.write().unwrap();
            func_vers.insert(func_ver_id, RwLock::new(func_ver));
        }

        // acquire a reference to the func_ver
        let func_vers = self.func_vers.read().unwrap();
        let func_ver = func_vers.get(&func_ver_id).unwrap().write().unwrap();

        // change current version of the function to new version (obsolete old versions)
        let funcs = self.funcs.read().unwrap();
        // it should be declared before defining
        debug_assert!(funcs.contains_key(&func_ver.func_id));
        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();

        func.new_version(func_ver.id());

        if self.is_doing_jit() {
            // redefinition may happen, we need to check
            unimplemented!()
        }
    }

    pub fn make_strong_type(&self, ty: P<MuType>) -> P<MuType> {
        match &ty.v {
            &MuType_::WeakRef(ref t) => {
                let res = self.ref_types
                    .read()
                    .unwrap()
                    .get(&t.id())
                    .map(|x| x.clone());
                match res {
                    Some(ty) => ty,
                    None => {
                        let ty = P(MuType::new(self.next_id(), MuType_::muref(t.clone())));
                        self.ref_types.write().unwrap().insert(t.id(), ty.clone());
                        ty
                    }
                }
            }
            _ => ty.clone()
        }
    }
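    // Behaviour sketch for make_strong_type above: given a weakref<T> it returns the matching
    // strong ref<T>, creating and caching it in `ref_types` (keyed by the referent's ID) on
    // first use; any non-weakref type is returned unchanged. Roughly (hypothetical values):
    //
    //     let strong = vm.make_strong_type(weakref_ty.clone()); // ref<T>, cached by referent ID
    //     let same   = vm.make_strong_type(int64_ty.clone());   // returned as-is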

    /// adds a new bundle into VM.
    /// This function will drain the contents of all arguments. Ideally, this function should
    /// happen atomically. e.g. The client should not see a new type added without also seeing
    /// a new function added.
    pub fn declare_many(
        &self,
        new_id_name_map: &mut HashMap<MuID, MuName>,
        new_types: &mut HashMap<MuID, P<MuType>>,
        new_func_sigs: &mut HashMap<MuID, P<MuFuncSig>>,
        new_constants: &mut HashMap<MuID, P<Value>>,
        new_globals: &mut HashMap<MuID, P<Value>>,
        new_funcs: &mut HashMap<MuID, Box<MuFunction>>,
        new_func_vers: &mut HashMap<MuID, Box<MuFunctionVersion>>,
        arc_vm: Arc<VM>
    ) {
        // Make sure other components, if ever acquiring multiple locks at the same time, acquire
        // them in this order, to prevent deadlock.
        {
            let mut id_name_map = self.id_name_map.write().unwrap();
            let mut name_id_map = self.name_id_map.write().unwrap();
            let mut types = self.types.write().unwrap();
            let mut constants = self.constants.write().unwrap();
            let mut globals = self.globals.write().unwrap();
            let mut func_sigs = self.func_sigs.write().unwrap();
            let mut funcs = self.funcs.write().unwrap();
            let mut func_vers = self.func_vers.write().unwrap();

            for (id, name) in new_id_name_map.drain() {
                id_name_map.insert(id, name.clone());
                name_id_map.insert(name, id);
            }

            for (id, obj) in new_types.drain() {
                self.declare_type_internal(&mut types, id, obj);
            }

            for (id, obj) in new_constants.drain() {
                self.declare_const_internal(&mut constants, id, obj);
            }

            for (id, obj) in new_globals.drain() {
                // we bulk allocate later
                // (since we are holding all the locks, we cannot find ty info)
                self.declare_global_internal_no_alloc(&mut globals, id, obj);
            }

            for (id, obj) in new_func_sigs.drain() {
                self.declare_func_sig_internal(&mut func_sigs, id, obj);
            }

            for (id, obj) in new_funcs.drain() {
                self.declare_func_internal(&mut funcs, id, *obj);
            }

            for (id, obj) in new_func_vers.drain() {
                let func_id = obj.func_id;
                func_vers.insert(id, RwLock::new(*obj));

                {
                    trace!("Adding funcver {} as a version of {}...", id, func_id);
                    let func = funcs.get_mut(&func_id).unwrap();
                    func.write().unwrap().new_version(id);
                    trace!(
                        "Added funcver {} as a version of {} {:?}.",
                        id,
                        func_id,
                        func
                    );
                }
            }
        }
        // Locks released here

        // allocate all the globals defined
        {
            let globals = self.globals.read().unwrap();
            let mut global_locs = self.global_locations.write().unwrap();

            // make sure current thread has allocator
            let created =
                unsafe { MuThread::current_thread_as_mu_thread(Address::zero(), arc_vm.clone()) };

            for (id, global) in globals.iter() {
                self.alloc_global(&mut global_locs, *id, global.clone());
            }

            if created {
                unsafe { MuThread::cleanup_current_mu_thread() };
            }
        }
    }
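    // Lock-ordering note for declare_many above: any component that ever needs several of these
    // locks at once should take them in the same order used there, i.e.
    //     id_name_map -> name_id_map -> types -> constants -> globals
    //         -> func_sigs -> funcs -> func_vers
    // otherwise two bulk operations can deadlock against each other.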

    /// informs the VM of a newly compiled function
    /// (the function and funcver should already be declared before this call)
    pub fn add_compiled_func(&self, func: CompiledFunction) {
        debug_assert!(self.funcs.read().unwrap().contains_key(&func.func_id));
        debug_assert!(
            self.func_vers
                .read()
                .unwrap()
                .contains_key(&func.func_ver_id)
        );

        self.compiled_funcs
            .write()
            .unwrap()
            .insert(func.func_ver_id, RwLock::new(func));
    }

    /// gets the backend/storage type for a given Mu type (by ID)
    pub fn get_backend_type_info(&self, tyid: MuID) -> Box<BackendType> {
        // if we already resolved this type, return the BackendType
        {
            let read_lock = self.backend_type_info.read().unwrap();

            match read_lock.get(&tyid) {
                Some(info) => {
                    return info.clone();
                }
                None => {}
            }
        }

        // otherwise, we need to resolve the type now
        let types = self.types.read().unwrap();
        let ty = match types.get(&tyid) {
            Some(ty) => ty,
            None => panic!("invalid type id during get_backend_type_info(): {}", tyid)
        };
        let resolved = Box::new(backend::BackendType::resolve(ty, self));

        // insert the type so later we do not need to resolve it again
        let mut write_lock = self.backend_type_info.write().unwrap();
        write_lock.insert(tyid, resolved.clone());

        resolved
    }

    /// gets the backend/storage type size for a given Mu type (by ID)
    /// This is equivalent to vm.get_backend_type_info(id).size
    pub fn get_backend_type_size(&self, tyid: MuID) -> ByteSize {
        self.get_backend_type_info(tyid).size
    }
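    // Usage sketch: because get_backend_type_info memoizes resolved layouts, repeated queries for
    // the same type are cheap, so callers can ask for layout information on demand, e.g.
    // (hypothetical type ID):
    //
    //     let info = vm.get_backend_type_info(ty_id);  // resolved once, then cached
    //     let size = vm.get_backend_type_size(ty_id);  // equivalent to info.size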

    /// returns the lock for globals
    pub fn globals(&self) -> &RwLock<HashMap<MuID, P<Value>>> {
        &self.globals
    }

    /// returns the lock for functions
    pub fn funcs(&self) -> &RwLock<HashMap<MuID, RwLock<MuFunction>>> {
        &self.funcs
    }

    /// returns the lock for function versions
    pub fn func_vers(&self) -> &RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>> {
        &self.func_vers
    }

    /// returns the lock for compiled functions
    pub fn compiled_funcs(&self) -> &RwLock<HashMap<MuID, RwLock<CompiledFunction>>> {
        &self.compiled_funcs
    }

    /// returns the lock for types
    pub fn types(&self) -> &RwLock<HashMap<MuID, P<MuType>>> {
        &self.types
    }

    /// returns the lock for function signatures
    pub fn func_sigs(&self) -> &RwLock<HashMap<MuID, P<MuFuncSig>>> {
        &self.func_sigs
    }

    /// returns the lock for global locations
    pub fn global_locations(&self) -> &RwLock<HashMap<MuID, ValueLocation>> {
        &self.global_locations
    }

    /// returns the lock for primordial thread info
    pub fn primordial(&self) -> &RwLock<Option<PrimordialThreadInfo>> {
        &self.primordial
    }

    /// returns the lock for compiled callsite table
    pub fn compiled_callsite_table(&self) -> &RwLock<HashMap<Address, CompiledCallsite>> {
        &self.compiled_callsite_table
    }

    /// resolves the address of a Mu function (by ID) as a ValueLocation
    /// (a relocatable symbol for AOT; JIT is not implemented yet)
    pub fn resolve_function_address(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
        let func: &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();

        if self.is_doing_jit() {
            unimplemented!()
        } else {
            ValueLocation::Relocatable(backend::RegGroup::GPR, func.name())
        }
    }

    /// set info (entry function, arguments) for primordial thread for boot image
    pub fn set_primordial_thread(&self, func_id: MuID, has_const_args: bool, args: Vec<Constant>) {
        let mut guard = self.primordial.write().unwrap();
        *guard = Some(PrimordialThreadInfo {
            func_id: func_id,
            has_const_args: has_const_args,
            args: args
        });
    }

    /// makes a boot image
    /// We are basically following the spec for this API call.
    /// However, there are a few differences:
    /// 1. we are not doing 'automagic' relocation for unsafe pointers, relocation of
    ///    unsafe pointers needs to be done via explicit sym_fields/strings, reloc_fields/strings
    /// 2. if the output name for the boot image has extension name for dynamic libraries
    ///    (.so or .dylib), we generate a dynamic library as boot image. Otherwise, we generate
    ///    an executable.
    /// 3. we do not support primordial stack (as Kunshan pointed out, making boot image with a
    ///    primordial stack may get deprecated)
    ///
    /// args:
    /// whitelist               : functions to be put into the boot image
    /// primordial_func         : starting function for the boot image
    /// primordial_stack        : starting stack for the boot image
    ///                           (client should name either primordial_func or stack,
    ///                            currently Zebu only supports func)
    /// primordial_threadlocal  : thread local for the starting thread
    /// sym_fields/strings      : declare an address with symbol
    /// reloc_fields/strings    : declare a field pointing to a symbol
    /// output_file             : path for the boot image
    pub fn make_boot_image(
        &self,
        whitelist: Vec<MuID>,
        primordial_func: Option<&APIHandle>,
        primordial_stack: Option<&APIHandle>,
        primordial_threadlocal: Option<&APIHandle>,
        sym_fields: Vec<&APIHandle>,
        sym_strings: Vec<String>,
        reloc_fields: Vec<&APIHandle>,
        reloc_strings: Vec<String>,
        output_file: String
    ) {
        self.make_boot_image_internal(
            whitelist,
            primordial_func,
            primordial_stack,
            primordial_threadlocal,
            sym_fields,
            sym_strings,
            reloc_fields,
            reloc_strings,
            vec![],
            output_file
        )
    }

    /// the actual function to make a boot image
    /// One difference from the public one is that we allow linking extra source code while
    /// generating the boot image.
    #[allow(unused_variables)]
    pub fn make_boot_image_internal(
        &self,
        whitelist: Vec<MuID>,
        primordial_func: Option<&APIHandle>,
        pr