GitLab will be upgraded on 30 Jan 2023 from 2.00 pm (AEDT) to 3.00 pm (AEDT). During the update, GitLab and Mattermost services will not be available. If you have any concerns with this, please talk to us at N110 (b) CSIT building.

vm.rs 51.9 KB
Newer Older
Isaac Oscar Gariano's avatar
Isaac Oscar Gariano committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
// Copyright 2017 The Australian National University
// 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// 
//     http://www.apache.org/licenses/LICENSE-2.0
// 
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

qinsoon's avatar
qinsoon committed
15
16
use std::collections::HashMap;

17
use rodal;
qinsoon's avatar
qinsoon committed
18
use ast::ptr::*;
qinsoon's avatar
qinsoon committed
19
use ast::ir::*;
20
use ast::inst::*;
qinsoon's avatar
qinsoon committed
21
use ast::types;
qinsoon's avatar
qinsoon committed
22
use ast::types::*;
qinsoon's avatar
qinsoon committed
23
use compiler::{Compiler, CompilerPolicy};
qinsoon's avatar
qinsoon committed
24
25
use compiler::backend;
use compiler::backend::BackendTypeInfo;
26
use compiler::machine_code::{CompiledFunction, CompiledCallsite};
27

28
use runtime::thread::*;
29
use runtime::*;
qinsoon's avatar
qinsoon committed
30
use utils::ByteSize;
31
use utils::BitSize;
32
use utils::Address;
33
use runtime::mm as gc;
34
use vm::handle::*;
qinsoon's avatar
qinsoon committed
35
36
use vm::vm_options::VMOptions;
use vm::vm_options::MuLogLevel;
37

qinsoon's avatar
qinsoon committed
38
use log::LogLevel;
39
use std::sync::Arc;
qinsoon's avatar
qinsoon committed
40
use std::sync::RwLock;
41
use std::sync::RwLockWriteGuard;
42
use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT, Ordering};
43

44
45
46
use std;
use utils::bit_utils::{bits_ones, u64_asr};

qinsoon's avatar
qinsoon committed
47
48
49
50
// FIXME:
// besides fields in VM, there are some 'globals' we need to persist
// such as STRUCT_TAG_MAP
// possibly INTERNAL_ID in ir.rs, internal types, etc

/// The Mu micro-VM instance: registry of types, signatures, constants,
/// globals, functions and their compiled versions.
///
/// NOTE: the field order and the offset comments below are relied upon by the
/// rodal::Dump impl (the boot-image serialiser) — do not reorder fields
/// without updating that impl.
pub struct VM { // The comments are the offset into the struct
    // ---serialize---
    next_id: AtomicUsize, // +0
    id_name_map: RwLock<HashMap<MuID, MuName>>, // +8
    name_id_map: RwLock<HashMap<MuName, MuID>>, //+64
    types: RwLock<HashMap<MuID, P<MuType>>>, //+120
    backend_type_info: RwLock<HashMap<MuID, Box<BackendTypeInfo>>>, // +176
    constants: RwLock<HashMap<MuID, P<Value>>>, // +232
    globals: RwLock<HashMap<MuID, P<Value>>>, //+288
    func_sigs: RwLock<HashMap<MuID, P<MuFuncSig>>>, // +400
    funcs: RwLock<HashMap<MuID, RwLock<MuFunction>>>, // +456
    pub primordial: RwLock<Option<MuPrimordialThread>>, // +568
    pub vm_options: VMOptions, // +624

    // ---partially serialize---
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728

    // Maps each function version to a map from each of its callsites
    // to the name of the catch block
    callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784
    is_running: AtomicBool, // +952

    // ---do not serialize---
    pub global_locations: RwLock<HashMap<MuID, ValueLocation>>,
    func_vers: RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>>,

    // client may try to store funcref to the heap, so that they can load it later, and call it
    // however the store may happen before we have an actual address to the func (in AOT scenario)
    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,

    pub compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>, // 896
    pub callsite_count: AtomicUsize, // Number of callsites in the callsite tables
}
// Boot-image serialisation of the VM.
// The dump order below mirrors the declared field order of the struct and must
// stay in sync with it; the "do not serialize" fields are dumped as padding
// followed by empty maps so the loaded image can safely read/modify them.
unsafe impl rodal::Dump for VM {
    fn dump<D: ? Sized + rodal::Dumper>(&self, dumper: &mut D) {
        dumper.debug_record("VM", "dump");

        dumper.dump_object(&self.next_id);
        dumper.dump_object(&self.id_name_map);
        dumper.dump_object(&self.name_id_map);
        dumper.dump_object(&self.types);
        dumper.dump_object(&self.backend_type_info);
        dumper.dump_object(&self.constants);
        dumper.dump_object(&self.globals);
        dumper.dump_object(&self.func_sigs);
        dumper.dump_object(&self.funcs);
        dumper.dump_object(&self.primordial);
        dumper.dump_object(&self.vm_options);
        dumper.dump_object(&self.compiled_funcs);
        dumper.dump_object(&self.callsite_table);

        // Dump empty maps so that we can safely read and modify them once loaded
        dumper.dump_padding(&self.global_locations);
        dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<MuID, ValueLocation>::new()));

        dumper.dump_padding(&self.func_vers);
        dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<MuID, RwLock<MuFunctionVersion>>::new()));

        dumper.dump_padding(&self.aot_pending_funcref_store);
        dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<Address, ValueLocation>::new()));

        // Dump an empty hashmap for the other hashmaps
        dumper.dump_padding(&self.compiled_callsite_table);
        dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<Address, CompiledCallsite>::new()));
        dumper.dump_object(&self.callsite_count);

        // This field is actually stored at the end of the struct; the others all
        // have the same alignment so are not reordered
        dumper.dump_object(&self.is_running);
    }
}
use std::u64;
// Sentinel value written in place of a funcref whose final address is not yet
// known — presumably paired with aot_pending_funcref_store (TODO confirm).
const PENDING_FUNCREF : u64 = u64::MAX;

125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
// Generates a pair of methods converting between a native integer type and an
// API integer handle:
//   $fn_from(num, len) — wraps `num` (widened to u64) in a fresh handle of bit
//                        width `len`;
//   $fn_to(handle)     — truncates the handle's u64 payload back to $int_ty.
macro_rules! gen_handle_int {
    ($fn_from: ident, $fn_to: ident, $int_ty: ty) => {
        pub fn $fn_from (&self, num: $int_ty, len: BitSize) -> APIHandleResult {
            let handle_id = self.next_id();
            self.new_handle (APIHandle {
                id: handle_id,
                v: APIHandleValue::Int(num as u64, len)
            })
        }

        pub fn $fn_to (&self, handle: APIHandleArg) -> $int_ty {
            handle.v.as_int() as $int_ty
        }
    }
}

impl <'a> VM {
    /// Creates a VM with the default options.
    pub fn new() -> VM {
        VM::new_internal(VMOptions::default())
    }

    /// Creates a VM, parsing the options from the given string
    /// (see `VMOptions::init` for the accepted format).
    // renamed the parameter: `str` shadowed the primitive type name
    pub fn new_with_opts(opts_str: &str) -> VM {
        VM::new_internal(VMOptions::init(opts_str))
    }

    /// Builds a VM from the given options: starts logging, constructs all
    /// fields, registers the internal types, seeds the ID counter and
    /// initialises the runtime (GC etc.).
    fn new_internal(options: VMOptions) -> VM {
        VM::start_logging(options.flag_log_level);

        let ret = VM {
            next_id: ATOMIC_USIZE_INIT,
            is_running: ATOMIC_BOOL_INIT,
            vm_options: options,
            id_name_map: RwLock::new(HashMap::new()),
            name_id_map: RwLock::new(HashMap::new()),
            constants: RwLock::new(HashMap::new()),
            types: RwLock::new(HashMap::new()),
            backend_type_info: RwLock::new(HashMap::new()),
            globals: RwLock::new(HashMap::new()),
            global_locations: RwLock::new(hashmap!{}),
            func_sigs: RwLock::new(HashMap::new()),
            func_vers: RwLock::new(HashMap::new()),
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
            callsite_table: RwLock::new(HashMap::new()),
            primordial: RwLock::new(None),
            aot_pending_funcref_store: RwLock::new(HashMap::new()),
            compiled_callsite_table: RwLock::new(HashMap::new()),
            callsite_count: ATOMIC_USIZE_INIT,
        };

        // insert all internal types
        {
            let mut types = ret.types.write().unwrap();
            for ty in INTERNAL_TYPES.iter() {
                types.insert(ty.id(), ty.clone());
            }
        }

        ret.is_running.store(false, Ordering::SeqCst);

        // Does not need SeqCst.
        //
        // If VM creates Mu threads and Mu threads calls traps, the trap handler still "happens
        // after" the creation of the VM itself. Rust does not have a proper memory model, but this
        // is how C++ works.
        //
        // If the client needs to create client-level threads, however, the client should properly
        // synchronise at the time of inter-thread communication, rather than creation of the VM.
        ret.next_id.store(USER_ID_START, Ordering::Relaxed);

        // init types
        types::init_types();

        ret.init_runtime();

        ret
    }
qinsoon's avatar
qinsoon committed
208

209
    fn init_runtime(&self) {
qinsoon's avatar
qinsoon committed
210
211
212
213
214
215
        // init log
        VM::start_logging(self.vm_options.flag_log_level);

        // init gc
        {
            let ref options = self.vm_options;
216
            gc::gc_init(options.flag_gc_immixspace_size, options.flag_gc_lospace_size, options.flag_gc_nthreads, !options.flag_gc_disable_collection);
qinsoon's avatar
qinsoon committed
217
218
219
220
        }
    }

    fn start_logging(level: MuLogLevel) {
221
        use std::env;
qinsoon's avatar
qinsoon committed
222
223
224
225
226
227
228
        match level {
            MuLogLevel::None  => {},
            MuLogLevel::Error => VM::start_logging_internal(LogLevel::Error),
            MuLogLevel::Warn  => VM::start_logging_internal(LogLevel::Warn),
            MuLogLevel::Info  => VM::start_logging_internal(LogLevel::Info),
            MuLogLevel::Debug => VM::start_logging_internal(LogLevel::Debug),
            MuLogLevel::Trace => VM::start_logging_internal(LogLevel::Trace),
229
230
231
232
233
234
            MuLogLevel::Env => {
                match env::var("MU_LOG_LEVEL") {
                    Ok(s) => VM::start_logging(MuLogLevel::from_string(s)),
                    _ => {} // Don't log
                }
            },
qinsoon's avatar
qinsoon committed
235
        }
qinsoon's avatar
qinsoon committed
236
237
238
    }

    /// Convenience wrapper: enable the most verbose (trace) logging.
    pub fn start_logging_trace() {
        VM::start_logging_internal(LogLevel::Trace)
    }
    /// Convenience wrapper: take the log level from the MU_LOG_LEVEL
    /// environment variable.
    pub fn start_logging_env() {
        VM::start_logging(MuLogLevel::Env)
    }
qinsoon's avatar
qinsoon committed
244
245

    fn start_logging_internal(level: LogLevel) {
246
247
248
249
250
251
252
253
254
        use stderrlog;

        let verbose = match level {
            LogLevel::Error => 0,
            LogLevel::Warn  => 1,
            LogLevel::Info  => 2,
            LogLevel::Debug => 3,
            LogLevel::Trace => 4,
        };
qinsoon's avatar
qinsoon committed
255

256
257
258
        match stderrlog::new().verbosity(verbose).init() {
            Ok(()) => info!("logger initialized"),
            Err(e) => error!("failed to init logger, probably already initialized: {:?}", e)
qinsoon's avatar
qinsoon committed
259
260
        }
    }
261

262
263
    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();
264
265

        if table.contains_key(&fv) {
266
            table.get_mut(&fv).unwrap().push(callsite);
267
        } else {
268
            table.insert(fv, vec![callsite]);
269
        };
270
271
        // TODO: do wee need a stronger ordering??
        self.callsite_count.fetch_add(1, Ordering::Relaxed);
272
273
    }

    /// Loads a VM from a boot-image pointer and re-establishes the state that
    /// is not serialised: logging/GC, the GC type table, and the compiled
    /// callsite table.
    pub fn resume_vm(dumped_vm: *mut Arc<VM>) -> Arc<VM> {
        let vm = unsafe{rodal::load_asm_pointer_move(dumped_vm)};
        vm.init_runtime();

        // restore gc types
        {
            let type_info_guard = vm.backend_type_info.read().unwrap();
            let mut type_info_vec: Vec<Box<BackendTypeInfo>> = type_info_guard.values().map(|x| x.clone()).collect();
            type_info_vec.sort_by(|a, b| a.gc_type.id.cmp(&b.gc_type.id));

            // GC types are re-registered in ascending id order; gaps in the id
            // sequence are filled with placeholder types so that each type
            // lands back at its original id.
            let mut expect_id = 0;
            for ty_info in type_info_vec.iter() {
                use runtime::mm;

                let ref gc_type = ty_info.gc_type;

                if gc_type.id != expect_id {
                    debug_assert!(expect_id < gc_type.id);

                    while expect_id < gc_type.id {
                        use runtime::mm::common::gctype::GCType;

                        // placeholder: 8-byte type with no reference fields
                        mm::add_gc_type(GCType::new_noreftype(0, 8));
                        expect_id += 1;
                    }
                }

                // now expect_id == gc_type.id
                debug_assert!(expect_id == gc_type.id);

                mm::add_gc_type(gc_type.as_ref().clone());
                expect_id += 1;
            }
        }
        // construct exception table
        vm.build_callsite_table();
        vm
    }

313
314
    pub fn build_callsite_table(&self) {
        let callsite_table = self.callsite_table.read().unwrap();
315
        let compiled_funcs = self.compiled_funcs.read().unwrap();
316
        let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
317
318
        // TODO: Use a different ordering?
        compiled_callsite_table.reserve(self.callsite_count.load(Ordering::Relaxed));
319
320
321
322
323
        for (fv, callsite_list) in callsite_table.iter() {
            let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
            let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
            for callsite in callsite_list.iter() {
                compiled_callsite_table.insert(resolve_symbol(callsite.name.clone()), CompiledCallsite::new(&callsite, compiled_func.func_ver_id, callee_saved_table.clone()));
324
325
            }
        }
326
327
    }
    
328
    pub fn next_id(&self) -> MuID {
Kunshan Wang's avatar
Kunshan Wang committed
329
330
331
        // This only needs to be atomic, and does not need to be a synchronisation operation. The
        // only requirement for IDs is that all IDs obtained from `next_id()` are different. So
        // `Ordering::Relaxed` is sufficient.
332
        self.next_id.fetch_add(1, Ordering::Relaxed)
333
334
    }
    
    /// Marks the VM as running (stored with SeqCst).
    pub fn run_vm(&self) {
        self.is_running.store(true, Ordering::SeqCst);
    }
    
    /// Whether `run_vm()` has been called.
    /// NOTE(review): this load is Relaxed while the store in `run_vm()` is
    /// SeqCst — presumably intentional (flag-only, no dependent data), but
    /// worth confirming.
    pub fn is_running(&self) -> bool {
        self.is_running.load(Ordering::Relaxed)
    }
    
343
    pub fn set_name(&self, entity: &MuEntity) {
qinsoon's avatar
qinsoon committed
344
        let id = entity.id();
345
        let name = entity.name();
qinsoon's avatar
qinsoon committed
346
347
        
        let mut map = self.id_name_map.write().unwrap();
348
        map.insert(id, name.clone());
qinsoon's avatar
qinsoon committed
349
350
351
352
353
        
        let mut map2 = self.name_id_map.write().unwrap();
        map2.insert(name, id);
    }
    
Kunshan Wang's avatar
Kunshan Wang committed
354
    pub fn id_of_by_refstring(&self, name: &String) -> MuID {
qinsoon's avatar
qinsoon committed
355
        let map = self.name_id_map.read().unwrap();
356
        match map.get(&name.clone()) {
357
358
359
            Some(id) => *id,
            None => panic!("cannot find id for name: {}", name)
        }
Kunshan Wang's avatar
Kunshan Wang committed
360
    }
361
362
363

    /// should only used by client
    /// 'name' used internally may be slightly different to remove some special symbols
Kunshan Wang's avatar
Kunshan Wang committed
364
365
    pub fn id_of(&self, name: &str) -> MuID {
        self.id_of_by_refstring(&name.to_string())
qinsoon's avatar
qinsoon committed
366
    }
367
368
369

    /// should only used by client
    /// 'name' used internally may be slightly different to remove some special symbols
qinsoon's avatar
qinsoon committed
370
371
    pub fn name_of(&self, id: MuID) -> MuName {
        let map = self.id_name_map.read().unwrap();
372
        map.get(&id).unwrap().clone()
qinsoon's avatar
qinsoon committed
373
374
    }
    
375
    pub fn declare_const(&self, entity: MuEntityHeader, ty: P<MuType>, val: Constant) -> P<Value> {
qinsoon's avatar
qinsoon committed
376
        let mut constants = self.constants.write().unwrap();
377
        let ret = P(Value{hdr: entity, ty: ty, v: Value_::Constant(val)});
378

379
        self.declare_const_internal(&mut constants, ret.id(), ret.clone());
qinsoon's avatar
qinsoon committed
380
381
382
        
        ret
    }
    /// Inserts a constant into the (already write-locked) constant map.
    /// Declaring the same id twice is a bug (debug assertion).
    fn declare_const_internal(&self, map: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>, id: MuID, val: P<Value>) {
        debug_assert!(!map.contains_key(&id));

        trace!("declare const #{} = {}", id, val);
        map.insert(id, val);
    }
qinsoon's avatar
qinsoon committed
390
    
391
392
393
394
395
396
397
    pub fn get_const(&self, id: MuID) -> P<Value> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find const #{}", id)
        }
    }
398
399
400
401
402
403
404
405
406
407
408
409

    pub fn get_const_nocheck(&self, id: MuID) -> Option<P<Value>> {
        let const_lock = self.constants.read().unwrap();
        match const_lock.get(&id) {
            Some(ret) => Some(ret.clone()),
            None => None
        }
    }

    /// Creates the relocatable symbol location for a constant in the AOT boot
    /// image, named `CONST_<id>_<name>`.
    #[cfg(feature = "aot")]
    pub fn allocate_const(&self, val: P<Value>) -> ValueLocation {
        let symbol = format!("CONST_{}_{}", val.id(), val.name());
        ValueLocation::Relocatable(backend::RegGroup::GPR, symbol)
    }
414
    
415
    pub fn declare_global(&self, entity: MuEntityHeader, ty: P<MuType>) -> P<Value> {
qinsoon's avatar
qinsoon committed
416
        let global = P(Value{
417
418
            hdr: entity,
            ty: self.declare_type(MuEntityHeader::unnamed(self.next_id()), MuType_::iref(ty.clone())),
419
            v: Value_::Global(ty)
qinsoon's avatar
qinsoon committed
420
        });
qinsoon's avatar
qinsoon committed
421
422
        
        let mut globals = self.globals.write().unwrap();
423
        let mut global_locs = self.global_locations.write().unwrap();
424

425
        self.declare_global_internal(&mut globals, &mut global_locs, global.id(), global.clone());
qinsoon's avatar
qinsoon committed
426
        
qinsoon's avatar
qinsoon committed
427
        global
qinsoon's avatar
qinsoon committed
428
    }
    /// Registers a global in the (write-locked) map, then immediately
    /// allocates its backing storage.
    fn declare_global_internal(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID, val: P<Value>
    ) {
        self.declare_global_internal_no_alloc(globals, id, val.clone());
        self.alloc_global(global_locs, id, val);
    }

    // when bulk declaring, we hold locks for everything, we cannot resolve backend type, and do alloc
    /// Registers a global without allocating storage; `alloc_global` must be
    /// called for it later (see `declare_many`).
    fn declare_global_internal_no_alloc(
        &self,
        globals: &mut RwLockWriteGuard<HashMap<MuID, P<Value>>>,
        id: MuID, val: P<Value>
    ) {
        debug_assert!(!globals.contains_key(&id));

        info!("declare global #{} = {}", id, val);
        globals.insert(id, val.clone());
    }

    /// Allocates GC storage for a declared global and records its location.
    /// The global's type is an iref, so the referenced type's backend layout
    /// determines the allocation.
    fn alloc_global(
        &self,
        global_locs: &mut RwLockWriteGuard<HashMap<MuID, ValueLocation>>,
        id: MuID, val: P<Value>
    ) {
        let backend_ty = self.get_backend_type_info(val.ty.get_referenced_ty().unwrap().id());
        let loc = gc::allocate_global(val, backend_ty);
        info!("allocate global #{} as {}", id, loc);
        global_locs.insert(id, loc);
    }
qinsoon's avatar
qinsoon committed
462
    
463
464
    pub fn declare_type(&self, entity: MuEntityHeader, ty: MuType_) -> P<MuType> {
        let ty = P(MuType{hdr: entity, v: ty});
qinsoon's avatar
qinsoon committed
465
        
qinsoon's avatar
qinsoon committed
466
        let mut types = self.types.write().unwrap();
467

468
        self.declare_type_internal(&mut types, ty.id(), ty.clone());
qinsoon's avatar
qinsoon committed
469
470
471
        
        ty
    }
472
473
474
475
476

    fn declare_type_internal(&self, types: &mut RwLockWriteGuard<HashMap<MuID, P<MuType>>>, id: MuID, ty: P<MuType>) {
        debug_assert!(!types.contains_key(&id));

        types.insert(id, ty.clone());
qinsoon's avatar
qinsoon committed
477

qinsoon's avatar
qinsoon committed
478
        trace!("declare type #{} = {}", id, ty);
qinsoon's avatar
qinsoon committed
479
480
481
482
        if ty.is_struct() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let struct_map_guard = STRUCT_TAG_MAP.read().unwrap();
            let struct_inner = struct_map_guard.get(&tag).unwrap();
qinsoon's avatar
qinsoon committed
483
            trace!("  {}", struct_inner);
qinsoon's avatar
qinsoon committed
484
485
486
487
        } else if ty.is_hybrid() {
            let tag = ty.get_struct_hybrid_tag().unwrap();
            let hybrid_map_guard = HYBRID_TAG_MAP.read().unwrap();
            let hybrid_inner = hybrid_map_guard.get(&tag).unwrap();
qinsoon's avatar
qinsoon committed
488
            trace!("  {}", hybrid_inner);
qinsoon's avatar
qinsoon committed
489
        }
490
    }
qinsoon's avatar
qinsoon committed
491
    
492
493
494
495
496
497
498
499
    pub fn get_type(&self, id: MuID) -> P<MuType> {
        let type_lock = self.types.read().unwrap();
        match type_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find type #{}", id)
        }
    }    
    
500
501
    pub fn declare_func_sig(&self, entity: MuEntityHeader, ret_tys: Vec<P<MuType>>, arg_tys: Vec<P<MuType>>) -> P<MuFuncSig> {
        let ret = P(MuFuncSig{hdr: entity, ret_tys: ret_tys, arg_tys: arg_tys});
502
503

        let mut func_sigs = self.func_sigs.write().unwrap();
504
        self.declare_func_sig_internal(&mut func_sigs, ret.id(), ret.clone());
qinsoon's avatar
qinsoon committed
505
506
507
        
        ret
    }
    /// Inserts a signature into the (write-locked) signature map; duplicate
    /// ids are a bug.
    fn declare_func_sig_internal(&self, sigs: &mut RwLockWriteGuard<HashMap<MuID, P<MuFuncSig>>>, id: MuID, sig: P<MuFuncSig>) {
        debug_assert!(!sigs.contains_key(&id));

        info!("declare func sig #{} = {}", id, sig);
        sigs.insert(id, sig);
    }
qinsoon's avatar
qinsoon committed
515
    
516
517
518
519
520
521
522
523
    pub fn get_func_sig(&self, id: MuID) -> P<MuFuncSig> {
        let func_sig_lock = self.func_sigs.read().unwrap();
        match func_sig_lock.get(&id) {
            Some(ret) => ret.clone(),
            None => panic!("cannot find func sig #{}", id)
        }
    }
    
    /// Registers a declared (not yet defined) function.
    pub fn declare_func (&self, func: MuFunction) {
        let mut funcs = self.funcs.write().unwrap();
        self.declare_func_internal(&mut funcs, func.id(), func);
    }

    /// Inserts a function into the (write-locked) function map; duplicate ids
    /// are a bug.
    fn declare_func_internal(&self, funcs: &mut RwLockWriteGuard<HashMap<MuID, RwLock<MuFunction>>>, id: MuID, func: MuFunction) {
        debug_assert!(!funcs.contains_key(&id));

        info!("declare func #{} = {}", id, func);
        funcs.insert(id, RwLock::new(func));
    }
536
537
538
539
540

    /// this is different than vm.name_of()
    pub fn get_func_name(&self, id: MuID) -> MuName {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
541
            Some(func) => func.read().unwrap().name(),
542
543
544
            None => panic!("cannot find name for Mu function #{}")
        }
    }
545
    
546
547
548
549
550
551
552
553
554
    /// The IR builder needs to look-up the function signature from the existing function ID.
    pub fn get_func_sig_for_func(&self, id: MuID) -> P<MuFuncSig> {
        let funcs_lock = self.funcs.read().unwrap();
        match funcs_lock.get(&id) {
            Some(func) => func.read().unwrap().sig.clone(),
            None => panic!("cannot find Mu function #{}", id)
        }
    }    
    
    /// Defines a version for a previously declared function and makes it the
    /// function's current version (obsoleting any older versions).
    pub fn define_func_version (&self, func_ver: MuFunctionVersion) {
        info!("define function version {}", func_ver);
        // record this version
        let func_ver_id = func_ver.id();
        {
            let mut func_vers = self.func_vers.write().unwrap();
            func_vers.insert(func_ver_id, RwLock::new(func_ver));
        }

        // acquire a reference to the func_ver
        let func_vers = self.func_vers.read().unwrap();
        let func_ver = func_vers.get(&func_ver_id).unwrap().write().unwrap();

        // change current version to this (obsolete old versions)
        let funcs = self.funcs.read().unwrap();
        debug_assert!(funcs.contains_key(&func_ver.func_id)); // it should be declared before defining
        let mut func = funcs.get(&func_ver.func_id).unwrap().write().unwrap();

        func.new_version(func_ver.id());

        // redefinition happens here
        // do stuff
    }
    /// Add a new bundle into VM.
    ///
    /// This function will drain the contents of all arguments.
    ///
    /// Ideally, this function should happen atomically. e.g. The client should not see a new type
    /// added without also seeing a new function added.
    pub fn declare_many(&self,
                        new_id_name_map: &mut HashMap<MuID, MuName>,
                        new_types: &mut HashMap<MuID, P<MuType>>,
                        new_func_sigs: &mut HashMap<MuID, P<MuFuncSig>>,
                        new_constants: &mut HashMap<MuID, P<Value>>,
                        new_globals: &mut HashMap<MuID, P<Value>>,
                        new_funcs: &mut HashMap<MuID, Box<MuFunction>>,
                        new_func_vers: &mut HashMap<MuID, Box<MuFunctionVersion>>,
                        arc_vm: Arc<VM>
                        ) {
        // Make sure other components, if ever acquiring multiple locks at the same time, acquire
        // them in this order, to prevent deadlock.
        {
            let mut id_name_map = self.id_name_map.write().unwrap();
            let mut name_id_map = self.name_id_map.write().unwrap();
            let mut types = self.types.write().unwrap();
            let mut constants = self.constants.write().unwrap();
            let mut globals = self.globals.write().unwrap();
            let mut func_sigs = self.func_sigs.write().unwrap();
            let mut funcs = self.funcs.write().unwrap();
            let mut func_vers = self.func_vers.write().unwrap();

            for (id, name) in new_id_name_map.drain() {
                id_name_map.insert(id, name.clone());
                name_id_map.insert(name, id);
            }

            for (id, obj) in new_types.drain() {
                self.declare_type_internal(&mut types, id, obj);
            }

            for (id, obj) in new_constants.drain() {
                self.declare_const_internal(&mut constants, id, obj);
            }

            for (id, obj) in new_globals.drain() {
                // we bulk allocate later (since we are holding all the locks, we cannot find ty info)
                self.declare_global_internal_no_alloc(&mut globals, id, obj);
            }

            for (id, obj) in new_func_sigs.drain() {
                self.declare_func_sig_internal(&mut func_sigs, id, obj);
            }

            for (id, obj) in new_funcs.drain() {
                self.declare_func_internal(&mut funcs, id, *obj);
            }

            for (id, obj) in new_func_vers.drain() {
                let func_id = obj.func_id;
                func_vers.insert(id, RwLock::new(*obj));

                {
                    trace!("Adding funcver {} as a version of {}...", id, func_id);
                    let func = funcs.get_mut(&func_id).unwrap();
                    func.write().unwrap().new_version(id);
                    trace!("Added funcver {} as a version of {} {:?}.", id, func_id, func);
                }
            }
        }
        // Locks released here

        // allocate all the globals defined
        {
            let globals = self.globals.read().unwrap();
            let mut global_locs = self.global_locations.write().unwrap();

            // make sure current thread has allocator
            let created = unsafe {MuThread::current_thread_as_mu_thread(Address::zero(), arc_vm.clone())};

            for (id, global) in globals.iter() {
                self.alloc_global(&mut global_locs, *id, global.clone());
            }

            // tear the temporary Mu thread down again if we created it above
            if created {
                unsafe {MuThread::cleanup_current_mu_thread()};
            }
        }
    }
    /// Stores the compiled machine code for a function version. Both the
    /// function and the version must already be registered.
    pub fn add_compiled_func (&self, func: CompiledFunction) {
        debug_assert!(self.funcs.read().unwrap().contains_key(&func.func_id));
        debug_assert!(self.func_vers.read().unwrap().contains_key(&func.func_ver_id));

        self.compiled_funcs.write().unwrap().insert(func.func_ver_id, RwLock::new(func));
    }
    
qinsoon's avatar
qinsoon committed
672
    pub fn get_backend_type_info(&self, tyid: MuID) -> Box<BackendTypeInfo> {        
qinsoon's avatar
qinsoon committed
673
674
675
        {
            let read_lock = self.backend_type_info.read().unwrap();
        
676
            match read_lock.get(&tyid) {
qinsoon's avatar
qinsoon committed
677
678
679
680
                Some(info) => {return info.clone();},
                None => {}
            }
        }
681
682

        let types = self.types.read().unwrap();
qinsoon's avatar
qinsoon committed
683
684
685
686
        let ty = match types.get(&tyid) {
            Some(ty) => ty,
            None => panic!("invalid type id during get_backend_type_info(): {}", tyid)
        };
qinsoon's avatar
qinsoon committed
687
        let resolved = Box::new(backend::resolve_backend_type_info(ty, self));
qinsoon's avatar
qinsoon committed
688
689
        
        let mut write_lock = self.backend_type_info.write().unwrap();
690
        write_lock.insert(tyid, resolved.clone());
qinsoon's avatar
qinsoon committed
691
692
693
694
        
        resolved        
    }
    
qinsoon's avatar
qinsoon committed
695
696
697
698
    /// Convenience wrapper: the backend-computed size in bytes of the type
    /// with the given ID. Resolves (and caches) the layout if necessary.
    pub fn get_type_size(&self, tyid: MuID) -> ByteSize {
        self.get_backend_type_info(tyid).size
    }
    
qinsoon's avatar
qinsoon committed
699
    /// Accessor for the VM's global cell map (global ID -> global value).
    pub fn globals(&self) -> &RwLock<HashMap<MuID, P<Value>>> {
        &self.globals
    }
    
qinsoon's avatar
qinsoon committed
703
    /// Accessor for the VM's declared functions (function ID -> function).
    pub fn funcs(&self) -> &RwLock<HashMap<MuID, RwLock<MuFunction>>> {
        &self.funcs
    }
706
    
qinsoon's avatar
qinsoon committed
707
    /// Accessor for the VM's function versions (version ID -> version).
    pub fn func_vers(&self) -> &RwLock<HashMap<MuID, RwLock<MuFunctionVersion>>> {
        &self.func_vers
    }
qinsoon's avatar
qinsoon committed
710
711
712
713
714
715
716
717
718
719
720

    /// Looks up the ID of the current (active) version of function `fid`.
    /// Returns `None` when the function is unknown or has no version defined.
    pub fn get_cur_version_of(&self, fid: MuID) -> Option<MuID> {
        let all_funcs = self.funcs.read().unwrap();
        all_funcs
            .get(&fid)
            .and_then(|func_lock| func_lock.read().unwrap().cur_ver)
    }
721
    
qinsoon's avatar
qinsoon committed
722
    /// Accessor for compiled machine code (function-version ID -> compiled function).
    pub fn compiled_funcs(&self) -> &RwLock<HashMap<MuID, RwLock<CompiledFunction>>> {
        &self.compiled_funcs
    }
725
726
727
728
729
730
731
732
    
    /// Accessor for the VM's declared types (type ID -> type).
    pub fn types(&self) -> &RwLock<HashMap<MuID, P<MuType>>> {
        &self.types
    }
    
    /// Accessor for the VM's declared function signatures (signature ID -> signature).
    pub fn func_sigs(&self) -> &RwLock<HashMap<MuID, P<MuFuncSig>>> {
        &self.func_sigs
    }
qinsoon's avatar
qinsoon committed
733
    
734
735
736
737
738
739
740
    pub fn resolve_function_address(&self, func_id: MuID) -> ValueLocation {
        let funcs = self.funcs.read().unwrap();
        let func : &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();
                
        if self.is_running() {
            unimplemented!()
        } else {
741
            ValueLocation::Relocatable(backend::RegGroup::GPR, mangle_name(func.name()))
742
743
744
        }
    }
    
qinsoon's avatar
qinsoon committed
745
    pub fn new_stack(&self, func_id: MuID) -> Box<MuStack> {
qinsoon's avatar
qinsoon committed
746
747
748
        let funcs = self.funcs.read().unwrap();
        let func : &MuFunction = &funcs.get(&func_id).unwrap().read().unwrap();
        
749
750
751
        Box::new(MuStack::new(self.next_id(), self.resolve_function_address(func_id), func))
    }
    
qinsoon's avatar
qinsoon committed
752
    pub fn make_primordial_thread(&self, func_id: MuID, has_const_args: bool, args: Vec<Constant>) {
753
        let mut guard = self.primordial.write().unwrap();
qinsoon's avatar
qinsoon committed
754
        *guard = Some(MuPrimordialThread{func_id: func_id, has_const_args: has_const_args, args: args});
qinsoon's avatar
qinsoon committed
755
    }
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773

    /// Builds a boot image with no extra sources to link.
    ///
    /// Thin convenience wrapper over `make_boot_image_internal`, passing an
    /// empty `extra_sources_to_link` list.
    pub fn make_boot_image(&self,
                            whitelist: Vec<MuID>,
                            primordial_func: Option<&APIHandle>, primordial_stack: Option<&APIHandle>,
                            primordial_threadlocal: Option<&APIHandle>,
                            sym_fields: Vec<&APIHandle>, sym_strings: Vec<String>,
                            reloc_fields: Vec<&APIHandle>, reloc_strings: Vec<String>,
                            output_file: String) {
        let no_extra_sources = vec![];
        self.make_boot_image_internal(whitelist,
                                      primordial_func,
                                      primordial_stack,
                                      primordial_threadlocal,
                                      sym_fields,
                                      sym_strings,
                                      reloc_fields,
                                      reloc_strings,
                                      no_extra_sources,
                                      output_file)
    }
qinsoon's avatar
qinsoon committed
774
    
qinsoon's avatar
qinsoon committed
775
    #[allow(unused_variables)]
776
777
778
779
780
781
782
783
    pub fn make_boot_image_internal(&self,
                                   whitelist: Vec<MuID>,
                                   primordial_func: Option<&APIHandle>, primordial_stack: Option<&APIHandle>,
                                   primordial_threadlocal: Option<&APIHandle>,
                                   sym_fields: Vec<&APIHandle>, sym_strings: Vec<String>,
                                   reloc_fields: Vec<&APIHandle>, reloc_strings: Vec<String>,
                                   extra_sources_to_link: Vec<String>,
                                   output_file: String) {
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
        trace!("Making boot image...");

        let whitelist_funcs = {
            let compiler = Compiler::new(CompilerPolicy::default(), self);
            let funcs = self.funcs().read().unwrap();
            let func_vers = self.func_vers().read().unwrap();

            // make sure all functions in whitelist are compiled
            let mut whitelist_funcs: Vec<MuID> = vec![];
            for &id in whitelist.iter() {
                if let Some(f) = funcs.get(&id) {
                    whitelist_funcs.push(id);

                    let f: &MuFunction = &f.read().unwrap();
                    match f.cur_ver {
                        Some(fv_id) => {
                            let mut func_ver = func_vers.get(&fv_id).unwrap().write().unwrap();

                            if !func_ver.is_compiled() {
                                compiler.compile(&mut func_ver);
                            }
qinsoon's avatar
qinsoon committed
805
                        }
806
                        None => panic!("whitelist function {} has no version defined", f)
qinsoon's avatar
qinsoon committed
807
808
809
                    }
                }
            }
810
811
812
813
814
815
816

            whitelist_funcs
        };

        if primordial_threadlocal.is_some() {
            // we are going to need to persist this threadlocal
            unimplemented!()
qinsoon's avatar
qinsoon committed
817
818
819
        }

        // make sure only one of primordial_func or primoridial_stack is set
820
821
822
823
824
        let has_primordial_func  = primordial_func.is_some();
        let has_primordial_stack = primordial_stack.is_some();

        // we assume client will start with a function (instead of a stack)
        if has_primordial_stack {
qinsoon's avatar
qinsoon committed
825
            panic!("Zebu doesnt support creating primordial thread through a stack, name a entry function instead")
826
        } else {
827
828
829
830
831
832
833
834
835
            if has_primordial_func {
                // extract func id
                let func_id = primordial_func.unwrap().v.as_funcref();

                // make primordial thread in vm
                self.make_primordial_thread(func_id, false, vec![]);    // do not pass const args, use argc/argv
            } else {
                warn!("no entry function is passed");
            }
836

qinsoon's avatar
qinsoon committed
837
838
839
840
841
842
            // deal with relocation symbols
            assert_eq!(sym_fields.len(), sym_strings.len());
            let symbols = {
                let mut ret = hashmap!{};
                for i in 0..sym_fields.len() {
                    let addr = sym_fields[i].v.as_address();
843
                    ret.insert(addr, sym_strings[i].clone());
qinsoon's avatar
qinsoon committed
844
845
846
847
848
849
850
                }
                ret
            };

            assert_eq!(reloc_fields.len(), reloc_strings.len());
            let fields = {
                let mut ret = hashmap!{};
851
852

                // client supplied relocation fields
qinsoon's avatar
qinsoon committed
853
854
                for i in 0..reloc_fields.len() {
                    let addr = reloc_fields[i].v.as_address();
855
                    ret.insert(addr, reloc_strings[i].clone());
856
857
858
859
860
861
                }

                // pending funcrefs - we want to replace them as symbol
                {
                    let mut pending_funcref = self.aot_pending_funcref_store.write().unwrap();
                    for (addr, vl) in pending_funcref.drain() {
862
                        ret.insert(addr, demangle_name(vl.to_relocatable()));
863
                    }
qinsoon's avatar
qinsoon committed
864
                }
865

qinsoon's avatar
qinsoon committed
866
867
868
                ret
            };

869
            // emit context (serialized vm, etc)
qinsoon's avatar
qinsoon committed
870
            backend::emit_context_with_reloc(self, symbols, fields);
871
872

            // link
873
            self.link_boot_image(whitelist_funcs, extra_sources_to_link, output_file);
874
875
876
877
        }
    }

    /// Links the emitted code and context into the final boot image.
    ///
    /// Output ending in `dylib`/`so` is linked as a dynamic library;
    /// anything else is linked as an executable (in which case extra
    /// sources are not supported and cause an assertion failure).
    #[cfg(feature = "aot")]
    fn link_boot_image(&self, funcs: Vec<MuID>, extra_srcs: Vec<String>, output_file: String) {
        use testutil;

        trace!("Linking boot image...");

        let func_names = {
            let funcs_guard = self.funcs().read().unwrap();
            funcs.iter().map(|x| funcs_guard.get(x).unwrap().read().unwrap().name()).collect()
        };

        trace!("functions: {:?}", func_names);
        trace!("extern sources: {:?}", extra_srcs);
        trace!("output   : {}", output_file);

        if output_file.ends_with("dylib") || output_file.ends_with("so") {
            // compile as dynamic library
            testutil::aot::link_dylib_with_extra_srcs(func_names, extra_srcs, &output_file, self);
        } else {
            assert!(extra_srcs.len() == 0, "trying to create an executable with linking extern sources, unimplemented");

            // compile as executable
            testutil::aot::link_primordial(func_names, &output_file, self);
        }

        trace!("Done!");
    }
903

qinsoon's avatar
qinsoon committed
904
    // -- API ---
905
906
    fn new_handle(&self, handle: APIHandle) -> APIHandleResult {
        let ret = Box::new(handle);
qinsoon's avatar
qinsoon committed
907
908
909
910

        ret
    }

911
    pub fn new_fixed(&self, tyid: MuID) -> APIHandleResult {
912
        let ty = self.get_type(tyid);
qinsoon's avatar
qinsoon committed
913
        assert!(!ty.is_hybrid());
qinsoon's avatar
qinsoon committed
914

915
916
        let backend_ty = self.get_backend_type_info(tyid);
        let addr = gc::allocate_fixed(ty.clone(), backend_ty);
qinsoon's avatar
qinsoon committed
917
918
919
920
921
922
923
924
        trace!("API: allocated fixed type {} at {}", ty, addr);

        self.new_handle(APIHandle {
            id: self.next_id(),
            v : APIHandleValue::Ref(ty, addr)
        })
    }

925
    pub fn new_hybrid(&self, tyid: MuID, length: APIHandleArg) -> APIHandleResult {
qinsoon's avatar
qinsoon committed
926
        let ty  = self.get_type(tyid);
qinsoon's avatar
qinsoon committed
927
928
        assert!(ty.is_hybrid());

qinsoon's avatar
qinsoon committed
929
930
        let len = self.handle_to_uint64(length);

931
932
        let backend_ty = self.get_backend_type_info(tyid);
        let addr = gc::allocate_hybrid(ty.clone(), len, backend_ty);
qinsoon's avatar
qinsoon committed
933
934
935
936
937
938
939
940
        trace!("API: allocated hybrid type {} of length {} at {}", ty, len, addr);

        self.new_handle(APIHandle {
            id: self.next_id(),
            v: APIHandleValue::Ref(ty, addr)
        })
    }

qinsoon's avatar
qinsoon committed
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
    /// Casts a ref/iref handle to another reference type, producing a new
    /// handle pointing at the same address.
    ///
    /// Panics (assert) if `to_ty` is not a ref/iref matching the operand's
    /// kind; funcref casting is not yet implemented.
    pub fn handle_refcast(&self, from_op: APIHandleArg, to_ty: MuID) -> APIHandleResult {
        let handle_id = self.next_id();
        let to_ty = self.get_type(to_ty);

        trace!("API: refcast {} into type {}", from_op, to_ty);

        match from_op.v {
            APIHandleValue::Ref(_, addr) => {
                assert!(to_ty.is_ref());
                let inner_ty = to_ty.get_referenced_ty().unwrap();

                self.new_handle(APIHandle {
                    id: handle_id,
                    v: APIHandleValue::Ref(inner_ty, addr)
                })
            },
            APIHandleValue::IRef(_, addr) => {
                assert!(to_ty.is_iref());
                let inner_ty = to_ty.get_referenced_ty().unwrap();

                self.new_handle(APIHandle {
                    id: handle_id,
                    v : APIHandleValue::IRef(inner_ty, addr)
                })
            },
            APIHandleValue::FuncRef(_) => unimplemented!(),

            _ => panic!("unexpected operand for refcast: {:?}", from_op)
        }
    }

972
    pub fn handle_get_iref(&self, handle_ref: APIHandleArg) -> APIHandleResult {
qinsoon's avatar
qinsoon committed
973
974
        let (ty, addr) = handle_ref.v.as_ref();

qinsoon's avatar
qinsoon committed
975
        /// FIXME: iref/ref share the same address - this actually depends on GC
qinsoon's avatar
qinsoon committed
976
        // iref has the same address as ref
qinsoon's avatar
qinsoon committed
977
        let ret = self.new_handle(APIHandle {
qinsoon's avatar
qinsoon committed
978
979
            id: self.next_id(),
            v : APIHandleValue::IRef(ty, addr)
qinsoon's avatar
qinsoon committed
980
981
        });

qinsoon's avatar
qinsoon committed
982
        trace!("API: get iref from {:?}", handle_ref);
qinsoon's avatar
qinsoon committed
983
984
985
        trace!("API: result {:?}", ret);

        ret
qinsoon's avatar
qinsoon committed
986
987
    }

988
    pub fn handle_shift_iref(&self, handle_iref: APIHandleArg, offset: APIHandleArg) -> APIHandleResult {
qinsoon's avatar
qinsoon committed
989
990
991
992
993
994
995
996
        let (ty, addr) = handle_iref.v.as_iref();
        let offset = self.handle_to_uint64(offset);

        let offset_addr = {
            let backend_ty = self.get_backend_type_info(ty.id());
            addr.plus(backend_ty.size * (offset as usize))
        };

qinsoon's avatar
qinsoon committed
997
        let ret = self.new_handle(APIHandle {
qinsoon's avatar
qinsoon committed
998
999
            id: self.next_id(),
            v : APIHandleValue::IRef(ty, offset_addr)
qinsoon's avatar
qinsoon committed
1000
1001
1002
1003
1004
1005
        });

        trace!("API: shift iref from {:?}", handle_iref);
        trace!("API: result {:?}", ret);

        ret
qinsoon's avatar
qinsoon committed
1006
1007
    }

qinsoon's avatar
qinsoon committed
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
    pub fn handle_get_elem_iref(&self, handle_iref: APIHandleArg, index: APIHandleArg) -> APIHandleResult {
        let (ty, addr) = handle_iref.v.as_iref();
        let index = self.handle_to_uint64(index);

        let ele_ty = match ty.get_elem_ty() {
            Some(ty) => ty,
            None => panic!("cannot get element ty from {}", ty)
        };
        let elem_addr = {
            let backend_ty = self.get_backend_type_info(ele_ty.id());
            addr.plus(backend_ty.size * (index as usize))
        };

qinsoon's avatar
qinsoon committed
1021
        let ret = self.new_handle(APIHandle {
qinsoon's avatar
qinsoon committed
1022
1023
            id: self.next_id(),
            v : APIHandleValue::IRef(ele_ty, elem_addr)
qinsoon's avatar
qinsoon committed
1024
1025
1026
1027
1028
1029
        });

        trace!("API: get element iref from {:?} at index {:?}", handle_iref, index);
        trace!("API: result {:?}", ret);

        ret
qinsoon's avatar
qinsoon committed
1030
1031
    }

1032
    pub fn handle_get_var_part_iref(&self, handle_iref: APIHandleArg) -> APIHandleResult {
qinsoon's avatar
qinsoon committed
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
        let (ty, addr) = handle_iref.v.as_iref();

        let varpart_addr = {
            let backend_ty = self.get_backend_type_info(ty.id());
            addr.plus(backend_ty.size)
        };

        let varpart_ty = match ty.get_hybrid_varpart_ty() {
            Some(ty) => ty,
            None => panic!("cannot get varpart ty from {}", ty)
        };

qinsoon's avatar
qinsoon committed
1045
        let ret = self.new_handle(APIHandle {
qinsoon's avatar
qinsoon committed
1046
1047
            id: self.next_id(),
            v : APIHandleValue::IRef(varpart_ty, varpart_addr)
qinsoon's avatar
qinsoon committed
1048
1049
1050
1051
1052
1053
        });

        trace!("API: get var part iref from {:?}", handle_iref);
        trace!("API: result {:?}", ret);

        ret
qinsoon's avatar
qinsoon committed
1054
1055
    }

1056
    pub fn handle_get_field_iref(&self, handle_iref: APIHandleArg, field: usize) -> APIHandleResult {
qinsoon's avatar
qinsoon committed
1057
1058
        trace!("API: get field iref from {:?}", handle_iref);

qinsoon's avatar
qinsoon committed
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
        let (ty, addr) = handle_iref.v.as_iref();

        let field_ty = match ty.get_field_ty(field) {
            Some(ty) =>