// Copyright 2017 The Australian National University // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // RTMU this file needs many additions use super::common::*; use ast::inst::*; use ast::op::*; use std; use utils::bit_utils::bits_ones; use utils::math::align_up; use utils::LinkedHashMap; use utils::LinkedHashSet; pub static mut VALIDATE_IR: bool = true; macro_rules! assert_ir { ($ cond : expr ) => [{if unsafe{VALIDATE_IR} {assert!($cond)} }]; ($ cond : expr , $ ( $ arg : tt ) + ) => [{if unsafe{VALIDATE_IR} {assert!($cond, $($arg)+)} }]; } pub struct MuIRBuilder { /// ref to MuVM mvm: *const MuVM, /// Point to the C-visible CMuIRBuilder so that `load` and `abort` can /// deallocate itself. pub c_struct: *mut CMuIRBuilder, /// Map IDs to names. Items are inserted during `gen_sym`. MuIRBuilder is /// supposed to be used by one thread, so there is no need for locking. /// Note: names generated by Zebu internally may also be added to this map id_name_map: HashMap, /// This maps all names given by the client (using gen_sym) to ids name_id_map: HashMap, /// The "transient bundle" includes everything being built here. bundle: TransientBundle } pub type IdBMap = HashMap>; /// A transient bundle, i.e. the bundle being built, but not yet loaded into the /// MuVM. 
// One `IdBMap` per kind of top-level node the client can create; keys are
// the MuIDs handed out by `gen_sym`. Everything is drained into the VM by
// `load_bundle`.
#[derive(Default)]
pub struct TransientBundle {
    types: IdBMap,
    sigs: IdBMap,
    consts: IdBMap,
    globals: IdBMap,
    funcs: IdBMap,
    expfuncs: IdBMap,
    funcvers: IdBMap,
    bbs: IdBMap,
    insts: IdBMap,
    dest_clauses: IdBMap,
    exc_clauses: IdBMap,
    cs_clauses: IdBMap,
    ns_clauses: IdBMap,
    ka_clauses: IdBMap
}

impl MuIRBuilder {
    /// Create a boxed builder attached to `mvm`. `c_struct` is filled in
    /// later by the C-API glue (starts null here).
    pub fn new(mvm: *const MuVM) -> Box {
        Box::new(MuIRBuilder {
            mvm: mvm,
            c_struct: ptr::null_mut(),
            id_name_map: Default::default(),
            name_id_map: Default::default(),
            bundle: Default::default()
        })
    }

    // Reborrow the raw `mvm` pointer with an unconstrained lifetime 'b.
    // NOTE(review): soundness relies on the MuVM outliving this builder —
    // not enforced by the types; confirm against the C API contract.
    #[inline(always)]
    fn get_mvm<'a, 'b>(&'a mut self) -> &'b MuVM {
        //self.mvm
        unsafe { &*self.mvm }
    }

    // Same as `get_mvm` but usable from `&self` contexts.
    #[inline(always)]
    fn get_mvm_immutable<'a, 'b>(&'a self) -> &'b MuVM {
        unsafe { &*self.mvm }
    }

    #[inline(always)]
    fn get_vm<'a, 'b>(&'a mut self) -> &'b VM {
        &self.get_mvm().vm
    }

    // Fresh MuID from the VM-wide ID allocator.
    #[inline(always)]
    fn next_id(&mut self) -> MuID {
        self.get_vm().next_id()
    }

    /// Drop both this builder and its C-visible wrapper. Called from `load`
    /// and `abort`; the builder must not be touched afterwards.
    fn deallocate(&mut self) {
        let c_struct = self.c_struct;
        let b_ptr = self as *mut MuIRBuilder;
        debug!(
            "Deallocating MuIRBuilder {:?} and CMuIRBuilder {:?}...",
            b_ptr, c_struct
        );
        // SAFETY(review): both pointers are assumed to originate from
        // `Box::into_raw`; the reconstructed Boxes are dropped immediately,
        // freeing both allocations (the unused-result drop is intentional).
        unsafe {
            Box::from_raw(c_struct);
            Box::from_raw(b_ptr);
        }
        trace!("Deallocation done!");
    }

    /// Get the Mu name of the `id`. This will consume the entry in the
    /// `id_name_map`. For this reason, this function is only called when
    /// the actual MuEntity that has this ID is created
    /// (such as `new_type_int`).
fn consume_name_of(&mut self, id: MuID) -> Option { self.id_name_map.remove(&id) } pub fn load(&mut self) { load_bundle(self); self.deallocate(); } pub fn abort(&mut self) { info!("Aborting boot image building..."); self.deallocate(); } pub fn gen_sym(&mut self, name: Option) -> MuID { let my_id = self.next_id(); trace!("gen_sym({:?}) -> {}", name, my_id); match name { None => {} Some(the_name) => { let old_name = self.id_name_map.insert(my_id, the_name.clone()); let old_id = self.name_id_map.insert(the_name, my_id); assert_ir!(old_id.is_none()); debug_assert!( old_name.is_none(), "ID already exists: {}, new name: {}, old name: {}", my_id, self.id_name_map.get(&my_id).unwrap(), old_name.unwrap() ); } }; my_id } pub fn new_type_int(&mut self, id: MuID, len: c_int) { self.bundle .types .insert(id, Box::new(NodeType::TypeInt { id: id, len: len })); } pub fn new_type_float(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeFloat { id: id })); } pub fn new_type_double(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeDouble { id: id })); } pub fn new_type_uptr(&mut self, id: MuID, ty: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeUPtr { id: id, ty: ty })); } pub fn new_type_ufuncptr(&mut self, id: MuID, sig: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeUFuncPtr { id: id, sig: sig })); } pub fn new_type_struct(&mut self, id: MuID, fieldtys: Vec) { self.bundle.types.insert( id, Box::new(NodeType::TypeStruct { id: id, fieldtys: fieldtys }) ); } pub fn new_type_hybrid( &mut self, id: MuID, fixedtys: Vec, varty: MuID ) { self.bundle.types.insert( id, Box::new(NodeType::TypeHybrid { id: id, fixedtys: fixedtys, varty: varty }) ); } pub fn new_type_array(&mut self, id: MuID, elemty: MuID, len: u64) { self.bundle.types.insert( id, Box::new(NodeType::TypeArray { id: id, elemty: elemty, len: len as usize }) ); } pub fn new_type_vector(&mut self, id: MuID, elemty: MuID, len: u64) { 
self.bundle.types.insert( id, Box::new(NodeType::TypeVector { id: id, elemty: elemty, len: len as usize }) ); } pub fn new_type_void(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeVoid { id: id })); } pub fn new_type_ref(&mut self, id: MuID, ty: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeRef { id: id, ty: ty })); } pub fn new_type_iref(&mut self, id: MuID, ty: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeIRef { id: id, ty: ty })); } pub fn new_type_weakref(&mut self, id: MuID, ty: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeWeakRef { id: id, ty: ty })); } pub fn new_type_funcref(&mut self, id: MuID, sig: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeFuncRef { id: id, sig: sig })); } pub fn new_type_tagref64(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeTagRef64 { id: id })); } pub fn new_type_threadref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeThreadRef { id: id })); } pub fn new_type_stackref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeStackRef { id: id })); } pub fn new_type_framecursorref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeFrameCursorRef { id: id })); } pub fn new_type_irbuilderref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeIRBuilderRef { id: id })); } pub fn new_type_regionref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeRegionRef { id })); } pub fn new_type_attrref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeAttrRef { id })); } pub fn new_type_timerref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeTimerRef { id })); } pub fn new_type_futexref(&mut self, id: MuID) { self.bundle .types .insert(id, Box::new(NodeType::TypeFutexRef { id })); } pub fn new_funcsig( &mut self, id: MuID, paramtys: Vec, 
rettys: Vec ) { self.bundle.sigs.insert( id, Box::new(NodeFuncSig { id: id, paramtys: paramtys, rettys: rettys }) ); } pub fn new_const_int(&mut self, id: MuID, ty: MuID, value: u64) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstInt { id: id, ty: ty, value: value }) ); } pub fn new_const_int_ex(&mut self, id: MuID, ty: MuID, values: &[u64]) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstIntEx { id: id, ty: ty, value: values.to_vec() }) ); } pub fn new_const_float(&mut self, id: MuID, ty: MuID, value: f32) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstFloat { id: id, ty: ty, value: value }) ); } pub fn new_const_double(&mut self, id: MuID, ty: MuID, value: f64) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstDouble { id: id, ty: ty, value: value }) ); } pub fn new_const_null(&mut self, id: MuID, ty: MuID) { self.bundle .consts .insert(id, Box::new(NodeConst::ConstNull { id: id, ty: ty })); } pub fn new_const_seq(&mut self, id: MuID, ty: MuID, elems: Vec) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstSeq { id: id, ty: ty, elems: elems }) ); } pub fn new_const_extern(&mut self, id: MuID, ty: MuID, symbol: MuName) { self.bundle.consts.insert( id, Box::new(NodeConst::ConstExtern { id: id, ty: ty, symbol: symbol }) ); } pub fn new_global_cell(&mut self, id: MuID, ty: MuID) { self.bundle .globals .insert(id, Box::new(NodeGlobalCell { id: id, ty: ty })); } pub fn new_func(&mut self, id: MuID, sig: MuID) { self.bundle .funcs .insert(id, Box::new(NodeFunc { id: id, sig: sig })); } pub fn new_exp_func( &mut self, id: MuID, func: MuID, callconv: CMuCallConv, cookie: MuID ) { panic!("Not implemented") } pub fn new_func_ver(&mut self, id: MuID, func: MuID, bbs: Vec) { self.bundle.funcvers.insert( id, Box::new(NodeFuncVer { id: id, func: func, bbs: bbs }) ); } pub fn new_bb( &mut self, id: MuID, nor_param_ids: Vec, nor_param_types: Vec, exc_param_id: Option, insts: Vec ) { self.bundle.bbs.insert( id, Box::new(NodeBB { id: 
id, nor_param_ids: nor_param_ids, nor_param_types: nor_param_types, exc_param_id: exc_param_id, insts: insts }) ); } pub fn new_dest_clause(&mut self, id: MuID, dest: MuID, vars: Vec) { self.bundle.dest_clauses.insert( id, Box::new(NodeDestClause { id: id, dest: dest, vars: vars }) ); } pub fn new_exc_clause(&mut self, id: MuID, nor: MuID, exc: MuID) { self.bundle.exc_clauses.insert( id, Box::new(NodeExcClause { id: id, nor: nor, exc: exc }) ); } pub fn new_keepalive_clause(&mut self, id: MuID, vars: Vec) { self.bundle .ka_clauses .insert(id, Box::new(NodeKeepaliveClause { id: id, vars: vars })); } pub fn new_csc_ret_with(&mut self, id: MuID, rettys: Vec) { self.bundle.cs_clauses.insert( id, Box::new(NodeCurrentStackClause::RetWith { id: id, rettys: rettys }) ); } pub fn new_csc_kill_old(&mut self, id: MuID) { self.bundle .cs_clauses .insert(id, Box::new(NodeCurrentStackClause::KillOld { id: id })); } pub fn new_nsc_pass_values( &mut self, id: MuID, tys: Vec, vars: Vec ) { self.bundle.ns_clauses.insert( id, Box::new(NodeNewStackClause::PassValues { id: id, tys: tys, vars: vars }) ); } pub fn new_nsc_throw_exc(&mut self, id: MuID, exc: MuID) { self.bundle.ns_clauses.insert( id, Box::new(NodeNewStackClause::ThrowExc { id: id, exc: exc }) ); } #[inline(always)] fn add_inst(&mut self, id: MuID, inst: NodeInst) { self.bundle.insts.insert(id, Box::new(inst)); } pub fn new_binop( &mut self, id: MuID, result_id: MuID, optr: CMuBinOptr, ty: MuID, opnd1: MuID, opnd2: MuID, exc_clause: Option ) { trace!("new_binop"); self.add_inst( id, NodeInst::NodeBinOp { id: id, result_id: result_id, status_result_ids: vec![], optr: optr, flags: 0, ty: ty, opnd1: opnd1, opnd2: opnd2, exc_clause: exc_clause } ); } pub fn new_binop_with_status( &mut self, id: MuID, result_id: MuID, status_result_ids: Vec, optr: CMuBinOptr, status_flags: CMuBinOpStatus, ty: MuID, opnd1: MuID, opnd2: MuID, exc_clause: Option ) { trace!("new_binop_with_status"); self.add_inst( id, NodeInst::NodeBinOp { id: id, 
result_id: result_id, status_result_ids: status_result_ids, optr: optr, flags: status_flags, ty: ty, opnd1: opnd1, opnd2: opnd2, exc_clause: exc_clause } ) } pub fn new_cmp( &mut self, id: MuID, result_id: MuID, optr: CMuCmpOptr, ty: MuID, opnd1: MuID, opnd2: MuID ) { trace!("new_cmp"); self.add_inst( id, NodeInst::NodeCmp { id: id, result_id: result_id, optr: optr, ty: ty, opnd1: opnd1, opnd2: opnd2 } ); } pub fn new_conv( &mut self, id: MuID, result_id: MuID, optr: CMuConvOptr, from_ty: MuID, to_ty: MuID, opnd: MuID ) { trace!("new_conv"); self.add_inst( id, NodeInst::NodeConv { id: id, result_id: result_id, optr: optr, from_ty: from_ty, to_ty: to_ty, opnd: opnd } ); } pub fn new_select( &mut self, id: MuID, result_id: MuID, cond_ty: MuID, opnd_ty: MuID, cond: MuID, if_true: MuID, if_false: MuID ) { trace!("new_select"); self.add_inst( id, NodeInst::NodeSelect { id: id, result_id: result_id, cond_ty: cond_ty, opnd_ty: opnd_ty, cond: cond, if_true: if_true, if_false: if_false } ); } pub fn new_branch(&mut self, id: MuID, dest: MuID) { trace!("muirbuilder.rs new_branch"); self.add_inst(id, NodeInst::NodeBranch { id: id, dest: dest }); trace!("muirbuilder.rs built new_branch"); } pub fn new_branch2( &mut self, id: MuID, cond: MuID, if_true: MuID, if_false: MuID ) { trace!("new_branch2"); self.add_inst( id, NodeInst::NodeBranch2 { id: id, cond: cond, if_true: if_true, if_false: if_false } ); } pub fn new_switch( &mut self, id: MuID, opnd_ty: MuID, opnd: MuID, default_dest: MuID, cases: Vec, dests: Vec ) { trace!("new_switch"); self.add_inst( id, NodeInst::NodeSwitch { id: id, opnd_ty: opnd_ty, opnd: opnd, default_dest: default_dest, cases: cases, dests: dests } ); } pub fn new_call( &mut self, id: MuID, result_ids: Vec, sig: MuID, callee: MuID, args: Vec, exc_clause: Option, keepalive_clause: Option ) { trace!("new_call"); self.add_inst( id, NodeInst::NodeCall { id: id, result_ids: result_ids, sig: sig, callee: callee, args: args, exc_clause: exc_clause, 
keepalive_clause: keepalive_clause } ); } pub fn new_tailcall( &mut self, id: MuID, sig: MuID, callee: MuID, args: Vec ) { trace!("new_tailcall"); self.add_inst( id, NodeInst::NodeTailCall { id: id, sig: sig, callee: callee, args: args } ); } pub fn new_ret(&mut self, id: MuID, rvs: Vec) { trace!("new_ret"); self.add_inst(id, NodeInst::NodeRet { id: id, rvs: rvs }); } pub fn new_throw(&mut self, id: MuID, exc: MuID) { trace!("new_throw"); self.add_inst(id, NodeInst::NodeThrow { id: id, exc: exc }); } pub fn new_extractvalue( &mut self, id: MuID, result_id: MuID, strty: MuID, index: c_int, opnd: MuID ) { trace!("new_extractvalue"); self.add_inst( id, NodeInst::NodeExtractValue { id: id, result_id: result_id, strty: strty, index: index, opnd: opnd } ); } pub fn new_insertvalue( &mut self, id: MuID, result_id: MuID, strty: MuID, index: c_int, opnd: MuID, newval: MuID ) { trace!("new_insertvalue"); self.add_inst( id, NodeInst::NodeInsertValue { id: id, result_id: result_id, strty: strty, index: index, opnd: opnd, newval: newval } ); } pub fn new_extractelement( &mut self, id: MuID, result_id: MuID, seqty: MuID, indty: MuID, opnd: MuID, index: MuID ) { trace!("new_extractelement"); self.add_inst( id, NodeInst::NodeExtractElement { id: id, result_id: result_id, seqty: seqty, indty: indty, opnd: opnd, index: index } ); } pub fn new_insertelement( &mut self, id: MuID, result_id: MuID, seqty: MuID, indty: MuID, opnd: MuID, index: MuID, newval: MuID ) { trace!("new_insertelement"); self.add_inst( id, NodeInst::NodeInsertElement { id: id, result_id: result_id, seqty: seqty, indty: indty, opnd: opnd, index: index, newval: newval } ); } pub fn new_shufflevector( &mut self, id: MuID, result_id: MuID, vecty: MuID, maskty: MuID, vec1: MuID, vec2: MuID, mask: MuID ) { trace!("new_shufflevector"); self.add_inst( id, NodeInst::NodeShuffleVector { id: id, result_id: result_id, vecty: vecty, maskty: maskty, vec1: vec1, vec2: vec2, mask: mask } ); } pub fn new_new( &mut self, id: MuID, 
result_id: MuID, allocty: MuID, exc_clause: Option ) { trace!("new_new"); self.add_inst( id, NodeInst::NodeNew { id: id, result_id: result_id, allocty: allocty, exc_clause: exc_clause } ); } pub fn new_newhybrid( &mut self, id: MuID, result_id: MuID, allocty: MuID, lenty: MuID, length: MuID, exc_clause: Option ) { trace!("new_newhybrid"); self.add_inst( id, NodeInst::NodeNewHybrid { id: id, result_id: result_id, allocty: allocty, lenty: lenty, length: length, exc_clause: exc_clause } ); } pub fn new_alloca( &mut self, id: MuID, result_id: MuID, allocty: MuID, exc_clause: Option ) { trace!("new_alloca"); self.add_inst( id, NodeInst::NodeAlloca { id: id, result_id: result_id, allocty: allocty, exc_clause: exc_clause } ); } pub fn new_allocahybrid( &mut self, id: MuID, result_id: MuID, allocty: MuID, lenty: MuID, length: MuID, exc_clause: Option ) { trace!("new_allocahybrid"); self.add_inst( id, NodeInst::NodeAllocaHybrid { id: id, result_id: result_id, allocty: allocty, lenty: lenty, length: length, exc_clause: exc_clause } ); } pub fn new_getiref( &mut self, id: MuID, result_id: MuID, refty: MuID, opnd: MuID ) { trace!("new_getiref"); self.add_inst( id, NodeInst::NodeGetIRef { id: id, result_id: result_id, refty: refty, opnd: opnd } ); } pub fn new_getfieldiref( &mut self, id: MuID, result_id: MuID, is_ptr: bool, refty: MuID, index: c_int, opnd: MuID ) { trace!("new_getfieldiref"); self.add_inst( id, NodeInst::NodeGetFieldIRef { id: id, result_id: result_id, is_ptr: is_ptr, refty: refty, index: index, opnd: opnd } ); } pub fn new_getelemiref( &mut self, id: MuID, result_id: MuID, is_ptr: bool, refty: MuID, indty: MuID, opnd: MuID, index: MuID ) { trace!("new_getelemiref"); self.add_inst( id, NodeInst::NodeGetElemIRef { id: id, result_id: result_id, is_ptr: is_ptr, refty: refty, indty: indty, opnd: opnd, index: index } ); } pub fn new_shiftiref( &mut self, id: MuID, result_id: MuID, is_ptr: bool, refty: MuID, offty: MuID, opnd: MuID, offset: MuID ) { 
trace!("new_shiftiref"); self.add_inst( id, NodeInst::NodeShiftIRef { id: id, result_id: result_id, is_ptr: is_ptr, refty: refty, offty: offty, opnd: opnd, offset: offset } ); } pub fn new_getvarpartiref( &mut self, id: MuID, result_id: MuID, is_ptr: bool, refty: MuID, opnd: MuID ) { trace!("new_getvarpartiref"); self.add_inst( id, NodeInst::NodeGetVarPartIRef { id: id, result_id: result_id, is_ptr: is_ptr, refty: refty, opnd: opnd } ); } pub fn new_load( &mut self, id: MuID, result_id: MuID, is_ptr: bool, ord: CMuMemOrd, refty: MuID, loc: MuID, exc_clause: Option ) { trace!("new_load"); self.add_inst( id, NodeInst::NodeLoad { id: id, result_id: result_id, is_ptr: is_ptr, ord: ord, refty: refty, loc: loc, exc_clause: exc_clause } ); } pub fn new_store( &mut self, id: MuID, is_ptr: bool, ord: CMuMemOrd, refty: MuID, loc: MuID, newval: MuID, exc_clause: Option ) { trace!("new_store"); self.add_inst( id, NodeInst::NodeStore { id: id, is_ptr: is_ptr, ord: ord, refty: refty, loc: loc, newval: newval, exc_clause: exc_clause } ); } pub fn new_cmpxchg( &mut self, id: MuID, value_result_id: MuID, succ_result_id: MuID, is_ptr: bool, is_weak: bool, ord_succ: CMuMemOrd, ord_fail: CMuMemOrd, refty: MuID, loc: MuID, expected: MuID, desired: MuID, exc_clause: Option ) { trace!("new_cmpxchg"); self.add_inst( id, NodeInst::NodeCmpXchg { id: id, value_result_id: value_result_id, succ_result_id: succ_result_id, is_ptr: is_ptr, is_weak: is_weak, ord_succ: ord_succ, ord_fail: ord_fail, refty: refty, loc: loc, expected: expected, desired: desired, exc_clause: exc_clause } ); } pub fn new_atomicrmw( &mut self, id: MuID, result_id: MuID, is_ptr: bool, ord: CMuMemOrd, optr: CMuAtomicRMWOptr, ref_ty: MuID, loc: MuID, opnd: MuID, exc_clause: Option ) { trace!("new_atomicrmw"); self.add_inst( id, NodeInst::NodeAtomicRMW { id: id, result_id: result_id, is_ptr: is_ptr, ord: ord, optr: optr, ref_ty: ref_ty, loc: loc, opnd: opnd, exc_clause: exc_clause } ); } pub fn new_fence(&mut self, id: MuID, 
ord: CMuMemOrd) { trace!("new_fence"); self.add_inst(id, NodeInst::NodeFence { id: id, ord: ord }); } pub fn new_trap( &mut self, id: MuID, result_ids: Vec, rettys: Vec, exc_clause: Option, keepalive_clause: Option ) { trace!("new_trap"); self.add_inst( id, NodeInst::NodeTrap { id: id, result_ids: result_ids, rettys: rettys, exc_clause: exc_clause, keepalive_clause: keepalive_clause } ); } pub fn new_watchpoint( &mut self, id: MuID, wpid: CMuWPID, result_ids: Vec, rettys: Vec, dis: MuID, ena: MuID, exc: Option, keepalive_clause: Option ) { trace!("new_watchpoint"); self.add_inst( id, NodeInst::NodeWatchPoint { id: id, wpid: wpid as MuID, result_ids: result_ids, rettys: rettys, dis: dis, ena: ena, exc: exc, keepalive_clause: keepalive_clause } ); } pub fn new_wpbranch( &mut self, id: MuID, wpid: CMuWPID, dis: MuID, ena: MuID ) { trace!("new_wpbranch"); self.add_inst( id, NodeInst::NodeWPBranch { id: id, wpid: wpid as MuID, dis: dis, ena: ena } ); } pub fn new_ccall( &mut self, id: MuID, result_ids: Vec, callconv: CMuCallConv, callee_ty: MuID, sig: MuID, callee: MuID, args: Vec, exc_clause: Option, keepalive_clause: Option ) { trace!("new_ccall"); self.add_inst( id, NodeInst::NodeCCall { id: id, result_ids: result_ids, callconv: callconv, callee_ty: callee_ty, sig: sig, callee: callee, args: args, exc_clause: exc_clause, keepalive_clause: keepalive_clause } ); } pub fn new_newthread( &mut self, id: MuID, result_id: MuID, stack: MuID, threadlocal: Option, new_stack_clause: MuID, exc_clause: Option ) { trace!("new_newthread"); self.add_inst( id, NodeInst::NodeNewThread { id: id, result_id: result_id, stack: stack, threadlocal: threadlocal, new_stack_clause: new_stack_clause, exc_clause: exc_clause } ); } pub fn new_swapstack( &mut self, id: MuID, result_ids: Vec, swappee: MuID, cur_stack_clause: MuID, new_stack_clause: MuID, exc_clause: Option, keepalive_clause: Option ) { trace!("new_swapstack"); self.add_inst( id, NodeInst::NodeSwapStack { id: id, result_ids: 
result_ids, swappee: swappee, cur_stack_clause: cur_stack_clause, new_stack_clause: new_stack_clause, exc_clause: exc_clause, keepalive_clause: keepalive_clause } ); } pub fn new_newrtthread( &mut self, id: MuID, result_id: MuID, attr: MuID, stack: MuID, threadlocal: Option, new_stack_clause: MuID, exc_clause: Option ) { trace!("new_newrtthread"); self.add_inst( id, NodeInst::NodeNewRTThread { id: id, result_id: result_id, attr: attr, stack: stack, threadlocal: threadlocal, new_stack_clause: new_stack_clause, exc_clause: exc_clause } ); } pub fn new_allocau( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, exc_clause: Option ) { trace!("new_allocau"); self.add_inst( id, NodeInst::NodeAllocaU { id, result_id, allocty, exc_clause } ); } pub fn new_allocauhybrid( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, lenty: MuTypeNode, length: MuVarNode, exc_clause: Option ) { trace!("new_allocauhybrid"); self.add_inst( id, NodeInst::NodeAllocaUHybrid { id, result_id, allocty, lenty, length, exc_clause } ); } pub fn new_ealloc( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, exc_clause: Option ) { trace!("new_ealloc"); self.add_inst( id, NodeInst::NodeEAlloc { id, result_id, allocty, exc_clause } ); } pub fn new_eallochybrid( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, lenty: MuTypeNode, length: MuVarNode, exc_clause: Option ) { trace!("new_eallochybrid"); self.add_inst( id, NodeInst::NodeEAllocHybrid { id, result_id, allocty, lenty, length, exc_clause } ); } pub fn new_edelete( &mut self, id: MuID, ptrty: MuTypeNode, ptr: MuVarNode, exc_clause: Option ) { trace!("new_edelete"); self.add_inst( id, NodeInst::NodeEDelete { id, ptrty, ptr, exc_clause } ); } pub fn new_newregion( &mut self, id: MuID, result_id: MuID, size: MuVarNode, exc_clause: Option ) { trace!("new_newrgion"); self.add_inst( id, NodeInst::NodeNewRegion { id, result_id, size, exc_clause } ); } pub fn new_deleteregion( &mut self, id: MuID, ptr: MuVarNode, 
exc_clause: Option ) { trace!("new_deleteregion"); self.add_inst( id, NodeInst::NodeDeleteRegion { id, ptr, exc_clause } ); } pub fn new_ralloc( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, reg: MuVarNode, exc_clause: Option ) { trace!("new_ralloc"); self.add_inst( id, NodeInst::NodeRAlloc { id, result_id, allocty, reg, exc_clause } ); } pub fn new_rallochybrid( &mut self, id: MuID, result_id: MuID, allocty: MuTypeNode, lenty: MuTypeNode, length: MuVarNode, reg: MuVarNode, exc_clause: Option ) { trace!("new_rallochybrid"); self.add_inst( id, NodeInst::NodeRAllocHybrid { id, result_id, allocty, lenty, length, reg, exc_clause } ); } pub fn new_exitthread( &mut self, id: MuID, tref: MuVarNode, exc_clause: Option ) { trace!("new_exitthread"); self.add_inst( id, NodeInst::NodeExitThread { id, tref, exc_clause } ); } pub fn new_yield(&mut self, id: MuID, exc_clause: Option) { trace!("new_yield"); self.add_inst(id, NodeInst::NodeYield { id, exc_clause }); } pub fn new_newfutex(&mut self, id: MuID, result_id: MuID) { trace!("new_newfutex"); self.add_inst(id, NodeInst::NodeNewFutex { id, result_id }); } pub fn new_newattr(&mut self, id: MuID, result_id: MuID) { trace!("new_newattr"); self.add_inst(id, NodeInst::NodeNewAttr { id, result_id }); } pub fn new_setattr( &mut self, id: MuID, tref: MuVarNode, aref: MuVarNode, exc_clause: Option ) { trace!("new_setattr"); self.add_inst( id, NodeInst::NodeSetAttr { id, tref, aref, exc_clause } ); } pub fn new_getattr( &mut self, id: MuID, result_id: MuID, tref: MuVarNode, exc_clause: Option ) { trace!("new_getattr"); self.add_inst( id, NodeInst::NodeGetAttr { id, result_id, tref, exc_clause } ); } pub fn new_gettime(&mut self, id: MuID, result_id: MuID) { trace!("new_gettime"); self.add_inst(id, NodeInst::NodeGetTime { id, result_id }); } pub fn new_settime( &mut self, id: MuID, tm: MuVarNode, exc_clause: Option ) { trace!("new_settime"); self.add_inst(id, NodeInst::NodeSetTime { id, tm, exc_clause }); } pub fn 
new_newtimer( &mut self, id: MuID, result_id: MuID, exc_clause: Option ) { trace!("new_newtimer"); self.add_inst( id, NodeInst::NodeNewTimer { id, result_id, exc_clause } ); } pub fn new_deletetimer( &mut self, id: MuID, tmr: MuVarNode, exc_clause: Option ) { trace!("new_deletetimer"); self.add_inst(id, NodeInst::NodeDeleteTimer { id, tmr }); } pub fn new_settimer( &mut self, id: MuID, tmr: MuVarNode, tm: MuVarNode, prd: MuVarNode, fsig: MuFuncSigNode, func: MuFuncNode, args: Vec, exc_clause: Option ) { trace!("new_settimer"); self.add_inst( id, NodeInst::NodeSetTimer { id, tmr, tm, prd, fsig, func, args, exc_clause } ); } pub fn new_canceltimer(&mut self, id: MuID, tmr: MuVarNode) { trace!("new_canceltimer"); self.add_inst(id, NodeInst::NodeCancelTimer { id, tmr }); } pub fn new_comminst( &mut self, id: MuID, result_ids: Vec, opcode: CMuCommInst, flags: &[CMuFlag], tys: Vec, sigs: Vec, args: Vec, exc_clause: Option, keepalive_clause: Option ) { trace!("new_comminst"); self.add_inst( id, NodeInst::NodeCommInst { id: id, result_ids: result_ids, opcode: opcode, flags: vec![], tys: tys, sigs: sigs, args: args, exc_clause: exc_clause, keepalive_clause: keepalive_clause } ); } } type IdPMap = HashMap>; struct BundleLoader<'lb, 'lvm> { b: &'lb MuIRBuilder, vm: &'lvm VM, id_name_map: HashMap, name_id_map: HashMap, visited: HashSet, built_types: IdPMap, built_sigs: IdPMap, built_constants: IdPMap, built_globals: IdPMap, built_funcs: IdBMap, built_funcvers: IdBMap, struct_hybrid_id_tags: Vec<(MuID, MuName)>, built_void: Option>, built_refvoid: Option>, built_refi64: Option>, built_i1: Option>, built_i64: Option>, built_double: Option>, built_i52: Option>, built_i6: Option>, built_ref_void: Option>, built_tagref64: Option>, built_stackref: Option>, built_threadref: Option>, built_regionref: Option>, built_timerref: Option>, built_futexref: Option>, built_attrref: Option>, built_funcref_of: IdPMap, built_ref_of: IdPMap, built_iref_of: IdPMap, built_uptr_of: IdPMap, 
built_strong_variant: IdPMap, built_constint_of: HashMap>, current_sig: Option>, current_entry: MuID } fn load_bundle(b: &mut MuIRBuilder) { let vm = b.get_vm(); let new_id_name_map = b.id_name_map.drain().collect::>(); let new_name_id_map = b.name_id_map.drain().collect::>(); let mut bl = BundleLoader { b: b, vm: vm, id_name_map: new_id_name_map, name_id_map: new_name_id_map, visited: Default::default(), built_types: Default::default(), built_sigs: Default::default(), built_constants: Default::default(), built_globals: Default::default(), built_funcs: Default::default(), built_funcvers: Default::default(), struct_hybrid_id_tags: Default::default(), built_void: Default::default(), built_refvoid: Default::default(), built_refi64: Default::default(), built_i1: Default::default(), built_i64: Default::default(), built_double: Default::default(), built_i52: Default::default(), built_i6: Default::default(), built_ref_void: Default::default(), built_tagref64: Default::default(), built_stackref: Default::default(), built_threadref: Default::default(), built_funcref_of: Default::default(), built_ref_of: Default::default(), built_iref_of: Default::default(), built_uptr_of: Default::default(), built_strong_variant: Default::default(), built_constint_of: Default::default(), current_sig: Default::default(), current_entry: Default::default(), built_regionref: Default::default(), built_attrref: Default::default(), built_timerref: Default::default(), built_futexref: Default::default() }; bl.load_bundle(); } #[derive(Default)] struct FuncCtxBuilder { ctx: FunctionContext, tree_nodes: IdPMap } const DEFAULT_TRUE_PROB: f32 = 0.4f32; impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { fn load_bundle(&mut self) { self.ensure_names(); self.build_toplevels(); self.add_everything_to_vm(); } fn ensure_void(&mut self) -> P { if let Some(ref void) = self.built_void { return void.clone(); } let id_void = self.vm.next_id(); let impl_void = P(MuType { hdr: MuEntityHeader::unnamed(id_void), v: 
MuType_::Void }); trace!("Ensure void is defined: {} {:?}", id_void, impl_void); self.built_types.insert(id_void, impl_void.clone()); self.built_void = Some(impl_void.clone()); impl_void } fn ensure_refvoid(&mut self) -> P { if let Some(ref refvoid) = self.built_refi64 { return refvoid.clone(); } let id_refvoid = self.vm.next_id(); let id_void = self.ensure_void().id(); let impl_refvoid = self.ensure_ref(id_void); trace!( "Ensure refvoid is defined: {} {:?}", id_refvoid, impl_refvoid ); self.built_types.insert(id_refvoid, impl_refvoid.clone()); self.built_refvoid = Some(impl_refvoid.clone()); impl_refvoid } fn ensure_strong_variant(&mut self, ty: &P) -> P { if let Some(ref sty) = self.built_strong_variant.get(&ty.id()) { return (*sty).clone(); } let sty = match &ty.v { &MuType_::WeakRef(ref t) => { let id = self.vm.next_id(); let sty = P(MuType::new(id, MuType_::muref(t.clone()))); self.built_types.insert(id, sty.clone()); sty } _ => ty.clone() }; trace!("Ensure strong variant is defined: {} {:?}", sty.id(), sty); self.built_strong_variant.insert(ty.id(), sty.clone()); sty } fn ensure_refi64(&mut self) -> P { if let Some(ref refi64) = self.built_refi64 { return refi64.clone(); } let id_i64 = self.vm.next_id(); let id_ref = self.vm.next_id(); let impl_i64 = P(MuType { hdr: MuEntityHeader::unnamed(id_i64), v: MuType_::Int(64) }); let impl_ref = P(MuType { hdr: MuEntityHeader::unnamed(id_ref), v: MuType_::Ref(impl_i64.clone()) }); trace!("Ensure i64 is defined: {} {:?}", id_i64, impl_i64); trace!("Ensure ref is defined: {} {:?}", id_ref, impl_ref); self.built_types.insert(id_i64, impl_i64); self.built_types.insert(id_ref, impl_ref.clone()); self.built_refi64 = Some(impl_ref.clone()); impl_ref } fn ensure_i1(&mut self) -> P { if let Some(ref impl_ty) = self.built_i1 { return impl_ty.clone(); } let id = self.vm.next_id(); let impl_ty = P(MuType { hdr: MuEntityHeader::unnamed(id), v: MuType_::Int(1) }); trace!("Ensure i1 is defined: {} {:?}", id, impl_ty); 
self.built_types.insert(id, impl_ty.clone());
        self.built_i1 = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the stackref type, building and caching it on first use.
    fn ensure_stackref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_stackref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::StackRef
        });
        trace!("Ensure stackref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_stackref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the threadref type, building and caching it on first use.
    fn ensure_threadref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_threadref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::ThreadRef
        });
        trace!("Ensure threadref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_threadref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the regionref type (realtime extension), building and
    /// caching it on first use.
    #[cfg(feature = "realtime")]
    fn ensure_regionref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_regionref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::RegionRef
        });
        trace!("Ensure regionref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_regionref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the timerref type (realtime extension), building and
    /// caching it on first use.
    #[cfg(feature = "realtime")]
    fn ensure_timerref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_timerref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::TimerRef
        });
        trace!("Ensure timerref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_timerref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the futexref type, building and caching it on first use.
    /// NOTE(review): unlike the other realtime ref types around it, this
    /// one is not gated behind `#[cfg(feature = "realtime")]` — confirm
    /// that is intentional.
    fn ensure_futexref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_futexref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::FutexRef
        });
        trace!("Ensure futexref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_futexref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the attrref type (realtime extension), building and caching
    /// it on first use.
    #[cfg(feature = "realtime")]
    fn ensure_attrref(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_attrref {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::AttrRef
        });
        trace!("Ensure attrref is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_attrref = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the `int<6>` type, building and caching it on first use.
    fn ensure_i6(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_i6 {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::Int(6)
        });
        trace!("Ensure i6 is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_i6 = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the `int<64>` type, building and caching it on first use.
    fn ensure_i64(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_i64 {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::Int(64)
        });
        trace!("Ensure i64 is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_i64 = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the tagref64 type, building and caching it on first use.
    fn ensure_tagref64(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_tagref64 {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::Tagref64
        });
        trace!("Ensure tagref64 is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_tagref64 = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the `int<52>` type (the integer payload width of tagref64),
    /// building and caching it on first use.
    fn ensure_i52(&mut self) -> P {
        if let Some(ref impl_ty) = self.built_i52 {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::Int(52)
        });
        trace!("Ensure i52 is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_i52 = Some(impl_ty.clone());
        impl_ty
    }

    fn
ensure_double(&mut self) -> P<MuType> {
        if let Some(ref impl_ty) = self.built_double {
            return impl_ty.clone();
        }
        let id = self.vm.next_id();
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id),
            v: MuType_::Double
        });
        trace!("Ensure double is defined: {} {:?}", id, impl_ty);
        self.built_types.insert(id, impl_ty.clone());
        self.built_double = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns the `ref<void>` type, building and caching both the void
    /// type and the ref type on first use.
    fn ensure_ref_void(&mut self) -> P<MuType> {
        if let Some(ref impl_ty) = self.built_ref_void {
            return impl_ty.clone();
        }
        // BUGFIX: previously a single ID was used for BOTH the void type
        // and the ref<void> type, so the second `built_types.insert`
        // overwrote the first and two distinct types shared one MuID.
        // Allocate a separate ID for each type.
        let id_void = self.vm.next_id();
        let id_ref = self.vm.next_id();
        let impl_void_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id_void),
            v: MuType_::Void
        });
        let impl_ty = P(MuType {
            hdr: MuEntityHeader::unnamed(id_ref),
            v: MuType_::Ref(impl_void_ty.clone())
        });
        trace!("Ensure ref is defined: {} {:?}", id_ref, impl_ty);
        self.built_types.insert(id_void, impl_void_ty.clone());
        self.built_types.insert(id_ref, impl_ty.clone());
        self.built_ref_void = Some(impl_ty.clone());
        impl_ty
    }

    /// Returns a global tree node wrapping an unnamed `int<64>` constant
    /// with the given value, building and caching the constant on first
    /// use.
    fn ensure_constint_of(&mut self, value: u64) -> P<TreeNode> {
        if let Some(c) = self.built_constint_of.get(&value) {
            return self.new_global(c.clone());
        }
        let id = self.vm.next_id();
        let impl_ty = self.ensure_i64();
        let impl_val = P(Value {
            hdr: MuEntityHeader::unnamed(id),
            ty: impl_ty,
            v: Value_::Constant(Constant::Int(value))
        });
        trace!("Ensure const int is defined: {} {:?}", value, impl_val);
        self.built_constants.insert(id, impl_val.clone());
        self.built_constint_of.insert(value, impl_val.clone());
        self.new_global(impl_val)
    }

    /// Returns the `funcref<sig>` type for the signature `sig_id`, building
    /// and caching it on first use. The signature must already be built.
    fn ensure_funcref(&mut self, sig_id: MuID) -> P<MuType> {
        if let Some(funcref) = self.built_funcref_of.get(&sig_id) {
            return funcref.clone();
        }
        let sig = self.built_sigs.get(&sig_id).unwrap().clone();
        let id_funcref = self.vm.next_id();
        let impl_funcref = P(MuType {
            hdr: MuEntityHeader::unnamed(id_funcref),
            v: MuType_::FuncRef(sig)
        });
        trace!(
            "Ensure funcref of {} is defined: {} {:?}",
            sig_id,
            id_funcref,
            impl_funcref
        );
        self.built_types.insert(id_funcref, impl_funcref.clone());
        self.built_funcref_of.insert(sig_id, impl_funcref.clone());
        impl_funcref
    }

    fn
ensure_type_generic<F>(
        id: MuID,
        hint: &str,
        vm: &VM,
        cache_map: &mut IdPMap<MuType>,
        storage_map: &mut IdPMap<MuType>,
        factory: F
    ) -> P<MuType>
    where
        F: Fn(P<MuType>) -> MuType_
    {
        // Memoized construction of a derived type (ref/iref/uptr/... of
        // the source type `id`): `cache_map` maps source-type ID -> derived
        // type; `storage_map` is the global table of built types.
        if let Some(obj) = cache_map.get(&id) {
            return obj.clone();
        }
        let new_id = vm.next_id();
        let old_obj = storage_map.get(&id).unwrap().clone();
        let impl_type_ = factory(old_obj);
        let new_obj = P(MuType {
            hdr: MuEntityHeader::unnamed(new_id),
            v: impl_type_
        });
        storage_map.insert(new_id, new_obj.clone());
        trace!("Ensure {} of {} is defined: {:?}", hint, id, new_obj);
        // BUGFIX: the cache was previously keyed by `new_id`, but lookups
        // (the `cache_map.get(&id)` above) use the *source* type's ID, so
        // the cache never hit and a fresh derived type was built on every
        // call. Key the cache by the source ID.
        cache_map.insert(id, new_obj.clone());
        new_obj
    }

    /// Returns `ref<ty>`, cached in `built_ref_of`.
    fn ensure_ref(&mut self, ty_id: MuID) -> P<MuType> {
        BundleLoader::ensure_type_generic(
            ty_id,
            "ref",
            &self.vm,
            &mut self.built_ref_of,
            &mut self.built_types,
            |impl_ty| MuType_::Ref(impl_ty)
        )
    }

    /// Returns `iref<ty>`, cached in `built_iref_of`.
    fn ensure_iref(&mut self, ty_id: MuID) -> P<MuType> {
        BundleLoader::ensure_type_generic(
            ty_id,
            "iref",
            &self.vm,
            &mut self.built_iref_of,
            &mut self.built_types,
            |impl_ty| MuType_::IRef(impl_ty)
        )
    }

    /// Returns `uptr<ty>`, cached in `built_uptr_of`.
    fn ensure_uptr(&mut self, ty_id: MuID) -> P<MuType> {
        // BUGFIX: this previously used `built_iref_of` as its cache,
        // sharing it with `ensure_iref`; a cached iref<T> could then be
        // handed out where uptr<T> was expected (and vice versa).
        // NOTE(review): assumes a `built_uptr_of` field exists alongside
        // `built_iref_of` on this struct — confirm against the field list.
        BundleLoader::ensure_type_generic(
            ty_id,
            "uptr",
            &self.vm,
            &mut self.built_uptr_of,
            &mut self.built_types,
            |impl_ty| MuType_::UPtr(impl_ty)
        )
    }

    /// Returns `uptr<ty>` when `is_ptr` is set, otherwise `iref<ty>`.
    fn ensure_iref_or_uptr(&mut self, ty_id: MuID, is_ptr: bool) -> P<MuType> {
        if is_ptr {
            self.ensure_uptr(ty_id)
        } else {
            self.ensure_iref(ty_id)
        }
    }

    /// Generates a name for `id` if it does not already have one; when
    /// `parent_id` is given, the generated name is prefixed with the
    /// parent's name (`parent.#id`).
    fn ensure_name(&mut self, id: MuID, parent_id: Option<MuID>) {
        let prefix = match parent_id {
            Some(parent_id) => (*self.get_name(parent_id)).clone() + ".",
            None => "".to_string()
        };
        self.id_name_map.entry(id).or_insert_with(|| {
            let name = format!("{}#{}", prefix, id);
            trace!("Making name for ID {} : {}", id, name);
            Arc::new(name)
        });
    }

    fn ensure_names(&mut self) {
        // Make names for all unnamed entities that have parents, to be relative
        // to their parents name (this is not strictly neccesary, but
        // will make reading stuff the compiler generates easier)

        // Give each struct and hybrid type a name
        // (this is needed when structs/hybrids refer to themselves)
        for (id, ty) in &self.b.bundle.types {
            match **ty {
NodeType::TypeHybrid { .. } | NodeType::TypeStruct { .. } => { self.ensure_name(*id, None) } _ => {} } } // A func can be a parent of a function version, so make sure each one // has a name for id in self.b.bundle.funcs.keys() { self.ensure_name(*id, None); } // Make each unnamed function version have a name relative to its // function for (fv_id, fv) in &self.b.bundle.funcvers { self.ensure_name(*fv_id, Some(fv.func)); // Make each unnamed basic block have a name relative to it's // enclosing function version for bb_id in &fv.bbs { self.ensure_name(*bb_id, Some(*fv_id)); } } for (bb_id, bb) in &self.b.bundle.bbs { // Make each of the basic blocks unnamed parameters have // names relative to the block itself for nor_id in &bb.nor_param_ids { self.ensure_name(*nor_id, Some(*bb_id)); } if bb.exc_param_id.is_some() { self.ensure_name(bb.exc_param_id.unwrap(), Some(*bb_id)); } // Make each of the blocks unnamed instructions have names relative // to the block itself for inst_id in &bb.insts { self.ensure_name(*inst_id, Some(*bb_id)); // Make each unnamed instruction result have a name relative to // the basic block match self.b.bundle.insts.get(&inst_id) { Some(inst) => { match **inst { // Instructions with a single result NodeInst::NodeCmp { ref result_id, .. } | NodeInst::NodeConv { ref result_id, .. } | NodeInst::NodeSelect { ref result_id, .. } | NodeInst::NodeExtractValue { ref result_id, .. } | NodeInst::NodeInsertValue { ref result_id, .. } | NodeInst::NodeExtractElement { ref result_id, .. } | NodeInst::NodeInsertElement { ref result_id, .. } | NodeInst::NodeShuffleVector { ref result_id, .. } | NodeInst::NodeNew { ref result_id, .. } | NodeInst::NodeNewHybrid { ref result_id, .. } | NodeInst::NodeAlloca { ref result_id, .. } | NodeInst::NodeAllocaHybrid { ref result_id, .. } | NodeInst::NodeGetIRef { ref result_id, .. } | NodeInst::NodeGetFieldIRef { ref result_id, .. } | NodeInst::NodeGetElemIRef { ref result_id, .. 
} | NodeInst::NodeShiftIRef { ref result_id, .. } | NodeInst::NodeGetVarPartIRef { ref result_id, .. } | NodeInst::NodeLoad { ref result_id, .. } | NodeInst::NodeAtomicRMW { ref result_id, .. } | NodeInst::NodeNewThread { ref result_id, .. } | NodeInst::NodeNewRTThread { ref result_id, .. } => self.ensure_name(*result_id, Some(*bb_id)), // Instructions with a variable list of results NodeInst::NodeCall { ref result_ids, .. } | NodeInst::NodeTrap { ref result_ids, .. } | NodeInst::NodeWatchPoint { ref result_ids, .. } | NodeInst::NodeCCall { ref result_ids, .. } | NodeInst::NodeSwapStack { ref result_ids, .. } | NodeInst::NodeCommInst { ref result_ids, .. } => { for result_id in result_ids { self.ensure_name(*result_id, Some(*bb_id)); } } NodeInst::NodeBinOp { ref result_id, ref status_result_ids, .. } => { self.ensure_name(*result_id, Some(*bb_id)); for status_result_id in status_result_ids { self.ensure_name( *status_result_id, Some(*bb_id) ); } } NodeInst::NodeCmpXchg { ref value_result_id, ref succ_result_id, .. 
} => { self.ensure_name( *value_result_id, Some(*bb_id) ); self.ensure_name(*succ_result_id, Some(*bb_id)); } // Instructions has no results _ => {} } } None => panic!( "Referenced instruction {} does not exist", inst_id ) } } } } fn get_name(&self, id: MuID) -> MuName { self.id_name_map.get(&id).unwrap().clone() } fn maybe_get_name(&self, id: MuID) -> Option { self.id_name_map.get(&id).cloned() } fn make_mu_entity_header(&self, id: MuID) -> MuEntityHeader { match self.maybe_get_name(id) { None => MuEntityHeader::unnamed(id), Some(name) => MuEntityHeader::named(id, name) } } fn build_toplevels(&mut self) { for id in self.b.bundle.types.keys() { if !self.visited.contains(id) { self.build_type(*id) } } let struct_hybrid_id_tags = self.struct_hybrid_id_tags.drain(..).collect::>(); for (id, ref tag) in struct_hybrid_id_tags { self.fill_struct_hybrid(id, tag) } for id in self.b.bundle.sigs.keys() { if !self.visited.contains(id) { self.build_sig(*id) } } for id in self.b.bundle.consts.keys() { if !self.visited.contains(id) { self.build_const(*id) } } for id in self.b.bundle.globals.keys() { if !self.visited.contains(id) { self.build_global(*id) } } for id in self.b.bundle.funcs.keys() { if !self.visited.contains(id) { self.build_func(*id) } } for id in self.b.bundle.funcvers.keys() { self.build_funcver(*id) } } fn build_type(&mut self, id: MuID) { self.visited.insert(id); let ty = self.b.bundle.types.get(&id).unwrap(); trace!("Building type {} {:?}", id, ty); let hdr = self.make_mu_entity_header(id); let impl_ty_ = match **ty { NodeType::TypeInt { id: _, len } => { assert_ir!(len >= 1); MuType_::Int(len as usize) } NodeType::TypeFloat { id: _ } => MuType_::Float, NodeType::TypeDouble { id: _ } => MuType_::Double, NodeType::TypeUPtr { id: _, ty: toty } => { // NOTE: The mu-spec requires toty to be native safe // but that will break clients that ignore this rule so it is // not checked for let impl_toty = self.ensure_type_rec(toty); MuType_::UPtr(impl_toty) } 
NodeType::TypeUFuncPtr { id: _, sig } => { // NOTE: The mu-spec requires toty to be native safe // but that will break clients that ignore this rule so it is // not checked for let impl_sig = self.ensure_sig_rec(sig); MuType_::UFuncPtr(impl_sig) } NodeType::TypeStruct { id: _, fieldtys: _ } => { let tag = self.get_name(id); self.struct_hybrid_id_tags.push((id, tag.clone())); MuType_::mustruct_empty(tag) // MuType_::Struct(tag) } NodeType::TypeHybrid { id: _, fixedtys: _, varty: _ } => { let tag = self.get_name(id); self.struct_hybrid_id_tags.push((id, tag.clone())); MuType_::hybrid_empty(tag) } NodeType::TypeArray { id: _, elemty, len } => { let impl_elemty = self.ensure_type_rec(elemty); assert_ir!(len >= 1); assert_ir!(!impl_elemty.is_hybrid() && !impl_elemty.is_void()); MuType_::Array(impl_elemty, len) } NodeType::TypeVector { id: _, elemty, len } => { let impl_elemty = self.ensure_type_rec(elemty); assert_ir!(len >= 1); assert_ir!(!impl_elemty.is_hybrid() && !impl_elemty.is_void()); MuType_::Vector(impl_elemty, len) } NodeType::TypeVoid { id: _ } => MuType_::Void, NodeType::TypeTagRef64 { id: _ } => MuType_::Tagref64, NodeType::TypeRef { id: _, ty: toty } => { let impl_toty = self.ensure_type_rec(toty); MuType_::Ref(impl_toty) } NodeType::TypeIRef { id: _, ty: toty } => { let impl_toty = self.ensure_type_rec(toty); MuType_::IRef(impl_toty) } NodeType::TypeWeakRef { id: _, ty: toty } => { let impl_toty = self.ensure_type_rec(toty); MuType_::WeakRef(impl_toty) } NodeType::TypeFuncRef { id: _, sig } => { let impl_sig = self.ensure_sig_rec(sig); MuType_::FuncRef(impl_sig) } NodeType::TypeThreadRef { id: _ } => MuType_::ThreadRef, NodeType::TypeStackRef { id: _ } => MuType_::StackRef, #[cfg(feature = "realtime")] NodeType::TypeRegionRef { id: _ } => MuType_::RegionRef, #[cfg(feature = "realtime")] NodeType::TypeAttrRef { id: _ } => MuType_::AttrRef, #[cfg(feature = "realtime")] NodeType::TypeTimerRef { id: _ } => MuType_::TimerRef, #[cfg(feature = "realtime")] 
NodeType::TypeFutexRef { id: _ } => MuType_::FutexRef, ref t => panic!("{:?} not implemented", t) }; let impl_ty = MuType { hdr: hdr, v: impl_ty_ }; trace!("Type built: {} {:?}", id, impl_ty); self.built_types.insert(id, P(impl_ty)); } fn ensure_type_rec(&mut self, id: MuID) -> P { if self.b.bundle.types.contains_key(&id) { if self.visited.contains(&id) { match self.built_types.get(&id) { Some(t) => t.clone(), None => panic!("Cyclic types found. id: {}", id) } } else { self.build_type(id); self.built_types.get(&id).unwrap().clone() } } else { self.vm.get_type(id) } } fn get_built_type(&self, id: MuID) -> P { match self.built_types.get(&id) { Some(t) => t.clone(), None => self.vm.get_type(id) } } fn fill_struct_hybrid(&mut self, id: MuID, tag: &MuName) { let ty = self.b.bundle.types.get(&id).unwrap(); trace!("Filling struct or hybrid {} {:?}", id, ty); match **ty { // TODO: Check for recursive types NodeType::TypeStruct { id: _, ref fieldtys } => { let fieldtys_impl = fieldtys .iter() .map(|fid| self.ensure_type_rec(*fid)) .collect::>(); assert_ir!( fieldtys_impl.len() >= 1 && fieldtys_impl .iter() .all(|x| !x.is_hybrid() && !x.is_void()) ); MuType_::mustruct_put(tag, fieldtys_impl); trace!( "Struct {} filled: {:?}", id, STRUCT_TAG_MAP.read().unwrap().get(tag) ); } NodeType::TypeHybrid { id: _, ref fixedtys, varty } => { let fixedtys_impl = fixedtys .iter() .map(|fid| self.ensure_type_rec(*fid)) .collect::>(); let varty_impl = self.ensure_type_rec(varty); assert_ir!(fixedtys_impl .iter() .all(|x| !x.is_hybrid() && !x.is_void())); assert_ir!(!varty_impl.is_hybrid() && !varty_impl.is_void()); MuType_::hybrid_put(tag, fixedtys_impl, varty_impl); trace!( "Hybrid {} filled: {:?}", id, HYBRID_TAG_MAP.read().unwrap().get(tag) ); } ref t => panic!("{} {:?} should be a Struct or Hybrid type", id, ty) } } fn build_sig(&mut self, id: MuID) { self.visited.insert(id); let sig = self.b.bundle.sigs.get(&id).unwrap(); trace!("Building function signature {} {:?}", id, sig); let hdr 
= self.make_mu_entity_header(id);
        let impl_sig = MuFuncSig {
            hdr: hdr,
            ret_tys: sig
                .rettys
                .iter()
                .map(|i| self.ensure_type_rec(*i))
                .collect::<Vec<_>>(),
            arg_tys: sig
                .paramtys
                .iter()
                .map(|i| self.ensure_type_rec(*i))
                .collect::<Vec<_>>()
        };
        trace!("Function signature built: {} {:?}", id, impl_sig);
        self.built_sigs.insert(id, P(impl_sig));
    }

    /// Returns the built signature `id`, building it first if it belongs
    /// to this bundle; signatures outside the bundle are fetched from the
    /// VM. Panics on cyclic signature definitions.
    fn ensure_sig_rec(&mut self, id: MuID) -> P<MuFuncSig> {
        if self.b.bundle.sigs.contains_key(&id) {
            if self.visited.contains(&id) {
                match self.built_sigs.get(&id) {
                    Some(t) => t.clone(),
                    None => panic!("Cyclic signature found. id: {}", id)
                }
            } else {
                self.build_sig(id);
                self.built_sigs.get(&id).unwrap().clone()
            }
        } else {
            self.vm.get_func_sig(id)
        }
    }

    /// Converts a client-built constant node into a Zebu `Value`,
    /// validating the constant against its declared type.
    fn build_const(&mut self, id: MuID) {
        self.visited.insert(id);
        let con = self.b.bundle.consts.get(&id).unwrap();
        trace!("Building constant {} {:?}", id, con);
        let hdr = self.make_mu_entity_header(id);
        let (impl_con, impl_ty) = match **con {
            NodeConst::ConstInt { id: _, ty, value } => {
                let t = self.ensure_type_rec(ty);
                let c = Constant::Int(value);
                assert_ir!(t.is_int() || t.is_ptr());
                // The value must fit in the declared bit width.
                assert_ir!(value <= bits_ones(t.get_int_length().unwrap()));
                (c, t)
            }
            NodeConst::ConstIntEx { id: _, ty, ref value } => {
                let t = self.ensure_type_rec(ty);
                let c = Constant::IntEx(value.clone());
                assert_ir!(t.is_int() || t.is_ptr());
                // The word list must cover exactly the type's width rounded
                // up to 64 bits ...
                assert_ir!(
                    value.len() * 64 ==
                        align_up(t.get_int_length().unwrap(), 64)
                );
                // ... and the top word must not exceed the remaining bits.
                assert_ir!(
                    *value.last().unwrap() <=
                        bits_ones(
                            t.get_int_length().unwrap() -
                                (value.len() - 1) * 64
                        )
                );
                (c, t)
            }
            NodeConst::ConstFloat { id: _, ty, value } => {
                let t = self.ensure_type_rec(ty);
                let c = Constant::Float(value);
                assert_ir!(t.is_float());
                (c, t)
            }
            NodeConst::ConstDouble { id: _, ty, value } => {
                let t = self.ensure_type_rec(ty);
                let c = Constant::Double(value);
                assert_ir!(t.is_double());
                (c, t)
            }
            NodeConst::ConstNull { id: _, ty } => {
                let t = self.ensure_type_rec(ty);
                let c = Constant::NullRef;
                // BUGFIX: this check previously mixed bitwise `|` with
                // logical `||` on booleans (`is_iref() | is_funcref() |
                // ...`) — same result, but clearly a typo; use the
                // short-circuiting operator throughout.
                assert_ir!(
                    t.is_ref() ||
                        t.is_iref() ||
                        t.is_funcref() ||
                        t.is_opaque_reference()
                );
                (c, t)
            }
NodeConst::ConstExtern { id: _, ty, ref symbol } => { let t = self.ensure_type_rec(ty); let c = Constant::ExternSym(symbol.clone()); assert_ir!(symbol .as_bytes() .iter() .all(|x| *x >= 33 && *x <= 126 && *x != 34)); assert_ir!(t.is_ptr()); (c, t) } ref c => panic!("{:?} not implemented", c) }; let impl_val = Value { hdr: hdr, ty: impl_ty, v: Value_::Constant(impl_con) }; trace!("Constant built: {} {:?}", id, impl_val); self.built_constants.insert(id, P(impl_val)); } fn build_global(&mut self, id: MuID) { self.visited.insert(id); let global = self.b.bundle.globals.get(&id).unwrap(); trace!("Building global {} {:?}", id, global); let hdr = self.make_mu_entity_header(id); let impl_ty = self.ensure_type_rec(global.ty); // global type assert_ir!(!impl_ty.is_hybrid()); let impl_val = Value { hdr: hdr, ty: self.ensure_iref(impl_ty.id()), // iref to global v: Value_::Global(impl_ty) }; trace!("Global built: {} {:?}", id, impl_val); self.built_globals.insert(id, P(impl_val)); } fn build_func(&mut self, id: MuID) { self.visited.insert(id); let fun = self.b.bundle.funcs.get(&id).unwrap(); trace!("Building function {} {:?}", id, fun); let hdr = self.make_mu_entity_header(id); let impl_sig = self.ensure_sig_rec(fun.sig); let impl_fun = MuFunction { hdr: hdr.clone(), sig: impl_sig, cur_ver: None, all_vers: Default::default() }; trace!("Function built: {} {:?}", id, impl_fun); self.built_funcs.insert(id, Box::new(impl_fun)); let impl_ty = self.ensure_funcref(fun.sig); let impl_val = Value { hdr: hdr.clone(), ty: impl_ty, v: Value_::Constant(Constant::FuncRef(hdr)) }; trace!("Function value built: {} {:?}", id, impl_val); self.built_constants.insert(id, P(impl_val)); } fn get_sig_for_func(&mut self, id: MuID) -> P { if let Some(impl_func) = self.built_funcs.get(&id) { impl_func.sig.clone() } else { self.vm.get_sig_for_func(id) } } fn build_funcver(&mut self, id: MuID) { let fv = self.b.bundle.funcvers.get(&id).unwrap(); trace!("Building function version {} {:?}", id, fv); let hdr 
= self.make_mu_entity_header(id); let func_id = fv.func; let impl_sig = self.get_sig_for_func(func_id); self.current_sig = Some(impl_sig.clone()); let mut fcb: FuncCtxBuilder = Default::default(); let entry_id = *fv.bbs.first().unwrap(); self.current_entry = entry_id; let mut blocks = fv .bbs .iter() .map(|bbid| { let block = self.build_block(&mut fcb, *bbid); (*bbid, block) }) .collect::>(); let a = blocks .iter() .map(|(bbid, block)| { (*bbid, self.build_block_content(&mut fcb, *bbid, &blocks)) }) .collect::>(); for (bbi, body) in a { blocks[&bbi].content.as_mut().unwrap().body = body; } assert_ir!({ let c = blocks[&entry_id].content.as_ref().unwrap(); c.args.len() == impl_sig.arg_tys.len() && c.args .iter() .zip(&impl_sig.arg_tys) .all(|(arg, t)| arg.ty == *t) && c.exn_arg.is_none() }); let ctn = FunctionContent { entry: entry_id, blocks: blocks, exception_blocks: LinkedHashSet::new() }; let impl_fv = MuFunctionVersion::new_(hdr, func_id, impl_sig, ctn, fcb.ctx); trace!("Function version built {} {:?}", id, impl_fv); self.built_funcvers.insert(id, Box::new(impl_fv)); } /// Copied from ast::ir::*. That was implemented for the previous API which /// implies mutability. When we migrate later, we can assume the AST is /// almost fully immutable, and can be constructed in a functional /// recursive-descendent style. 
fn new_ssa( &self, fcb: &mut FuncCtxBuilder, id: MuID, ty: P ) -> P { let hdr = self.make_mu_entity_header(id); let val = P(Value { hdr: hdr, ty: ty, v: Value_::SSAVar(id) }); fcb.ctx.values.insert(id, SSAVarEntry::new(val.clone())); let tn = P(TreeNode { v: TreeNode_::Value(val) }); fcb.tree_nodes.insert(id, tn.clone()); tn } pub fn new_inst(&self, v: Instruction) -> P { P(TreeNode { v: TreeNode_::Instruction(v) }) } pub fn new_global(&self, v: P) -> P { P(TreeNode { v: TreeNode_::Value(v) }) } fn get_treenode(&self, fcb: &FuncCtxBuilder, id: MuID) -> P { if let Some(tn) = fcb.tree_nodes.get(&id) { tn.clone() } else if let Some(v) = self.built_constants.get(&id) { self.new_global(v.clone()) } else if let Some(v) = self.built_globals.get(&id) { self.new_global(v.clone()) } else { panic!("Operand {} is neither a local var or a global var", id) } } fn build_block(&mut self, fcb: &mut FuncCtxBuilder, id: MuID) -> Block { let bb = self.b.bundle.bbs.get(&id).unwrap(); trace!("Building basic block {} {:?}", id, bb); let nor_ids = &bb.nor_param_ids; let nor_tys = &bb.nor_param_types; let args = nor_ids .iter() .zip(nor_tys) .map(|(arg_id, arg_ty_id)| { let arg_ty = self.get_built_type(*arg_ty_id); self.new_ssa(fcb, *arg_id, arg_ty).clone_value() }) .collect::>(); let exn_arg = bb.exc_param_id.map(|arg_id| { let arg_ty = self.ensure_refvoid(); self.new_ssa(fcb, arg_id, arg_ty).clone_value() }); let hdr = self.make_mu_entity_header(id); let ctn = BlockContent { args: args, exn_arg: exn_arg, body: vec![], keepalives: None }; Block { hdr: hdr, content: Some(ctn), trace_hint: TraceHint::None, control_flow: Default::default() } } fn build_block_content( &mut self, fcb: &mut FuncCtxBuilder, id: MuID, blocks: &LinkedHashMap ) -> Vec> { let res = self .b .bundle .bbs .get(&id) .unwrap() .insts .iter() .map(|iid| self.build_inst(fcb, *iid, blocks)) .collect::>(); let n = res.len(); for i in 0..(n - 1) { // None of the internal instruction should be a terminator 
assert_ir!(!res[i].as_inst().is_terminal_inst()); } // The last instruction should be a terminator assert_ir!(res[n - 1].as_inst().is_terminal_inst()); res } fn build_inst( &mut self, fcb: &mut FuncCtxBuilder, id: MuID, blocks: &LinkedHashMap ) -> P { let inst = self.b.bundle.insts.get(&id).unwrap(); trace!("Building instruction {} {:?}", id, inst); let hdr = self.make_mu_entity_header(id); let impl_inst = match **inst { NodeInst::NodeBinOp { id: _, result_id, ref status_result_ids, optr, flags, ty, opnd1, opnd2, exc_clause } => { let impl_optr = match optr { CMU_BINOP_ADD => BinOp::Add, CMU_BINOP_SUB => BinOp::Sub, CMU_BINOP_MUL => BinOp::Mul, CMU_BINOP_SDIV => BinOp::Sdiv, CMU_BINOP_SREM => BinOp::Srem, CMU_BINOP_UDIV => BinOp::Udiv, CMU_BINOP_UREM => BinOp::Urem, CMU_BINOP_SHL => BinOp::Shl, CMU_BINOP_LSHR => BinOp::Lshr, CMU_BINOP_ASHR => BinOp::Ashr, CMU_BINOP_AND => BinOp::And, CMU_BINOP_OR => BinOp::Or, CMU_BINOP_XOR => BinOp::Xor, CMU_BINOP_FADD => BinOp::FAdd, CMU_BINOP_FSUB => BinOp::FSub, CMU_BINOP_FMUL => BinOp::FMul, CMU_BINOP_FDIV => BinOp::FDiv, CMU_BINOP_FREM => BinOp::FRem, _ => panic!("Illegal binary operator {}", optr) }; let impl_ty = self.get_built_type(ty); let impl_opnd1 = self.get_treenode(fcb, opnd1); let impl_opnd2 = self.get_treenode(fcb, opnd2); assert_ir!( impl_opnd1.ty() == impl_opnd2.ty() && impl_opnd1.ty() == impl_ty, "Invalid instruction {:?}: Operand types {} and {} \ are not what was expected {}", inst, impl_opnd1.ty(), impl_opnd2.ty(), impl_ty ); assert_ir!( if impl_optr.is_fp() { impl_ty.is_fp() } else { impl_ty.is_int() }, "Type {}, is invalid for a {:?} instruction", impl_ty, impl_optr ); let impl_rv = self.new_ssa(fcb, result_id, impl_ty).clone_value(); if exc_clause.is_some() { unimplemented!() } if flags == 0 { // binop Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd1, impl_opnd2], v: Instruction_::BinOp(impl_optr, 0, 1) } } else { let mut values = vec![]; // result values.push(impl_rv); // status 
flags let ty_i1 = self.ensure_i1(); let mut flags_count = 0; let mut impl_flags = BinOpStatus::none(); if flags & CMU_BOS_N != 0 { assert_ir!( !impl_optr.is_fp(), "N flag is invalid for {:?} instruction.", impl_optr ); impl_flags.flag_n = true; let flag_n = self .new_ssa( fcb, status_result_ids[flags_count], ty_i1.clone() ) .clone_value(); flags_count += 1; values.push(flag_n); } if flags & CMU_BOS_Z != 0 { assert_ir!( !impl_optr.is_fp(), "Z flag is invalid for {:?} instruction.", impl_optr ); impl_flags.flag_z = true; let flag_z = self .new_ssa( fcb, status_result_ids[flags_count], ty_i1.clone() ) .clone_value(); flags_count += 1; values.push(flag_z); } if flags & CMU_BOS_C != 0 { assert_ir!( impl_optr == BinOp::Mul || impl_optr == BinOp::Add || impl_optr == BinOp::Sub, "C flag is invalid for {:?} instruction.", impl_optr ); impl_flags.flag_c = true; let flag_c = self .new_ssa( fcb, status_result_ids[flags_count], ty_i1.clone() ) .clone_value(); flags_count += 1; values.push(flag_c); } if flags & CMU_BOS_V != 0 { assert_ir!( impl_optr == BinOp::Mul || impl_optr == BinOp::Add || impl_optr == BinOp::Sub, "V flag is invalid for {:?} instruction.", impl_optr ); impl_flags.flag_v = true; let flag_v = self .new_ssa( fcb, status_result_ids[flags_count], ty_i1.clone() ) .clone_value(); values.push(flag_v); } Instruction { hdr: hdr, value: Some(values), ops: vec![impl_opnd1, impl_opnd2], v: Instruction_::BinOpWithStatus( impl_optr, impl_flags, 0, 1 ) } } } NodeInst::NodeCmp { id: _, result_id, optr, ty, opnd1, opnd2 } => { let impl_optr = match optr { CMU_CMP_EQ => CmpOp::EQ, CMU_CMP_NE => CmpOp::NE, CMU_CMP_SGE => CmpOp::SGE, CMU_CMP_SGT => CmpOp::SGT, CMU_CMP_SLE => CmpOp::SLE, CMU_CMP_SLT => CmpOp::SLT, CMU_CMP_UGE => CmpOp::UGE, CMU_CMP_UGT => CmpOp::UGT, CMU_CMP_ULE => CmpOp::ULE, CMU_CMP_ULT => CmpOp::ULT, CMU_CMP_FFALSE => CmpOp::FFALSE, CMU_CMP_FTRUE => CmpOp::FTRUE, CMU_CMP_FUNO => CmpOp::FUNO, CMU_CMP_FUEQ => CmpOp::FUEQ, CMU_CMP_FUNE => CmpOp::FUNE, CMU_CMP_FUGT 
=> CmpOp::FUGT, CMU_CMP_FUGE => CmpOp::FUGE, CMU_CMP_FULT => CmpOp::FULT, CMU_CMP_FULE => CmpOp::FULE, CMU_CMP_FORD => CmpOp::FORD, CMU_CMP_FOEQ => CmpOp::FOEQ, CMU_CMP_FONE => CmpOp::FONE, CMU_CMP_FOGT => CmpOp::FOGT, CMU_CMP_FOGE => CmpOp::FOGE, CMU_CMP_FOLT => CmpOp::FOLT, CMU_CMP_FOLE => CmpOp::FOLE, _ => panic!("Illegal comparing operator {}", optr) }; // NOTE: vectors not implemented. Otherwise the result would be // a vector of int<1> let impl_i1 = self.ensure_i1(); let impl_opnd1 = self.get_treenode(fcb, opnd1); let impl_opnd2 = self.get_treenode(fcb, opnd2); let impl_ty = self.get_built_type(ty); let impl_rv = self.new_ssa(fcb, result_id, impl_i1).clone_value(); assert_ir!( impl_opnd1.ty() == impl_opnd2.ty() && impl_opnd1.ty() == impl_ty, "Operand types {} and {} are not what was expected {} (for {:?})", impl_opnd1.ty(), impl_opnd2.ty(), impl_ty, impl_optr ); assert_ir!(if impl_optr.is_fp_cmp() { impl_ty.is_fp() } else if impl_optr.is_eq_cmp() { impl_ty.is_eq_comparable() } else if impl_optr.is_ult_cmp() { impl_ty.is_ult_comparable() } else { impl_ty.is_int() }); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd1, impl_opnd2], v: Instruction_::CmpOp(impl_optr, 0, 1) } } NodeInst::NodeConv { id: _, result_id, optr, from_ty, to_ty, opnd } => { let impl_optr = match optr { CMU_CONV_TRUNC => ConvOp::TRUNC, CMU_CONV_ZEXT => ConvOp::ZEXT, CMU_CONV_SEXT => ConvOp::SEXT, CMU_CONV_FPTRUNC => ConvOp::FPTRUNC, CMU_CONV_FPEXT => ConvOp::FPEXT, CMU_CONV_FPTOUI => ConvOp::FPTOUI, CMU_CONV_FPTOSI => ConvOp::FPTOSI, CMU_CONV_UITOFP => ConvOp::UITOFP, CMU_CONV_SITOFP => ConvOp::SITOFP, CMU_CONV_BITCAST => ConvOp::BITCAST, CMU_CONV_REFCAST => ConvOp::REFCAST, CMU_CONV_PTRCAST => ConvOp::PTRCAST, _ => panic!("Illegal conversion operator {}", optr) }; let impl_from_ty = self.get_built_type(from_ty); let impl_to_ty = self.get_built_type(to_ty); let impl_opnd = self.get_treenode(fcb, opnd); let impl_rv = self .new_ssa(fcb, result_id, impl_to_ty.clone()) 
.clone_value(); assert_ir!( impl_opnd.ty() == impl_from_ty, "Invalid {:?} instruction: trying to \ convert {}, expected types {} -> \ {}", impl_optr, impl_opnd.ty(), impl_from_ty, impl_to_ty ); assert_ir!( match impl_optr { ConvOp::TRUNC => { let from_length = impl_from_ty.get_int_length(); let to_length = impl_to_ty.get_int_length(); from_length.is_some() && to_length.is_some() && from_length > to_length } ConvOp::ZEXT | ConvOp::SEXT => { let from_length = impl_from_ty.get_int_length(); let to_length = impl_to_ty.get_int_length(); from_length.is_some() && to_length.is_some() && from_length < to_length } ConvOp::FPTRUNC => { impl_from_ty.is_double() && impl_to_ty.is_float() } ConvOp::FPEXT => { impl_from_ty.is_float() && impl_to_ty.is_double() } ConvOp::FPTOUI | ConvOp::FPTOSI => { impl_from_ty.is_fp() && impl_to_ty.is_int() } ConvOp::UITOFP | ConvOp::SITOFP => { impl_from_ty.is_int() && impl_to_ty.is_fp() } ConvOp::BITCAST => { (impl_from_ty.is_float() && impl_to_ty.is_int() && impl_to_ty.get_int_length().unwrap() == 32) || (impl_from_ty.is_double() && impl_to_ty.is_int() && impl_to_ty.get_int_length().unwrap() == 64) || (impl_from_ty.is_int() && impl_from_ty.get_int_length().unwrap() == 32 && impl_to_ty.is_float()) || (impl_from_ty.is_int() && impl_from_ty.get_int_length().unwrap() == 64 && impl_to_ty.is_double()) } ConvOp::REFCAST => { (impl_from_ty.is_ref() && impl_to_ty.is_ref()) || (impl_from_ty.is_iref() && impl_to_ty.is_iref()) || (impl_from_ty.is_funcref() && impl_to_ty.is_funcref()) } ConvOp::PTRCAST => { (impl_from_ty.is_ptr() || impl_from_ty.is_int()) && (impl_to_ty.is_ptr() || impl_to_ty.is_int()) && !(impl_from_ty.is_int() && impl_to_ty.is_int()) } }, "Invalid {:?} instruction: Unsupported conversion {} -> {}", impl_optr, impl_from_ty, impl_to_ty ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::ConvOp { operation: impl_optr, from_ty: impl_from_ty, to_ty: impl_to_ty, operand: 0 } } } NodeInst::NodeSelect { 
id: _, result_id, cond_ty, opnd_ty, cond, if_true, if_false } => { let impl_cond_ty = self.get_built_type(cond_ty); let impl_opnd_ty = self.get_built_type(opnd_ty); let impl_cond = self.get_treenode(fcb, cond); let impl_if_true = self.get_treenode(fcb, if_true); let impl_if_false = self.get_treenode(fcb, if_false); // NOTE: only implemented scalar SELECT. Vector select is not // implemented yet. let impl_rv = self .new_ssa(fcb, result_id, impl_opnd_ty.clone()) .clone_value(); assert_ir!( impl_cond_ty.is_int() && impl_cond_ty.get_int_length().unwrap() == 1 ); assert_ir!( impl_if_true.ty() == impl_opnd_ty && impl_if_false.ty() == impl_opnd_ty ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_cond, impl_if_true, impl_if_false], v: Instruction_::Select { cond: 0, true_val: 1, false_val: 2 } } } NodeInst::NodeBranch { id: _, dest } => { let mut ops: Vec> = Vec::new(); let impl_dest = self.build_destination(fcb, dest, &mut ops, &[], blocks); Instruction { hdr: hdr, value: None, ops: ops, v: Instruction_::Branch1(impl_dest) } } NodeInst::NodeBranch2 { id: _, cond, if_true, if_false } => { let mut ops: Vec> = Vec::new(); let impl_opnd = self.add_opnd(fcb, &mut ops, cond); assert_ir!( impl_opnd.ty().is_int() && impl_opnd.ty().get_int_length().unwrap() == 1 ); let impl_dest_true = self.build_destination(fcb, if_true, &mut ops, &[], blocks); let impl_dest_false = self.build_destination( fcb, if_false, &mut ops, &[], blocks ); Instruction { hdr: hdr, value: None, ops: ops, v: Instruction_::Branch2 { cond: 0, true_dest: impl_dest_true, false_dest: impl_dest_false, true_prob: DEFAULT_TRUE_PROB } } } NodeInst::NodeSwitch { id: _, opnd_ty, opnd, default_dest, ref cases, ref dests } => { let mut ops: Vec> = Vec::new(); let impl_opnd = self.add_opnd(fcb, &mut ops, opnd); let impl_opnd_ty = self.get_built_type(opnd_ty); assert_ir!(impl_opnd_ty.is_eq_comparable()); assert_ir!(impl_opnd.ty() == impl_opnd_ty); let impl_dest_def = self.build_destination( fcb, 
default_dest, &mut ops, &[], blocks ); let impl_branches = cases .iter() .zip(dests) .map(|(cid, did)| { let case_opindex = ops.len(); let impl_case = self.add_opnd(fcb, &mut ops, *cid); assert_ir!( impl_case.ty() == impl_opnd_ty && impl_case.as_value().is_const() ); // TODO: Check that each case value is unique let impl_dest = self.build_destination( fcb, *did, &mut ops, &[], blocks ); (case_opindex, impl_dest) }) .collect::>(); Instruction { hdr: hdr, value: None, ops: ops, v: Instruction_::Switch { cond: 0, default: impl_dest_def, branches: impl_branches } } } NodeInst::NodeCall { id: _, ref result_ids, sig, callee, ref args, exc_clause, keepalive_clause } => { let impl_sig = self.ensure_sig_rec(sig); self.build_call_or_ccall( fcb, hdr, result_ids, &impl_sig, callee, args, exc_clause, keepalive_clause, false, CallConvention::Mu, blocks ) } NodeInst::NodeTailCall { id: _, sig, callee, ref args } => { let mut ops: Vec> = Vec::new(); let rettys = &self.current_sig.clone().unwrap().ret_tys; let signode = self.ensure_sig_rec(sig); assert_ir!(*rettys == signode.ret_tys); let call_data = self.build_call_data( fcb, &mut ops, callee, args, &signode, false, CallConvention::Mu ); Instruction { hdr: hdr, value: None, ops: ops, v: Instruction_::TailCall(call_data) } } NodeInst::NodeRet { id: _, ref rvs } => { let rettys = &self.current_sig.clone().unwrap().ret_tys; assert_ir!(rettys.len() == rvs.len()); let ops = rvs .iter() .zip(rettys) .map(|(rvid, rty)| { let op = self.get_treenode(fcb, *rvid); assert_ir!(op.ty() == *rty); op }) .collect::>(); let op_indexes = (0..(ops.len())).collect::>(); Instruction { hdr: hdr, value: None, ops: ops, v: Instruction_::Return(op_indexes) } } NodeInst::NodeThrow { id: _, exc } => { let impl_exc = self.get_treenode(fcb, exc); assert_ir!(impl_exc.ty().is_ref()); Instruction { hdr: hdr, value: None, ops: vec![impl_exc], v: Instruction_::Throw(0) } } NodeInst::NodeNew { id: _, result_id, allocty, exc_clause } => { assert!( 
exc_clause.is_none(), "exc_clause is not implemented for NEW" ); let impl_allocty = self.get_built_type(allocty); assert_ir!(!impl_allocty.is_hybrid()); let impl_rvtype = self.ensure_ref(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![], v: Instruction_::New(impl_allocty) } } NodeInst::NodeNewHybrid { id: _, result_id, allocty, lenty, length, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for NEWHYBRID" ); let impl_allocty = self.get_built_type(allocty); let impl_lenty = self.get_built_type(lenty); let impl_length = self.get_treenode(fcb, length); let impl_rvtype = self.ensure_ref(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(impl_allocty.is_hybrid() && impl_lenty.is_int()); assert_ir!(impl_lenty == impl_length.ty()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_length], v: Instruction_::NewHybrid(impl_allocty, 0) } } NodeInst::NodeAlloca { id: _, result_id, allocty, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for ALLOCA" ); let impl_allocty = self.get_built_type(allocty); let impl_rvtype = self.ensure_iref(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(!impl_allocty.is_hybrid()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![], v: Instruction_::AllocA(impl_allocty) } } NodeInst::NodeAllocaHybrid { id: _, result_id, allocty, lenty, length, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for ALLOCAHYBRID" ); let impl_allocty = self.get_built_type(allocty); let impl_lenty = self.get_built_type(lenty); let impl_length = self.get_treenode(fcb, length); let impl_rvtype = self.ensure_iref(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(impl_allocty.is_hybrid() && impl_lenty.is_int()); assert_ir!(impl_lenty == 
impl_length.ty()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_length], v: Instruction_::AllocAHybrid(impl_allocty, 0) } } #[cfg(feature = "realtime")] NodeInst::NodeAllocaU { id: _, result_id, allocty, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for ALLOCAU" ); let impl_allocty = self.get_built_type(allocty); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(!impl_allocty.is_hybrid()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![], v: Instruction_::AllocAU(impl_allocty) } } #[cfg(feature = "realtime")] NodeInst::NodeAllocaUHybrid { id, result_id, allocty, lenty, length, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for ALLOCAUHYBRID" ); let impl_allocty = self.get_built_type(allocty); let impl_lenty = self.get_built_type(lenty); let impl_length = self.get_treenode(fcb, length); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(impl_allocty.is_hybrid() && impl_lenty.is_int()); assert_ir!(impl_lenty == impl_length.ty()); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![impl_length], v: Instruction_::AllocAUHybrid(impl_allocty, 0) } } #[cfg(feature = "realtime")] NodeInst::NodeEAlloc { id, result_id, allocty, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for EALLOC" ); let impl_allocty = self.get_built_type(allocty); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(!impl_allocty.is_hybrid()); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![], v: Instruction_::eAlloc(impl_allocty) } } #[cfg(feature = "realtime")] NodeInst::NodeEAllocHybrid { id, result_id, allocty, lenty, length, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for EALLOCHYBRID" ); 
let impl_allocty = self.get_built_type(allocty); let impl_lenty = self.get_built_type(lenty); let impl_length = self.get_treenode(fcb, length); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(impl_allocty.is_hybrid() && impl_lenty.is_int()); assert_ir!(impl_lenty == impl_length.ty()); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![impl_length], v: Instruction_::eAllocHybrid(impl_allocty, 0) } } #[cfg(feature = "realtime")] NodeInst::NodeEDelete { id, ptrty, ptr, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for EDELETE" ); // let impl_ptrty = self.ensure_type_rec(ptrty); let ptrtype = self.ensure_type_rec(ptrty); let ptr = self.get_treenode(fcb, ptr); assert_ir!(ptrtype.is_ptr()); assert_ir!(ptr.ty() == ptrtype); Instruction { hdr, value: None, ops: vec![ptr], v: Instruction_::eDelete(0) } } #[cfg(feature = "realtime")] NodeInst::NodeNewRegion { id, result_id, size, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for NEWREGION" ); let size = self.get_treenode(fcb, size as usize); let rvtype = self.ensure_regionref(); let rv = self.new_ssa(fcb, result_id, rvtype).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops: vec![size], v: Instruction_::NewReg(0) } } #[cfg(feature = "realtime")] NodeInst::NodeDeleteRegion { id, ptr, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for DELETEREGION" ); let ptr = self.get_treenode(fcb, ptr as usize); Instruction { hdr, value: None, ops: vec![ptr], v: Instruction_::NewReg(0) } } #[cfg(feature = "realtime")] NodeInst::NodeRAlloc { id, result_id, allocty, reg, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for RALLOC" ); let reg = self.get_treenode(fcb, reg as usize); let impl_allocty = self.get_built_type(allocty); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, 
impl_rvtype).clone_value(); assert_ir!(!impl_allocty.is_hybrid()); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![reg], v: Instruction_::rAlloc(0, impl_allocty) } } #[cfg(feature = "realtime")] NodeInst::NodeRAllocHybrid { id, result_id, allocty, lenty, length, reg, exc_clause } => { assert!( exc_clause.is_none(), "exc_clause is not implemented for RALLOCHYBRID" ); let reg = self.get_treenode(fcb, reg as usize); let impl_allocty = self.get_built_type(allocty); let impl_lenty = self.get_built_type(lenty); let impl_length = self.get_treenode(fcb, length); let impl_rvtype = self.ensure_uptr(allocty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(impl_allocty.is_hybrid() && impl_lenty.is_int()); assert_ir!(impl_lenty == impl_length.ty()); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![reg, impl_length], v: Instruction_::rAllocHybrid(0, impl_allocty, 1) } } NodeInst::NodeGetIRef { id, result_id, refty, opnd } => { let impl_refty = self.get_built_type(refty); let impl_opnd = self.get_treenode(fcb, opnd); let impl_rvtype = self.ensure_iref(refty); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); assert_ir!(match impl_opnd.ty().v { MuType_::Ref(ref r) => *r == impl_refty, _ => false }); Instruction { hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::GetIRef(0) } } NodeInst::NodeGetFieldIRef { id, result_id, is_ptr, refty, index, opnd } => { let impl_opnd = self.get_treenode(fcb, opnd); let index = index as usize; let impl_refty = self.get_built_type(refty); let refty_node = self.b.bundle.types.get(&refty).unwrap(); assert_ir!( match impl_opnd.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid GETFIELDIREF: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_opnd.ty() ); let field_ty_id = match **refty_node { NodeType::TypeStruct { id: _, ref fieldtys } => fieldtys[index], NodeType::TypeHybrid { 
id: _, ref fixedtys, varty: _ } => fixedtys[index], ref t => { panic!( "GETFIELDIREF {}: Expected struct or hybrid type. actual: {:?}", id, t ) } }; let impl_rvtype = self.ensure_iref_or_uptr(field_ty_id, is_ptr); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::GetFieldIRef { is_ptr: is_ptr, base: 0, index: index } } } NodeInst::NodeGetElemIRef { id: _, result_id, is_ptr, refty, indty, opnd, index } => { let impl_opnd = self.get_treenode(fcb, opnd); let impl_index = self.get_treenode(fcb, index); let impl_refty = self.get_built_type(refty); let impl_indty = self.get_built_type(indty); let refty_node = self.b.bundle.types.get(&refty).unwrap(); assert_ir!( impl_indty.is_int() && impl_index.ty() == impl_indty ); assert_ir!( match impl_opnd.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid GETELEMIREF: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_opnd.ty() ); let elem_ty_id = match **refty_node { NodeType::TypeArray { id: _, elemty, len: _ } => elemty, NodeType::TypeVector { id: _, elemty, len: _ } => elemty, ref t => { panic!( "GETELEMIREF {}: Expected array or vector type. 
actual: {:?}", id, t ) } }; let impl_rvtype = self.ensure_iref_or_uptr(elem_ty_id, is_ptr); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd, impl_index], v: Instruction_::GetElementIRef { is_ptr: is_ptr, base: 0, index: 1 } } } NodeInst::NodeShiftIRef { id: _, result_id, is_ptr, refty, offty, opnd, offset } => { let impl_opnd = self.get_treenode(fcb, opnd); let impl_offset = self.get_treenode(fcb, offset); let impl_rvtype = self.ensure_iref_or_uptr(refty, is_ptr); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); let impl_refty = self.get_built_type(refty); let impl_offty = self.get_built_type(offty); assert_ir!( impl_offty.is_int() && impl_offset.ty() == impl_offty ); assert_ir!( match impl_opnd.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid SHIFTIREF: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_opnd.ty() ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd, impl_offset], v: Instruction_::ShiftIRef { is_ptr: is_ptr, base: 0, offset: 1 } } } NodeInst::NodeGetVarPartIRef { id: _, result_id, is_ptr, refty, opnd } => { let impl_opnd = self.get_treenode(fcb, opnd); let refty_node = self.b.bundle.types.get(&refty).unwrap(); let impl_refty = self.get_built_type(refty); assert_ir!( match impl_opnd.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid GETVARPARTIREF: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_opnd.ty() ); let elem_ty_id = match **refty_node { NodeType::TypeHybrid { id: _, fixedtys: _, varty } => varty, ref t => panic!( "GETVARPARTIREF {}: Expected hybrid type. 
actual: {:?}", id, t ) }; let impl_rvtype = self.ensure_iref_or_uptr(elem_ty_id, is_ptr); let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::GetVarPartIRef { is_ptr: is_ptr, base: 0 } } } NodeInst::NodeLoad { id: _, result_id, is_ptr, ord, refty, loc, exc_clause } => { let impl_ord = self.build_mem_ord(ord); let impl_loc = self.get_treenode(fcb, loc); let impl_rvtype = self.get_built_type(refty); let impl_actual_rvtype = self.ensure_strong_variant(&impl_rvtype); let impl_rv = self .new_ssa(fcb, result_id, impl_actual_rvtype) .clone_value(); let impl_refty = self.get_built_type(refty); assert_ir!( impl_ord != MemoryOrder::Release && impl_ord != MemoryOrder::AcqRel ); assert_ir!( match impl_loc.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid LOAD: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_loc.ty() ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_loc], v: Instruction_::Load { is_ptr: is_ptr, order: impl_ord, mem_loc: 0 } } } NodeInst::NodeStore { id: _, is_ptr, ord, refty, loc, newval, exc_clause } => { let impl_ord = self.build_mem_ord(ord); let impl_loc = self.get_treenode(fcb, loc); let impl_newval = self.get_treenode(fcb, newval); let impl_rvtype = self.get_built_type(refty); let impl_refty = self.get_built_type(refty); assert_ir!( impl_ord != MemoryOrder::Acquire && impl_ord != MemoryOrder::Consume && impl_ord != MemoryOrder::AcqRel ); assert_ir!( match impl_loc.ty().v { MuType_::IRef(ref r) => !is_ptr && *r == impl_refty, MuType_::UPtr(ref r) => is_ptr && *r == impl_refty, _ => false }, "Invalid STORE: (PTR[{}] + {}) != {}", is_ptr, impl_refty, impl_loc.ty() ); assert_ir!( impl_newval.ty().v == impl_refty.v.strong_variant(), "Invalid STORE: Can't store a {} to a {}", impl_newval.ty(), impl_refty ); Instruction { hdr: hdr, value: None, ops: 
vec![impl_loc, impl_newval], v: Instruction_::Store { is_ptr: is_ptr, order: impl_ord, mem_loc: 0, value: 1 } } } NodeInst::NodeCCall { id: _, ref result_ids, callconv, callee_ty, sig, callee, ref args, exc_clause, keepalive_clause } => { let impl_sig = self.ensure_sig_rec(sig); let impl_callee_ty = self.get_built_type(callee_ty); assert_ir!(callconv == CMU_CC_DEFAULT); assert_ir!(match impl_callee_ty.v { MuType_::UFuncPtr(ref s) => *s == impl_sig, _ => false }); self.build_call_or_ccall( fcb, hdr, result_ids, &impl_sig, callee, args, exc_clause, keepalive_clause, true, CallConvention::Foreign(ForeignFFI::C), blocks ) } NodeInst::NodeSwapStack { id: _, ref result_ids, swappee, // MuVarNode cur_stack_clause, //MuCurStackClause new_stack_clause, //MuNewStackClause exc_clause, //Option, keepalive_clause // Option, } => { let mut ops: Vec> = vec![self.get_treenode(fcb, swappee)]; assert_ir!(ops[0].ty().is_stackref()); let cur_stack_clause = self.b.bundle.cs_clauses.get(&cur_stack_clause).unwrap(); let new_stack_clause = self.b.bundle.ns_clauses.get(&new_stack_clause).unwrap(); let empty_vec = Vec::::new(); let rettys_ids = match **cur_stack_clause { NodeCurrentStackClause::RetWith { ref rettys, .. } => { &rettys } NodeCurrentStackClause::KillOld { .. } => &empty_vec }; assert_ir!(result_ids.len() == rettys_ids.len()); let rvs = result_ids .iter() .zip(rettys_ids) .map(|(rvid, rvty)| { let impl_rvty = self.get_built_type(*rvty); self.new_ssa(fcb, *rvid, impl_rvty).clone_value() }) .collect::>(); let (is_exception, args) = self.build_new_stack_clause( new_stack_clause, fcb, &mut ops ); match exc_clause { Some(ecid) => { let ecnode = self.b.bundle.exc_clauses.get(&ecid).unwrap(); let impl_normal_dest = self.build_destination( fcb, ecnode.nor, &mut ops, result_ids, blocks ); let impl_exn_dest = self.build_destination( fcb, ecnode.exc, &mut ops, &[], blocks ); assert_ir!(match **cur_stack_clause { // Can't have an exception clause NodeCurrentStackClause::KillOld { .. 
} => false, _ => true }); Instruction { hdr: hdr, value: Some(rvs), ops: ops, v: Instruction_::SwapStackExc { stack: 0, is_exception: is_exception, args: args, resume: ResumptionData { normal_dest: impl_normal_dest, exn_dest: impl_exn_dest } } } } None => match **cur_stack_clause { NodeCurrentStackClause::RetWith { .. } => Instruction { hdr: hdr, value: Some(rvs), ops: ops, v: Instruction_::SwapStackExpr { stack: 0, is_exception: is_exception, args: args } }, NodeCurrentStackClause::KillOld { .. } => Instruction { hdr: hdr, value: Some(rvs), ops: ops, v: Instruction_::SwapStackKill { stack: 0, is_exception: is_exception, args: args } } } } } NodeInst::NodeNewThread { id, result_id, stack, threadlocal, new_stack_clause, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let mut ops: Vec> = vec![self.get_treenode(fcb, stack)]; assert_ir!(ops[0].ty().is_stackref()); let new_stack_clause = self.b.bundle.ns_clauses.get(&new_stack_clause).unwrap(); let impl_threadref = self.ensure_threadref(); let impl_rv = self.new_ssa(fcb, result_id, impl_threadref).clone_value(); let threadlocal = match threadlocal { Some(tl) => { let index = ops.len(); let tl = self.add_opnd(fcb, &mut ops, tl); assert_ir!( tl.ty().is_ref() && tl.ty().get_referent_ty().unwrap().is_void() ); Some(index) } None => None }; let (is_exception, args) = self.build_new_stack_clause( new_stack_clause, fcb, &mut ops ); Instruction { hdr, value: Some(vec![impl_rv]), ops, v: Instruction_::NewThread { stack: 0, thread_local: threadlocal, is_exception, args } } } #[cfg(feature = "realtime")] NodeInst::NodeNewRTThread { id, result_id, attr, stack, threadlocal, new_stack_clause, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let mut ops: Vec> = vec![ self.get_treenode(fcb, stack), self.get_treenode(fcb, attr), ]; assert_ir!(ops[0].ty().is_stackref()); let new_stack_clause = self.b.bundle.ns_clauses.get(&new_stack_clause).unwrap(); let impl_threadref = self.ensure_threadref(); let impl_rv 
= self.new_ssa(fcb, result_id, impl_threadref).clone_value(); let threadlocal = match threadlocal { Some(tl) => { let index = ops.len(); let tl = self.add_opnd(fcb, &mut ops, tl); assert_ir!( tl.ty().is_ref() && tl.ty().get_referent_ty().unwrap().is_void() ); Some(index) } None => None }; let (is_exception, args) = self.build_new_stack_clause( new_stack_clause, fcb, &mut ops ); Instruction { hdr, value: Some(vec![impl_rv]), ops, v: Instruction_::NewRTThread { attr: 1, stack: 0, thread_local: threadlocal, is_exception, args } } } #[cfg(feature = "realtime")] NodeInst::NodeExitThread { id, tref, exc_clause } => { unimplemented!(); } #[cfg(feature = "realtime")] NodeInst::NodeYield { id, exc_clause } => { unimplemented!(); } NodeInst::NodeNewFutex { id, result_id } => { let futexref_t = self.ensure_futexref(); let rv = self.new_ssa(fcb, result_id, futexref_t).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops: vec![], v: Instruction_::NewFutex } } #[cfg(feature = "realtime")] NodeInst::NodeNewAttr { id, result_id } => { let attrref_t = self.ensure_attrref(); let rv = self.new_ssa(fcb, result_id, attrref_t).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops: vec![], v: Instruction_::NewAttr } } #[cfg(feature = "realtime")] NodeInst::NodeSetAttr { id, tref, aref, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let mut ops: Vec> = vec![ self.get_treenode(fcb, tref), self.get_treenode(fcb, aref), ]; assert_ir!( ops[0].ty().is_threadref() && ops[1].ty().is_attrref() ); Instruction { hdr, value: None, ops, v: Instruction_::ThreadSetAttr(0, 1) } } #[cfg(feature = "realtime")] NodeInst::NodeGetAttr { id, result_id, tref, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let mut ops: Vec> = vec![self.get_treenode(fcb, tref)]; assert_ir!(ops[0].ty().is_threadref()); let attrref_t = self.ensure_attrref(); let rv = self.new_ssa(fcb, result_id, attrref_t).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops, v: 
Instruction_::ThreadGetAttr(0) } } #[cfg(feature = "realtime")] NodeInst::NodeGetTime { id, result_id } => { let time_t = self.ensure_i64(); let rv = self.new_ssa(fcb, result_id, time_t).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops: vec![], v: Instruction_::GetTime } } #[cfg(feature = "realtime")] NodeInst::NodeSetTime { id, tm, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let mut ops: Vec> = vec![self.get_treenode(fcb, tm)]; Instruction { hdr, value: None, ops, v: Instruction_::SetTime(0) } } #[cfg(feature = "realtime")] NodeInst::NodeNewTimer { id, result_id, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } let timerref_t = self.ensure_timerref(); let rv = self.new_ssa(fcb, result_id, timerref_t).clone_value(); Instruction { hdr, value: Some(vec![rv]), ops: vec![], v: Instruction_::NewTimer } } #[cfg(feature = "realtime")] NodeInst::NodeDeleteTimer { id, tmr } => { let mut ops: Vec> = vec![self.get_treenode(fcb, tmr)]; assert_ir!(ops[0].ty().is_timerref()); Instruction { hdr, value: None, ops, v: Instruction_::DeleteTimer(0) } } #[cfg(feature = "realtime")] NodeInst::NodeSetTimer { id, tmr, tm, prd, fsig, func, ref args, exc_clause } => { if exc_clause.is_some() { unimplemented!(); } trace!("NodeInst::NodeSetTimer"); // let func = self.get_treenode(fcb, func); // let fsig = self.ensure_sig_rec(fsig); let impl_sig = self.ensure_sig_rec(fsig); // assert_ir!(match func.ty().v { // MuType_::FuncRef(ref sig) => *sig == fsig, // _ => false // }); let mut ops: Vec> = vec![ self.get_treenode(fcb, tmr), self.get_treenode(fcb, tm), self.get_treenode(fcb, prd), ]; assert_ir!(ops[0].ty().is_timerref()); let mut cd_ops: Vec> = Vec::new(); let mut call_data = self.build_call_data( fcb, &mut cd_ops, func, args, &impl_sig, false, CallConvention::Mu ); call_data.func = 3; ops.extend(cd_ops); trace!("ops = {:#?}", ops); Instruction { hdr, value: None, ops, v: Instruction_::SetTimer { tmr: 0, tm: 1, prd: 2, data: call_data } } } 
#[cfg(feature = "realtime")] NodeInst::NodeCancelTimer { id, tmr } => { let mut ops: Vec> = vec![self.get_treenode(fcb, tmr)]; Instruction { hdr, value: None, ops, v: Instruction_::CancelTimer(0) } } NodeInst::NodeCommInst { id, ref result_ids, opcode, ref flags, ref tys, ref sigs, ref args, ref exc_clause, ref keepalive_clause } => self.build_comm_inst( fcb, hdr, result_ids, opcode, flags, tys, sigs, args, exc_clause, keepalive_clause ), ref i => panic!("{:?} not implemented", i) }; trace!("Instruction built {} {:?}", id, impl_inst); self.new_inst(impl_inst) } fn build_destination( &mut self, fcb: &mut FuncCtxBuilder, id: MuID, ops: &mut Vec>, inst_result_ids: &[MuID], blocks: &LinkedHashMap ) -> Destination { let dest_clause = self.b.bundle.dest_clauses.get(&id).unwrap(); let target = dest_clause.dest; assert_ir!(target != self.current_entry); let ref block = blocks[&target]; let target_block = block.content.as_ref().unwrap(); assert_ir!(target_block.args.len() == dest_clause.vars.len()); let dest_args = dest_clause .vars .iter() .zip(&target_block.args) .map(|(vid, arg)| { // if let Some(ind) = // inst_result_ids.iter().position(|rid| *rid == *vid) { // DestArg::Freshbound(ind) // } else { // let my_index = ops.len(); // self.add_opnd(fcb, ops, *vid); // DestArg::Normal(my_index) // } let my_index = ops.len(); let op = self.add_opnd(fcb, ops, *vid); assert_ir!(op.ty() == arg.ty, "op.ty() is -{:#?}- but arg.ty is -{:#?}", op.ty(), arg.ty); DestArg::Normal(my_index) }) .collect::>(); let impl_dest = Destination { target: block.hdr.clone(), args: dest_args }; impl_dest } fn add_opnd( &mut self, fcb: &mut FuncCtxBuilder, ops: &mut Vec>, opnd: MuID ) -> P { let impl_opnd = self.get_treenode(fcb, opnd); ops.push(impl_opnd.clone()); impl_opnd } fn add_opnds( &mut self, fcb: &mut FuncCtxBuilder, ops: &mut Vec>, opnds: &[MuID] ) -> Vec> { let mut res = Vec::>::new(); for opnd in opnds { res.push(self.add_opnd(fcb, ops, *opnd)) } res } // Returns true indicating an 
exception clause // and returns a list of ops fn build_new_stack_clause( &mut self, nsc: &NodeNewStackClause, fcb: &mut FuncCtxBuilder, ops: &mut Vec> ) -> (bool, Vec) { match nsc { &NodeNewStackClause::PassValues { ref tys, ref vars, .. } => { let args_begin_index = ops.len(); let args = self.add_opnds(fcb, ops, vars); assert_ir!( args.len() == tys.len() && args .iter() .zip(tys) .all(|(arg, tid)| arg.ty() == self.get_built_type(*tid)) ); let arg_indices = (args_begin_index..(vars.len() + 1)).collect::>(); (false, arg_indices) } &NodeNewStackClause::ThrowExc { ref exc, .. } => { let exc_arg = ops.len(); self.add_opnd(fcb, ops, *exc); (true, vec![exc_arg]) } } } fn build_call_data( &mut self, fcb: &mut FuncCtxBuilder, ops: &mut Vec>, callee: MuID, args: &[MuID], sig: &P, is_ccall: bool, call_conv: CallConvention ) -> CallData { let func_index = ops.len(); let callee = self.add_opnd(fcb, ops, callee); let args_begin_index = ops.len(); let impl_args = self.add_opnds(fcb, ops, args); assert_ir!( impl_args.len() == sig.arg_tys.len() && impl_args .iter() .zip(&sig.arg_tys) .all(|(arg, t)| arg.ty() == *t) ); assert_ir!(match callee.ty().v { MuType_::FuncRef(ref s) => !is_ccall && *s == *sig, MuType_::UFuncPtr(ref s) => is_ccall && *s == *sig, _ => false }); let args_opindexes = (args_begin_index..(args.len() + 1)).collect::>(); let call_data = CallData { func: func_index, args: args_opindexes, convention: call_conv }; call_data } fn build_call_or_ccall( &mut self, fcb: &mut FuncCtxBuilder, hdr: MuEntityHeader, result_ids: &[MuID], sig: &P, callee: MuID, args: &[MuID], exc_clause: Option, keepalive_claue: Option, is_ccall: bool, call_conv: CallConvention, blocks: &LinkedHashMap ) -> Instruction { let mut ops: Vec> = Vec::new(); let call_data = self.build_call_data( fcb, &mut ops, callee, args, sig, is_ccall, CallConvention::Mu ); let rettys = &sig.ret_tys; assert_ir!(result_ids.len() == rettys.len()); let rvs = result_ids .iter() .zip(rettys) .map(|(rvid, rty)| { 
self.new_ssa(fcb, *rvid, rty.clone()).clone_value() }) .collect::>(); if let Some(ecid) = exc_clause { // terminating inst let ecnode = self.b.bundle.exc_clauses.get(&ecid).unwrap(); let impl_normal_dest = self.build_destination( fcb, ecnode.nor, &mut ops, result_ids, blocks ); let impl_exn_dest = self.build_destination(fcb, ecnode.exc, &mut ops, &[], blocks); let resumption_data = ResumptionData { normal_dest: impl_normal_dest, exn_dest: impl_exn_dest }; let impl_inst_ = if is_ccall { Instruction_::CCall { data: call_data, resume: resumption_data } } else { Instruction_::Call { data: call_data, resume: resumption_data } }; Instruction { hdr: hdr, value: Some(rvs), ops: ops, v: impl_inst_ } } else { // non-terminating inst Instruction { hdr: hdr, value: Some(rvs), ops: ops, v: if is_ccall { Instruction_::ExprCCall { data: call_data, is_abort: false } } else { Instruction_::ExprCall { data: call_data, is_abort: false } } } } } #[allow(unused_variables)] fn build_comm_inst( &mut self, fcb: &mut FuncCtxBuilder, hdr: MuEntityHeader, result_ids: &Vec, opcode: MuCommInst, flags: &Vec, tys: &Vec, sigs: &Vec, args: &Vec, exc_clause: &Option, keepalives: &Option ) -> Instruction { match opcode { CMU_CI_UVM_GET_THREADLOCAL => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && args.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); let rv_ty = self.ensure_refvoid(); let rv = self.new_ssa(fcb, result_ids[0], rv_ty).clone_value(); Instruction { hdr: hdr, value: Some(vec![rv]), ops: vec![], v: Instruction_::CommonInst_GetThreadLocal } } CMU_CI_UVM_SET_THREADLOCAL => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && result_ids.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(args.len() == 1); let op = self.get_treenode(fcb, args[0]); assert_ir!( op.ty().is_ref() && op.ty().get_referent_ty().unwrap().is_void(), "@uvm.set_threadlocal expected ref got {}", op.ty() ); Instruction 
{ hdr: hdr, value: None, ops: vec![op], v: Instruction_::CommonInst_SetThreadLocal(0) } } CMU_CI_UVM_NATIVE_PIN => { assert_ir!( sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); assert!(tys.len() == 1); let op_ty = self.ensure_type_rec(tys[0]); let op = self.get_treenode(fcb, args[0]); assert_ir!(op_ty.is_iref() || op_ty.is_ref()); assert_ir!(op.ty() == op_ty); let referent_ty = match op_ty.get_referent_ty() { Some(ty) => ty, _ => panic!( "expected ty in PIN to be ref/iref, found {}", op_ty ) }; let rv_ty = self.ensure_uptr(referent_ty.id()); let rv = self.new_ssa(fcb, result_ids[0], rv_ty).clone_value(); Instruction { hdr: hdr, value: Some(vec![rv]), ops: vec![op], v: Instruction_::CommonInst_Pin(0) } } CMU_CI_UVM_NATIVE_UNPIN => { assert_ir!( sigs.is_empty() && flags.is_empty() && result_ids.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(args.len() == 1); assert!(tys.len() == 1); let op_ty = self.ensure_type_rec(tys[0]); let op = self.get_treenode(fcb, args[0]); assert_ir!(op_ty.is_iref() || op_ty.is_ref()); assert_ir!(op.ty() == op_ty); Instruction { hdr: hdr, value: None, ops: vec![op], v: Instruction_::CommonInst_Unpin(0) } } CMU_CI_UVM_THREAD_EXIT => { assert_ir!( tys.is_empty() && args.is_empty() && sigs.is_empty() && flags.is_empty() && result_ids.is_empty() && exc_clause.is_none() && keepalives.is_none() ); Instruction { hdr: hdr, value: None, ops: vec![], v: Instruction_::ThreadExit } } CMU_CI_UVM_NEW_STACK => { assert_ir!( tys.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(sigs.len() == 1); assert!(args.len() == 1); assert!(result_ids.len() == 1); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_sig = self.ensure_sig_rec(sigs[0]); assert_ir!(impl_sig.ret_tys.is_empty()); // The function isn't supposed to return assert_ir!(match impl_opnd.ty().v { MuType_::FuncRef(ref sig) => *sig == 
impl_sig, _ => false }); let impl_stackref = self.ensure_stackref(); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_stackref) .clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::NewStack(0) } } CMU_CI_UVM_CURRENT_STACK => { assert_ir!( tys.is_empty() && args.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); let impl_stackref = self.ensure_stackref(); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_stackref) .clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![], v: Instruction_::CurrentStack } } CMU_CI_UVM_KILL_STACK => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() && result_ids.is_empty() ); assert!(args.len() == 1); let impl_opnd = self.get_treenode(fcb, args[0]); assert_ir!(impl_opnd.ty().is_stackref()); Instruction { hdr: hdr, value: None, ops: vec![impl_opnd], v: Instruction_::KillStack(0) } } CMU_CI_UVM_TR64_IS_FP => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // int<1> let impl_i1 = self.ensure_i1(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self.new_ssa(fcb, result_ids[0], impl_i1).clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64IsFp(0) } } CMU_CI_UVM_TR64_IS_INT => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // int<1> let impl_i1 = self.ensure_i1(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self.new_ssa(fcb, result_ids[0], impl_i1).clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { 
hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64IsInt(0) } } CMU_CI_UVM_TR64_IS_REF => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // int<1> let impl_i1 = self.ensure_i1(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self.new_ssa(fcb, result_ids[0], impl_i1).clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64IsRef(0) } } CMU_CI_UVM_TR64_FROM_FP => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_tagref64(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!(impl_opnd.ty().is_double()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64FromFp(0) } } CMU_CI_UVM_TR64_FROM_INT => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_tagref64(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!( impl_opnd.ty().is_int() && impl_opnd.ty().get_int_length().unwrap() == 52 ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64FromInt(0) } } CMU_CI_UVM_TR64_FROM_REF => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 2); // tagref64 let 
impl_tagref64 = self.ensure_tagref64(); let impl_opnd1 = self.get_treenode(fcb, args[0]); let impl_opnd2 = self.get_treenode(fcb, args[1]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!( impl_opnd1.ty().is_ref() && impl_opnd1.ty().get_referent_ty().unwrap().is_void() ); assert_ir!( impl_opnd2.ty().is_int() && impl_opnd2.ty().get_int_length().unwrap() == 6 ); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd1, impl_opnd2], v: Instruction_::CommonInst_Tr64FromRef(0, 1) } } CMU_CI_UVM_TR64_TO_FP => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_double(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64ToFp(0) } } CMU_CI_UVM_TR64_TO_INT => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_i52(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64ToInt(0) } } CMU_CI_UVM_TR64_TO_REF => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_ref_void(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) 
.clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64ToRef(0) } } CMU_CI_UVM_TR64_TO_TAG => { assert_ir!( tys.is_empty() && sigs.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(result_ids.len() == 1); assert!(args.len() == 1); // tagref64 let impl_tagref64 = self.ensure_i6(); let impl_opnd = self.get_treenode(fcb, args[0]); let impl_rv = self .new_ssa(fcb, result_ids[0], impl_tagref64) .clone_value(); assert_ir!(impl_opnd.ty().is_tagref64()); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_opnd], v: Instruction_::CommonInst_Tr64ToTag(0) } } CMU_CI_UVM_IRBUILDER_ATTR_SETPRIORITY => { assert_ir!( tys.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(args.len() == 2); let impl_a = self.get_treenode(fcb, args[0]); let impl_p = self.get_treenode(fcb, args[1]); Instruction { hdr: hdr, value: None, ops: vec![impl_a, impl_p], v: Instruction_::AttrSetPriority(0, 1) } } CMU_CI_UVM_IRBUILDER_ATTR_GETPRIORITY => { assert_ir!( tys.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(args.len() == 1); assert!(result_ids.len() == 1); let impl_a = self.get_treenode(fcb, args[0]); let impl_p = self.ensure_i64(); let impl_rv = self.new_ssa(fcb, result_ids[0], impl_p).clone_value(); Instruction { hdr: hdr, value: Some(vec![impl_rv]), ops: vec![impl_a], v: Instruction_::AttrGetPriority(0) } } CMU_CI_UVM_IRBUILDER_PIFUTEX_WAIT => { assert_ir!( tys.is_empty() && flags.is_empty() && exc_clause.is_none() && keepalives.is_none() ); assert!(args.len() == 2); let impl_f = self.get_treenode(fcb, args[0]); let impl_to = self.get_treenode(fcb, args[1]); Instruction { hdr: hdr, value: None, ops: vec![impl_f, impl_to], v: Instruction_::LockFutex(0, 1) } } CMU_CI_UVM_IRBUILDER_PIFUTEX_WAKE => { assert_ir!( tys.is_empty() && flags.is_empty() && 
exc_clause.is_none() && keepalives.is_none()
                );
                // uvm.irbuilder.pifutex_wake: exactly one argument (the futex
                // location) and no results.
                assert!(args.len() == 1);
                // NOTE(review): presumably an iref to the futex word being
                // woken — confirm against the operand type rules used by the
                // PIFUTEX_WAIT arm above.
                let impl_f = self.get_treenode(fcb, args[0]);
                Instruction {
                    hdr: hdr,
                    value: None,
                    ops: vec![impl_f],
                    v: Instruction_::UnlockFutex(0)
                }
            }
            // Any common-instruction opcode not handled above is not yet
            // supported by this builder.
            _ => unimplemented!()
        }
    }

    /// Translate a client-supplied C-level memory-order constant
    /// (`CMU_ORD_*`) into the internal `MemoryOrder` enum.
    ///
    /// `MuMemoryOrder` is a plain integer type, so the match cannot be
    /// statically exhaustive; any value outside the seven known constants is
    /// a client bug and triggers a panic.
    fn build_mem_ord(&self, ord: MuMemoryOrder) -> MemoryOrder {
        match ord {
            CMU_ORD_NOT_ATOMIC => MemoryOrder::NotAtomic,
            CMU_ORD_RELAXED => MemoryOrder::Relaxed,
            CMU_ORD_CONSUME => MemoryOrder::Consume,
            CMU_ORD_ACQUIRE => MemoryOrder::Acquire,
            CMU_ORD_RELEASE => MemoryOrder::Release,
            CMU_ORD_ACQ_REL => MemoryOrder::AcqRel,
            CMU_ORD_SEQ_CST => MemoryOrder::SeqCst,
            o => panic!("Illegal memory order {}", o)
        }
    }

    /// Hand every definition accumulated by this loader (`built_*` maps) to
    /// the VM in one `declare_many` call, consuming the mutable maps.
    ///
    /// NOTE(review): this method reads `self.b` and `self.built_*`, which are
    /// not fields of the `MuIRBuilder` shown at the top of this file — the
    /// enclosing impl appears to belong to a separate loader struct declared
    /// between there and here; confirm before relying on this description.
    fn add_everything_to_vm(&mut self) {
        // `vm` is cloned from the shared VM handle; a second clone (`arc_vm`)
        // is passed by value into `declare_many` while `vm` serves as the
        // receiver. NOTE(review): presumably an `Arc<VM>`, making both clones
        // cheap refcount bumps — confirm the field type.
        let vm = self.b.get_mvm_immutable().vm.clone();
        let arc_vm = vm.clone();
        trace!("Loading bundle to the VM...");
        vm.declare_many(
            &mut self.name_id_map,
            &mut self.built_types,
            &mut self.built_sigs,
            &mut self.built_constants,
            &mut self.built_globals,
            &mut self.built_funcs,
            &mut self.built_funcvers,
            arc_vm
        );
        trace!("Bundle loaded to the VM!");
    }
}