Commit fbfe7dba authored by Yi Lin's avatar Yi Lin

Merge branch 'gc-rewrite' into 'master'

GC rewrite

See merge request !55
parents 0f303eca 71aefd55
......@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[package]
name = "mu"
version = "0.0.1"
......@@ -61,19 +61,19 @@ num-traits = "*"
#built = "0.1"
[target.aarch64-unknown-linux-gnu.dependencies]
mu_gc = { path = "src/gc", default-features = false}
mu_gc = { path = "src/gc"}
built = "0.1"
[target.x86_64-unknown-linux-gnu.dependencies]
mu_gc = { path = "src/gc", default-features = false}
mu_gc = { path = "src/gc"}
built = "0.1"
[target.x86_64-apple-darwin.dependencies]
mu_gc = { path = "src/gc", default-features = false}
mu_gc = { path = "src/gc"}
built = "0.1"
[target.x86_64-rumprun-netbsd.dependencies]
mu_gc = { path = "src/gc", default-features = false, features = ["sel4-rumprun-target-side"], target = "x86_64-rumprun-netbsd"}
mu_gc = { path = "src/gc", features = ["sel4-rumprun-target-side"], target = "x86_64-rumprun-netbsd"}
[target.aarch64-unknown-linux-gnu.build-dependencies]
built = "0.1"
......
......@@ -29,7 +29,7 @@ are not compliant to Mu spec.
## Building
You will need:
* rust version 1.19 (0ade33941 2017-07-17)
* rust version 1.20 (f3d6973f4 2017-08-27)
* clang 4.0+
* cmake 3.8+ (we do not depend on cmake, but some Rust crates use it)
* internet connection (as Rust will download dependencies)
......
......@@ -108,6 +108,7 @@ impl Instruction {
Move(_) |
PrintHex(_) |
SetRetval(_) |
GetVMThreadLocal |
KillStack(_) |
CurrentStack |
SwapStackExpr { .. } => false
......@@ -185,7 +186,8 @@ impl Instruction {
CommonInst_Tr64ToRef(_) |
CommonInst_Tr64ToTag(_) |
Move(_) |
CurrentStack => false
CurrentStack |
GetVMThreadLocal => false
}
}
......@@ -250,6 +252,7 @@ impl Instruction {
Move(_) |
PrintHex(_) |
SetRetval(_) |
GetVMThreadLocal |
KillStack(_) |
CurrentStack |
SwapStackExpr { .. } |
......@@ -322,6 +325,7 @@ impl Instruction {
Move(_) |
PrintHex(_) |
SetRetval(_) |
GetVMThreadLocal |
KillStack(_) |
CurrentStack |
SwapStackExpr { .. } |
......@@ -395,6 +399,7 @@ impl Instruction {
Move(_) |
PrintHex(_) |
SetRetval(_) |
GetVMThreadLocal |
KillStack(_) |
CurrentStack |
SwapStackKill { .. } => false,
......@@ -820,7 +825,9 @@ impl Instruction {
// print hex
&Instruction_::PrintHex(i) => format!("PRINTHEX<{}> {}", ops[i].ty(), ops[i]),
// set retval
&Instruction_::SetRetval(val) => format!("SETRETVAL {}", ops[val])
&Instruction_::SetRetval(val) => format!("SETRETVAL {}", ops[val]),
// get vm thread local
&Instruction_::GetVMThreadLocal => format!("GETVMTHREADLOCAL")
}
}
}
......@@ -1118,7 +1125,9 @@ pub enum Instruction_ {
/// internal use: print op as hex value
PrintHex(OpIndex),
/// internal use: set return value for main
SetRetval(OpIndex)
SetRetval(OpIndex),
/// internal use: get zebu thread local
GetVMThreadLocal
}
fn format_value_types(value: &Option<Vec<P<Value>>>) -> String {
......
......@@ -566,6 +566,14 @@ impl Block {
}
}
/// removes every instruction from this block's body
/// (panics if the block has no content)
pub fn clear_insts(&mut self) {
    let body = &mut self.content.as_mut().unwrap().body;
    body.clear();
}
/// appends one instruction node at the end of this block's body
/// (panics if the block has no content)
pub fn append_inst(&mut self, inst: P<TreeNode>) {
    let content = self.content.as_mut().unwrap();
    content.body.push(inst);
}
/// does this block receive an exception argument?
pub fn is_receiving_exception_arg(&self) -> bool {
return self.content.as_ref().unwrap().exn_arg.is_some();
......@@ -859,6 +867,30 @@ impl TreeNode {
})
}
/// returns true if this tree node wraps an instruction
pub fn is_inst(&self) -> bool {
    if let TreeNode_::Instruction(_) = self.v {
        true
    } else {
        false
    }
}
/// returns true if this tree node wraps a value
pub fn is_value(&self) -> bool {
    if let TreeNode_::Value(_) = self.v {
        true
    } else {
        false
    }
}
/// returns true if this tree node wraps a value that is a constant
pub fn is_const_value(&self) -> bool {
    if let TreeNode_::Value(ref v) = self.v {
        v.is_const()
    } else {
        false
    }
}
/// extracts the MuID of an SSA TreeNode
/// if the node is not an SSA, returns None
pub fn extract_ssa_id(&self) -> Option<MuID> {
......@@ -975,10 +1007,14 @@ rodal_struct!(Value { hdr, ty, v });
impl Value {
/// creates an int constant value
pub fn make_int_const(id: MuID, val: u64) -> P<Value> {
pub fn make_int32_const(id: MuID, val: u64) -> P<Value> {
Value::make_int_const_ty(id, UINT32_TYPE.clone(), val)
}
pub fn make_int64_const(id: MuID, val: u64) -> P<Value> {
Value::make_int_const_ty(id, UINT64_TYPE.clone(), val)
}
pub fn make_int_const_ty(id: MuID, ty: P<MuType>, val: u64) -> P<Value> {
P(Value {
hdr: MuEntityHeader::unnamed(id),
......
......@@ -75,6 +75,14 @@ lazy_static! {
MuType::new(new_internal_id(), MuType_::iref(VOID_TYPE.clone()))
);
pub static ref UPTR_U8_TYPE: P<MuType> = P(
MuType::new(new_internal_id(), MuType_::uptr(UINT8_TYPE.clone()))
);
pub static ref UPTR_U64_TYPE: P<MuType> = P(
MuType::new(new_internal_id(), MuType_::uptr(UINT64_TYPE.clone()))
);
pub static ref STACKREF_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::StackRef)
);
......@@ -99,6 +107,8 @@ lazy_static! {
IREF_VOID_TYPE.clone(),
STACKREF_TYPE.clone(),
THREADREF_TYPE.clone(),
UPTR_U8_TYPE.clone(),
UPTR_U64_TYPE.clone()
];
}
......@@ -477,6 +487,29 @@ impl MuType {
_ => None
}
}
/// returns a detailed description of this type: for struct and hybrid
/// types this includes the tag together with its full definition looked
/// up from the global tag map; for every other type it is the same as
/// the Display output
pub fn print_details(&self) -> String {
    match self.v {
        MuType_::Struct(ref tag) => {
            let map = STRUCT_TAG_MAP.read().unwrap();
            format!("{} = {}", tag, map.get(tag).unwrap())
        }
        MuType_::Hybrid(ref tag) => {
            let map = HYBRID_TAG_MAP.read().unwrap();
            format!("{} = {}", tag, map.get(tag).unwrap())
        }
        _ => self.to_string()
    }
}
/// prints a struct type
pub fn print_hybrid(&self) -> String {
match self.v {
_ => panic!()
}
}
}
pub type StructTag = MuName;
......
......@@ -18,6 +18,7 @@ use utils::ByteSize;
use utils::Address;
use utils::POINTER_SIZE;
use compiler::backend::aarch64::*;
use runtime::mm::*;
use compiler::backend::{Reg, Mem};
use compiler::machine_code::MachineCode;
......@@ -37,6 +38,7 @@ use std::usize;
use std::ops;
use std::collections::HashSet;
use std::sync::RwLock;
use std::io::Write;
macro_rules! trace_emit {
($arg1:tt $($arg:tt)*) => {
......@@ -3684,9 +3686,10 @@ pub fn emit_context_with_reloc(
let global_addrs: Vec<Address> =
global_locs_lock.values().map(|x| x.to_address()).collect();
debug!("going to dump these globals: {:?}", global_addrs);
// heap dump
let mut global_dump = mm::persist_heap(global_addrs);
debug!("Heap Dump from GC: {:?}", global_dump);
let ref objects = global_dump.objects;
let ref mut relocatable_refs = global_dump.relocatable_refs;
......@@ -3695,15 +3698,18 @@ pub fn emit_context_with_reloc(
relocatable_refs.insert(addr, mangle_name(str));
}
// for all the reachable object, we write them to the boot image
for obj_dump in objects.values() {
// write object metadata
write_align(&mut file, 8);
write_obj_header(&mut file, &obj_dump.encode);
// .bytes xx,xx,xx,xx (between mem_start to reference_addr)
write_data_bytes(&mut file, obj_dump.mem_start, obj_dump.reference_addr);
if global_addr_id_map.contains_key(&obj_dump.reference_addr) {
let global_id = global_addr_id_map.get(&obj_dump.reference_addr).unwrap();
// write alignment for the object
write_align(&mut file, obj_dump.align);
// if this object is a global cell, we add labels so it can be accessed
if global_addr_id_map.contains_key(&obj_dump.addr) {
let global_id = global_addr_id_map.get(&obj_dump.addr).unwrap();
let global_value = global_lock.get(global_id).unwrap();
// .globl global_cell_name
......@@ -3713,6 +3719,7 @@ pub fn emit_context_with_reloc(
writeln!(file, "\t{}", directive_globl(global_cell_name.clone())).unwrap();
writeln!(file, "{}:", global_cell_name.clone()).unwrap();
// .equiv global_cell_name_if_its_valid_c_ident
if is_valid_c_identifier(&demangled_name) {
let demangled_name = (*demangled_name).clone();
writeln!(file, "\t{}", directive_globl(demangled_name.clone())).unwrap();
......@@ -3724,51 +3731,56 @@ pub fn emit_context_with_reloc(
}
}
// dump_label:
let dump_label = relocatable_refs
.get(&obj_dump.reference_addr)
.unwrap()
.clone();
writeln!(file, "{}:", dump_label).unwrap();
// put dump_label for this object (so it can be referred to from other dumped objects)
let dump_label = relocatable_refs.get(&obj_dump.addr).unwrap().clone();
file.write_fmt(format_args!("{}:\n", dump_label)).unwrap();
let base = obj_dump.reference_addr;
let end = obj_dump.mem_start + obj_dump.mem_size;
// get ready to go through from the object start (not mem_start) to the end
let base = obj_dump.addr;
let end = obj_dump.addr + obj_dump.size;
assert!(base.is_aligned_to(POINTER_SIZE));
// offset as cursor
let mut offset = 0;
while offset < obj_dump.mem_size {
while offset < obj_dump.size {
let cur_addr = base + offset;
if obj_dump.reference_offsets.contains(&offset) {
// write ref with label
// if this offset is a reference field, we put a relocatable label
// generated by the GC instead of address value
let load_ref = unsafe { cur_addr.load::<Address>() };
if load_ref.is_zero() {
// write 0
writeln!(file, ".xword 0").unwrap();
// null reference, write 0
file.write("\t.xword 0\n".as_bytes()).unwrap();
} else {
// get the relocatable label
let label = match relocatable_refs.get(&load_ref) {
Some(label) => label,
None => {
panic!(
"cannot find label for address {}, \
it is not dumped by GC (why GC didn't trace to it)",
"cannot find label for address {}, it is not dumped by GC \
(why GC didn't trace to it?)",
load_ref
)
}
};
writeln!(file, ".xword {}", label.clone()).unwrap();
file.write_fmt(format_args!("\t.xword {}\n", label.clone()))
.unwrap();
}
} else if fields.contains_key(&cur_addr) {
// write uptr (or other relocatable value) with label
// if this offset is a field named by the client to relocatable,
// we put the relocatable label given by the client
let label = fields.get(&cur_addr).unwrap();
writeln!(file, ".xword {}", mangle_name(label.clone())).unwrap();
file.write_fmt(format_args!("\t.xword {}\n", mangle_name(label.clone())))
.unwrap();
} else {
// otherwise this offset is plain data
// write plain word (as bytes)
let next_word_addr = cur_addr + POINTER_SIZE;
if next_word_addr <= end {
write_data_bytes(&mut file, cur_addr, next_word_addr);
} else {
......@@ -3815,6 +3827,15 @@ pub fn emit_context_with_reloc(
debug!("---finish---");
}
/// writes the GC header for a dumped object as assembly directives
fn write_obj_header(f: &mut File, obj: &ObjectEncode) {
    // the header is 8-byte aligned and occupies three 8-byte words (24 bytes)
    write_align(f, 8);
    let hdr = obj.as_raw();
    for i in 0..3 {
        writeln!(f, "\t.xword {}", hdr[i]).unwrap();
    }
}
/// emits the VM context with no extra relocation symbols, no
/// client-named relocatable fields, and no primordial threadlocal
/// (convenience wrapper around `emit_context_with_reloc`)
pub fn emit_context(vm: &VM) {
emit_context_with_reloc(vm, hashmap!{}, hashmap!{}, None);
}
......
......@@ -964,7 +964,8 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
PrintHex(_) => 10,
SetRetval(_) => 10,
ExnInstruction { ref inner, .. } => estimate_insts_for_ir(&inner),
_ => unimplemented!()
GetVMThreadLocal => 10,
_ => 1
}
}
......
......@@ -26,6 +26,7 @@ use compiler::backend::x86_64::check_op_len;
use compiler::machine_code::MachineCode;
use vm::VM;
use runtime::ValueLocation;
use runtime::mm::*;
use utils::vec_utils;
use utils::string_utils;
......@@ -42,6 +43,8 @@ use std::ops;
use std::collections::HashSet;
use std::sync::{RwLock, Arc};
use std::any::Any;
use std::path;
use std::io::prelude::*;
/// ASMCode represents a segment of assembly machine code. Usually it is machine code for
/// a Mu function, but it could simply be a sequence of machine code.
......@@ -1715,7 +1718,7 @@ impl ASMCodeGen {
// merge use vec
if uses.contains_key(&id1) {
let mut locs = uses.get_mut(&id1).unwrap();
let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1.clone());
} else {
uses.insert(id1, vec![loc1]);
......@@ -1735,7 +1738,7 @@ impl ASMCodeGen {
let (mem, mut uses) = self.prepare_mem(op2, inst.len() + 1 + reg.len() + 1);
if uses.contains_key(&id) {
let mut locs = uses.get_mut(&id).unwrap();
let locs = uses.get_mut(&id).unwrap();
vec_utils::add_unique(locs, loc.clone());
} else {
uses.insert(id, vec![loc.clone()]);
......@@ -1841,7 +1844,7 @@ impl ASMCodeGen {
let (reg, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + mem.len() + 1);
if uses.contains_key(&id1) {
let mut locs = uses.get_mut(&id1).unwrap();
let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1.clone());
} else {
uses.insert(id1, vec![loc1.clone()]);
......@@ -2079,7 +2082,7 @@ impl ASMCodeGen {
// the register we used for the memory location is counted as 'use'
// use the vec from mem as 'use' (push use reg from src to it)
if uses.contains_key(&id1) {
let mut locs = uses.get_mut(&id1).unwrap();
let locs = uses.get_mut(&id1).unwrap();
vec_utils::add_unique(locs, loc1);
} else {
uses.insert(id1, vec![loc1]);
......@@ -3787,9 +3790,6 @@ use std::fs::File;
/// emit assembly file for a function version
pub fn emit_code(fv: &mut MuFunctionVersion, vm: &VM) {
use std::io::prelude::*;
use std::path;
// acquire lock and function
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&fv.func_id).unwrap().read().unwrap();
......@@ -4034,10 +4034,6 @@ fn mangle_all(name_vec: &mut Vec<String>) {
#[cfg(feature = "sel4-rumprun")]
pub fn emit_sym_table(vm: &VM) {
use std::path;
use std::io::Write;
// Here goes the code to generate an asm file to resolve symbol addresses at link time
// in this stage, a single sym_file is generated for the test
// these sym_files will be compiled in build.rs in the parent directory of sel4 side
......@@ -4168,9 +4164,6 @@ pub fn emit_context_with_reloc(
fields: HashMap<Address, MuName>,
primordial_threadlocal: Option<Address>
) {
use std::path;
use std::io::prelude::*;
// creates the emit directory and the output file
debug!("---Emit VM Context---");
create_emit_directory(vm);
......@@ -4229,15 +4222,16 @@ pub fn emit_context_with_reloc(
// for all the reachable object, we write them to the boot image
for obj_dump in objects.values() {
// write object metadata
write_align(&mut file, 8);
write_obj_header(&mut file, &obj_dump.encode);
// write object metadata
// .bytes xx,xx,xx,xx (between mem_start to reference_addr)
write_data_bytes(&mut file, obj_dump.mem_start, obj_dump.reference_addr);
// write alignment for the object
write_align(&mut file, obj_dump.align);
// if this object is a global cell, we add labels so it can be accessed
if global_addr_id_map.contains_key(&obj_dump.reference_addr) {
let global_id = global_addr_id_map.get(&obj_dump.reference_addr).unwrap();
if global_addr_id_map.contains_key(&obj_dump.addr) {
let global_id = global_addr_id_map.get(&obj_dump.addr).unwrap();
let global_value = global_lock.get(global_id).unwrap();
// .globl global_cell_name
......@@ -4260,20 +4254,17 @@ pub fn emit_context_with_reloc(
}
// put dump_label for this object (so it can be referred to from other dumped objects)
let dump_label = symbol(&&relocatable_refs
.get(&obj_dump.reference_addr)
.unwrap()
.clone());
let dump_label = symbol(&&relocatable_refs.get(&obj_dump.addr).unwrap().clone());
file.write_fmt(format_args!("{}:\n", dump_label)).unwrap();
// get ready to go through from the object start (not mem_start) to the end
let base = obj_dump.reference_addr;
let end = obj_dump.mem_start + obj_dump.mem_size;
let base = obj_dump.addr;
let end = obj_dump.addr + obj_dump.size;
assert!(base.is_aligned_to(POINTER_SIZE));
// offset as cursor
let mut offset = 0;
while offset < obj_dump.mem_size {
while offset < obj_dump.size {
let cur_addr = base + offset;
if obj_dump.reference_offsets.contains(&offset) {
......@@ -4364,6 +4355,16 @@ pub fn emit_context(vm: &VM) {
emit_context_with_reloc(vm, hashmap!{}, hashmap!{}, None);
}
/// writes the GC header for a dumped object as assembly directives
fn write_obj_header(f: &mut File, obj: &ObjectEncode) {
    // the header is 8-byte aligned and occupies three 8-byte words (24 bytes)
    write_align(f, 8);
    let hdr = obj.as_raw();
    for i in 0..3 {
        writeln!(f, "\t.quad {}", hdr[i]).unwrap();
    }
}
/// writes raw bytes from memory between from_address (inclusive) to to_address (exclusive)
fn write_data_bytes(f: &mut File, from: Address, to: Address) {
use std::io::Write;
......
......@@ -648,6 +648,17 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
CmpOp(_, _, _) => 1,
ConvOp { .. } => 0,
CommonInst_Tr64IsFp(_) |
CommonInst_Tr64IsInt(_) |
CommonInst_Tr64IsRef(_) |
CommonInst_Tr64FromFp(_) |
CommonInst_Tr64FromInt(_) |
CommonInst_Tr64FromRef(_, _) |
CommonInst_Tr64ToFp(_) |
CommonInst_Tr64ToInt(_) |
CommonInst_Tr64ToRef(_) |
CommonInst_Tr64ToTag(_) => 3,
// control flow
Branch1(_) => 1,
Branch2 { .. } => 1,
......@@ -685,14 +696,14 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
Throw(_) => 10,
SwapStackExpr { .. } | SwapStackExc { .. } | SwapStackKill { .. } => 10,
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
CommonInst_Pin(_) | CommonInst_Unpin(_) => 10,
CommonInst_Pin(_) | CommonInst_Unpin(_) | CommonInst_GetAddr(_) => 10,
// others
Move(_) => 0,
PrintHex(_) => 10,
SetRetval(_) => 10,
ExnInstruction { ref inner, .. } => estimate_insts_for_ir(&inner),
_ => unimplemented!()
GetVMThreadLocal => 10,
ExnInstruction { ref inner, .. } => estimate_insts_for_ir(&inner)
}
}
......
This diff is collapsed.
......@@ -128,7 +128,7 @@ impl PeepholeOptimization {
}
fn remove_unnecessary_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
let mut mc = cf.mc_mut();
let mc = cf.mc_mut();
// if this is last instruction, return
if inst == mc.number_of_insts() - 1 {
......@@ -158,7 +158,7 @@ impl PeepholeOptimization {
}
fn remove_jump_to_jump(&mut self, inst: usize, cf: &mut CompiledFunction) {
let mut mc = cf.mc_mut();
let mc = cf.mc_mut();
// the instruction that we may rewrite
let orig_inst = inst;
......
......@@ -424,7 +424,7 @@ impl<'a> GraphColoring<'a> {
) {
trace!(" add {:?} to movelist[{}]", mov, reg);
if movelist.contains_key(&reg) {
let mut list = movelist.get_mut(&reg).unwrap();
let list = movelist.get_mut(&reg).unwrap();
list.insert(mov);
} else {
let mut list = LinkedHashSet::new();
......
......@@ -771,7 +771,7 @@ fn global_liveness_analysis(
// in <- use + (out - def)
{
let mut inset = livein.get_mut(node).unwrap();
let inset = livein.get_mut(node).unwrap();
inset.clear();
......@@ -789,7 +789,7 @@ fn global_liveness_analysis(
// out[n] <- union(in[s] for every successor s of n)
{
let mut outset = liveout.get_mut(node).unwrap();
let outset = liveout.get_mut(node).unwrap();
outset.clear();
for s in cfg_node.succ.iter() {
......
......@@ -194,7 +194,7 @@ pub fn validate_regalloc(
if visited.contains_key(&block) {
// if current block exists in visited, intersect with current
let mut old = visited.get_mut(&block).unwrap();
let old = visited.get_mut(&block).unwrap();
let changed = old.intersect(&alive);
if changed {
......
......@@ -218,7 +218,7 @@ impl FrameSlot {
ty: ty.clone(),
v: Value_::Memory(MemoryLocation::Address {
base: x86_64::RBP.clone(),
offset: Some(Value::make_int_const(vm.next_id(), self.offset as u64)),
offset: Some(Value::make_int32_const(vm.next_id(), self.offset as u64)),
index: None,
scale: None
})
......@@ -234,7 +234,7 @@ impl FrameSlot {
ty: ty.clone(),
v: Value_::Memory(MemoryLocation::VirtualAddress {
base: aarch64::FP.clone(),
offset: Some(Value::make_int_const(vm.next_id(), self.offset as u64)),
offset: Some(Value::make_int32_const(vm.next_id(), self.offset as u64)),
scale: 1,
signed: true
})
......
......@@ -104,6 +104,7 @@ impl Default for CompilerPolicy {
// ir level passes
passes.push(Box::new(passes::RetSink::new()));
passes.push(Box::new(passes::Inlining::new()));
passes.push(Box::new(passes::InjectRuntime::new()));
passes.push(Box::new(passes::DefUse::new()));
passes.push(Box::new(passes::TreeGen::new()));
passes.push(Box::new(passes::GenMovPhi::new()));
......
......@@ -195,7 +195,7 @@ fn dfs(cur: MuID, stack: &mut Vec<MuID>, visited: &mut Vec<MuID>, func: &mut MuF
target: MuID,
prob: f32| {
if map.contains_key(&target) {
let mut edge : &mut BlockEdge = map.get_mut(&target).unwrap();
let edge: &mut BlockEdge = map.get_mut(&target).unwrap();
edge.probability += prob;
} else {
map.insert(
......
......@@ -57,7 +57,7 @@ impl CompilerPass for GenMovPhi {
let mut new_blocks_to_insert: Vec<IntermediateBlockInfo> = vec![];
// first step - collects info on intermediate blocks
for (blk_id, mut block) in f_content.blocks.iter_mut() {
for (blk_id, block) in f_content.blocks.iter_mut() {
trace!("block: {}", blk_id);
// old block content
......@@ -313,7 +313,7 @@ impl CompilerPass for GenMovPhi {
block_info.blk_name.clone()
));
let mut target_block = f_content.get_block_mut(target_id);
let target_block = f_content.get_block_mut(target_id);
assert!(target_block.content.is_some());
// if target_block is an exception block,
......
This diff is collapsed.
......@@ -158,7 +158,7 @@ impl Inlining {
let call_edges = func.get_static_call_edges();
let mut f_content = func.content.as_mut().unwrap();
let f_content = func.content.as_mut().unwrap();
let ref mut f_context = func.context;
let mut new_blocks: Vec<Block> = vec![];
......