testing rodal alternatives

parent a7336ae4
......@@ -57,3 +57,4 @@ extprim = "*"
num-traits = "*"
built = "*"
mu_gc = { path = "src/gc"}
cfg-if = "*"
......@@ -53,5 +53,6 @@ fn main() {
}
fn built() {
built::write_built_file().expect("Failed to acquire build-time information");
built::write_built_file()
.expect("Failed to acquire build-time information");
}
#trailing_comma = "Never"
max_width = 80
wrap_comments = true
trailing_comma = "Never"
......@@ -85,6 +85,17 @@ impl Instruction {
| NewStack(_)
| NewThread { .. }
| NewRTThread { .. }
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NotifyThread(_) // TODO remove - not needed anymore
| SetPriority(_, _)
| GetPriority(_)
......@@ -150,6 +161,17 @@ impl Instruction {
| AllocA(_)
| NewHybrid(_, _)
| AllocAHybrid(_, _)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NewStack(_)
| NewThread { .. }
| NewRTThread { .. }
......@@ -239,6 +261,17 @@ impl Instruction {
| NewHybrid(_, _)
| AllocAHybrid(_, _)
| NewStack(_)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NewThread { .. }
| NewRTThread { .. }
| NotifyThread(_)
......@@ -320,6 +353,17 @@ impl Instruction {
| New(_)
| AllocA(_)
| NewHybrid(_, _)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| AllocAHybrid(_, _)
| NewStack(_)
| NewThread { .. }
......@@ -572,6 +616,55 @@ impl Instruction {
ops[func].ty().get_sig().unwrap(),
ops[func]
),
&Instruction_::NewReg(size) => format!(
"COMMINST @uvm.new_region({})",
ops[size]
),
&Instruction_::DeleteReg(regref) => format!(
"COMMINST @uvm.delete_region({})",
ops[regref]
),
&Instruction_::AllocAU(ref ty) => format!(
"COMMINST @uvm.AllocAU({})",
ty.id()
),
&Instruction_::rAlloc(regref, ref ty) => format!(
"COMMINST @uvm.rAlloc({}, {})",
ops[regref],
ty.id()
),
&Instruction_::rAllocHybrid(regref, ref ty, var_len) => format!(
"COMMINST @uvm.rAllocHybrid({}, {}, {})",
ops[regref],
ty.id(),
ops[var_len]
),
// &Instruction_::rAllocT(regref, ty) => format!(
// "COMMINST @uvm.rAllocT({}, {})",
// ops[regref],
// ops[ty]
// ),
&Instruction_::eAlloc(ref ty) => format!(
"COMMINST @uvm.eAlloc({})",
ty.id()
),
&Instruction_::eAllocHybrid(ref ty, var_len) => format!(
"COMMINST @uvm.eAllocHybrid({}, {})",
ty.id(),
ops[var_len]
),
// &Instruction_::eAllocT(ty) => format!(
// "COMMINST @uvm.eAllocT({})",
// ops[ty]
// ),
&Instruction_::eDelete(obj) => format!(
"COMMINST @uvm.eDelete({})",
ops[obj]
),
// &Instruction_::eDeleteT(obj) => format!(
// "COMMINST @uvm.eDeleteT({})",
// ops[obj]
// ),
&Instruction_::NewThread {
stack,
thread_local,
......@@ -995,14 +1088,62 @@ pub enum Instruction_ {
/// allocate an object (non hybrid type) on the stack, yields an iref of the type
AllocA(P<MuType>),
/// allocate an object (non hybrid type) on the stack, yields an uptr of the type
AllocAU(P<MuType>),
/// allocate a hybrid type object in the heap, yields ref
/// args: the type of the hybrid, hybrid part length
NewHybrid(P<MuType>, OpIndex),
/// allocate an emm region
/// args: region size
NewReg(OpIndex),
/// deallocate a whole emm region
/// args: regionref to the target region
DeleteReg(OpIndex),
/// allocates an untraced object on an emm region
/// args: 1. a regionref to the target emm region
/// args: 2. the object type
/// returns: a uptr to the allocated object
rAlloc(OpIndex, P<MuType>),
/// allocates an untraced hybrid object on an emm region
/// args: 1. a regionref to the target emm region
/// args: 2. the type of the hybrid
/// args: 3. hybrid part length
/// returns: a uptr to the allocated object
rAllocHybrid(OpIndex, P<MuType>, OpIndex),
// /// allocates a traced object on an emm region
// /// args: 1. a regionref to the target emm region
// /// args: 2. the object type
// /// returns: an iref to the allocated object
// rAllocT(OpIndex, P<MuType>),
/// allocate an untraced object on the emm
/// args: type of the object
/// returns: a uptr to the object
eAlloc(P<MuType>),
/// allocate an untraced hybrid object on the emm
/// args: the type of the hybrid, hybrid part length
/// returns: a uptr to the object
eAllocHybrid(P<MuType>, OpIndex),
/// deallocate an untraced emm object
/// args: an uptr to the object
eDelete(OpIndex),
// /// allocate a traced object on the emm
// /// args: type of the object
// /// returns: an iref to the object
// eAllocT(P<MuType>),
// /// deallocate a traced emm object
// /// args: an iref to the object
// eDeleteT(OpIndex),
/// allocate a hybrid type object on the stack, yields iref
/// args: the type of the hybrid, hybrid part length
AllocAHybrid(P<MuType>, OpIndex),
// TODO add hybrid versions
// /// allocate a hybrid type object on the stack, yields uptr
// /// args: the type of the hybrid, hybrid part length
// AllocAUHybrid(P<MuType>, OpIndex),
/// create a new Mu stack, yields stack ref
/// args: functionref of the entry function
NewStack(OpIndex),
......
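A minimal sketch (not part of this commit; written here for orientation) of how a later pass could classify the new emm instructions by their result, following the doc comments above: the untraced allocators all yield uptrs, while the traced ones (New, AllocA, NewHybrid, ...) yield ref/iref.

fn yields_uptr(inst: &Instruction_) -> bool {
    use Instruction_::*;
    match *inst {
        AllocAU(_)              // stack allocation, untraced
        | rAlloc(_, _)          // region allocation, untraced
        | rAllocHybrid(_, _, _)
        | eAlloc(_)             // emm allocation, untraced
        | eAllocHybrid(_, _) => true,
        // New, AllocA, NewHybrid, ... yield ref/iref instead
        _ => false
    }
}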
......@@ -745,6 +745,10 @@ impl fmt::Debug for BlockContent {
}
impl BlockContent {
/// returns the block's own argument (parameter) at the given index
pub fn get_own_args(&self, index: usize) -> &P<Value> {
&self.args[index]
}
/// returns all the arguments passed to its successors
pub fn get_out_arguments(&self) -> Vec<P<Value>> {
let n_insts = self.body.len();
......
......@@ -12,7 +12,7 @@ pub enum CallConvResult {
GPR(P<Value>),
GPREX(P<Value>, P<Value>),
FPR(P<Value>),
STACK,
STACK
}
pub mod mu {
......@@ -55,10 +55,12 @@ pub mod c {
ret.push(CallConvResult::STACK);
}
} else if arg_reg_group == RegGroup::GPREX {
// need two registers for this, otherwise, we need to pass on stack
// need two registers for this, otherwise, we need to pass on
// stack
if gpr_arg_count + 1 < x86_64::ARGUMENT_GPRS.len() {
let arg_gpr1 = x86_64::ARGUMENT_GPRS[gpr_arg_count].clone();
let arg_gpr2 = x86_64::ARGUMENT_GPRS[gpr_arg_count + 1].clone();
let arg_gpr2 =
x86_64::ARGUMENT_GPRS[gpr_arg_count + 1].clone();
ret.push(CallConvResult::GPREX(arg_gpr1, arg_gpr2));
gpr_arg_count += 2;
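For context (an assumption made explicit here, not stated in the patch): GPREX is the class for values spanning two GPRs, such as int<128>, which is why the branch above requires gpr_arg_count + 1 free registers and advances the count by two.

// with the System V argument GPRs (RDI, RSI, RDX, RCX, R8, R9):
// an int<128> first argument occupies the pair (RDI, RSI);
// if fewer than two argument GPRs remain, the whole value goes on the stack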
......@@ -83,7 +85,10 @@ pub mod c {
ret
}
pub fn compute_stack_args(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
pub fn compute_stack_args(
tys: &Vec<P<MuType>>,
vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
let callconv = compute_arguments(tys);
let mut stack_arg_tys = vec![];
......@@ -124,7 +129,8 @@ pub mod c {
} else if RegGroup::get_from_ty(ty) == RegGroup::GPREX {
if gpr_ret_count + 1 < x86_64::RETURN_GPRS.len() {
let ret_gpr1 = x86_64::RETURN_GPRS[gpr_ret_count].clone();
let ret_gpr2 = x86_64::RETURN_GPRS[gpr_ret_count + 1].clone();
let ret_gpr2 =
x86_64::RETURN_GPRS[gpr_ret_count + 1].clone();
ret.push(CallConvResult::GPREX(ret_gpr1, ret_gpr2));
} else {
......@@ -149,14 +155,19 @@ pub mod c {
ret
}
pub fn compute_stack_retvals(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
pub fn compute_stack_retvals(
tys: &Vec<P<MuType>>,
vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
let callconv = compute_return_values(tys);
let mut stack_ret_val_tys = vec![];
for i in 0..callconv.len() {
let ref cc = callconv[i];
match cc {
&CallConvResult::STACK => stack_ret_val_tys.push(tys[i].clone()),
&CallConvResult::STACK => {
stack_ret_val_tys.push(tys[i].clone())
}
_ => {}
}
}
......@@ -164,11 +175,11 @@ pub mod c {
compute_stack_locations(&stack_ret_val_tys, vm)
}
/// computes the area on the stack for a list of types that need to be put on
/// the stack, returns a tuple of (size, offset for each value on stack)
/// computes the area on the stack for a list of types that need to be put
/// on the stack, returns a tuple of (size, offset for each value on stack)
pub fn compute_stack_locations(
stack_val_tys: &Vec<P<MuType>>,
vm: &VM,
vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
let (stack_arg_size, _, stack_arg_offsets) =
BackendType::sequential_layout(stack_val_tys, vm);
......@@ -176,7 +187,8 @@ pub mod c {
// "The end of the input argument area shall be aligned on a 16
// (32, if __m256 is passed on stack) byte boundary." - x86 ABI
// if we need to special align the args, we do it now
// (then the args will be put to stack following their regular alignment)
// (then the args will be put to stack following their regular
// alignment)
let mut stack_arg_size_with_padding = stack_arg_size;
if stack_arg_size % 16 == 0 {
......
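A worked example of the padding step above, as a hypothetical helper (not code from the patch): 24 bytes of laid-out arguments are padded to 32 so the end of the area lands on a 16-byte boundary.

fn pad_to_16(stack_arg_size: usize) -> usize {
    if stack_arg_size % 16 == 0 {
        stack_arg_size               // already aligned, nothing to do
    } else {
        (stack_arg_size + 15) & !15  // round up to the next 16-byte boundary
    }
}
// pad_to_16(24) == 32; pad_to_16(32) == 32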
......@@ -19,17 +19,21 @@ use runtime::ValueLocation;
use compiler::backend::{Mem, Reg};
use compiler::machine_code::MachineCode;
/// CodeGenerator provides an interface to emit x86_64 code for instruction selection.
/// This allows us to implement the other parts of the compiler (mostly instruction selection)
/// without assuming a particular code generator. Currently there is only an assembly
/// backend that implements this interface for ahead-of-time compilation. We plan to add
/// a binary backend for just-in-time compilation.
/// CodeGenerator provides an interface to emit x86_64 code for instruction
/// selection. This allows us to implement the other parts of the compiler
/// (mostly instruction selection) without assuming a particular code
/// generator. Currently there is only an assembly backend that implements
/// this interface for ahead-of-time compilation. We plan to add a binary
/// backend for just-in-time compilation.
pub trait CodeGenerator {
/// starts code for a function
fn start_code(&mut self, func_name: MuName, entry: MuName) -> ValueLocation;
fn start_code(&mut self, func_name: MuName, entry: MuName)
-> ValueLocation;
/// finishes code for a function
fn finish_code(&mut self, func_name: MuName)
-> (Box<MachineCode + Sync + Send>, ValueLocation);
fn finish_code(
&mut self,
func_name: MuName
) -> (Box<MachineCode + Sync + Send>, ValueLocation);
/// starts a sequence of linear code (no branch)
fn start_code_sequence(&mut self);
......@@ -43,7 +47,8 @@ pub trait CodeGenerator {
fn start_block(&mut self, block_name: MuName);
/// starts an exceptional block, and returns its code address
fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
/// finishes a block (must have called start_block() or start_exception_block() first)
/// finishes a block (must have called start_block() or
/// start_exception_block() first)
fn end_block(&mut self, block_name: MuName);
// adds CFI info
......@@ -238,7 +243,7 @@ pub trait CodeGenerator {
pe: Option<MuName>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
is_native: bool,
is_native: bool
) -> ValueLocation;
fn emit_call_near_r64(
&mut self,
......@@ -246,7 +251,7 @@ pub trait CodeGenerator {
func: &P<Value>,
pe: Option<MuName>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation;
fn emit_call_near_mem64(
&mut self,
......@@ -254,7 +259,7 @@ pub trait CodeGenerator {
func: &P<Value>,
pe: Option<MuName>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation;
// sometimes we use jmp as a call (but without pushing return address)
......@@ -265,7 +270,7 @@ pub trait CodeGenerator {
pe: Option<MuName>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
is_native: bool,
is_native: bool
) -> ValueLocation;
fn emit_call_jmp_indirect(
&mut self,
......@@ -273,7 +278,7 @@ pub trait CodeGenerator {
func: &P<Value>,
pe: Option<MuName>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation;
fn emit_ret(&mut self);
......
......@@ -18,8 +18,9 @@
pub mod inst_sel;
mod codegen;
/// CodeGenerator trait serves as an interface to the backend code generator, which
/// may generate assembly code or binary (not implemented yet)
/// CodeGenerator trait serves as an interface to the backend code
/// generator, which may generate assembly code or binary (not implemented
/// yet)
use compiler::backend::x86_64::codegen::CodeGenerator;
/// assembly backend as AOT compiler
......@@ -54,16 +55,22 @@ use utils::LinkedHashMap;
// number of normal callee saved registers (excluding RSP and RBP)
pub const CALLEE_SAVED_COUNT: usize = 5;
/// a macro to declare a set of general purpose registers that are aliased to the first one
/// a macro to declare a set of general purpose registers that are aliased to
/// the first one
macro_rules! GPR_ALIAS {
($alias: ident: ($id64: expr, $r64: ident) ->
$r32: ident, $r16: ident, $r8l: ident, $r8h: ident) => {
lazy_static! {
pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $r32: P<Value> = GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
pub static ref $r16: P<Value> = GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
pub static ref $r8l: P<Value> = GPR!($id64 + 3, stringify!($r8l), UINT8_TYPE);
pub static ref $r8h: P<Value> = GPR!($id64 + 4, stringify!($r8h), UINT8_TYPE);
pub static ref $r64: P<Value> =
GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $r32: P<Value> =
GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
pub static ref $r16: P<Value> =
GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
pub static ref $r8l: P<Value> =
GPR!($id64 + 3, stringify!($r8l), UINT8_TYPE);
pub static ref $r8h: P<Value> =
GPR!($id64 + 4, stringify!($r8h), UINT8_TYPE);
pub static ref $alias: [P<Value>; 5] = [
$r64.clone(),
$r32.clone(),
......@@ -76,10 +83,14 @@ macro_rules! GPR_ALIAS {
($alias: ident: ($id64: expr, $r64: ident) -> $r32: ident, $r16: ident, $r8: ident) => {
lazy_static! {
pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $r32: P<Value> = GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
pub static ref $r16: P<Value> = GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
pub static ref $r8: P<Value> = GPR!($id64 + 3, stringify!($r8), UINT8_TYPE);
pub static ref $r64: P<Value> =
GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $r32: P<Value> =
GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
pub static ref $r16: P<Value> =
GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
pub static ref $r8: P<Value> =
GPR!($id64 + 3, stringify!($r8), UINT8_TYPE);
pub static ref $alias: [P<Value>; 4] =
[$r64.clone(), $r32.clone(), $r16.clone(), $r8.clone()];
}
......@@ -87,7 +98,8 @@ macro_rules! GPR_ALIAS {
($alias: ident: ($id64: expr, $r64: ident)) => {
lazy_static! {
pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $r64: P<Value> =
GPR!($id64, stringify!($r64), UINT64_TYPE);
pub static ref $alias: [P<Value>; 4] =
[$r64.clone(), $r64.clone(), $r64.clone(), $r64.clone()];
}
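For reference, invocations of the three macro arms look like the following (the numeric IDs are illustrative, not the constants this module actually uses):

GPR_ALIAS!(RAX_ALIAS: (0, RAX) -> EAX, AX, AL, AH);   // 64/32/16/8-low/8-high
GPR_ALIAS!(R10_ALIAS: (40, R10) -> R10D, R10W, R10B); // no high-8 register
GPR_ALIAS!(RIP_ALIAS: (90, RIP));                     // 64-bit name only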
......@@ -100,7 +112,7 @@ macro_rules! GPR {
P(Value {
hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
ty: $ty.clone(),
v: Value_::SSAVar($id),
v: Value_::SSAVar($id)
})
}};
}
......@@ -111,7 +123,7 @@ macro_rules! FPR {
P(Value {
hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
ty: DOUBLE_TYPE.clone(),
v: Value_::SSAVar($id),
v: Value_::SSAVar($id)
})
}};
}
......@@ -186,7 +198,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
if id < FPR_ID_START {
let vec = match GPR_ALIAS_TABLE.get(&id) {
Some(vec) => vec,
None => panic!("didn't find {} as GPR", id),
None => panic!("didnt find {} as GPR", id)
};
match length {
......@@ -195,7 +207,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
16 => vec[2].clone(),
8 => vec[3].clone(),
1 => vec[3].clone(),
_ => panic!("unexpected length {} for {}", length, vec[0]),
_ => panic!("unexpected length {} for {}", length, vec[0])
}
} else {
for r in ALL_FPRS.iter() {
......@@ -208,11 +220,13 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
}
}
/// are two registers aliased? (both must be machine register IDs, otherwise this function panics)
/// are two registers aliased? (both must be machine register IDs, otherwise
/// this function panics)
pub fn is_aliased(id1: MuID, id2: MuID) -> bool {
if get_color_for_precolored(id1) == get_color_for_precolored(id2) {
// we need to specially check the case for AH/BH/CH/DH
// because both AH and AL are aliased to RAX, but AH and AL are not aliased
// because both AH and AL are aliased to RAX, but AH and AL are not
// aliased
macro_rules! is_match {
($a1: expr, $a2: expr; $b: expr) => {
$a1 == $b.id() || $a2 == $b.id()
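An illustrative consequence of this rule (hypothetical asserts, assuming the register statics declared in this module): RAX and EAX are aliased, while AH and AL both color to RAX yet do not alias each other, since writing AL leaves AH intact.

assert!(is_aliased(RAX.id(), EAX.id()));  // same 64-bit family
assert!(!is_aliased(AH.id(), AL.id()));   // same color, disjoint bits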
......@@ -242,7 +256,7 @@ pub fn get_color_for_precolored(id: MuID) -> MuID {
if id < FPR_ID_START {
match GPR_ALIAS_LOOKUP.get(&id) {
Some(val) => val.id(),
None => panic!("cannot find GPR {}", id),
None => panic!("cannot find GPR {}", id)
}
} else {
// we do not have alias for FPRs
......@@ -259,7 +273,7 @@ pub fn check_op_len(op: &P<Value>) -> usize {
Some(16) => 16,
Some(8) => 8,
Some(1) => 8,
_ => panic!("unsupported register length for x64: {}", op.ty),
_ => panic!("unsupported register length for x64: {}", op.ty)
}
}
......@@ -529,7 +543,7 @@ pub fn number_of_usable_regs_in_group(group: RegGroup) -> usize {
match group {
RegGroup::GPR => ALL_USABLE_GPRS.len(),
RegGroup::GPREX => ALL_USABLE_GPRS.len(),
RegGroup::FPR => ALL_USABLE_FPRS.len(),
RegGroup::FPR => ALL_USABLE_FPRS.len()
}
}
......@@ -569,7 +583,10 @@ pub fn get_return_address(frame_pointer: Address) -> Address {
/// gets the stack pointer before the current frame was created
#[inline(always)]
pub fn get_previous_stack_pointer(frame_pointer: Address, stack_arg_size: usize) -> Address {
pub fn get_previous_stack_pointer(
frame_pointer: Address,
stack_arg_size: usize
) -> Address {
frame_pointer + 16 as ByteSize + stack_arg_size
}
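The constant 16 here assumes the standard x86-64 frame layout (a sketch of that assumption, not part of the patch):

// [FP + 0]      saved RBP        (8 bytes)
// [FP + 8]      return address   (8 bytes)
// [FP + 16 ..]  caller's stack arguments
// so the caller's SP before the call was FP + 16 + stack_arg_size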
......@@ -619,12 +636,13 @@ pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
1...32 => op.is_int_const(),
64 => match op.v {
Value_::Constant(Constant::Int(val)) => {
val as i64 >= i32::MIN as i64 && val as i64 <= i32::MAX as i64
val as i64 >= i32::MIN as i64
&& val as i64 <= i32::MAX as i64
}
_ => false,
_ => false
},
128 => false,
_ => unimplemented!(),
_ => unimplemented!()
}
} else {
false
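Informally, the 64-bit arm above accepts exactly the constants that fit a sign-extended 32-bit immediate, since x86-64 instructions carry at most an imm32; for example:

// 0x7fff_ffff    -> valid (i32::MAX)
// -0x8000_0000   -> valid (i32::MIN)
// 0x1_0000_0000  -> too wide, must go through a register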
......@@ -674,6 +692,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
AtomicRMW { .. } => 1,
AllocA(_) => 1,
AllocAHybrid(_, _) => 1,
AllocAU(_) => 1,
Fence(_) => 1,
// memory addressing
......@@ -684,7 +703,19 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
| GetVarPartIRef { .. } => 0,
// runtime call
New(_) | NewHybrid(_, _) => 10,
New(_)
| NewHybrid(_, _)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_, _)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
=> 10,
NewStack(_)
| NewThread { .. }
| NewRTThread { .. }
......
......@@ -29,13 +29,13 @@ pub use vm::uir_output::create_emit_directory;
pub const EMIT_MC_DOT: bool = true;
pub struct CodeEmission {
name: &'static str,
name: &'static str
}
impl CodeEmission {
pub fn new() -> CodeEmission {
CodeEmission {
name: "Code Emission",
name: "Code Emission"
}
}
}
......@@ -72,7 +72,7 @@ fn create_emit_file(name: String, vm: &VM) -> File {
file_path.to_str().unwrap(),
why
),
Ok(file) => file,
Ok(file) => file
}
}
......@@ -141,7 +141,10 @@ fn emit_mc_dot(func: &MuFunctionVersion, vm: &VM) {
writeln!(file, "{} -> {};", source_id, target_id).unwrap();
}
None => {
panic!("cannot find successor {} for block {}", succ, block_name);
panic!(
"cannot find successor {} for block {}",
succ, block_name
);
}
}
}
......
......@@ -27,7 +27,7 @@ use std::any::Any;
const TRACE_LOOPANALYSIS: bool = true;
pub struct MCLoopAnalysis {
name: &'static str,
name: &'static str
}
impl CompilerPass for MCLoopAnalysis {
......@@ -65,7 +65,8 @@ impl CompilerPass for MCLoopAnalysis {
trace!("---merged loops---");
trace!("{:?}", merged_loops);
let loop_nest_tree = compute_loop_nest_tree(prologue.clone(), &merged_loops);
let loop_nest_tree =
compute_loop_nest_tree(prologue.clone(), &merged_loops);
trace!("---loop-nest tree---");
trace!("{:?}", loop_nest_tree);
......@@ -77,7 +78,7 @@ impl CompilerPass for MCLoopAnalysis {
domtree,
loops,
loop_nest_tree,
loop_depth,
loop_depth
});
cf.loop_analysis = Some(result);
......@@ -89,18 +90,21 @@ pub struct MCLoopAnalysisResult {
pub domtree: MCDomTree,
pub loops: LinkedRepeatableMultiMap<MuName, MCNaturalLoop>,
pub loop_nest_tree: MCLoopNestTree,
pub loop_depth: LinkedHashMap<MuName, usize>,
pub loop_depth: LinkedHashMap<MuName, usize>
}