Commit 5b67db51 authored by qinsoon's avatar qinsoon

[wip]

parent 00a0a2f6
......@@ -105,7 +105,7 @@ pub struct MuFunctionVersion {
pub func_id: MuID,
pub sig: P<MuFuncSig>,
pub orig_content: Option<FunctionContent>,
orig_content: Option<FunctionContent>,
pub content: Option<FunctionContent>,
pub context: FunctionContext,
......@@ -153,6 +153,23 @@ impl MuFunctionVersion {
}
}
/// Builds a `MuFunctionVersion` from already-constructed content.
///
/// A pristine copy of `content` is stashed in `orig_content` so later
/// consumers (e.g. `get_orig_ir()`) can see the IR as it was before any
/// compiler pass rewrote `content`.
pub fn new_(hdr: MuEntityHeader, id: MuID, sig: P<MuFuncSig>, content: FunctionContent, context: FunctionContext) -> MuFunctionVersion {
    // snapshot the untouched IR before handing `content` over
    let original = content.clone();

    MuFunctionVersion {
        hdr: hdr,
        func_id: id,
        sig: sig,
        orig_content: Some(original),
        content: Some(content),
        context: context,
        block_trace: None,
        force_inline: false
    }
}
/// Returns the original (pre-rewrite) IR content, if one was recorded.
pub fn get_orig_ir(&self) -> Option<&FunctionContent> {
    match self.orig_content {
        Some(ref content) => Some(content),
        None => None
    }
}
pub fn define(&mut self, content: FunctionContent) {
self.orig_content = Some(content.clone());
self.content = Some(content);
......@@ -267,7 +284,7 @@ impl MuFunctionVersion {
}
}
#[derive(RustcEncodable, RustcDecodable, Clone)]
#[derive(RustcEncodable, RustcDecodable)]
pub struct FunctionContent {
pub entry: MuID,
pub blocks: HashMap<MuID, Block>
......@@ -288,6 +305,21 @@ impl fmt::Debug for FunctionContent {
}
}
// Hand-written Clone: deep-copies every block into a fresh map.
impl Clone for FunctionContent {
    fn clone(&self) -> Self {
        // Preallocate to the known block count so the inserts below never
        // trigger a rehash (the original used `HashMap::new()` and grew
        // incrementally).
        let mut new_blocks = HashMap::with_capacity(self.blocks.len());
        for (id, block) in self.blocks.iter() {
            new_blocks.insert(*id, block.clone());
        }

        FunctionContent {
            entry: self.entry,
            blocks: new_blocks
        }
    }
}
impl FunctionContent {
pub fn get_entry_block(&self) -> &Block {
self.get_block(self.entry)
......
......@@ -278,6 +278,9 @@ impl ASMCode {
};
for i in 0..n_insts {
if TRACE_CFA {
trace!("---inst {}---", i);
}
// determine predecessor - if cur is not block start, its predecessor is previous insts
let is_block_start = block_start.contains(&i);
if !is_block_start {
......@@ -339,6 +342,12 @@ impl ASMCode {
trace!("inst {}: set PREDS as {}", target_n, i);
}
},
ASMBranchTarget::Return => {
if TRACE_CFA {
trace!("inst {}: is a return", i);
trace!("inst {}: has no successor", i);
}
}
ASMBranchTarget::None => {
// not branch nor cond branch, succ is next inst
if TRACE_CFA {
......@@ -642,7 +651,8 @@ impl MachineCode for ASMCode {
enum ASMBranchTarget {
None,
Conditional(MuName),
Unconditional(MuName)
Unconditional(MuName),
Return
}
#[derive(Clone, Debug)]
......@@ -839,7 +849,7 @@ impl ASMCodeGen {
// otherwise it will keep RETURN REGS alive
// and if there is no actual move into RETURN REGS, it will keep RETURN REGS for alive for very long
// and prevents anything using those regsiters
self.add_asm_inst(code, hashmap!{}, hashmap!{}, false);
self.add_asm_inst_internal(code, hashmap!{}, hashmap!{}, false, ASMBranchTarget::Return);
}
fn add_asm_branch(&mut self, code: String, target: MuName) {
......@@ -1075,92 +1085,6 @@ impl ASMCodeGen {
fn mangle_block_label(&self, label: MuName) -> String {
format!("{}_{}", self.cur().name, label)
}
/// Wires up the per-instruction control-flow graph for the current code
/// sequence: fills each instruction's `preds`/`succs` lists based on the
/// branch target recorded on the instruction.
fn control_flow_analysis(&mut self) {
    // control flow analysis
    let n_insts = self.line();

    let code = self.cur_mut();
    let ref blocks = code.blocks;
    let ref mut asm = code.code;

    // indices of every block-leading instruction
    let block_start = {
        let mut ret = vec![];
        for block in blocks.values() {
            ret.push(block.start_inst);
        }
        ret
    };

    for i in 0..n_insts {
        // determine predecessor - if cur is not block start, its predecessor is previous insts
        let is_block_start = block_start.contains(&i);
        if !is_block_start {
            if i > 0 {
                trace!("inst {}: not a block start", i);
                trace!("inst {}: set PREDS as previous inst {}", i, i-1);
                asm[i].preds.push(i - 1);
            }
        } else {
            // if cur is a branch target, we already set its predecessor
            // if cur is a fall-through block, we set it in a sanity check pass
        }

        // determine successor
        // (clone the target so `asm` can be mutated while we match on it)
        let branch = asm[i].branch.clone();
        match branch {
            ASMBranchTarget::Unconditional(ref target) => {
                // branch to target
                trace!("inst {}: is a branch to {}", i, target);
                let target_n = code.blocks.get(target).unwrap().start_inst;
                trace!("inst {}: branch target index is {}", i, target_n);

                // cur inst's succ is target
                trace!("inst {}: set SUCCS as branch target {}", i, target_n);
                asm[i].succs.push(target_n);

                // target's pred is cur
                trace!("inst {}: set PREDS as branch source {}", target_n, i);
                asm[target_n].preds.push(i);
            },
            ASMBranchTarget::Conditional(ref target) => {
                // branch to target
                trace!("inst {}: is a cond branch to {}", i, target);
                let target_n = code.blocks.get(target).unwrap().start_inst;
                trace!("inst {}: branch target index is {}", i, target_n);

                // cur insts' succ is target and next inst
                asm[i].succs.push(target_n);
                trace!("inst {}: set SUCCS as branch target {}", i, target_n);
                if i < n_insts - 1 {
                    // NOTE(review): this trace logs `i + 1` where the `None`
                    // arm below logs `i` — looks like a logging slip; the
                    // successor wiring itself (push i+1 onto inst i) is
                    // consistent with the fall-through semantics.
                    trace!("inst {}: set SUCCS as next inst", i + 1);
                    asm[i].succs.push(i + 1);
                }

                // target's pred is cur
                asm[target_n].preds.push(i);
                trace!("inst {}: set PREDS as {}", target_n, i);
            },
            ASMBranchTarget::None => {
                // not branch nor cond branch, succ is next inst
                trace!("inst {}: not a branch inst", i);
                if i < n_insts - 1 {
                    trace!("inst {}: set SUCCS as next inst {}", i, i + 1);
                    asm[i].succs.push(i + 1);
                }
            }
        }
    }

    // a sanity check for fallthrough blocks
    // (a block entry reached only by fall-through got no pred above; give it
    // the previous instruction as pred)
    for i in 0..n_insts {
        if i != 0 && asm[i].preds.len() == 0 {
            asm[i].preds.push(i - 1);
        }
    }
}
fn finish_code_sequence_asm(&mut self) -> Box<ASMCode> {
self.cur.take().unwrap()
......@@ -1609,7 +1533,7 @@ impl CodeGenerator for ASMCodeGen {
self.add_asm_symbolic(directive_globl(symbol(func_end.clone())));
self.add_asm_symbolic(format!("{}:", symbol(func_end.clone())));
self.control_flow_analysis();
self.cur.as_mut().unwrap().control_flow_analysis();
(
self.cur.take().unwrap(),
......
......@@ -231,8 +231,8 @@ impl InterferenceGraph {
}
}
fn build_live_set (cf: &mut CompiledFunction) {
info!("start building live set");
fn build_live_set (cf: &mut CompiledFunction, func: &MuFunctionVersion) {
info!("---start building live set---");
let n_insts = cf.mc().number_of_insts();
......@@ -241,7 +241,11 @@ fn build_live_set (cf: &mut CompiledFunction) {
let mut is_changed = true;
let mut i = 0;
while is_changed {
trace!("---iteration {}---", i);
i += 1;
// reset
is_changed = false;
......@@ -255,14 +259,13 @@ fn build_live_set (cf: &mut CompiledFunction) {
inset.clear();
// (1) in[n] = use[n]
inset.add_from_vec(cf.mc().get_inst_reg_uses(n));
// (2) + out[n]
// (1) out[n] - def[n]
inset.add_all(liveout[n].clone());
// (3) - def[n]
for def in cf.mc().get_inst_reg_defines(n) {
inset.remove(&def);
}
// (2) in[n] + (out[n] - def[n])
inset.add_from_vec(cf.mc().get_inst_reg_uses(n));
}
// out[n] <- union(in[s] for every successor s of n)
......@@ -278,22 +281,42 @@ fn build_live_set (cf: &mut CompiledFunction) {
// is in/out changed in this iteration?
let n_changed = !in_set_old.equals(&livein[n]) || !out_set_old.equals(&liveout[n]);
trace!("inst {}", n);
trace!("in(old) = {:?}", in_set_old);
trace!("in(new) = {:?}", livein[n]);
trace!("out(old) = {:?}", out_set_old);
trace!("out(new) = {:?}", liveout[n]);
is_changed = is_changed || n_changed;
}
}
info!("---finish building live set---");
for block in cf.mc().get_all_blocks().to_vec() {
let start_inst = cf.mc().get_block_range(&block).unwrap().start;
cf.mc_mut().set_ir_block_livein(&block, livein[start_inst].clone().to_vec());
let livein = livein[start_inst].clone().to_vec();
{
let display_array : Vec<String> = livein.iter().map(|x| func.context.get_temp_display(*x)).collect();
trace!("livein for block {}: {:?}", block, display_array);
}
cf.mc_mut().set_ir_block_livein(&block, livein);
let end_inst = cf.mc().get_block_range(&block).unwrap().end;
cf.mc_mut().set_ir_block_liveout(&block, liveout[end_inst].clone().to_vec());
let end_inst = cf.mc().get_block_range(&block).unwrap().end - 1;
let liveout = liveout[end_inst].clone().to_vec();
{
let display_array : Vec<String> = liveout.iter().map(|x| func.context.get_temp_display(*x)).collect();
trace!("liveout for block {}: {:?}", block, display_array);
}
cf.mc_mut().set_ir_block_liveout(&block, liveout);
}
}
// from Tailoring Graph-coloring Register Allocation For Runtime Compilation, Figure 4
pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion) -> InterferenceGraph {
build_live_set(cf);
build_live_set(cf, func);
info!("---start building interference graph---");
let mut ig = InterferenceGraph::new();
......@@ -341,7 +364,9 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// for every inst I in reverse order
for i in range.unwrap().rev() {
if cfg!(debug_assertions) {
trace!("Block{}: Inst{}: start. current_live:", block, i);
trace!("Block{}: Inst{}", block, i);
cf.mc().trace_inst(i);
trace!("current live: ");
for ele in current_live.iter() {
trace!("{}", func.context.get_temp_display(*ele));
}
......@@ -436,7 +461,8 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
}
}
}
info!("---finish building interference graph---");
ig
}
......
......@@ -140,6 +140,9 @@ impl Inlining {
let inlined_fv_lock = inlined_fvs_guard.get(&inlined_fvid).unwrap();
let inlined_fv_guard = inlined_fv_lock.read().unwrap();
trace!("QINSOON_DEBUG: orig_content: {:?}", inlined_fv_guard.get_orig_ir().unwrap());
trace!("QINSOON_DEBUG: content : {:?}", inlined_fv_guard.content.as_ref().unwrap());
let new_inlined_entry_id = vm.next_id();
// change current call insts to a branch
......@@ -191,7 +194,7 @@ impl Inlining {
// deal with the inlined function
copy_inline_blocks(&mut new_blocks, cur_block.id(),
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
inlined_fv_guard.get_orig_ir().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
......@@ -223,7 +226,7 @@ impl Inlining {
let next_block = resume.normal_dest.target;
copy_inline_blocks(&mut new_blocks, next_block,
inlined_fv_guard.content.as_ref().unwrap(), new_inlined_entry_id,
inlined_fv_guard.get_orig_ir().unwrap(), new_inlined_entry_id,
vm);
copy_inline_context(f_context, &inlined_fv_guard.context);
},
......@@ -277,6 +280,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
};
for block in callee.blocks.values() {
let old_id = block.id();
let new_id = *block_map.get(&block.id()).unwrap();
let mut block = Block {
hdr: MuEntityHeader::named(new_id, format!("IB{}_for_{}", new_id, block.id())),
......@@ -284,6 +288,8 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
control_flow: ControlFlow::default()
};
trace!("starts copying instruction from {} to {}", old_id, new_id);
// check its last instruction
{
let block_content = block.content.as_mut().unwrap();
......@@ -292,6 +298,8 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
match last_inst.v {
TreeNode_::Instruction(inst) => {
trace!("last instruction: {}", inst);
let hdr = inst.hdr;
let value = inst.value;
let ops = inst.ops;
......@@ -310,6 +318,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
})
};
trace!("rewrite to: {}", branch);
block_content.body.push(TreeNode::new_boxed_inst(branch));
},
......@@ -322,6 +331,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
v: Instruction_::Branch1(fix_dest(dest))
};
trace!("rewrite to: {}", branch);
block_content.body.push(TreeNode::new_boxed_inst(branch));
}
Instruction_::Branch2{cond, true_dest, false_dest, true_prob} => {
......@@ -337,6 +347,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
}
};
trace!("rewrite to: {}", branch2);
block_content.body.push(TreeNode::new_boxed_inst(branch2));
}
Instruction_::Call{data, resume} => {
......@@ -350,6 +361,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
}
};
trace!("rewrite to: {}", call);
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::CCall{data, resume} => {
......@@ -363,6 +375,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
}
};
trace!("rewrite to: {}", call);
block_content.body.push(TreeNode::new_boxed_inst(call));
}
Instruction_::Switch {cond, default, mut branches} => {
......@@ -377,6 +390,7 @@ fn copy_inline_blocks(caller: &mut Vec<Block>, ret_block: MuID, callee: &Functio
}
};
trace!("rewrite to: {}", switch);
block_content.body.push(TreeNode::new_boxed_inst(switch));
}
......
......@@ -1143,16 +1143,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
blocks: blocks,
};
let impl_fv = MuFunctionVersion {
hdr: hdr,
func_id: func_id,
sig: impl_sig,
orig_content: Some(ctn.clone()),
content: Some(ctn),
context: fcb.ctx,
block_trace: None,
force_inline: false
};
let impl_fv = MuFunctionVersion::new_(hdr, func_id, impl_sig, ctn, fcb.ctx);
trace!("Function version built {} {:?}", id, impl_fv);
......
......@@ -332,6 +332,24 @@ macro_rules! inst {
}
});
};
(($vm: expr, $fv: ident) $name: ident: EXPRCALL ($cc: expr, is_abort: $is_abort: expr) $func: ident ($($val: ident), +)) => {
let ops = vec![$func.clone(), $($val.clone()), *];
let ops_len = ops.len();
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: Some(vec![]),
ops: RwLock::new(ops),
v: Instruction_::ExprCall {
data: CallData {
func: 0,
args: (1..ops_len).collect(),
convention: $cc
},
is_abort: $is_abort
}
});
};
// RET
(($vm: expr, $fv: ident) $name: ident: RET ($($val: ident), +)) => {
......
......@@ -15,6 +15,22 @@ use std::sync::Arc;
use std::sync::RwLock;
use std::collections::HashMap;
/// Counts the move instructions in the compiled machine code of the given
/// function version.
fn get_number_of_moves(fv_id: MuID, vm: &VM) -> usize {
    let cfs = vm.compiled_funcs().read().unwrap();
    let cf = cfs.get(&fv_id).unwrap().read().unwrap();
    let mc = cf.mc();

    // count matching instruction indices instead of accumulating by hand
    (0..mc.number_of_insts()).filter(|&i| mc.is_move(i)).count()
}
#[test]
fn test_ir_liveness_fac() {
VM::start_logging_trace();
......@@ -97,7 +113,7 @@ fn test_spill1() {
let lib = libloading::Library::new(dylib.as_os_str()).unwrap();
unsafe {
let simple_spill : libloading::Symbol<unsafe extern fn() -> u64> = match lib.get(b"spill1") {
let spill1 : libloading::Symbol<unsafe extern fn() -> u64> = match lib.get(b"spill1") {
Ok(symbol) => symbol,
Err(e) => panic!("cannot find symbol spill1 in dylib: {:?}", e)
};
......@@ -588,19 +604,7 @@ fn test_coalesce_branch_moves() {
// check
let fv_id = func_ver.id();
let cfs = vm.compiled_funcs().read().unwrap();
let cf = cfs.get(&fv_id).unwrap().read().unwrap();
let mut n_mov_insts = 0;
let mc = cf.mc();
for i in 0..mc.number_of_insts() {
if mc.is_move(i) {
n_mov_insts += 1;
}
}
assert!(n_mov_insts == 1, "The function should not yield any mov instructions other than mov %rsp->%rbp (some possible coalescing failed)");
assert!(get_number_of_moves(fv_id, &vm) == 1, "The function should not yield any mov instructions other than mov %rsp->%rbp (some possible coalescing failed)");
}
}
......@@ -644,5 +648,202 @@ fn coalesce_branch_moves() -> VM {
blk_entry, blk1
});
vm
}
/// Compiles `coalesce_args` and checks that register allocation coalesces
/// the argument-passing moves away: only one mov (%rsp -> %rbp in the
/// prologue) should survive.
#[test]
#[cfg(target_arch = "x86_64")]
fn test_coalesce_args() {
    VM::start_logging_trace();

    let vm = Arc::new(coalesce_args());
    let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
    let func_id = vm.id_of("coalesce_args");
    {
        let funcs_guard = vm.funcs().read().unwrap();
        let func = funcs_guard.get(&func_id).unwrap().read().unwrap();
        let vers_guard = vm.func_vers().read().unwrap();
        let mut version = vers_guard.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();

        compiler.compile(&mut version);

        // check the emitted machine code
        let n_moves = get_number_of_moves(version.id(), &vm);
        assert!(n_moves == 1, "The function should not yield any mov instructions other than mov %rsp->%rbp (some possible coalescing failed)");
    }
}
/// Builds a Mu function whose entry block forwards its four i64 arguments,
/// unchanged, into a call through a funcref to the function itself, then
/// returns. Used by `test_coalesce_args` to check that the register
/// allocator coalesces the argument-passing moves.
fn coalesce_args() -> VM {
    let vm = VM::new();

    // types and signature: (int64, int64, int64, int64) -> ()
    typedef! ((vm) int64 = mu_int(64));

    funcsig! ((vm) sig = (int64, int64, int64, int64) -> ());
    funcdecl! ((vm) <sig> coalesce_args);
    funcdef! ((vm) <sig> coalesce_args VERSION coalesce_args_v1);

    // a funcref constant pointing back at this very function
    typedef! ((vm) funcref_to_sig = mu_funcref(sig));
    constdef! ((vm) <funcref_to_sig> funcref = Constant::FuncRef(coalesce_args));

    // blk entry: receives the four arguments
    block! ((vm, coalesce_args_v1) blk_entry);
    ssa! ((vm, coalesce_args_v1) <int64> arg0);
    ssa! ((vm, coalesce_args_v1) <int64> arg1);
    ssa! ((vm, coalesce_args_v1) <int64> arg2);
    ssa! ((vm, coalesce_args_v1) <int64> arg3);

    consta! ((vm, coalesce_args_v1) funcref_local = funcref);

    // pass the block arguments straight through to the call -- each one
    // should be coalesced rather than moved
    inst! ((vm, coalesce_args_v1) blk_entry_call:
        EXPRCALL (CallConvention::Mu, is_abort: false) funcref_local (arg0, arg1, arg2, arg3)
    );

    inst! ((vm, coalesce_args_v1) blk_entry_ret:
        RET
    );

    define_block! ((vm, coalesce_args_v1) blk_entry(arg0, arg1, arg2, arg3) {blk_entry_call, blk_entry_ret});

    define_func_ver!((vm) coalesce_args_v1 (entry: blk_entry) {blk_entry});

    vm
}
/// Compiles `coalesce_branch2_moves`, checks that branch-move coalescing
/// keeps the mov count low, then links the result into a dylib and runs it
/// to verify both sides of the two-way branch compute the right sums.
#[test]
#[cfg(target_arch = "x86_64")]
fn test_coalesce_branch2_moves() {
    VM::start_logging_trace();

    let vm = Arc::new(coalesce_branch2_moves());
    let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
    let func_id = vm.id_of("coalesce_branch2_moves");
    {
        let funcs_guard = vm.funcs().read().unwrap();
        let func = funcs_guard.get(&func_id).unwrap().read().unwrap();
        let vers_guard = vm.func_vers().read().unwrap();
        let mut version = vers_guard.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();

        compiler.compile(&mut version);

        // check move count
        let n_moves = get_number_of_moves(version.id(), &vm);
        assert!(n_moves <= 3, "too many moves (some possible coalescing failed)");
    }

    backend::emit_context(&vm);
    let dylib_path = aot::link_dylib(vec![Mu("coalesce_branch2_moves")], "libcoalesce_branch2_moves.dylib", &vm);
    let lib = libloading::Library::new(dylib_path.as_os_str()).unwrap();

    unsafe {
        let branch2_moves : libloading::Symbol<unsafe extern fn(u64, u64, u64, u64, u64, u64) -> u64> = match lib.get(b"coalesce_branch2_moves") {
            Ok(symbol) => symbol,
            Err(e) => panic!("cannot find symbol coalesce_branch2_moves in dylib: {:?}", e)
        };

        // arg4 == arg5: the true branch adds arg0 + arg1
        let res = branch2_moves(1, 1, 10, 10, 0, 0);
        println!("if 0 == 0 then return 1 + 1 else return 10 + 10");
        println!("coalesce_branch2_moves(1, 1, 10, 10, 0, 0) = {}", res);
        assert!(res == 2);

        // arg4 != arg5: the false branch adds arg2 + arg3
        let res = branch2_moves(1, 1, 10, 10, 1, 0);
        println!("if 1 == 0 then return 1 + 1 else return 10 + 10");
        println!("coalesce_branch2_moves(1, 1, 10, 10, 1, 0) = {}", res);
        assert!(res == 20);
    }
}
/// Builds a Mu function `(a0, a1, a2, a3, a4, a5) -> ...` that compares
/// a4 == a5 and branches: the true path returns a0 + a1, the false path
/// returns a2 + a3. Each branch forwards two of the entry arguments as
/// block arguments -- the moves the coalescing test is interested in.
fn coalesce_branch2_moves() -> VM {
    let vm = VM::new();

    typedef! ((vm) int64 = mu_int(64));
    typedef! ((vm) int1 = mu_int(1));

    funcsig! ((vm) sig = (int64, int64, int64, int64) -> ());
    funcdecl!((vm) <sig> coalesce_branch2_moves);
    funcdef! ((vm) <sig> coalesce_branch2_moves VERSION coalesce_branch2_moves_v1);

    // blk entry: six arguments (four operands plus the two compare inputs)
    block! ((vm, coalesce_branch2_moves_v1) blk_entry);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg0);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg1);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg2);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg3);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg4);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> arg5);

    // NOTE(review): blk1 is declared but never defined or referenced in the
    // rest of this function -- presumably leftover scaffolding; confirm it
    // can be removed.
    block! ((vm, coalesce_branch2_moves_v1) blk1);
    ssa! ((vm, coalesce_branch2_moves_v1) <int1> cond);

    // cond = (arg4 == arg5)
    inst! ((vm, coalesce_branch2_moves_v1) blk_entry_cmp:
        cond = CMPOP (CmpOp::EQ) arg4 arg5
    );

    block! ((vm, coalesce_branch2_moves_v1) blk_add01);
    block! ((vm, coalesce_branch2_moves_v1) blk_add23);
    block! ((vm, coalesce_branch2_moves_v1) blk_ret);

    // two-way branch: operand 0 is cond; vec![1, 2] / vec![3, 4] are operand
    // indices selecting (arg0, arg1) and (arg2, arg3) as block arguments
    inst! ((vm, coalesce_branch2_moves_v1) blk_entry_branch2:
        BRANCH2 (cond, arg0, arg1, arg2, arg3)
        IF (OP 0)
        THEN blk_add01 (vec![1, 2]) WITH 0.6f32,
        ELSE blk_add23 (vec![3, 4])
    );

    define_block!((vm, coalesce_branch2_moves_v1) blk_entry (arg0, arg1, arg2, arg3, arg4, arg5) {
        blk_entry_cmp, blk_entry_branch2
    });

    // blk_add01: res01 = arg0 + arg1, then jump to blk_ret
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> blk_add01_arg0);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> blk_add01_arg1);

    ssa! ((vm, coalesce_branch2_moves_v1) <int64> res01);
    inst! ((vm, coalesce_branch2_moves_v1) blk_add01_add:
        res01 = BINOP (BinOp::Add) blk_add01_arg0 blk_add01_arg1
    );

    inst! ((vm, coalesce_branch2_moves_v1) blk_add01_branch:
        BRANCH blk_ret (res01)
    );

    define_block!((vm, coalesce_branch2_moves_v1) blk_add01 (blk_add01_arg0, blk_add01_arg1) {
        blk_add01_add, blk_add01_branch
    });

    // blk_add23: res23 = arg2 + arg3, then jump to blk_ret
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> blk_add23_arg2);
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> blk_add23_arg3);

    ssa! ((vm, coalesce_branch2_moves_v1) <int64> res23);
    inst! ((vm, coalesce_branch2_moves_v1) blk_add23_add:
        res23 = BINOP (BinOp::Add) blk_add23_arg2 blk_add23_arg3
    );

    inst! ((vm, coalesce_branch2_moves_v1) blk_add23_branch:
        BRANCH blk_ret (res23)
    );

    define_block!((vm, coalesce_branch2_moves_v1) blk_add23 (blk_add23_arg2, blk_add23_arg3) {
        blk_add23_add, blk_add23_branch
    });

    // blk_ret: return whichever sum arrived as the block argument
    ssa! ((vm, coalesce_branch2_moves_v1) <int64> res);
    inst! ((vm, coalesce_branch2_moves_v1) blk_ret_ret:
        RET (res)
    );

    define_block!((vm, coalesce_branch2_moves_v1) blk_ret (res) {
        blk_ret_ret
    });

    define_func_ver!((vm) coalesce_branch2_moves_v1 (entry: blk_entry){
        blk_entry, blk_add01, blk_add23, blk_ret
    });

    vm
}
\ No newline at end of file
Markdown is supported
0% or