Commit 2c08ff8c authored by Isaac Oscar Gariano

Merge branch 'develop' of gitlab.anu.edu.au:mu/mu-impl-fast into aarch64-fixes

parents b844bfc3 ff42dd1e
Pipeline #1186 failed in 55 minutes and 18 seconds
......@@ -152,12 +152,12 @@ mubench:
- virtualenv -p python3 mubench_venv
- source mubench_venv/bin/activate
- pip install -Ue ./mu-perf-benchmarks
- mkdir example
- mubench local ./mu-perf-benchmarks/example/mu-impl-fast.yml --dump /home/gitlab-runner/results/$(git log -1 --pretty="%h_%at") --pipeline ""
- mkdir ci
- mubench local ./mu-perf-benchmarks/ci/*.yml --dump /home/gitlab-runner/results/$(git log -1 --pretty="%h_%at") --pipeline ""
- rsync -a /home/gitlab-runner/results/* squirrel:~/mu-impl-fast/angus
rustfmt:
stage: rustfmt
script:
- cargo-fmt -- --write-mode=diff --verbose -- src/ast/src/lib.rs src/gc/src/lib.rs src/utils/src/lib.rs
- cargo-fmt -- --write-mode=diff --verbose -- src/lib.rs src/ast/src/lib.rs src/gc/src/lib.rs src/utils/src/lib.rs
......@@ -536,6 +536,14 @@ impl ASMCode {
use std::any::Any;
impl MachineCode for ASMCode {
fn is_nop(&self, index: usize) -> bool {
let inst = &self.code[index];
inst.code.is_empty() || inst.code == "NOP"
}
fn as_any(&self) -> &Any {
self
}
......
......@@ -851,6 +851,16 @@ impl MachineCode for ASMCode {
self.code[index].code.clear();
}
/// is the instruction at the specified index a nop?
fn is_nop(&self, index: usize) -> bool {
let inst = &self.code[index];
inst.code.is_empty() || inst.code == "nop"
}
/// removes unnecessary push/pop pairs when the callee-saved register is not used;
/// returns which registers' push/pops were deleted, and the number of
/// callee-saved registers that were kept
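As an aside, a minimal sketch of what the doc comment above describes, with illustrative names rather than Zebu's actual data structures: push/pop pairs for callee-saved registers the body never touches get deleted, and the survivors are counted.
fn prune_callee_saves(saved: &[&str], used_in_body: &[&str]) -> (Vec<String>, usize) {
    let deleted: Vec<String> = saved
        .iter()
        .filter(|r| !used_in_body.contains(*r)) // never used => push/pop is dead
        .map(|r| r.to_string())
        .collect();
    let kept = saved.len() - deleted.len();
    (deleted, kept)
}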
......@@ -1598,8 +1608,8 @@ impl ASMCodeGen {
self.cur.take().unwrap()
}
/// emits an instruction (use 1 reg, define none)
fn internal_uniop_def_r(&mut self, inst: &str, op: &P<Value>) {
/// emits an instruction (use 0 reg, define 1)
fn internal_uniop_def_nouse_r(&mut self, inst: &str, op: &P<Value>) {
trace!("emit: {} {}", inst, op);
let (reg, id, loc) = self.prepare_reg(op, inst.len() + 1);
......@@ -1616,6 +1626,26 @@ impl ASMCodeGen {
)
}
/// emits an instruction (use 1 reg, define 1 reg)
fn internal_uniop_def_r(&mut self, inst: &str, op: &P<Value>) {
trace!("emit: {} {}", inst, op);
let (reg, id, loc) = self.prepare_reg(op, inst.len() + 1);
let asm = format!("{} {}", inst, reg);
self.add_asm_inst(
asm,
linked_hashmap!{
id => vec![loc.clone()]
},
linked_hashmap!{
id => vec![loc]
},
false
)
}
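// [Annotation, not part of this patch] The use/def split above matters for
// liveness: a setcc writes its operand purely from FLAGS (define only), while
// inc/dec read and rewrite theirs (use + define). A sketch of the standard
// backward liveness transfer these sets feed, with assumed types:
fn liveness_transfer(
    live_out: &mut std::collections::HashSet<usize>,
    uses: &[usize],
    defs: &[usize]
) {
    for d in defs {
        live_out.remove(d); // a definition kills liveness above this point
    }
    for u in uses {
        live_out.insert(*u); // a use makes the register live above it
    }
}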
/// emits an instruction (use 2 regs, define none)
fn internal_binop_no_def_r_r(&mut self, inst: &str, op1: &P<Value>, op2: &P<Value>) {
let len = check_op_len(op1);
......@@ -2598,46 +2628,46 @@ impl CodeGenerator for ASMCodeGen {
// set byte
fn emit_sets_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("sets", dest)
self.internal_uniop_def_nouse_r("sets", dest)
}
fn emit_setz_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("setz", dest)
self.internal_uniop_def_nouse_r("setz", dest)
}
fn emit_seto_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("seto", dest)
self.internal_uniop_def_nouse_r("seto", dest)
}
fn emit_setb_r8(&mut self, dest: Reg) {
self.internal_uniop_def_r("setb", dest)
self.internal_uniop_def_nouse_r("setb", dest)
}
fn emit_seta_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("seta", dest)
self.internal_uniop_def_nouse_r("seta", dest)
}
fn emit_setae_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setae", dest)
self.internal_uniop_def_nouse_r("setae", dest)
}
fn emit_setb_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setb", dest)
self.internal_uniop_def_nouse_r("setb", dest)
}
fn emit_setbe_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setbe", dest)
self.internal_uniop_def_nouse_r("setbe", dest)
}
fn emit_sete_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("sete", dest)
self.internal_uniop_def_nouse_r("sete", dest)
}
fn emit_setg_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setg", dest)
self.internal_uniop_def_nouse_r("setg", dest)
}
fn emit_setge_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setge", dest)
self.internal_uniop_def_nouse_r("setge", dest)
}
fn emit_setl_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setl", dest)
self.internal_uniop_def_nouse_r("setl", dest)
}
fn emit_setle_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setle", dest)
self.internal_uniop_def_nouse_r("setle", dest)
}
fn emit_setne_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("setne", dest)
self.internal_uniop_def_nouse_r("setne", dest)
}
// cmov src -> dest
......@@ -2814,6 +2844,20 @@ impl CodeGenerator for ASMCodeGen {
self.internal_binop_def_r_imm("sbb", dest, src)
}
// inc and dec
fn emit_inc_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("inc", dest)
}
fn emit_inc_mem(&mut self, dest: Mem) {
unimplemented!()
}
fn emit_dec_r(&mut self, dest: Reg) {
self.internal_uniop_def_r("dec", dest)
}
fn emit_dec_mem(&mut self, dest: Mem) {
unimplemented!()
}
fn emit_mul_r(&mut self, src: &P<Value>) {
let len = check_op_len(src);
......@@ -3516,6 +3560,14 @@ impl CodeGenerator for ASMCodeGen {
self.internal_fp_binop_no_def_r_r("ucomiss", op1, op2);
}
// bitwise - float
fn emit_xorps_f32_f32(&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("xorps", &dest, &src)
}
fn emit_xorpd_f64_f64(&mut self, dest: Reg, src: Reg) {
self.internal_fp_binop_def_r_r("xorpd", &dest, &src)
}
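// [Annotation, not part of this patch] xorps/xorpd are presumably added for
// sign-bit manipulation: XORing a float register with a sign-bit mask negates
// it without a round trip through the integer unit. The same trick, expressed
// on the bit pattern:
fn negate_f64(x: f64) -> f64 {
    f64::from_bits(x.to_bits() ^ (1u64 << 63)) // flip only the sign bit
}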
// add - double
fn emit_addsd_f64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
......
......@@ -178,6 +178,12 @@ pub trait CodeGenerator {
fn emit_sbb_r_mem(&mut self, dest: Reg, src: Mem);
fn emit_sbb_r_imm(&mut self, dest: Reg, src: i32);
// inc and dec
fn emit_inc_r(&mut self, dest: Reg);
fn emit_inc_mem(&mut self, dest: Mem);
fn emit_dec_r(&mut self, dest: Reg);
fn emit_dec_mem(&mut self, dest: Mem);
// multiply
fn emit_mul_r(&mut self, src: Reg);
fn emit_mul_mem(&mut self, src: Mem);
......@@ -320,6 +326,10 @@ pub trait CodeGenerator {
fn emit_comiss_f32_f32(&mut self, op1: Reg, op2: Reg);
fn emit_ucomiss_f32_f32(&mut self, op1: Reg, op2: Reg);
// fp bitwise
fn emit_xorps_f32_f32(&mut self, dest: Reg, src: Reg);
fn emit_xorpd_f64_f64(&mut self, dest: Reg, src: Reg);
// fp conversion
fn emit_cvtsi2sd_f64_r(&mut self, dest: Reg, src: Reg);
fn emit_cvtsd2si_r_f64(&mut self, dest: Reg, src: Reg);
......@@ -332,7 +342,6 @@ pub trait CodeGenerator {
fn emit_cvtss2sd_f64_f32(&mut self, dest: Reg, src: Reg);
// used for unsigned int to fp conversion
fn emit_cvttsd2si_r_f64(&mut self, dest: Reg, src: Reg);
fn emit_cvttss2si_r_f32(&mut self, dest: Reg, src: Reg);
......
......@@ -37,10 +37,19 @@ impl CompilerPass for PeepholeOptimization {
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut cf = compiled_funcs.get(&func.id()).unwrap().write().unwrap();
// remove redundant move first
for i in 0..cf.mc().number_of_insts() {
cf.mc().trace_inst(i);
// if two sides of a move instruction are the same,
// it is redundant, and can be eliminated
trace!("trying to remove redundant move");
self.remove_redundant_move(i, &mut cf);
}
// then remove jumps (because removing movs will affect this)
for i in 0..cf.mc().number_of_insts() {
cf.mc().trace_inst(i);
// if a branch jumps to a label that contains another jump, such as
// ..
......@@ -53,9 +62,11 @@ impl CompilerPass for PeepholeOptimization {
// the order matters: we need to run this first, then remove_unnecessary_jump()
// as this will give us more chances to remove unnecessary jumps
trace!("trying to remove jump-to-jump");
self.remove_jump_to_jump(i, &mut cf);
// if a branch targets a block that immediately follows it, it can be eliminated
trace!("trying to remove unnecessary jmp");
self.remove_unnecessary_jump(i, &mut cf);
}
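A compact sketch of the jump-to-jump chasing the loop above performs, with assumed data shapes rather than the real MachineCode interface; the cycle guard mirrors the visited-set in the pass:
use std::collections::{HashMap, HashSet};

// Follow a chain of unconditional jumps to its final target.
fn chase_jumps(start: usize, jmp_target: &HashMap<usize, usize>) -> usize {
    let mut visited = HashSet::new();
    let mut cur = start;
    while let Some(&next) = jmp_target.get(&cur) {
        if !visited.insert(cur) {
            break; // infinite jump loop: give up, as the pass does
        }
        cur = next;
    }
    cur
}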
......@@ -74,8 +85,6 @@ impl PeepholeOptimization {
fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
// if this instruction is a move from register to register (no memory operands)
if cf.mc().is_move(inst) && !cf.mc().is_using_mem_op(inst) {
cf.mc().trace_inst(inst);
// get source reg/temp ID
let src: MuID = {
let uses = cf.mc().get_inst_reg_uses(inst);
......@@ -166,34 +175,48 @@ impl PeepholeOptimization {
let opt_dest = mc.is_jmp(cur_inst);
match opt_dest {
Some(ref dest) => {
trace!("current instruction {} jumps to {}", cur_inst, dest);
// if we have already visited this label,
// we have hit an infinite loop and need to stop
if visited_labels.contains(dest) {
warn!("met an infinite loop in removing jump-to-jump");
warn!("we are not optimizing this case");
return;
} else {
visited_labels.insert(dest.clone());
debug!("visited {}", dest);
}
// get the block for destination
let first_inst = mc.get_block_range(dest).unwrap().start;
debug_assert!(
mc.is_label(first_inst).is_none(),
"expect start inst {} of block {} to be an inst instead of a label",
first_inst,
dest
);
let first_inst = {
let start = mc.get_block_range(dest).unwrap().start;
let last = mc.number_of_insts();
let mut first = start;
for i in start..last {
if mc.is_label(i).is_some() || mc.is_nop(i) {
continue;
} else {
first = i;
break;
}
}
first
};
trace!(
"examining first valid inst {} from block {}",
first_inst,
dest
);
trace!("examining first inst {} of block {}", first_inst, dest);
// if the first instruction is a jump
match mc.is_jmp(first_inst) {
Some(ref dest2) => {
// it's a jump-to-jump case
cur_inst = first_inst;
last_dest = Some(dest2.clone());
visited_labels.insert(dest2.clone());
debug!("visited {}", dest2);
}
None => break
}
......
......@@ -173,6 +173,8 @@ pub trait MachineCode {
fn is_move(&self, index: usize) -> bool;
/// is the specified index using memory operands?
fn is_using_mem_op(&self, index: usize) -> bool;
/// is the instruction at the specified index a nop?
fn is_nop(&self, index: usize) -> bool;
/// is the specified index a jump instruction? (unconditional jump)
/// returns an Option for target block
fn is_jmp(&self, index: usize) -> Option<MuName>;
......
......@@ -309,10 +309,11 @@ fn branch_adjustment(func: &mut MuFunctionVersion, vm: &VM) {
let next_block_in_trace: Option<usize> = {
if let Some(index) = trace.iter().position(|x| x == blk_id) {
if index == trace.len() {
if index >= trace.len() - 1 {
// there is no next block in the trace
None
} else {
Some(index + 1)
Some(trace[index + 1])
}
} else {
warn!("found an unreachable block (a block exists in IR, but is not in the trace)");
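The two changes in this hunk fix an off-by-one and an index/id mix-up: `position` returns an index strictly less than `trace.len()`, so the old `index == trace.len()` test never fired, and callers expect the id of the next block, `trace[index + 1]`, not the index itself. A self-contained rendering of the corrected logic:
fn next_block_in_trace(trace: &[usize], blk_id: usize) -> Option<usize> {
    let index = trace.iter().position(|x| *x == blk_id)?;
    if index >= trace.len() - 1 {
        None // blk_id is last in the trace; nothing follows it
    } else {
        Some(trace[index + 1]) // the id of the next block, not its index
    }
}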
......@@ -344,6 +345,14 @@ fn branch_adjustment(func: &mut MuFunctionVersion, vm: &VM) {
let true_label = true_dest.target;
let false_label = false_dest.target;
trace_if!(LOG_TRACE_SCHEDULE, "true_label = {}", true_label);
trace_if!(LOG_TRACE_SCHEDULE, "false_label = {}", false_label);
trace_if!(
LOG_TRACE_SCHEDULE,
"next_block_in_trace = {:?}",
next_block_in_trace
);
if next_block_in_trace.is_some() &&
next_block_in_trace.unwrap() == false_label
{
......@@ -500,7 +509,9 @@ fn branch_adjustment(func: &mut MuFunctionVersion, vm: &VM) {
new_body.push(new_cond_branch);
// add new false block to trace (immediately after this block)
if let Some(next_block_index) = next_block_in_trace {
if let Some(next_block) = next_block_in_trace {
let next_block_index =
trace.iter().position(|x| *x == next_block).unwrap();
trace.insert(next_block_index, new_false_block.id());
} else {
trace.push(new_false_block.id());
......
......@@ -267,7 +267,6 @@ pub fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> Obje
/// allocates an object in the immix space
// size doesn't include HEADER_SIZE
#[no_mangle]
#[inline(never)]
pub extern "C" fn muentry_alloc_fast(
mutator: *mut ImmixMutatorLocal,
size: usize,
......@@ -332,6 +331,21 @@ pub extern "C" fn muentry_alloc_large(
unsafe { ret.to_object_reference() }
}
#[no_mangle]
// size doesn't include HEADER_SIZE
pub extern "C" fn muentry_alloc_any(
mutator: *mut ImmixMutatorLocal,
size: usize,
align: usize
) -> ObjectReference {
let actual_size = size + OBJECT_HEADER_SIZE;
if actual_size <= LARGE_OBJECT_THRESHOLD {
muentry_alloc_fast(mutator, size, align)
} else {
muentry_alloc_large(mutator, size, align)
}
}
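// [Annotation, not part of this patch] muentry_alloc_any looks like the
// fallback for sizes unknown at compile time: the threshold test that the
// compiler would normally fold away runs at run time instead. Distilled form,
// with assumed constant values for illustration:
fn takes_fast_path(size: usize) -> bool {
    const OBJECT_HEADER_SIZE: usize = 8; // assumed for illustration
    const LARGE_OBJECT_THRESHOLD: usize = 2048; // assumed for illustration
    size + OBJECT_HEADER_SIZE <= LARGE_OBJECT_THRESHOLD
}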
/// initializes a fixed-size object
#[no_mangle]
#[inline(never)]
......
......@@ -60,7 +60,7 @@ pub fn link_primordial(funcs: Vec<MuName>, out: &str, vm: &VM) -> PathBuf {
ret.push(dest);
// include mu static lib
if vm.vm_options.flag_link_statically {
if vm.vm_options.flag_aot_link_static {
ret.push(get_path_under_zebu(if cfg!(debug_assertions) {
"target/debug/libmu.a"
} else {
......@@ -75,7 +75,7 @@ pub fn link_primordial(funcs: Vec<MuName>, out: &str, vm: &VM) -> PathBuf {
out_path.push(out);
link_executable_internal(
!vm.vm_options.flag_link_statically,
!vm.vm_options.flag_aot_link_static,
files,
&vm.vm_options.flag_bootimage_external_lib,
&vm.vm_options.flag_bootimage_external_libpath,
......@@ -120,7 +120,7 @@ pub fn link_test_primordial(funcs: Vec<MuName>, out: &str, vm: &VM) -> PathBuf {
ret.push(dest);
// include mu static lib
if vm.vm_options.flag_link_statically {
if vm.vm_options.flag_aot_link_static {
ret.push(get_path_under_zebu(if cfg!(debug_assertions) {
"target/debug/libmu.a"
} else {
......@@ -135,7 +135,7 @@ pub fn link_test_primordial(funcs: Vec<MuName>, out: &str, vm: &VM) -> PathBuf {
out_path.push(out);
link_executable_internal(
!vm.vm_options.flag_link_statically,
!vm.vm_options.flag_aot_link_static,
files,
&vm.vm_options.flag_bootimage_external_lib,
&vm.vm_options.flag_bootimage_external_libpath,
......
......@@ -96,6 +96,10 @@ lazy_static! {
"muentry_alloc_large",
vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()],
vec![ADDRESS_TYPE.clone()]);
pub static ref ALLOC_ANY : RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_alloc_any",
vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()],
vec![ADDRESS_TYPE.clone()]);
pub static ref INIT_OBJ : RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_init_object",
vec![ADDRESS_TYPE.clone(), ADDRESS_TYPE.clone(), UINT64_TYPE.clone()],
......
......@@ -1287,7 +1287,7 @@ struct BundleLoader<'lb, 'lvm> {
built_constint_of: HashMap<u64, P<Value>>,
current_sig: Option<P<MuFuncSig>>,
current_entry: MuID,
current_entry: MuID
}
fn load_bundle(b: &mut MuIRBuilder) {
......@@ -1328,7 +1328,7 @@ fn load_bundle(b: &mut MuIRBuilder) {
built_strong_variant: Default::default(),
built_constint_of: Default::default(),
current_sig: Default::default(),
current_entry: Default::default(),
current_entry: Default::default()
};
bl.load_bundle();
......@@ -2748,10 +2748,9 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
impl_to_ty.is_double())
}
ConvOp::REFCAST => {
(impl_from_ty.is_ref() || impl_from_ty.is_iref() ||
impl_from_ty.is_funcref()) &&
(impl_to_ty.is_ref() || impl_to_ty.is_iref() ||
impl_to_ty.is_funcref())
(impl_from_ty.is_ref() && impl_to_ty.is_ref()) ||
(impl_from_ty.is_iref() && impl_to_ty.is_iref()) ||
(impl_from_ty.is_funcref() && impl_to_ty.is_funcref())
}
ConvOp::PTRCAST => {
(impl_from_ty.is_ptr() || impl_from_ty.is_int()) &&
......
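The REFCAST tightening above is the key semantic fix in this hunk: the old predicate accepted any mix of ref/iref/funcref on the two sides (e.g. a REFCAST from a ref to a funcref type-checked), whereas the new one requires both sides to be the same kind of reference. Distilled, with an enum standing in for the real MuType queries:
#[derive(Clone, Copy, PartialEq)]
enum RefKind { Ref, IRef, FuncRef }

fn refcast_allowed(from: RefKind, to: RefKind) -> bool {
    from == to // ref->ref, iref->iref, funcref->funcref only
}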
......@@ -33,12 +33,13 @@ VM:
Compiler:
--disable-inline disable compiler function inlining
--disable-regalloc-validate disable register allocation validation
--disable-ir-validate disable IR validation
--emit-debug-info emit debugging information
--dont-validate-ir don't validate for invalid IR
AOT Compiler:
--aot-emit-dir=<dir> the emit directory for ahead-of-time compiling
[default: emit]
--link-statically link boot image to libmu statically (defaults to dynamic)
--aot-link-static link boot image to libmu statically (defaults to dynamic)
--bootimage-external-lib=<lib> ... library that will be linked against when making bootimage
[default: ]
--bootimage-external-libpath=<path> ... path for the libraries during bootimage generation
......@@ -62,11 +63,12 @@ pub struct VMOptions {
// Compiler
pub flag_disable_inline: bool,
pub flag_disable_regalloc_validate: bool,
pub flag_disable_ir_validate: bool,
pub flag_emit_debug_info: bool,
pub flag_dont_validate_ir: bool,
// AOT compiler
pub flag_aot_emit_dir: String,
pub flag_link_statically: bool,
pub flag_aot_link_static: bool,
pub flag_bootimage_external_lib: Vec<String>,
pub flag_bootimage_external_libpath: Vec<String>,
......@@ -88,9 +90,9 @@ rodal_struct!(VMOptions {
flag_log_level,
flag_disable_inline,
flag_disable_regalloc_validate,
flag_disable_ir_validate,
flag_emit_debug_info,
flag_dont_validate_ir,
flag_link_statically,
flag_aot_link_static,
flag_gc_disable_collection
});
......@@ -155,13 +157,13 @@ impl VMOptions {
}
if cfg!(target_os = "macos") {
if !ret.flag_link_statically {
if !ret.flag_aot_link_static {
warn!("aot-link-static is forced to true (overriding the user setting)");
ret.flag_link_statically = true;
ret.flag_aot_link_static = true;
}
}
unsafe{super::api::VALIDATE_IR = !ret.flag_dont_validate_ir};
unsafe { super::api::VALIDATE_IR = !ret.flag_disable_ir_validate };
ret
}
}
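Note the macOS branch above: --aot-link-static is forced on, presumably because dynamically linking libmu is not supported on that platform. The effective setting reduces to this (hypothetical helper):
fn effective_aot_link_static(user_flag: bool) -> bool {
    user_flag || cfg!(target_os = "macos") // macOS always links statically
}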
......
......@@ -930,7 +930,6 @@ macro_rules! emit_test {
);
consta! (($vm, $tester_name) int64_pass_local = int64_pass);
consta! (($vm, $tester_name) int64_fail_local = int64_fail);
inst! (($vm, $tester_name) blk_entry_inst_ret:
SET_RETVAL int64_pass_local
......
......@@ -26,7 +26,6 @@ use mu::utils::LinkedHashMap;
use std::sync::Arc;
use self::mu::linkutils::aot;
use self::mu::runtime::thread::check_result;
use self::mu::compiler::*;
use std::u64;
......@@ -650,3 +649,125 @@ fn add_int64_nzc() -> VM {
vm
}
#[test]
fn test_nest_mul_simple() {
VM::start_logging_trace();
let lib = linkutils::aot::compile_fnc("nest_mul_simple", &nest_mul_simple);
unsafe {
let nest_mul_simple: libloading::Symbol<unsafe extern "C" fn(u64, u64, u64) -> u64> =
lib.get(b"nest_mul_simple").unwrap();
let res = nest_mul_simple(2, 3, 4);
println!("mul(2, 3, 4) = {}", res);
assert_eq!(res, 24);
}
}
fn nest_mul_simple() -> VM {
let vm = VM::new();
typedef! ((vm) int64 = mu_int(64));
funcsig! ((vm) sig = (int64, int64, int64) -> (int64));
funcdecl! ((vm) <sig> nest_mul_simple);
funcdef! ((vm) <sig> nest_mul_simple VERSION nest_mul_simple_v1);
// %entry(%x, %y, %z)
block! ((vm, nest_mul_simple_v1) blk_entry);
ssa! ((vm, nest_mul_simple_v1) <int64> x);
ssa! ((vm, nest_mul_simple_v1) <int64> y);
ssa! ((vm, nest_mul_simple_v1) <int64> z);
// %a = MUL %x %y
ssa! ((vm, nest_mul_simple_v1) <int64> a);
inst! ((vm, nest_mul_simple_v1) blk_entry_mul1:
a = BINOP (BinOp::Mul) x y
);
// %b = MUL %a %z
ssa! ((vm, nest_mul_simple_v1) <int64> b);
inst! ((vm, nest_mul_simple_v1) blk_entry_mul2:
b = BINOP (BinOp::Mul) a z
);
// RET b
inst! ((vm, nest_mul_simple_v1) blk_entry_ret:
RET (b)
);
define_block!((vm, nest_mul_simple_v1) blk_entry(x, y, z) {
blk_entry_mul1,
blk_entry_mul2,
blk_entry_ret
});
define_func_ver!((vm) nest_mul_simple_v1(entry: blk_entry) {
blk_entry
});
vm
}
#[test]
fn test_nest_mul_times_10() {
VM::start_logging_trace();
let lib = linkutils::aot::compile_fnc("nest_mul_times_10", &nest_mul_times_10);
unsafe {
let nest_mul: libloading::Symbol<unsafe extern "C" fn(u64, u64) -> u64> =
lib.get(b"nest_mul_times_10").unwrap();
let res = nest_mul(2, 3);
println!("mul(2, 3) x 10 = {}", res);
assert_eq!(res, 60);
}
}
fn nest_mul_times_10() -> VM {
let vm = VM::new();
typedef! ((vm) int64 = mu_int(64));
constdef! ((vm) <int64> int64_10 = Constant::Int(10));
funcsig! ((vm) sig = (int64, int64) -> (int64));
funcdecl! ((vm) <sig> nest_mul_times_10);
funcdef! ((vm) <sig> nest_mul_times_10 VERSION nest_mul_times_10_v1);
// %entry(%x, %y)
block! ((vm, nest_mul_times_10_v1) blk_entry);
ssa! ((vm, nest_mul_times_10_v1) <int64> x);
ssa! ((vm, nest_mul_times_10_v1) <int64> y);
consta! ((vm, nest_mul_times_10_v1) int64_10_local = int64_10);
// %a = MUL %x %y
ssa! ((vm, nest_mul_times_10_v1) <int64> a);
inst! ((vm, nest_mul_times_10_v1) blk_entry_mul1:
a = BINOP (BinOp::Mul) x y
);
// %b = MUL 10 %a
ssa! ((vm, nest_mul_times_10_v1) <int64> b);
inst! ((vm, nest_mul_times_10_v1) blk_entry_mul2:
b = BINOP (BinOp::Mul) int64_10_local a
);
// RET b
inst! ((vm, nest_mul_times_10_v1) blk_entry_ret:
RET (b)
);
define_block!((vm, nest_mul_times_10_v1) blk_entry(x, y) {
blk_entry_mul1,
blk_entry_mul2,
blk_entry_ret
});
define_func_ver!((vm) nest_mul_times_10_v1(entry: blk_entry) {
blk_entry
});
vm
}
......@@ -24,7 +24,6 @@ use mu::utils::LinkedHashMap;
use std::sync::Arc;
use mu::linkutils::aot;
use mu::runtime::thread::check_result;
use mu::compiler::*;
#[test]
......@@ -1155,3 +1154,119 @@ fn branch2_high_prob_branch_cannot_fallthrough() -> VM {
vm
}
#[test]
fn test_branch_adjust_follow_by_neither() {
VM::start_logging_trace();
linkutils::aot::compile_fnc(
"branch_adjust_follow_by_neither",
&branch_adjust_follow_by_neither
);
}
fn branch_adjust_follow_by_neither() -> VM {