To protect your data, the CISO officer has suggested that users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

Commit f31749c9 authored by Isaac Oscar Gariano's avatar Isaac Oscar Gariano
Browse files

Fixed all errors that were found when running the python tests.

Also modified some of the rust tests so that they are applicable to aarch64.
parent c313f4d7
......@@ -667,7 +667,7 @@ impl fmt::Display for TreeNode {
match self.v {
TreeNode_::Value(ref pv) => pv.fmt(f),
TreeNode_::Instruction(ref inst) => {
write!(f, "{}", inst)
write!(f, "({})", inst)
}
}
}
......@@ -763,7 +763,8 @@ impl Value {
}
}
const DISPLAY_TYPE : bool = true;
const DISPLAY_ID : bool = true;
const DISPLAY_TYPE : bool = false;
const PRINT_ABBREVIATE_NAME: bool = true;
impl fmt::Debug for Value {
......@@ -786,7 +787,7 @@ impl fmt::Display for Value {
write!(f, "{}(@{})", ty, self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "{}(%{})", mem, self.hdr)
write!(f, "%{}{})", self.hdr, mem)
}
}
} else {
......@@ -801,7 +802,7 @@ impl fmt::Display for Value {
write!(f, "@{}", self.hdr)
},
Value_::Memory(ref mem) => {
write!(f, "{}(%{})", mem, self.hdr)
write!(f, "%{}{}", self.hdr, mem)
}
}
}
......@@ -1170,12 +1171,11 @@ impl PartialEq for MuEntityHeader {
}
}
const DISPLAY_ID : bool = false;
impl fmt::Display for MuEntityHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if DISPLAY_ID {
if self.name().is_none() {
write!(f, "UNAMED #{}", self.id)
write!(f, "{}", self.id)
} else {
if PRINT_ABBREVIATE_NAME {
write!(f, "{} #{}", self.abbreviate_name().unwrap(), self.id)
......
......@@ -43,87 +43,48 @@ pub trait CodeGenerator {
fn emit_ldr_callee_saved(&mut self, dest: Reg, src: Mem);
fn emit_str_callee_saved(&mut self, dest: Mem, src: Reg);
/* Below are all ARMv8-A AArch64 instruction mnemonics (with all operand modes) except:
PRFM, PRFUM, CRC32*
All advanced SIMD instructions (except MOVI)
/* DON'T IMPLEMENT
SIMD instructions (unless they operate solely on GPRs or Dn and Sn registers)
TODO: (maybe)
Other floating point instructions (that also operate on vectors)
(i've implemented all ones labeled as 'scalar')
(Other than those, all aarch64 instructions are implemented below)
(WAIT there are some strange loads and stores I may have missed)
(note the memory addresses should be for a 64-bit access)
PRFM (same addressing modes as a normal load)
PRFM (<prfop>|#<imm5>), [<Xn|SP>{, #<pimm>}]
PRFM (<prfop>|#<imm5>), <label>
PRFM (<prfop>|#<imm5>), [<Xn|SP>, (<Wm>|<Xm>){, <extend> {<amount>}}]
PRFUM (<prfop>|#<imm5>), [<Xn|SP>{, #<simm>}]
prfop is a string (or an imm5)
PRFM PRFM_imm
TODO:
LSLV, ASRV, LSRV ??
Cryptography instructions??
NOTE:
with loads and stores the mnemonic indicated may be given a suffix indicating the size and signedness of the access
also b_cond's mnemonic is 'B.cond'
also b_cond's mnemonic is 'B.cond' (where cond is the value of the 'cond' parameter)
all other instructions have the mnemonic being the first word of the function name after emit_
(subsequent words are used to disambiguate different overloads)
NOTE unless otherwise indicated:
An instruction that doesn't start with an F operates on GPRs; those that start with an F operate on FPRs.
All instructions operate on 32-bit and 64-bit registers (but all register arguments must be the same size)
Also all arguments that may take the SP can't take the ZR (and vice versa)
*/
// loads
fn emit_ldr(&mut self, dest: Reg, src: Mem, signed: bool);
//LDTR <Xt>, [<Xn|SP>{, #<simm>}]
fn emit_ldtr(&mut self, dest: Reg, src: Mem, signed: bool);
//LDUR <Xt>, [<Xn|SP>{, #<simm>}]
fn emit_ldur(&mut self, dest: Reg, src: Mem, signed: bool);
//LDXR <Xt>, [<Xn|SP>{,#0}]
fn emit_ldxr(&mut self, dest: Reg, src: Mem);
//LDAXR <Xt>, [<Xn|SP>{,#0}]
fn emit_ldaxr(&mut self, dest: Reg, src: Mem);
//LDAR <Xt>, [<Xn|SP>{,#0}]
fn emit_ldar(&mut self, dest: Reg, src: Mem);
// Load pair
//LDP <Xt1>, <Xt2>, [<Xn|SP>{, #simm7}]
//LDXP <Xt1>, <Xt2>, [<Xn|SP>{,#0}]
//LDAXP <Xt1>, <Xt2>, [<Xn|SP>{,#0}]
//LDNP <Xt1>, <Xt2>, [<Xn|SP>{, #simm7}]
fn emit_ldp(&mut self, dest1: Mem, dest2: Reg, src: Mem);
fn emit_ldxp(&mut self, dest1: Mem, dest2: Reg, src: Mem);
fn emit_ldaxp(&mut self, dest1: Mem, dest2: Reg, src: Mem);
fn emit_ldnp(&mut self, dest1: Mem, dest2: Reg, src: Mem);
fn emit_ldr(&mut self, dest: Reg/*GPR or FPR*/, src: Mem, signed: bool); // supports the full full range of addressing modes
fn emit_ldtr(&mut self, dest: Reg, src: Mem, signed: bool); // [base, #simm9]
fn emit_ldur(&mut self, dest: Reg/*GPR or FPR*/, src: Mem, signed: bool); // [base, #simm9]
fn emit_ldxr(&mut self, dest: Reg, src: Mem);// [base]
fn emit_ldaxr(&mut self, dest: Reg, src: Mem);// [base]
fn emit_ldar(&mut self, dest: Reg, src: Mem);// [base]
fn emit_ldp(&mut self, dest1: Reg, dest2: Reg/*GPR or FPR*/, src: Mem); // [base, #simm7], [base], #simm7, [base, #simm7]!
fn emit_ldxp(&mut self, dest1: Reg, dest2: Reg, src: Mem); // [base]
fn emit_ldaxp(&mut self, dest1: Reg, dest2: Reg, src: Mem); // [base]
fn emit_ldnp(&mut self, dest1: Reg/*GPR or FPR*/, dest2: Reg/*GPR or FPR*/, src: Mem); // [base, #simm7]
// Stores
// TODO: Modify STXP, STLXP, STXR and STLXR
// WARNING: LDR and STR have a wider range of valid addressing modes than the other loads and stores (specifically, atomics are base only)
// Note: for consistency the destination argument is placed first
// even though the output assembly code will have the src argument first
fn emit_str(&mut self, dest: Mem, src: Reg);
//STTR <Xt>, [<Xn|SP>{, #<simm>}]
//STUR <Xt>, [<Xn|SP>{, #<simm>}]
//STLR <Xt>, [<Xn|SP>{,#0}]
//STXR Ws, Rt, [Xn|SP]
//STLXR <Ws>, <Xt>, [<Xn|SP>{,#0}]
fn emit_sttr(&mut self, dest: Mem, src: Reg);
fn emit_stur(&mut self, dest: Mem, src: Reg);
fn emit_stlr(&mut self, dest: Mem, src: Reg);
fn emit_stxr(&mut self, dest: Mem, status: Reg, src: Reg);
fn emit_stlxr(&mut self, dest: Mem, status: Reg, src: Reg);
// Store Pairs
// STP <Xt1>, <Xt2>, [<Xn|SP>{, #simm7}]
// STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]
// STLXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]
// STNP <Xt1>, <Xt2>, [<Xn|SP>{, #simm7}]
fn emit_stp(&mut self, dest: Mem, src1: Reg, src2: Reg);
fn emit_stxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg);
fn emit_stlxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg);
fn emit_stnp(&mut self, dest: Mem, src1: Reg, src2: Reg);
// Stores
fn emit_str(&mut self, dest: Mem, src: Reg/*GPR or FPR*/); // supports the full full range of addressing modes
fn emit_sttr(&mut self, dest: Mem, src: Reg); // [base, #simm9]
fn emit_stur(&mut self, dest: Mem, src: Reg/*GPR or FPR*/); // [base, #simm9]
fn emit_stlr(&mut self, dest: Mem, src: Reg); // [base]
fn emit_stxr(&mut self, dest: Mem, status: Reg, src: Reg); // [base]
fn emit_stlxr(&mut self, dest: Mem, status: Reg, src: Reg); // [base]
fn emit_stp(&mut self, dest: Mem, src1: Reg, src2: Reg); // [base, #simm7], [base], #simm7, [base, #simm7]!
fn emit_stxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg); // [base]
fn emit_stlxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg); // [base]
fn emit_stnp(&mut self, dest: Mem, src1: Reg/*GPR or FPR*/, src2: Reg/*GPR or FPR*/); // [base, #simm7]
// branching
......@@ -156,31 +117,31 @@ TODO:
fn emit_negs(&mut self, dest: Reg, src: Reg);
fn emit_ngc(&mut self, dest: Reg, src: Reg);
fn emit_ngcs(&mut self, dest: Reg, src: Reg);
fn emit_sxtb(&mut self, dest: Reg, src: Reg);
fn emit_sxth(&mut self, dest: Reg, src: Reg);
fn emit_sxtw(&mut self, dest: Reg, src: Reg);
fn emit_uxtb(&mut self, dest: Reg, src: Reg);
fn emit_sxtb(&mut self, dest: Reg/*32*/, src: Reg/*32*/);
fn emit_sxth(&mut self, dest: Reg/*32*/, src: Reg/*32*/);
fn emit_sxtw(&mut self, dest: Reg/*64*/, src: Reg/*32*/);
fn emit_uxtb(&mut self, dest: Reg/*32*/, src: Reg/*32*/);
fn emit_uxth(&mut self, dest: Reg/*32*/, src: Reg/*32*/);
fn emit_cls(&mut self, dest: Reg, src: Reg);
fn emit_clz(&mut self, dest: Reg, src: Reg);
fn emit_uxth(&mut self, dest: Reg, src: Reg);
fn emit_rbit(&mut self, dest: Reg, src: Reg);
fn emit_rev(&mut self, dest: Reg, src: Reg);
fn emit_rev16(&mut self, dest: Reg, src: Reg);
fn emit_rev32(&mut self, dest: Reg/*64*/, src: Reg);
fn emit_rev64(&mut self, dest: Reg/*64*/, src: Reg); // alias of REV
fn emit_fabs(&mut self, dest: Reg, src: Reg);
fn emit_fcvt(&mut self, dest: Reg, src: Reg);
fn emit_fcvtas(&mut self, dest: Reg, src: Reg);
fn emit_fcvtau(&mut self, dest: Reg, src: Reg);
fn emit_fcvtms(&mut self, dest: Reg, src: Reg);
fn emit_fcvtmu(&mut self, dest: Reg, src: Reg);
fn emit_fcvtns(&mut self, dest: Reg, src: Reg);
fn emit_fcvtnu(&mut self, dest: Reg, src: Reg);
fn emit_fcvtps(&mut self, dest: Reg, src: Reg);
fn emit_fcvtpu(&mut self, dest: Reg, src: Reg);
fn emit_fcvtzs(&mut self, dest: Reg, src: Reg);
fn emit_fcvtzu(&mut self, dest: Reg, src: Reg);
fn emit_fmov(&mut self, dest: Reg, src: Reg);
fn emit_fabs(&mut self, dest: Reg, src: Reg/*Must have different size*/);
fn emit_fcvt(&mut self, dest: Reg, src: Reg/*Must have different size*/);
fn emit_fcvtas(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtau(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtms(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtmu(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtns(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtnu(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtps(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtpu(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtzs(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtzu(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fmov(&mut self, dest: Reg, src: Reg); // One register must be an FPR, the other may be a GPR or an FPR
fn emit_fneg(&mut self, dest: Reg, src: Reg);
fn emit_frinta(&mut self, dest: Reg, src: Reg);
fn emit_frinti(&mut self, dest: Reg, src: Reg);
......@@ -190,8 +151,8 @@ TODO:
fn emit_frintx(&mut self, dest: Reg, src: Reg);
fn emit_frintz(&mut self, dest: Reg, src: Reg);
fn emit_fsqrt(&mut self, dest: Reg, src: Reg);
fn emit_scvtf(&mut self, dest: Reg, src: Reg);
fn emit_ucvtf(&mut self, dest: Reg, src: Reg);
fn emit_scvtf(&mut self, dest: Reg/*FPR, may have different size*/, src: Reg);
fn emit_ucvtf(&mut self, dest: Reg/*FPR, may have different size*/, src: Reg);
// Unary operations with shift
fn emit_mov_shift(&mut self, dest: Reg, src: Reg, shift: &str, ammount: u8);
......@@ -204,14 +165,14 @@ TODO:
fn emit_movz(&mut self, dest: Reg, src: u16, shift: u8);
fn emit_movk(&mut self, dest: Reg, src: u16, shift: u8);
fn emit_movn(&mut self, dest: Reg, src: u16, shift: u8);
fn emit_movi(&mut self, dest: Reg, src: u64);
fn emit_movi(&mut self, dest: Reg /*FPR*/, src: u64);
fn emit_fmov_imm(&mut self, dest: Reg, src: f32);
// Extended binary ops
fn emit_add_ext(&mut self, dest: Reg, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_adds_ext(&mut self, dest: Reg, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_sub_ext(&mut self, dest: Reg, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_subs_ext(&mut self, dest: Reg, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_add_ext(&mut self, dest: Reg/*GPR or SP*/, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
fn emit_adds_ext(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
fn emit_sub_ext(&mut self, dest: Reg/*GPR or SP*/, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
fn emit_subs_ext(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
// Multiplication
fn emit_mul(&mut self, dest: Reg, src1: Reg, src2: Reg);
......@@ -226,8 +187,8 @@ TODO:
// Other binaries
fn emit_adc(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_adcs(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_add(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_adds(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_add(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: Reg);
fn emit_adds(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: Reg);
fn emit_sbc(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_sbcs(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_sub(&mut self, dest: Reg, src1: Reg, src2: Reg);
......@@ -235,8 +196,11 @@ TODO:
fn emit_sdiv(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_udiv(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_asr(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_asrv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of ASR
fn emit_lsl(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_lslv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of LSL
fn emit_lsr(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_lsrv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of LSR
fn emit_ror(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_bic(&mut self, dest: Reg, src1: Reg, src2: Reg);
fn emit_bics(&mut self, dest: Reg, src1: Reg, src2: Reg);
......@@ -271,18 +235,15 @@ TODO:
fn emit_orr_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
// binary ops with immediates
// The 'str' will be patched by the linker (used to access global variables)
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str);
fn emit_add_imm(&mut self, dest: Reg/*GPR or SP*/, src1: Reg/*GPR or SP*/, src2: u16, shift: bool);
fn emit_adds_imm(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: u16, shift: bool);
fn emit_sub_imm(&mut self, dest: Reg/*GPR or SP*/, src1: Reg/*GPR or SP*/, src2: u16, shift: bool);
fn emit_subs_imm(&mut self, dest: Reg, src1: Reg/*GPR or SP*/, src2: u16, shift: bool);
fn emit_add_imm(&mut self, dest: Reg, src1: Reg, src2: u16, shift: bool);
fn emit_adds_imm(&mut self, dest: Reg, src1: Reg, src2: u16, shift: bool);
fn emit_sub_imm(&mut self, dest: Reg, src1: Reg, src2: u16, shift: bool);
fn emit_subs_imm(&mut self, dest: Reg, src1: Reg, src2: u16, shift: bool);
fn emit_and_imm(&mut self, dest: Reg, src1: Reg, src2: u64);
fn emit_and_imm(&mut self, dest: Reg/*GPR or SP*/, src1: Reg, src2: u64);
fn emit_ands_imm(&mut self, dest: Reg, src1: Reg, src2: u64);
fn emit_eor_imm(&mut self, dest: Reg, src1: Reg, src2: u64);
fn emit_orr_imm(&mut self, dest: Reg, src1: Reg, src2: u64);
fn emit_eor_imm(&mut self, dest: Reg/*GPR or SP*/, src1: Reg, src2: u64);
fn emit_orr_imm(&mut self, dest: Reg/*GPR or SP*/, src1: Reg, src2: u64);
fn emit_asr_imm(&mut self, dest: Reg, src1: Reg, src2: u8);
fn emit_lsr_imm(&mut self, dest: Reg, src1: Reg, src2: u8);
......@@ -321,8 +282,8 @@ TODO:
fn emit_fcmpe(&mut self, src1: Reg, src2: Reg);
// Comparisons with extension
fn emit_cmn_ext(&mut self, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_cmp_ext(&mut self, src1: Reg, src2: Reg, signed: bool, shift: u8);
fn emit_cmn_ext(&mut self, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
fn emit_cmp_ext(&mut self, src1: Reg/*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
// Comparisons with shift
fn emit_tst_shift(&mut self, src1: Reg, src2: Reg, shift: &str, ammount: u8);
......@@ -331,9 +292,9 @@ TODO:
// Immediate Comparisons
fn emit_tst_imm(&mut self, src1: Reg, src2: u64);
fn emit_cmn_imm(&mut self, src1: Reg, src2: u16, shift : bool);
fn emit_cmp_imm(&mut self, src1: Reg, src2: u16, shift : bool);
fn emit_cmn_imm(&mut self, src1: Reg/*GPR or SP*/, src2: u16, shift : bool);
fn emit_cmp_imm(&mut self, src1: Reg/*GPR or SP*/, src2: u16, shift : bool);
// Comparison against 0
fn emit_fcmp_0(&mut self, src: Reg);
fn emit_fcmpe_0(&mut self, src: Reg);
......@@ -404,5 +365,4 @@ TODO:
fn emit_smc(&mut self, val: u16);
fn emit_svc(&mut self, val: u16);
fn emit_eret(&mut self);
}
\ No newline at end of file
}
This diff is collapsed.
......@@ -200,11 +200,11 @@ fn emit_muir_dot_inner(file: &mut File,
file.write_fmt(format_args!("[{}]", block_content.exn_arg.as_ref().unwrap())).unwrap();
}
// :\n\n
file.write(":\\n\\n".as_bytes()).unwrap();
file.write(":\\l\\l".as_bytes()).unwrap();
// all the instructions
for inst in block_content.body.iter() {
file.write_fmt(format_args!("{}\\n", inst)).unwrap();
file.write_fmt(format_args!("{}\\l", inst)).unwrap();
}
// "];
......@@ -352,7 +352,7 @@ fn emit_mc_dot(func: &MuFunctionVersion, vm: &VM) {
for block_name in blocks.iter() {
// BB [label = "
file.write_fmt(format_args!("{} [label = \"{}:\\n\\n", id(block_name.clone()), block_name)).unwrap();
file.write_fmt(format_args!("{} [label = \"{}:\\l\\l", id(block_name.clone()), block_name)).unwrap();
for inst in mc.get_block_range(&block_name).unwrap() {
file.write(&mc.emit_inst(inst)).unwrap();
......
......@@ -108,6 +108,7 @@ impl MuStack {
}
#[cfg(target_arch = "aarch64")]
// TODO: What will happen if some things need to be loaded on the stack?
// TODO: Should we save XR (X8, the indirect locations result register)
// (NOTE: Any changes to here need to be reflected in swap_to_mu_stack)
pub fn runtime_load_args(&mut self, vals: Vec<ValueLocation>) {
......
......@@ -14,6 +14,8 @@ use mu::testutil;
use mu::testutil::aot;
use mu::utils::LinkedHashMap;
// NOTE: aarch64 has 2 more parameter registers than x86-64 so it needs different test cases for stack arguments
#[test]
fn test_ccall_exit() {
VM::start_logging_trace();
......@@ -136,6 +138,113 @@ fn test_pass_1arg_by_stack() {
assert_eq!(output.status.code().unwrap(), 1);
}
#[cfg(target_arch = "aarch64")]
// aarch64 has 2 more parameter registers than x86-64 so we need to modify the test
fn pass_1arg_by_stack() -> VM {
let vm = VM::new_with_opts("init_mu --disable-inline");
typedef! ((vm) int64 = mu_int(64));
constdef! ((vm) <int64> int64_0 = Constant::Int(0));
constdef! ((vm) <int64> int64_1 = Constant::Int(1));
constdef! ((vm) <int64> int64_2 = Constant::Int(2));
// foo7
funcsig! ((vm) foo7_sig = (int64, int64, int64, int64, int64, int64, int64, int64, int64) -> (int64));
funcdecl! ((vm) <foo7_sig> foo7);
funcdef! ((vm) <foo7_sig> foo7 VERSION foo7_v1);
// blk_entry
ssa! ((vm, foo7_v1) <int64> v0);
ssa! ((vm, foo7_v1) <int64> v1);
ssa! ((vm, foo7_v1) <int64> v2);
ssa! ((vm, foo7_v1) <int64> v3);
ssa! ((vm, foo7_v1) <int64> v4);
ssa! ((vm, foo7_v1) <int64> v5);
ssa! ((vm, foo7_v1) <int64> v6);
ssa! ((vm, foo7_v1) <int64> v7);
ssa! ((vm, foo7_v1) <int64> v8);
block! ((vm, foo7_v1) blk_entry);
inst! ((vm, foo7_v1) blk_entry_ret:
RET (v8)
);
define_block!((vm, foo7_v1) blk_entry(v0, v1, v2, v3, v4, v5, v6, v7, v8) {
blk_entry_ret
});
define_func_ver!((vm) foo7_v1 (entry: blk_entry) {blk_entry});
// pass_1arg_by_stack
funcsig! ((vm) sig = () -> ());
funcdecl! ((vm) <sig> pass_1arg_by_stack);
funcdef! ((vm) <sig> pass_1arg_by_stack VERSION pass_1arg_by_stack_v1);
typedef! ((vm) type_funcref_foo7 = mu_funcref(foo7_sig));
constdef! ((vm) <type_funcref_foo7> const_funcref_foo7 = Constant::FuncRef(vm.id_of("foo7")));
// blk_entry
consta! ((vm, pass_1arg_by_stack_v1) int64_0_local = int64_0);
consta! ((vm, pass_1arg_by_stack_v1) int64_1_local = int64_1);
block! ((vm, pass_1arg_by_stack_v1) blk_entry);
block! ((vm, pass_1arg_by_stack_v1) blk_main);
inst! ((vm, pass_1arg_by_stack_v1) blk_entry_branch:
BRANCH blk_main (
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_1_local
)
);
define_block!((vm, pass_1arg_by_stack_v1) blk_entry() {blk_entry_branch});
// blk_main
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a0);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a1);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a2);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a3);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a4);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a5);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a6);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a7);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> a8);
consta! ((vm, pass_1arg_by_stack_v1) const_funcref_foo7_local = const_funcref_foo7);
ssa! ((vm, pass_1arg_by_stack_v1) <int64> retval);
inst! ((vm, pass_1arg_by_stack_v1) blk_main_call:
retval = EXPRCALL (CallConvention::Mu, is_abort: false) const_funcref_foo7_local (a0, a1, a2, a3, a4, a5, a6, a7, a8)
);
let blk_main_exit = gen_ccall_exit(retval.clone(), &mut pass_1arg_by_stack_v1, &vm);
inst! ((vm, pass_1arg_by_stack_v1) blk_main_ret:
RET
);
define_block!((vm, pass_1arg_by_stack_v1) blk_main(a0, a1, a2, a3, a4, a5, a6, a7, a8) {
blk_main_call,
blk_main_exit,
blk_main_ret
});
define_func_ver!((vm) pass_1arg_by_stack_v1 (entry: blk_entry) {
blk_entry,
blk_main
});
vm
}
#[cfg(target_arch = "x86_64")]
fn pass_1arg_by_stack() -> VM {
let vm = VM::new_with_opts("init_mu --disable-inline");
......@@ -272,6 +381,117 @@ fn test_pass_2args_by_stack() {
assert_eq!(output.status.code().unwrap(), 2);
}
#[cfg(target_arch = "aarch64")]
// aarch64 has 2 more parameter registers than x86-64 so we need to modify the test
fn pass_2args_by_stack() -> VM {
let vm = VM::new_with_opts("init_mu --disable-inline");
typedef! ((vm) int64 = mu_int(64));
constdef! ((vm) <int64> int64_0 = Constant::Int(0));
constdef! ((vm) <int64> int64_1 = Constant::Int(1));
constdef! ((vm) <int64> int64_2 = Constant::Int(2));
// foo8
funcsig! ((vm) foo8_sig = (int64, int64, int64, int64, int64, int64, int64, int64, int64, int64) -> (int64));
funcdecl! ((vm) <foo8_sig> foo8);
funcdef! ((vm) <foo8_sig> foo8 VERSION foo8_v1);
// blk_entry
ssa! ((vm, foo8_v1) <int64> v0);
ssa! ((vm, foo8_v1) <int64> v1);
ssa! ((vm, foo8_v1) <int64> v2);
ssa! ((vm, foo8_v1) <int64> v3);
ssa! ((vm, foo8_v1) <int64> v4);
ssa! ((vm, foo8_v1) <int64> v5);
ssa! ((vm, foo8_v1) <int64> v6);
ssa! ((vm, foo8_v1) <int64> v7);
ssa! ((vm, foo8_v1) <int64> v8);
ssa! ((vm, foo8_v1) <int64> v9);
block! ((vm, foo8_v1) blk_entry);
inst! ((vm, foo8_v1) blk_entry_ret:
RET (v9)
);
define_block!((vm, foo8_v1) blk_entry(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) {
blk_entry_ret
});
define_func_ver!((vm) foo8_v1 (entry: blk_entry) {blk_entry});
// pass_2args_by_stack
funcsig! ((vm) sig = () -> ());
funcdecl! ((vm) <sig> pass_2args_by_stack);
funcdef! ((vm) <sig> pass_2args_by_stack VERSION pass_2args_by_stack_v1);
typedef! ((vm) type_funcref_foo8 = mu_funcref(foo8_sig));
constdef! ((vm) <type_funcref_foo8> const_funcref_foo8 = Constant::FuncRef(vm.id_of("foo8")));
// blk_entry
consta! ((vm, pass_2args_by_stack_v1) int64_0_local = int64_0);
consta! ((vm, pass_2args_by_stack_v1) int64_1_local = int64_1);
consta! ((vm, pass_2args_by_stack_v1) int64_2_local = int64_2);
block! ((vm, pass_2args_by_stack_v1) blk_entry);
block! ((vm, pass_2args_by_stack_v1) blk_main);
inst! ((vm, pass_2args_by_stack_v1) blk_entry_branch:
BRANCH blk_main (
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_0_local,
int64_1_local,
int64_2_local
)
);
define_block!((vm, pass_2args_by_stack_v1) blk_entry() {blk_entry_branch});
// blk_main
ssa! ((vm, pass_2args_by_stack_v1) <int64> a0);
ssa! ((vm, pass_2args_by_stack_v1) <int64> a1);
ssa! ((vm, pass_2args_by_stack_v1) <int64> a2);
ssa! ((vm, pass_2args_by_stack_v1) <int64> a3);