Commit c70cd85a authored by qinsoon's avatar qinsoon

another test for passing arguments by stack

parent e0be1d23
......@@ -2703,14 +2703,12 @@ impl <'a> InstructionSelection {
// add x, rbp -> rbp (x is negative, however we do not know x now)
self.backend.emit_frame_grow();
// unload arguments
// unload arguments by registers
let mut gpr_arg_count = 0;
let mut fpr_arg_count = 0;
// initial stack arg is at RBP+16
// arg <- RBP + 16
// return addr
// old RBP <- RBP
let mut stack_arg_offset : i32 = 16;
let mut arg_by_stack = vec![];
for arg in args {
if arg.is_int_reg() {
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
......@@ -2725,14 +2723,15 @@ impl <'a> InstructionSelection {
gpr_arg_count += 1;
} else {
// unload from stack
let stack_slot = self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
self.current_frame.as_mut().unwrap().add_argument_by_stack(arg.id(), stack_slot);
// move stack_arg_offset by the size of 'arg'
let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
stack_arg_offset += arg_size as i32;
arg_by_stack.push(arg.clone());
// // unload from stack
// let stack_slot = self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
//
// self.current_frame.as_mut().unwrap().add_argument_by_stack(arg.id(), stack_slot);
//
// // move stack_arg_offset by the size of 'arg'
// let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
// stack_arg_offset += arg_size as i32;
}
} else if arg.is_fp_reg() {
if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
......@@ -2743,14 +2742,15 @@ impl <'a> InstructionSelection {
fpr_arg_count += 1;
} else {
// unload from stack
let stack_slot = self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
self.current_frame.as_mut().unwrap().add_argument_by_stack(arg.id(), stack_slot);
// move stack_arg_offset by the size of 'arg'
let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
stack_arg_offset += arg_size as i32;
arg_by_stack.push(arg.clone());
// // unload from stack
// let stack_slot = self.emit_load_base_offset(&arg, &x86_64::RBP.clone(), stack_arg_offset, vm);
//
// self.current_frame.as_mut().unwrap().add_argument_by_stack(arg.id(), stack_slot);
//
// // move stack_arg_offset by the size of 'arg'
// let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
// stack_arg_offset += arg_size as i32;
}
} else {
// args that are not fp or int (possibly struct/array/etc)
......@@ -2758,6 +2758,24 @@ impl <'a> InstructionSelection {
}
}
// deal with arguments passed by stack
// initial stack arg is at RBP+16
// arg <- RBP + 16
// return addr
// old RBP <- RBP
let mut stack_arg_base_offset : i32 = 16;
let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect();
let (_, _, stack_arg_offsets) = backend::sequetial_layout(&arg_by_stack_tys, vm);
// unload the args
let mut i = 0;
for arg in arg_by_stack {
let stack_slot = self.emit_load_base_offset(&arg, &x86_64::RBP, (stack_arg_base_offset + stack_arg_offsets[i] as i32), vm);
self.current_frame.as_mut().unwrap().add_argument_by_stack(arg.id(), stack_slot);
i += 1;
}
self.backend.end_block(block_name);
}
......
......@@ -573,6 +573,153 @@ fn pass_2_int8_args_by_stack() -> VM {
vm
}
#[test]
fn test_pass_mixed_args_by_stack() {
    VM::start_logging_trace();

    // Build the VM containing both the callee (foo8) and the caller.
    let vm = Arc::new(pass_mixed_args_by_stack());
    let compiler = Compiler::new(CompilerPolicy::default(), &vm);

    let func_foo = vm.id_of("foo8");
    let func_main = vm.id_of("pass_mixed_args_by_stack");

    {
        let funcs = vm.funcs().read().unwrap();
        let func_vers = vm.func_vers().read().unwrap();

        // Compile the callee first, then the caller.
        for func_id in vec![func_foo, func_main] {
            let func = funcs.get(&func_id).unwrap().read().unwrap();
            let mut func_ver = func_vers
                .get(&func.cur_ver.unwrap())
                .unwrap()
                .write()
                .unwrap();
            compiler.compile(&mut func_ver);
        }
    }

    vm.make_primordial_thread(func_main, true, vec![]);
    backend::emit_context(&vm);

    let executable = aot::link_primordial(
        vec![Mu("foo8"), Mu("pass_mixed_args_by_stack")],
        "test_pass_mixed_args_by_stack",
        &vm,
    );
    let output = aot::execute_nocheck(executable);

    // The generated binary exits with the value returned from the call
    // chain, which is expected to be 2.
    assert!(output.status.code().is_some());
    assert_eq!(output.status.code().unwrap(), 2);
}
/// Builds a VM with two functions for exercising argument passing by stack
/// when the argument types are mixed (int64 and int8):
/// - `foo8`: takes 8 arguments (six int64s that consume all argument GPRs,
///   then an int8 and an int64 that must travel on the stack) and returns
///   the last one.
/// - `pass_mixed_args_by_stack`: calls `foo8` with constants and exits with
///   the returned value (expected to be 2).
fn pass_mixed_args_by_stack() -> VM {
    // Inlining is disabled so the call to foo8 stays a real call and
    // actually exercises the stack-argument code path.
    let vm = VM::new_with_opts("init_mu --disable-inline");

    typedef! ((vm) int64 = mu_int(64));
    typedef! ((vm) int8  = mu_int(8));

    constdef! ((vm) <int64> int64_0 = Constant::Int(0));
    constdef! ((vm) <int64> int64_1 = Constant::Int(1));
    constdef! ((vm) <int64> int64_2 = Constant::Int(2));
    constdef! ((vm) <int8>  int8_1  = Constant::Int(1));
    constdef! ((vm) <int8>  int8_2  = Constant::Int(2));

    // foo8: the 7th (int8) and 8th (int64) arguments do not fit in the
    // six x86-64 argument GPRs, so they are passed by stack.
    funcsig! ((vm) foo8_sig = (int64, int64, int64, int64, int64, int64, int8, int64) -> (int64));
    funcdecl! ((vm) <foo8_sig> foo8);
    funcdef! ((vm) <foo8_sig> foo8 VERSION foo8_v1);

    // blk_entry: simply returns the last argument (v7).
    ssa! ((vm, foo8_v1) <int64> v0);
    ssa! ((vm, foo8_v1) <int64> v1);
    ssa! ((vm, foo8_v1) <int64> v2);
    ssa! ((vm, foo8_v1) <int64> v3);
    ssa! ((vm, foo8_v1) <int64> v4);
    ssa! ((vm, foo8_v1) <int64> v5);
    ssa! ((vm, foo8_v1) <int8>  v6);
    ssa! ((vm, foo8_v1) <int64> v7);

    block! ((vm, foo8_v1) blk_entry);

    ssa! ((vm, foo8_v1) <int64> res);
    inst! ((vm, foo8_v1) blk_entry_ret:
        RET (v7)
    );

    define_block!((vm, foo8_v1) blk_entry(v0, v1, v2, v3, v4, v5, v6, v7) {
        blk_entry_ret
    });

    define_func_ver!((vm) foo8_v1 (entry: blk_entry) {blk_entry});

    // pass_mixed_args_by_stack: main function; branches to blk_main with
    // the constant arguments, calls foo8, then exits with the result.
    funcsig! ((vm) sig = () -> ());
    funcdecl! ((vm) <sig> pass_mixed_args_by_stack);
    funcdef! ((vm) <sig> pass_mixed_args_by_stack VERSION pass_mixed_args_by_stack_v1);

    typedef! ((vm) type_funcref_foo8 = mu_funcref(foo8_sig));
    constdef! ((vm) <type_funcref_foo8> const_funcref_foo8 = Constant::FuncRef(vm.id_of("foo8")));

    // blk_entry: passes six zeros (filling the argument registers), then
    // an int8 1 and an int64 2 (the stack-passed arguments).
    consta! ((vm, pass_mixed_args_by_stack_v1) int64_0_local = int64_0);
    consta! ((vm, pass_mixed_args_by_stack_v1) int64_1_local = int64_1);
    consta! ((vm, pass_mixed_args_by_stack_v1) int64_2_local = int64_2);
    consta! ((vm, pass_mixed_args_by_stack_v1) int8_1_local  = int8_1);
    consta! ((vm, pass_mixed_args_by_stack_v1) int8_2_local  = int8_2);

    block! ((vm, pass_mixed_args_by_stack_v1) blk_entry);
    block! ((vm, pass_mixed_args_by_stack_v1) blk_main);
    inst! ((vm, pass_mixed_args_by_stack_v1) blk_entry_branch:
        BRANCH blk_main (
            int64_0_local,
            int64_0_local,
            int64_0_local,
            int64_0_local,
            int64_0_local,
            int64_0_local,
            int8_1_local,
            int64_2_local
        )
    );
    define_block!((vm, pass_mixed_args_by_stack_v1) blk_entry() {blk_entry_branch});

    // blk_main: forwards its block parameters to foo8, then exits the
    // process with the returned value via a ccall to exit().
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a0);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a1);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a2);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a3);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a4);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a5);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int8>  a6);
    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> a7);

    consta! ((vm, pass_mixed_args_by_stack_v1) const_funcref_foo8_local = const_funcref_foo8);

    ssa! ((vm, pass_mixed_args_by_stack_v1) <int64> retval);
    inst! ((vm, pass_mixed_args_by_stack_v1) blk_main_call:
        retval = EXPRCALL (CallConvention::Mu, is_abort: false) const_funcref_foo8_local (a0, a1, a2, a3, a4, a5, a6, a7)
    );

    let blk_main_exit = gen_ccall_exit(retval.clone(), &mut pass_mixed_args_by_stack_v1, &vm);

    inst! ((vm, pass_mixed_args_by_stack_v1) blk_main_ret:
        RET
    );

    define_block!((vm, pass_mixed_args_by_stack_v1) blk_main(a0, a1, a2, a3, a4, a5, a6, a7) {
        blk_main_call,
        blk_main_exit,
        blk_main_ret
    });

    define_func_ver!((vm) pass_mixed_args_by_stack_v1 (entry: blk_entry) {
        blk_entry,
        blk_main
    });

    vm
}
#[test]
fn test_pass_fp_arg() {
let lib = testutil::compile_fncs("pass_fp_arg", vec!["pass_fp_arg", "foo"], &pass_fp_arg);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment