Commit 971b9430 authored by Isaac Oscar Gariano's avatar Isaac Oscar Gariano

Merge branch 'swapstack' of gitlab.anu.edu.au:mu/mu-impl-fast into swapstack

parents be144f4f a51a3282
......@@ -1025,6 +1025,9 @@ pub enum MemoryOrder {
SeqCst
}
pub const C_CALL_CONVENTION: CallConvention = CallConvention::Foreign(ForeignFFI::C);
pub const MU_CALL_CONVENTION: CallConvention = CallConvention::Mu;
#[derive(Copy, Clone, Debug)]
pub enum CallConvention {
Mu,
......
......@@ -15,7 +15,6 @@
#![allow(unused_variables)]
use compiler::backend::AOT_EMIT_CONTEXT_FILE;
use compiler::backend::AOT_EMIT_SYM_TABLE_FILE;
use compiler::backend::RegGroup;
use utils::ByteSize;
use utils::Address;
......
use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use compiler::backend::RegGroup;
use compiler::backend::x86_64;
use compiler::backend::BackendType;
use utils::ByteSize;
use vm::VM;
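/// describes where a single value lives under the calling convention:
/// `GPR` is one general purpose register, `GPREX` is a pair of consecutive
/// GPRs (used for 128-bit values), `FPR` is an SSE register, and `STACK`
/// means the value goes in the stack argument area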
#[derive(Clone, Debug)]
pub enum CallConvResult {
GPR(P<Value>),
GPREX(P<Value>, P<Value>),
FPR(P<Value>),
STACK
}
pub mod mu {
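// the Mu calling convention is currently the same as the C convention,
// so we simply re-export it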
pub use super::c::*;
}
pub mod c {
use super::*;
/// computes how each argument of the given function signature is passed,
/// returns a vector with one CallConvResult per argument type
pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> {
let mut ret = vec![];
let mut gpr_arg_count = 0;
let mut fpr_arg_count = 0;
for ty in sig.arg_tys.iter() {
let arg_reg_group = RegGroup::get_from_ty(ty);
if arg_reg_group == RegGroup::GPR {
if gpr_arg_count < x86_64::ARGUMENT_GPRS.len() {
let arg_gpr = {
let ref reg64 = x86_64::ARGUMENT_GPRS[gpr_arg_count];
let expected_len = ty.get_int_length().unwrap();
x86_64::get_alias_for_length(reg64.id(), expected_len)
};
ret.push(CallConvResult::GPR(arg_gpr));
gpr_arg_count += 1;
} else {
// use stack to pass argument
ret.push(CallConvResult::STACK);
}
} else if arg_reg_group == RegGroup::GPREX {
// need two registers for this; otherwise, we pass it on the stack
if gpr_arg_count + 1 < x86_64::ARGUMENT_GPRS.len() {
let arg_gpr1 = x86_64::ARGUMENT_GPRS[gpr_arg_count].clone();
let arg_gpr2 = x86_64::ARGUMENT_GPRS[gpr_arg_count + 1].clone();
ret.push(CallConvResult::GPREX(arg_gpr1, arg_gpr2));
gpr_arg_count += 2;
} else {
ret.push(CallConvResult::STACK);
}
} else if arg_reg_group == RegGroup::FPR {
if fpr_arg_count < x86_64::ARGUMENT_FPRS.len() {
let arg_fpr = x86_64::ARGUMENT_FPRS[fpr_arg_count].clone();
ret.push(CallConvResult::FPR(arg_fpr));
fpr_arg_count += 1;
} else {
ret.push(CallConvResult::STACK);
}
} else {
// fp const, struct, etc
unimplemented!();
}
}
ret
}
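// For example, assuming the usual System V AMD64 order for ARGUMENT_GPRS
// (rdi, rsi, rdx, rcx, r8, r9) and ARGUMENT_FPRS (xmm0-xmm7), a signature
// taking (int64, double, int128) would be classified as roughly
// [GPR(rdi), FPR(xmm0), GPREX(rsi, rdx)], and a 7th integer argument
// would fall through to STACK.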
/// computes how each return value of the given function signature is returned,
/// returns a vector with one CallConvResult per return type
pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> {
let mut ret = vec![];
let mut gpr_ret_count = 0;
let mut fpr_ret_count = 0;
for ty in sig.ret_tys.iter() {
if RegGroup::get_from_ty(ty) == RegGroup::GPR {
if gpr_ret_count < x86_64::RETURN_GPRS.len() {
let ret_gpr = {
let ref reg64 = x86_64::RETURN_GPRS[gpr_ret_count];
let expected_len = ty.get_int_length().unwrap();
x86_64::get_alias_for_length(reg64.id(), expected_len)
};
ret.push(CallConvResult::GPR(ret_gpr));
gpr_ret_count += 1;
} else {
// the return value is passed on the stack
ret.push(CallConvResult::STACK);
}
} else if RegGroup::get_from_ty(ty) == RegGroup::GPREX {
if gpr_ret_count + 1 < x86_64::RETURN_GPRS.len() {
let ret_gpr1 = x86_64::RETURN_GPRS[gpr_ret_count].clone();
let ret_gpr2 = x86_64::RETURN_GPRS[gpr_ret_count + 1].clone();
ret.push(CallConvResult::GPREX(ret_gpr1, ret_gpr2));
gpr_ret_count += 2;
} else {
ret.push(CallConvResult::STACK);
}
} else if RegGroup::get_from_ty(ty) == RegGroup::FPR {
// floating point register
if fpr_ret_count < x86_64::RETURN_FPRS.len() {
let ref ret_fpr = x86_64::RETURN_FPRS[fpr_ret_count];
ret.push(CallConvResult::FPR(ret_fpr.clone()));
fpr_ret_count += 1;
} else {
ret.push(CallConvResult::STACK);
}
} else {
// other types of return value
unimplemented!()
}
}
ret
}
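// Likewise, assuming the System V return registers (RETURN_GPRS = rax, rdx;
// RETURN_FPRS = xmm0, xmm1), a signature returning (int64, double) maps to
// [GPR(rax), FPR(xmm0)], while a 128-bit integer return occupies the
// rax:rdx pair as GPREX.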
/// computes the layout of the stack area for a list of types passed on the stack,
/// returns a tuple of (the total size including padding, and the offset of each stack argument)
pub fn compute_stack_args(
stack_arg_tys: &Vec<P<MuType>>,
vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
let (stack_arg_size, _, stack_arg_offsets) =
BackendType::sequential_layout(stack_arg_tys, vm);
// "The end of the input argument area shall be aligned on a 16
// (32, if __m256 is passed on stack) byte boundary." - x86 ABI
// if we need to specially align the args, we do it now
// (then the args will be put to stack following their regular alignment)
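// e.g. 24 bytes of stack arguments leave the area only 8-byte aligned
// (24 % 16 == 8), so 8 bytes of padding are added and the caller reserves
// 32 bytes in total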
let mut stack_arg_size_with_padding = stack_arg_size;
if stack_arg_size % 16 == 0 {
// do not need to adjust rsp
} else if stack_arg_size % 8 == 0 {
// adjust rsp by -8
stack_arg_size_with_padding += 8;
} else {
let rem = stack_arg_size % 16;
let stack_arg_padding = 16 - rem;
stack_arg_size_with_padding += stack_arg_padding;
}
(stack_arg_size_with_padding, stack_arg_offsets)
}
}
......@@ -26,6 +26,9 @@ use compiler::backend::x86_64::codegen::CodeGenerator;
mod asm_backend;
use compiler::backend::x86_64::asm_backend::ASMCodeGen;
/// call conventions
pub mod callconv;
// re-export a few functions for AOT compilation
#[cfg(feature = "aot")]
pub use compiler::backend::x86_64::asm_backend::emit_code;
......
......@@ -14,28 +14,26 @@
#include "asm_common_x64.S.inc"
# swap_stack_to(new_sp: Address, entry: Address, old_sp_loc: Address)
# %rdi %rsi %rdx
begin_func muthread_start_pass
# muthread_start_normal(new_sp: Address, old_sp_loc: Address)
# %rdi %rsi
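# saves the callee-saved registers on the current stack, records the old %rsp
# at old_sp_loc, switches %rsp to new_sp, then loads the arguments prepared on
# the new stack (see runtime_load_args in thread.rs) and calls the entry
# function stored there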
begin_func muthread_start_normal
# -- on old stack --
# C calling convention
# C calling convention - enter frame
pushq %rbp
movq %rsp, %rbp
# other callee-saved registers
# save callee saved registers
pushq %rbx
pushq %r12
pushq %r13
pushq %r14
pushq %r15
# save sp to %rbx
movq %rsp, 0(%rdx)
# save sp to old_sp_loc
movq %rsp, 0(%rsi)
# switch to new stack
movq %rdi, %rsp
# save entry function in %rax
movq %rsi, %rax
# -- on new stack --
# arguments (reverse order of thread.rs - runtime_load_args)
......@@ -56,19 +54,22 @@ begin_func muthread_start_pass
add $64, %rsp
# at this point new stack is clean (no intermediate values)
movq %rsp, %rbp
# on the stack it looks like this
# SP -> 0
# entry_func
# push a null pointer onto the stack; if the entry function tries to return, it will segfault
pushq $0
# push entry function and start it
pushq %rax
ret
end_func muthread_start_pass
# pop the 0 into %rbp
popq %rbp
# pop the entry func into %r10 (a caller-saved register, so it is safe for us to use)
popq %r10
call *%r10
end_func muthread_start_normal
# _swap_back_to_native_stack(sp_loc: Address)
# muentry_thread_exit(old_sp: Address)
# %rdi
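# switches %rsp back to the saved stack pointer old_sp, restores the
# callee-saved registers and frame pointer pushed by muthread_start_normal,
# and returns to its original caller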
begin_func muentry_swap_back_to_native_stack
movq 0(%rdi), %rsp
begin_func muentry_thread_exit
movq %rdi, %rsp
popq %r15
popq %r14
......@@ -78,7 +79,7 @@ begin_func muentry_swap_back_to_native_stack
popq %rbp
ret
end_func muentry_swap_back_to_native_stack
end_func muentry_thread_exit
# _get_current_frame_bp() -> Address
begin_func get_current_frame_bp
......
......@@ -1424,7 +1424,13 @@ fn create_empty_func_foo6(vm: &VM) {
RET
);
define_block! ((vm, foo6_v1) blk_entry() {
ssa! ((vm, foo6_v1) <int64> t0);
ssa! ((vm, foo6_v1) <int64> t1);
ssa! ((vm, foo6_v1) <int64> t2);
ssa! ((vm, foo6_v1) <int64> t3);
ssa! ((vm, foo6_v1) <int64> t4);
ssa! ((vm, foo6_v1) <int64> t5);
define_block! ((vm, foo6_v1) blk_entry(t0, t1, t2, t3, t4, t5) {
blk_entry_ret
});
......
......@@ -109,7 +109,7 @@ int main(int argc, char** argv) {
id_943 = bldr_60->gen_sym(bldr_60, "@c_bufsz");
bldr_60->new_const_int(bldr_60, id_943, id_929, 0x0000000000000006ull);
id_944 = bldr_60->gen_sym(bldr_60, "@sig__i64");
bldr_60->new_funcsig(bldr_60, id_944, (MuTypeNode [2]){id_931, id_929}, 2, (MuTypeNode [1]){id_929}, 1);
bldr_60->new_funcsig(bldr_60, id_944, NULL, 0, (MuTypeNode [1]){id_929}, 1);
id_945 = bldr_60->gen_sym(bldr_60, "@sig_i32voidpi64_i64");
bldr_60->new_funcsig(bldr_60, id_945, (MuTypeNode [3]){id_928, id_931, id_929}, 3, (MuTypeNode [1]){id_929}, 1);
id_946 = bldr_60->gen_sym(bldr_60, "@fnpsig_i32voidpi64_i64");
......