Commit b70468ea authored by qinsoon's avatar qinsoon

more refactoring on x86_64 callconv

parent 34025009
...@@ -3,6 +3,9 @@ use ast::ptr::*; ...@@ -3,6 +3,9 @@ use ast::ptr::*;
use ast::types::*; use ast::types::*;
use compiler::backend::RegGroup; use compiler::backend::RegGroup;
use compiler::backend::x86_64; use compiler::backend::x86_64;
use compiler::backend::BackendType;
use utils::ByteSize;
use vm::VM;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum CallConvResult { pub enum CallConvResult {
...@@ -19,6 +22,8 @@ pub mod mu { ...@@ -19,6 +22,8 @@ pub mod mu {
pub mod c { pub mod c {
use super::*; use super::*;
/// computes arguments for the function signature,
/// returns a vector of CallConvResult for each argument type
pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> { pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> {
let mut ret = vec![]; let mut ret = vec![];
...@@ -71,6 +76,8 @@ pub mod c { ...@@ -71,6 +76,8 @@ pub mod c {
ret ret
} }
/// computes the return values for the function signature,
/// returns a vector of CallConvResult for each return type
pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> { pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> {
let mut ret = vec![]; let mut ret = vec![];
...@@ -119,4 +126,33 @@ pub mod c { ...@@ -119,4 +126,33 @@ pub mod c {
ret ret
} }
/// computes the stack argument area for a list of types that need to be
/// passed on the stack, returns a tuple of
/// (size of the area including alignment padding, offset of each argument
/// within the area)
pub fn compute_stack_args(
    stack_arg_tys: &Vec<P<MuType>>,
    vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
    // lay the arguments out sequentially, each at its natural alignment
    let (stack_arg_size, _, stack_arg_offsets) =
        BackendType::sequential_layout(stack_arg_tys, vm);

    // "The end of the input argument area shall be aligned on a 16
    // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
    // if we need to special align the args, we do it now
    // (then the args will be put to stack following their regular alignment)
    //
    // rounding the total size up to the next multiple of 16 covers every
    // case: already 16-aligned (no padding), 8-aligned (+8), and any other
    // remainder (+16 - remainder)
    let stack_arg_size_with_padding = (stack_arg_size + 15) & !15;

    (stack_arg_size_with_padding, stack_arg_offsets)
}
} }
...@@ -28,7 +28,6 @@ use runtime::entrypoints; ...@@ -28,7 +28,6 @@ use runtime::entrypoints;
use runtime::entrypoints::RuntimeEntrypoint; use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass; use compiler::CompilerPass;
use compiler::backend::BackendType;
use compiler::backend::RegGroup; use compiler::backend::RegGroup;
use compiler::PROLOGUE_BLOCK_NAME; use compiler::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64; use compiler::backend::x86_64;
...@@ -3634,27 +3633,15 @@ impl<'a> InstructionSelection { ...@@ -3634,27 +3633,15 @@ impl<'a> InstructionSelection {
} }
if !stack_args.is_empty() { if !stack_args.is_empty() {
// "The end of the input argument area shall be aligned on a 16 use compiler::backend::x86_64::callconv;
// (32, if __m256 is passed on stack) byte boundary." - x86 ABI
// if we need to special align the args, we do it now
// (then the args will be put to stack following their regular alignment)
let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect(); let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
let (stack_arg_size, _, stack_arg_offsets) = let (stack_arg_size_with_padding, stack_arg_offsets) = match conv {
BackendType::sequential_layout(&stack_arg_tys, vm); CallConvention::Mu => callconv::mu::compute_stack_args(&stack_arg_tys, vm),
CallConvention::Foreign(ForeignFFI::C) => {
let mut stack_arg_size_with_padding = stack_arg_size; callconv::c::compute_stack_args(&stack_arg_tys, vm)
if stack_arg_size % 16 == 0 {
// do not need to adjust rsp
} else if stack_arg_size % 8 == 0 {
// adjust rsp by -8
stack_arg_size_with_padding += 8;
} else {
let rem = stack_arg_size % 16;
let stack_arg_padding = 16 - rem;
stack_arg_size_with_padding += stack_arg_padding;
} }
};
// now, we just put all the args on the stack // now, we just put all the args on the stack
{ {
...@@ -4197,9 +4184,11 @@ impl<'a> InstructionSelection { ...@@ -4197,9 +4184,11 @@ impl<'a> InstructionSelection {
// arg <- RBP + 16 // arg <- RBP + 16
// return addr // return addr
// old RBP <- RBP // old RBP <- RBP
{
use compiler::backend::x86_64::callconv::mu;
let stack_arg_base_offset: i32 = 16; let stack_arg_base_offset: i32 = 16;
let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect(); let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect();
let (_, _, stack_arg_offsets) = BackendType::sequential_layout(&arg_by_stack_tys, vm); let (_, stack_arg_offsets) = mu::compute_stack_args(&arg_by_stack_tys, vm);
// unload the args // unload the args
let mut i = 0; let mut i = 0;
...@@ -4218,6 +4207,7 @@ impl<'a> InstructionSelection { ...@@ -4218,6 +4207,7 @@ impl<'a> InstructionSelection {
i += 1; i += 1;
} }
} }
}
self.backend.end_block(block_name); self.backend.end_block(block_name);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment