Commit b70468ea authored by qinsoon

more refactoring on x86_64 callconv

parent 34025009
@@ -3,6 +3,9 @@
 use ast::ptr::*;
 use ast::types::*;
 use compiler::backend::RegGroup;
 use compiler::backend::x86_64;
+use compiler::backend::BackendType;
+use utils::ByteSize;
+use vm::VM;
 
 #[derive(Clone, Debug)]
 pub enum CallConvResult {
@@ -19,6 +22,8 @@ pub mod mu {
 pub mod c {
     use super::*;
 
+    /// computes arguments for the function signature,
+    /// returns a vector of CallConvResult for each argument type
     pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> {
         let mut ret = vec![];
@@ -71,6 +76,8 @@ pub mod c {
         ret
     }
 
+    /// computes the return values for the function signature,
+    /// returns a vector of CallConvResult for each return type
     pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> {
         let mut ret = vec![];
@@ -119,4 +126,33 @@ pub mod c {
         ret
     }
-}
\ No newline at end of file
+
+    /// computes the stack argument area for a list of argument types,
+    /// returns a tuple of (size of the area, and offset of each stack argument)
+    pub fn compute_stack_args(
+        stack_arg_tys: &Vec<P<MuType>>,
+        vm: &VM
+    ) -> (ByteSize, Vec<ByteSize>) {
+        let (stack_arg_size, _, stack_arg_offsets) =
+            BackendType::sequential_layout(stack_arg_tys, vm);
+
+        // "The end of the input argument area shall be aligned on a 16
+        // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
+        // if we need to special align the args, we do it now
+        // (then the args will be put to stack following their regular alignment)
+        let mut stack_arg_size_with_padding = stack_arg_size;
+        if stack_arg_size % 16 == 0 {
+            // do not need to adjust rsp
+        } else if stack_arg_size % 8 == 0 {
+            // adjust rsp by -8
+            stack_arg_size_with_padding += 8;
+        } else {
+            let rem = stack_arg_size % 16;
+            let stack_arg_padding = 16 - rem;
+            stack_arg_size_with_padding += stack_arg_padding;
+        }
+
+        (stack_arg_size_with_padding, stack_arg_offsets)
+    }
+}
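
A note on the alignment branches above: taken together they simply round the stack argument area up to the next 16-byte boundary, which is what the quoted ABI sentence requires (sizes from sequential layout are at least 8-byte granular here, so the middle branch is the common odd case). A minimal standalone sketch of that rule, with a hypothetical function name that is not part of this patch:

```rust
/// Hypothetical mirror of the padding logic in compute_stack_args (not project code).
fn pad_stack_arg_area(stack_arg_size: usize) -> usize {
    let mut padded = stack_arg_size;
    if stack_arg_size % 16 == 0 {
        // already 16-byte aligned, rsp needs no extra adjustment
    } else if stack_arg_size % 8 == 0 {
        // remainder is 8: one extra 8-byte slot restores 16-byte alignment
        padded += 8;
    } else {
        // any other remainder: pad up to the next 16-byte boundary
        padded += 16 - stack_arg_size % 16;
    }
    padded
}

fn main() {
    assert_eq!(pad_stack_arg_area(0), 0);
    assert_eq!(pad_stack_arg_area(8), 16);
    assert_eq!(pad_stack_arg_area(20), 32);
    assert_eq!(pad_stack_arg_area(32), 32);
    // all three branches agree with the closed form (size + 15) & !15
    assert_eq!(pad_stack_arg_area(20), (20 + 15) & !15);
}
```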
@@ -28,7 +28,6 @@
 use runtime::entrypoints;
 use runtime::entrypoints::RuntimeEntrypoint;
 use compiler::CompilerPass;
-use compiler::backend::BackendType;
 use compiler::backend::RegGroup;
 use compiler::PROLOGUE_BLOCK_NAME;
 use compiler::backend::x86_64;
@@ -3634,27 +3633,15 @@ impl<'a> InstructionSelection {
         }
 
         if !stack_args.is_empty() {
-            // "The end of the input argument area shall be aligned on a 16
-            // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
-            // if we need to special align the args, we do it now
-            // (then the args will be put to stack following their regular alignment)
+            use compiler::backend::x86_64::callconv;
+
             let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
-            let (stack_arg_size, _, stack_arg_offsets) =
-                BackendType::sequential_layout(&stack_arg_tys, vm);
-
-            let mut stack_arg_size_with_padding = stack_arg_size;
-
-            if stack_arg_size % 16 == 0 {
-                // do not need to adjust rsp
-            } else if stack_arg_size % 8 == 0 {
-                // adjust rsp by -8
-                stack_arg_size_with_padding += 8;
-            } else {
-                let rem = stack_arg_size % 16;
-                let stack_arg_padding = 16 - rem;
-                stack_arg_size_with_padding += stack_arg_padding;
-            }
+            let (stack_arg_size_with_padding, stack_arg_offsets) = match conv {
+                CallConvention::Mu => callconv::mu::compute_stack_args(&stack_arg_tys, vm),
+                CallConvention::Foreign(ForeignFFI::C) => {
+                    callconv::c::compute_stack_args(&stack_arg_tys, vm)
+                }
+            };
 
             // now, we just put all the args on the stack
             {
@@ -4197,25 +4184,28 @@ impl<'a> InstructionSelection {
         // arg <- RBP + 16
         // return addr
        // old RBP <- RBP
-        let stack_arg_base_offset: i32 = 16;
-        let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect();
-        let (_, _, stack_arg_offsets) = BackendType::sequential_layout(&arg_by_stack_tys, vm);
-
-        // unload the args
-        let mut i = 0;
-        for arg in arg_by_stack {
-            let stack_slot = self.emit_load_base_offset(
-                &arg,
-                &x86_64::RBP,
-                (stack_arg_base_offset + stack_arg_offsets[i] as i32),
-                vm
-            );
-            self.current_frame
-                .as_mut()
-                .unwrap()
-                .add_argument_by_stack(arg.id(), stack_slot);
-
-            i += 1;
-        }
+        {
+            use compiler::backend::x86_64::callconv::mu;
+
+            let stack_arg_base_offset: i32 = 16;
+            let arg_by_stack_tys = arg_by_stack.iter().map(|x| x.ty.clone()).collect();
+            let (_, stack_arg_offsets) = mu::compute_stack_args(&arg_by_stack_tys, vm);
+
+            // unload the args
+            let mut i = 0;
+            for arg in arg_by_stack {
+                let stack_slot = self.emit_load_base_offset(
+                    &arg,
+                    &x86_64::RBP,
+                    (stack_arg_base_offset + stack_arg_offsets[i] as i32),
+                    vm
+                );
+                self.current_frame
+                    .as_mut()
+                    .unwrap()
+                    .add_argument_by_stack(arg.id(), stack_slot);
+
+                i += 1;
+            }
+        }
     }
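
For orientation, `stack_arg_base_offset` is 16 because the callee's frame starts with the caller's saved RBP at [RBP] and the return address at [RBP + 8], so stack argument i sits at [RBP + 16 + stack_arg_offsets[i]]. A toy sketch of that address calculation, using a hypothetical helper that is not project code:

```rust
/// Hypothetical helper: displacement of each stack argument from RBP
/// in the callee's frame, given the offsets from compute_stack_args.
fn stack_arg_displacements(stack_arg_offsets: &[usize]) -> Vec<i32> {
    // [RBP]      caller's saved RBP
    // [RBP + 8]  return address
    // [RBP + 16] first stack argument
    const STACK_ARG_BASE_OFFSET: i32 = 16;
    stack_arg_offsets
        .iter()
        .map(|&off| STACK_ARG_BASE_OFFSET + off as i32)
        .collect()
}

fn main() {
    // e.g. two 8-byte arguments laid out sequentially at offsets 0 and 8
    assert_eq!(stack_arg_displacements(&[0, 8]), vec![16, 24]);
}
```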