Commit 3954f23e authored by qinsoon

[wip] implemented swapstack for x64, debugging on it

parent 4812aa9a
@@ -2127,7 +2127,7 @@ impl<'a> InstructionSelection {
                 is_exception,
                 ref args
             } => {
-                trace!("Instruction Selection on SWPASTACK-KILL");
+                trace!("Instruction Selection on SWAPSTACK-KILL");
                 self.emit_swapstack(
                     is_exception, // is_exception
                     true,         // is_kill
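Context for this hunk: Mu's SWAPSTACK comes in four flavors, depending on whether the old stack is killed and whether the target resumes normally or with an exception; the case above selects the KILL variant and forwards both flags to `emit_swapstack`. A hypothetical sketch of the flag matrix (names invented for illustration, not Zebu's actual types):

```rust
/// Hypothetical sketch of the SWAPSTACK variants implied by the
/// (is_exception, is_kill) flags -- illustration only.
#[derive(Clone, Copy, Debug)]
enum SwapStack {
    RetNormal,     // keep the old stack, resume target normally
    RetException,  // keep the old stack, resume target with an exception
    KillNormal,    // destroy the old stack, resume target normally
    KillException, // destroy the old stack, resume target with an exception
}

fn flags(v: SwapStack) -> (bool, bool) {
    // returns (is_exception, is_kill), matching the argument order
    // passed to emit_swapstack in the hunk above
    match v {
        SwapStack::RetNormal => (false, false),
        SwapStack::RetException => (true, false),
        SwapStack::KillNormal => (false, true),
        SwapStack::KillException => (true, true),
    }
}
```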
@@ -1199,7 +1199,8 @@ impl ASMCodeGen {
         &mut self,
         code: String,
         potentially_excepting: Option<MuName>,
-        arguments: Vec<P<Value>>,
+        use_vec: Vec<P<Value>>,
+        def_vec: Vec<P<Value>>,
         target: Option<(MuID, ASMLocation)>
     ) {
         let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
@@ -1207,20 +1208,13 @@ impl ASMCodeGen {
             let (id, loc) = target.unwrap();
             uses.insert(id, vec![loc]);
         }
-        for arg in arguments {
-            uses.insert(arg.id(), vec![]);
+        for u in use_vec {
+            uses.insert(u.id(), vec![]);
         }

         let mut defines: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
-        for reg in x86_64::CALLER_SAVED_GPRS.iter() {
-            if !defines.contains_key(&reg.id()) {
-                defines.insert(reg.id(), vec![]);
-            }
-        }
-        for reg in x86_64::CALLER_SAVED_FPRS.iter() {
-            if !defines.contains_key(&reg.id()) {
-                defines.insert(reg.id(), vec![]);
-            }
+        for d in def_vec {
+            defines.insert(d.id(), vec![]);
         }

         self.add_asm_inst_internal(
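The point of this hunk: `add_asm_call` used to mark every caller-saved register as defined by any call; now the caller passes explicit use and def vectors, which a swapstack site needs because its clobber set differs from a plain C call's. A minimal self-contained model of the new contract (stand-in types, not Zebu's):

```rust
use std::collections::HashMap;

type MuID = usize;

/// Stand-in for the reworked add_asm_call: the use/def maps come from
/// the vectors the caller supplies, with no hard-coded caller-saved set.
fn build_use_def_maps(
    use_vec: &[MuID],
    def_vec: &[MuID],
) -> (HashMap<MuID, Vec<usize>>, HashMap<MuID, Vec<usize>>) {
    let mut uses: HashMap<MuID, Vec<usize>> = HashMap::new();
    for &u in use_vec {
        uses.insert(u, vec![]); // used, but at no recorded asm location
    }
    let mut defines: HashMap<MuID, Vec<usize>> = HashMap::new();
    for &d in def_vec {
        defines.insert(d, vec![]); // clobbered by the call
    }
    (uses, defines)
}
```

Ordinary C call sites can recover the old behaviour by passing the caller-saved set as `def_vec` (see `ALL_CALLER_SAVED_REGS`, added later in this commit).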
@@ -3291,13 +3285,14 @@ impl CodeGenerator for ASMCodeGen {
         callsite: String,
         func: MuName,
         pe: Option<MuName>,
-        args: Vec<P<Value>>,
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>,
         is_native: bool
     ) -> ValueLocation {
         if is_native {
-            trace!("emit: call /*C*/ {}({:?})", func, args);
+            trace!("emit: call /*C*/ {}({:?})", func, uses);
         } else {
-            trace!("emit: call {}({:?})", func, args);
+            trace!("emit: call {}({:?})", func, uses);
         }

         let func = if is_native {
@@ -3312,7 +3307,7 @@ impl CodeGenerator for ASMCodeGen {
             format!("call {}@PLT", func)
         };

-        self.add_asm_call(asm, pe, args, None);
+        self.add_asm_call(asm, pe, uses, defs, None);

         self.add_asm_global_label(symbol(mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3323,14 +3318,15 @@ impl CodeGenerator for ASMCodeGen {
         callsite: String,
         func: &P<Value>,
         pe: Option<MuName>,
-        args: Vec<P<Value>>
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>
     ) -> ValueLocation {
         trace!("emit: call {}", func);

         let (reg, id, loc) = self.prepare_reg(func, 6);
         let asm = format!("call *{}", reg);

         // the call uses the register
-        self.add_asm_call(asm, pe, args, Some((id, loc)));
+        self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));

         self.add_asm_global_label(symbol(mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3342,7 +3338,8 @@ impl CodeGenerator for ASMCodeGen {
         callsite: String,
         func: &P<Value>,
         pe: Option<MuName>,
-        args: Vec<P<Value>>
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>
     ) -> ValueLocation {
         trace!("emit: call {}", func);
         unimplemented!()
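All three call emitters now thread `uses`/`defs` through to `add_asm_call`. The payoff is register-allocation precision: a register in the def set is treated as clobbered across the call, so listing only the genuinely clobbered registers lets live values stay in the rest. A toy, self-contained illustration of that effect (register names are SysV x86-64; the "precise" set is hypothetical):

```rust
/// Toy model: a value survives a call iff its register is not in the
/// call's def (clobber) set. Over-approximating defs -- the old
/// behaviour of assuming every caller-saved register -- forces spills
/// that a precise set avoids.
fn survives_call(value_reg: &str, call_defs: &[&str]) -> bool {
    !call_defs.contains(&value_reg)
}

#[test]
fn precise_defs_spare_a_register() {
    let all_caller_saved = ["rax", "rcx", "rdx", "rsi", "rdi",
                            "r8", "r9", "r10", "r11"];
    let precise = ["rax", "rdx"]; // hypothetical clobber set of one site
    assert!(!survives_call("rcx", &all_caller_saved)); // old: must spill
    assert!(survives_call("rcx", &precise));           // new: can stay
}
```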
@@ -19,16 +19,19 @@ pub mod mu {
     pub use super::c::*;
 }

+pub mod swapstack {
+    pub use super::c::compute_arguments;
+    pub use super::c::compute_stack_args;
+    pub use super::c::compute_arguments as compute_return_values;
+    pub use super::c::compute_stack_args as compute_stack_retvals;
+}
+
 pub mod c {
     use super::*;

     /// computes arguments for the function signature,
     /// returns a vector of CallConvResult for each argument type
-    pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> {
-        compute_arguments_by_type(&sig.arg_tys)
-    }
-
-    pub fn compute_arguments_by_type(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
+    pub fn compute_arguments(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
         let mut ret = vec![];

         let mut gpr_arg_count = 0;
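The new `swapstack` module is pure re-exports: values passed to a swapstack target follow the C argument convention, and, notably, values coming back do too — `compute_return_values` is an alias for `compute_arguments`, so swap-back values land in the argument registers rather than the C return registers. A self-contained illustration of that aliasing (simplified to integer registers, SysV x86-64 argument order):

```rust
/// Under the aliased convention, the i-th scalar coming back from a
/// stack swap occupies the same register the i-th argument would.
const ARG_GPRS: [&str; 6] = ["rdi", "rsi", "rdx", "rcx", "r8", "r9"];

fn swapstack_arg_reg(i: usize) -> Option<&'static str> {
    ARG_GPRS.get(i).copied()
}

// compute_return_values == compute_arguments, so the same lookup
// answers both questions:
fn swapstack_retval_reg(i: usize) -> Option<&'static str> {
    swapstack_arg_reg(i)
}
```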
@@ -80,15 +83,30 @@ pub mod c {
         ret
     }

+    pub fn compute_stack_args(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
+        let callconv = compute_arguments(tys);
+
+        let mut stack_arg_tys = vec![];
+        for i in 0..callconv.len() {
+            let ref cc = callconv[i];
+            match cc {
+                &CallConvResult::STACK => stack_arg_tys.push(tys[i].clone()),
+                _ => {}
+            }
+        }
+
+        compute_stack_locations(&stack_arg_tys, vm)
+    }
+
     /// computes the return values for the function signature,
     /// returns a vector of CallConvResult for each return type
-    pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> {
+    pub fn compute_return_values(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
         let mut ret = vec![];

         let mut gpr_ret_count = 0;
         let mut fpr_ret_count = 0;

-        for ty in sig.ret_tys.iter() {
+        for ty in tys.iter() {
             if RegGroup::get_from_ty(ty) == RegGroup::GPR {
                 if gpr_ret_count < x86_64::RETURN_GPRS.len() {
                     let ret_gpr = {
@@ -131,29 +149,29 @@ pub mod c {
         ret
     }

-    pub fn compute_stack_args(sig: &MuFuncSig, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
-        let callconv = compute_arguments(sig);
+    pub fn compute_stack_retvals(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
+        let callconv = compute_return_values(tys);

-        let mut stack_arg_tys = vec![];
+        let mut stack_ret_val_tys = vec![];
         for i in 0..callconv.len() {
             let ref cc = callconv[i];
             match cc {
-                &CallConvResult::STACK => stack_arg_tys.push(sig.arg_tys[i].clone()),
+                &CallConvResult::STACK => stack_ret_val_tys.push(tys[i].clone()),
                 _ => {}
             }
         }

-        compute_stack_args_by_type(&stack_arg_tys, vm)
+        compute_stack_locations(&stack_ret_val_tys, vm)
     }

-    /// computes the return area on the stack for the function signature,
-    /// returns a tuple of (size, offset for each stack argument)
-    pub fn compute_stack_args_by_type(
-        stack_arg_tys: &Vec<P<MuType>>,
+    /// computes the area on the stack for a list of types that need to be put on the stack,
+    /// returns a tuple of (size, offset for each value on the stack)
+    pub fn compute_stack_locations(
+        stack_val_tys: &Vec<P<MuType>>,
         vm: &VM
     ) -> (ByteSize, Vec<ByteSize>) {
         let (stack_arg_size, _, stack_arg_offsets) =
-            BackendType::sequential_layout(stack_arg_tys, vm);
+            BackendType::sequential_layout(stack_val_tys, vm);

         // "The end of the input argument area shall be aligned on a 16
         // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
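`compute_stack_locations` (formerly `compute_stack_args_by_type`) lays the values out sequentially via `BackendType::sequential_layout` and rounds the total to the 16-byte boundary the psABI quote demands. A self-contained model of that layout, with plain (size, align) pairs standing in for `MuType`:

```rust
/// Minimal model of compute_stack_locations: lay out (size, align)
/// pairs sequentially; return the total size rounded up to 16 bytes
/// plus the offset of each value. Zebu's real version derives sizes
/// and alignments from MuType/BackendType.
fn stack_locations(vals: &[(usize, usize)]) -> (usize, Vec<usize>) {
    let mut offsets = Vec::with_capacity(vals.len());
    let mut cur = 0usize;
    for &(size, align) in vals {
        cur = (cur + align - 1) / align * align; // align this slot
        offsets.push(cur);
        cur += size;
    }
    // "The end of the input argument area shall be aligned on a 16
    // byte boundary" - x86-64 psABI
    let total = (cur + 15) / 16 * 16;
    (total, offsets)
}

#[test]
fn layout_is_sequential_and_16b_aligned() {
    let (size, offs) = stack_locations(&[(8, 8), (4, 4), (8, 8)]);
    assert_eq!(offs, vec![0, 8, 16]);
    assert_eq!(size, 32); // 24 bytes, rounded up to a 16-byte boundary
}
```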
@@ -229,7 +229,8 @@ pub trait CodeGenerator {
         callsite: String,
         func: MuName,
         pe: Option<MuName>,
-        args: Vec<P<Value>>,
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>,
         is_native: bool
     ) -> ValueLocation;

     fn emit_call_near_r64(
@@ -237,14 +238,16 @@ pub trait CodeGenerator {
         callsite: String,
         func: &P<Value>,
         pe: Option<MuName>,
-        args: Vec<P<Value>>
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>
     ) -> ValueLocation;

     fn emit_call_near_mem64(
         &mut self,
         callsite: String,
         func: &P<Value>,
         pe: Option<MuName>,
-        args: Vec<P<Value>>
+        uses: Vec<P<Value>>,
+        defs: Vec<P<Value>>
     ) -> ValueLocation;

     fn emit_ret(&mut self);
@@ -501,6 +501,18 @@ lazy_static! {
         ret.extend_from_slice(&ALL_USABLE_FPRS);
         ret
     };
+
+    /// all the caller saved registers
+    pub static ref ALL_CALLER_SAVED_REGS : Vec<P<Value>> = {
+        let mut ret = vec![];
+        for r in CALLER_SAVED_GPRS.iter() {
+            ret.push(r.clone());
+        }
+        for r in CALLER_SAVED_FPRS.iter() {
+            ret.push(r.clone());
+        }
+        ret
+    };
 }

 /// creates context for each machine register in FunctionContext
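`ALL_CALLER_SAVED_REGS` simply concatenates the GPR and FPR caller-saved lists, giving ordinary call sites a ready-made def set now that `add_asm_call` takes one explicitly. A behaviour-preserving alternative using iterator chaining, assuming it sits inside the same `lazy_static!` block with the same `CALLER_SAVED_GPRS`/`CALLER_SAVED_FPRS` statics:

```rust
/// all the caller saved registers (GPRs followed by FPRs)
pub static ref ALL_CALLER_SAVED_REGS : Vec<P<Value>> = CALLER_SAVED_GPRS
    .iter()
    .chain(CALLER_SAVED_FPRS.iter())
    .cloned()
    .collect();
```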
@@ -204,6 +204,18 @@ lazy_static! {
         jit: RwLock::new(None),
     };
+
+    pub static ref THROW_EXCEPTION_INTERNAL: RuntimeEntrypoint = RuntimeEntrypoint {
+        sig: P(MuFuncSig {
+            hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
+            ret_tys: vec![],
+            arg_tys: vec![ADDRESS_TYPE.clone(), ADDRESS_TYPE.clone()]
+        }),
+        aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("throw_exception_internal")),
+        jit: RwLock::new(None)
+    };
 }

 lazy_static!{
     // impl/decl: math.rs
     pub static ref FREM32 : RuntimeEntrypoint = RuntimeEntrypoint {
         sig: P(MuFuncSig{
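The new `THROW_EXCEPTION_INTERNAL` entrypoint declares two address-sized arguments and no return values, resolved against the `throw_exception_internal` symbol at AOT link time. A hypothetical sketch of the Rust runtime function that symbol would have to match — the parameter meanings are assumptions, not taken from this diff:

```rust
use std::ffi::c_void;

/// Hypothetical counterpart of the "throw_exception_internal" symbol:
/// two address-sized arguments, nothing returned (ret_tys is empty).
/// What the addresses mean is assumed here: an exception object and a
/// frame cursor at which unwinding begins.
#[no_mangle]
pub extern "C" fn throw_exception_internal(
    exception_obj: *const c_void, // assumed: the exception reference
    frame_cursor: *const c_void,  // assumed: where unwinding starts
) {
    // The real entrypoint would unwind frames and transfer control to a
    // handler, never returning normally; this stub only documents the
    // calling shape.
    let _ = (exception_obj, frame_cursor);
    unimplemented!("illustrative stub, not Zebu's implementation")
}
```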