Commit 3954f23e authored by qinsoon

[wip] implemented swapstack for x64, debugging on it

parent 4812aa9a
@@ -2127,7 +2127,7 @@ impl<'a> InstructionSelection {
is_exception,
ref args
} => {
trace!("Instruction Selection on SWPASTACK-KILL");
trace!("Instruction Selection on SWAPSTACK-KILL");
self.emit_swapstack(
is_exception, // is_exception
true, // is_kill
......
@@ -1199,7 +1199,8 @@ impl ASMCodeGen {
&mut self,
code: String,
potentially_excepting: Option<MuName>,
arguments: Vec<P<Value>>,
use_vec: Vec<P<Value>>,
def_vec: Vec<P<Value>>,
target: Option<(MuID, ASMLocation)>
) {
let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
@@ -1207,20 +1208,13 @@ impl ASMCodeGen {
let (id, loc) = target.unwrap();
uses.insert(id, vec![loc]);
}
for arg in arguments {
uses.insert(arg.id(), vec![]);
for u in use_vec {
uses.insert(u.id(), vec![]);
}
let mut defines: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
for reg in x86_64::CALLER_SAVED_GPRS.iter() {
if !defines.contains_key(&reg.id()) {
defines.insert(reg.id(), vec![]);
}
}
for reg in x86_64::CALLER_SAVED_FPRS.iter() {
if !defines.contains_key(&reg.id()) {
defines.insert(reg.id(), vec![]);
}
for d in def_vec {
defines.insert(d.id(), vec![]);
}
self.add_asm_inst_internal(
@@ -3291,13 +3285,14 @@ impl CodeGenerator for ASMCodeGen {
callsite: String,
func: MuName,
pe: Option<MuName>,
args: Vec<P<Value>>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
is_native: bool
) -> ValueLocation {
if is_native {
trace!("emit: call /*C*/ {}({:?})", func, args);
trace!("emit: call /*C*/ {}({:?})", func, uses);
} else {
trace!("emit: call {}({:?})", func, args);
trace!("emit: call {}({:?})", func, uses);
}
let func = if is_native {
@@ -3312,7 +3307,7 @@ impl CodeGenerator for ASMCodeGen {
format!("call {}@PLT", func)
};
self.add_asm_call(asm, pe, args, None);
self.add_asm_call(asm, pe, uses, defs, None);
self.add_asm_global_label(symbol(mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3323,14 +3318,15 @@ impl CodeGenerator for ASMCodeGen {
callsite: String,
func: &P<Value>,
pe: Option<MuName>,
args: Vec<P<Value>>
uses: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation {
trace!("emit: call {}", func);
let (reg, id, loc) = self.prepare_reg(func, 6);
let asm = format!("call *{}", reg);
// the call uses the register
self.add_asm_call(asm, pe, args, Some((id, loc)));
self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));
self.add_asm_global_label(symbol(mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3342,7 +3338,8 @@ impl CodeGenerator for ASMCodeGen {
callsite: String,
func: &P<Value>,
pe: Option<MuName>,
args: Vec<P<Value>>
uses: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation {
trace!("emit: call {}", func);
unimplemented!()
......
@@ -19,16 +19,19 @@ pub mod mu {
pub use super::c::*;
}
pub mod swapstack {
pub use super::c::compute_arguments;
pub use super::c::compute_stack_args;
pub use super::c::compute_arguments as compute_return_values;
pub use super::c::compute_stack_args as compute_stack_retvals;
}
pub mod c {
use super::*;
/// computes arguments for the function signature,
/// returns a vector of CallConvResult for each argument type
pub fn compute_arguments(sig: &MuFuncSig) -> Vec<CallConvResult> {
compute_arguments_by_type(&sig.arg_tys)
}
pub fn compute_arguments_by_type(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
pub fn compute_arguments(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
let mut ret = vec![];
let mut gpr_arg_count = 0;
@@ -80,15 +83,30 @@ pub mod c {
ret
}
pub fn compute_stack_args(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
let callconv = compute_arguments(tys);
let mut stack_arg_tys = vec![];
for i in 0..callconv.len() {
let ref cc = callconv[i];
match cc {
&CallConvResult::STACK => stack_arg_tys.push(tys[i].clone()),
_ => {}
}
}
compute_stack_locations(&stack_arg_tys, vm)
}
/// computes the return values for the function signature,
/// returns a vector of CallConvResult for each return type
pub fn compute_return_values(sig: &MuFuncSig) -> Vec<CallConvResult> {
pub fn compute_return_values(tys: &Vec<P<MuType>>) -> Vec<CallConvResult> {
let mut ret = vec![];
let mut gpr_ret_count = 0;
let mut fpr_ret_count = 0;
for ty in sig.ret_tys.iter() {
for ty in tys.iter() {
if RegGroup::get_from_ty(ty) == RegGroup::GPR {
if gpr_ret_count < x86_64::RETURN_GPRS.len() {
let ret_gpr = {
@@ -131,29 +149,29 @@ pub mod c {
ret
}
pub fn compute_stack_args(sig: &MuFuncSig, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
let callconv = compute_arguments(sig);
pub fn compute_stack_retvals(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
let callconv = compute_return_values(tys);
let mut stack_arg_tys = vec![];
let mut stack_ret_val_tys = vec![];
for i in 0..callconv.len() {
let ref cc = callconv[i];
match cc {
&CallConvResult::STACK => stack_arg_tys.push(sig.arg_tys[i].clone()),
&CallConvResult::STACK => stack_ret_val_tys.push(tys[i].clone()),
_ => {}
}
}
compute_stack_args_by_type(&stack_arg_tys, vm)
compute_stack_locations(&stack_ret_val_tys, vm)
}
/// computes the return area on the stack for the function signature,
/// returns a tuple of (size, callcand offset for each stack arguments)
pub fn compute_stack_args_by_type(
stack_arg_tys: &Vec<P<MuType>>,
/// computes the area on the stack for a list of types that need to be put on the stack,
/// returns a tuple of (size, offset of each value on the stack)
pub fn compute_stack_locations(
stack_val_tys: &Vec<P<MuType>>,
vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
let (stack_arg_size, _, stack_arg_offsets) =
BackendType::sequential_layout(stack_arg_tys, vm);
BackendType::sequential_layout(stack_val_tys, vm);
// "The end of the input argument area shall be aligned on a 16
// (32, if __m256 is passed on stack) byte boundary." - x86 ABI
......
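For orientation, the sequential layout that compute_stack_locations delegates to BackendType::sequential_layout can be sketched in a few lines. This is a simplified, hypothetical model (plain (size, alignment) pairs instead of P<MuType>, and the helper names are invented); it only shows the running offset per value and the 16-byte padding of the total area noted in the ABI comment above.

```rust
/// Round `value` up to the next multiple of `align` (align must be a power of two).
fn align_up(value: usize, align: usize) -> usize {
    (value + align - 1) & !(align - 1)
}

/// Lay out values sequentially, returning (total size padded to 16 bytes,
/// offset of each value). `vals` holds hypothetical (size, alignment) pairs.
fn stack_layout(vals: &[(usize, usize)]) -> (usize, Vec<usize>) {
    let mut offsets = Vec::with_capacity(vals.len());
    let mut cursor = 0usize;
    for &(size, align) in vals {
        cursor = align_up(cursor, align); // natural alignment for each value
        offsets.push(cursor);
        cursor += size;
    }
    // "The end of the input argument area shall be aligned on a 16 byte
    // boundary." - x86-64 ABI
    (align_up(cursor, 16), offsets)
}

fn main() {
    // e.g. an i64, an i32 and a 16-byte aligned struct passed on the stack
    let (size, offsets) = stack_layout(&[(8, 8), (4, 4), (16, 16)]);
    println!("total = {} bytes, offsets = {:?}", size, offsets); // 32 bytes, [0, 8, 16]
}
```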
@@ -229,7 +229,8 @@ pub trait CodeGenerator {
callsite: String,
func: MuName,
pe: Option<MuName>,
args: Vec<P<Value>>,
uses: Vec<P<Value>>,
defs: Vec<P<Value>>,
is_native: bool
) -> ValueLocation;
fn emit_call_near_r64(
@@ -237,14 +238,16 @@
callsite: String,
func: &P<Value>,
pe: Option<MuName>,
args: Vec<P<Value>>
uses: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation;
fn emit_call_near_mem64(
&mut self,
callsite: String,
func: &P<Value>,
pe: Option<MuName>,
args: Vec<P<Value>>
uses: Vec<P<Value>>,
defs: Vec<P<Value>>
) -> ValueLocation;
fn emit_ret(&mut self);
......
@@ -33,13 +33,16 @@ use compiler::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
use compiler::backend::x86_64::callconv;
use compiler::backend::x86_64::callconv::CallConvResult;
use compiler::backend::x86_64::CALLEE_SAVED_COUNT;
use compiler::backend::make_block_name;
use compiler::machine_code::CompiledFunction;
use compiler::frame::Frame;
use utils::math;
use utils::{POINTER_SIZE, WORD_SIZE};
use utils::BitSize;
use utils::{BitSize, ByteSize};
use std::collections::HashMap;
use std::collections::LinkedList;
@@ -1895,8 +1898,8 @@ impl<'a> InstructionSelection {
};
let tmp_stack_arg_size = {
use compiler::backend::x86_64::callconv;
let (size, _) = callconv::mu::compute_stack_args(&sig, vm);
use compiler::backend::x86_64::callconv::swapstack;
let (size, _) = swapstack::compute_stack_args(&sig.arg_tys, vm);
self.make_int_const(size as u64, vm)
};
@@ -1956,7 +1959,7 @@ impl<'a> InstructionSelection {
// 2. the register arguments will be pushed to current SP, the start
// function will consume them.
{
use compiler::backend::x86_64::callconv;
use compiler::backend::x86_64::callconv::swapstack;
use compiler::backend::x86_64::callconv::CallConvResult;
use compiler::backend::x86_64::{ARGUMENT_GPRS, ARGUMENT_FPRS};
@@ -1965,7 +1968,7 @@ impl<'a> InstructionSelection {
// compute call convention
let arg_tys = arg_values.iter().map(|x| x.ty.clone()).collect();
let callconv = callconv::mu::compute_arguments_by_type(&arg_tys);
let callconv = swapstack::compute_arguments(&arg_tys);
let mut gpr_args = vec![];
let mut fpr_args = vec![];
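The classification that compute_arguments performs here can be sketched roughly as follows. This is a simplified, hypothetical model (the Ty enum, the register-count constants and the enum shape are illustrative only); the real function works on P<MuType>, consults RegGroup::get_from_ty, and also handles aggregates that must go on the stack.

```rust
// Simplified stand-in types; the real code works on P<MuType> and RegGroup.
#[derive(Clone, Copy)]
enum Ty { Int, Float }

#[derive(Debug)]
enum CallConvResult {
    Gpr(usize), // index into the argument GPRs
    Fpr(usize), // index into the argument FPRs
    Stack,      // passed on the stack
}

const ARGUMENT_GPR_COUNT: usize = 6; // rdi, rsi, rdx, rcx, r8, r9
const ARGUMENT_FPR_COUNT: usize = 8; // xmm0 - xmm7

fn compute_arguments(tys: &[Ty]) -> Vec<CallConvResult> {
    let (mut gpr, mut fpr) = (0usize, 0usize);
    tys.iter()
        .map(|ty| match *ty {
            Ty::Int if gpr < ARGUMENT_GPR_COUNT => { gpr += 1; CallConvResult::Gpr(gpr - 1) }
            Ty::Float if fpr < ARGUMENT_FPR_COUNT => { fpr += 1; CallConvResult::Fpr(fpr - 1) }
            _ => CallConvResult::Stack, // out of registers: spill to the stack
        })
        .collect()
}
```

With six argument GPRs, for example, a call with eight integer arguments would place the first six in registers and the last two on the stack, which is what the stack-argument path then has to handle.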
@@ -2080,6 +2083,65 @@ impl<'a> InstructionSelection {
);
}
Instruction_::SwapStackExpr {
stack,
is_exception,
ref args
} => {
trace!("instsel on SWAPSTACK_EXPR");
self.emit_swapstack(
is_exception,
false,
&node,
&inst,
stack,
args,
None,
f_content,
f_context,
vm
);
}
Instruction_::SwapStackExc {
stack,
is_exception,
ref args,
ref resume
} => {
trace!("instsel on SWAPSTACK_EXC");
self.emit_swapstack(
is_exception,
false,
&node,
&inst,
stack,
args,
Some(resume),
f_content,
f_context,
vm
);
}
Instruction_::SwapStackKill {
stack,
is_exception,
ref args
} => {
trace!("instsel on SWAPSTACK_KILL");
self.emit_swapstack(
is_exception,
true,
&node,
&inst,
stack,
args,
None,
f_content,
f_context,
vm
);
}
Instruction_::PrintHex(index) => {
trace!("instsel on PRINTHEX");
@@ -2180,6 +2242,106 @@ impl<'a> InstructionSelection {
})
}
/// makes a symbolic memory operand for global values
fn make_memory_symbolic_global(
&mut self,
name: MuName,
ty: P<MuType>,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, true, false, f_context, vm)
}
/// makes a symbolic memory operand for native values
fn make_memory_symbolic_native(
&mut self,
name: MuName,
ty: P<MuType>,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, false, true, f_context, vm)
}
/// makes a symbolic memory operand for a normal value (not global, not native)
fn make_memory_symbolic_normal(
&mut self,
name: MuName,
ty: P<MuType>,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, false, false, f_context, vm)
}
/// makes a symbolic memory operand
fn make_memory_symbolic(
&mut self,
name: MuName,
ty: P<MuType>,
is_global: bool,
is_native: bool,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
if cfg!(feature = "sel4-rumprun") {
// Same as Linux:
// for a(%RIP), we need to load its address from a@GOTPCREL(%RIP)
// then load from the address.
// asm_backend will emit a@GOTPCREL(%RIP) for a(%RIP)
let got_loc = P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ADDRESS_TYPE.clone(),
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: name,
is_global: is_global,
is_native: is_native
})
});
// mov (got_loc) -> actual_loc
let actual_loc = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_move_value_to_value(&actual_loc, &got_loc);
self.make_memory_op_base_offset(&actual_loc, 0, ty, vm)
} else if cfg!(target_os = "macos") {
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty,
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: name,
is_global: is_global,
is_native: is_native
})
})
} else if cfg!(target_os = "linux") {
// for a(%RIP), we need to load its address from a@GOTPCREL(%RIP)
// then load from the address.
// asm_backend will emit a@GOTPCREL(%RIP) for a(%RIP)
let got_loc = P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ADDRESS_TYPE.clone(),
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: name,
is_global: is_global,
is_native: is_native
})
});
// mov (got_loc) -> actual_loc
let actual_loc = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.emit_move_value_to_value(&actual_loc, &got_loc);
self.make_memory_op_base_offset(&actual_loc, 0, ty, vm)
} else {
panic!("unsupported OS")
}
}
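A condensed view of the addressing decision made by make_memory_symbolic, with the resulting assembly spelled out as strings. The helper below is hypothetical and purely illustrative; the real code builds MemoryLocation::Symbolic values and relies on the asm backend to emit the @GOTPCREL form.

```rust
// On macOS a global can be addressed directly as `sym(%rip)`; on Linux and the
// sel4-rumprun target its address is first loaded from the GOT via
// `sym@GOTPCREL(%rip)` and then dereferenced.
fn symbolic_load(label: &str, target_os: &str) -> Vec<String> {
    match target_os {
        "macos" => vec![format!("mov {}(%rip), %rax", label)],
        "linux" | "sel4-rumprun" => vec![
            format!("mov {}@GOTPCREL(%rip), %rax", label), // address of the global
            String::from("mov (%rax), %rax"),              // the value itself
        ],
        _ => panic!("unsupported OS"),
    }
}
```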
/// makes a memory operand P<Value> from MemoryLocation
fn make_memory_from_location(&mut self, loc: MemoryLocation, vm: &VM) -> P<Value> {
P(Value {
@@ -3777,17 +3939,40 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> (usize, Vec<P<Value>>) {
use compiler::backend::x86_64::callconv;
use compiler::backend::x86_64::callconv::CallConvResult;
let callconv = {
match conv {
CallConvention::Mu => callconv::mu::compute_arguments(sig),
CallConvention::Foreign(ForeignFFI::C) => callconv::c::compute_arguments(sig)
CallConvention::Mu => callconv::mu::compute_arguments(&sig.arg_tys),
CallConvention::Foreign(ForeignFFI::C) => {
callconv::c::compute_arguments(&sig.arg_tys)
}
}
};
assert!(callconv.len() == args.len());
let (reg_args, stack_args) =
self.emit_precall_convention_regs_only(args, &callconv, f_context, vm);
if !stack_args.is_empty() {
// store stack arguments
let size = self.emit_store_stack_values(&stack_args, None, conv, vm);
// offset RSP
self.backend.emit_sub_r_imm(&x86_64::RSP, size as i32);
(size, reg_args)
} else {
(0, reg_args)
}
}
/// emits calling convention to pass argument registers before a call instruction
/// returns a tuple of (machine registers used, pass-by-stack arguments)
fn emit_precall_convention_regs_only(
&mut self,
args: &Vec<P<Value>>,
callconv: &Vec<CallConvResult>,
f_context: &mut FunctionContext,
vm: &VM
) -> (Vec<P<Value>>, Vec<P<Value>>) {
let mut stack_args = vec![];
let mut reg_args = vec![];
@@ -3845,43 +4030,58 @@ impl<'a> InstructionSelection {
}
}
if !stack_args.is_empty() {
use compiler::backend::x86_64::callconv;
(reg_args, stack_args)
}
let stack_arg_tys = stack_args.iter().map(|x| x.ty.clone()).collect();
let (stack_arg_size_with_padding, stack_arg_offsets) = match conv {
CallConvention::Mu => callconv::mu::compute_stack_args_by_type(&stack_arg_tys, vm),
CallConvention::Foreign(ForeignFFI::C) => {
callconv::c::compute_stack_args_by_type(&stack_arg_tys, vm)
}
};
/// emits code that stores values to the stack, returns the space required on the stack
/// * if base is None, save values to RSP (starting from RSP-stack_val_size),
/// growing upwards (to higher stack address)
/// * if base is Some, save values to base, growing upwards
fn emit_store_stack_values(
&mut self,
stack_vals: &Vec<P<Value>>,
base: Option<(&P<Value>, i32)>,
conv: CallConvention,
vm: &VM
) -> ByteSize {
use compiler::backend::x86_64::callconv;
// now, we just put all the args on the stack
{
if stack_arg_size_with_padding != 0 {
let mut index = 0;
let stack_arg_tys = stack_vals.iter().map(|x| x.ty.clone()).collect();
let (stack_arg_size_with_padding, stack_arg_offsets) = match conv {
CallConvention::Mu => callconv::mu::compute_stack_locations(&stack_arg_tys, vm),
CallConvention::Foreign(ForeignFFI::C) => {
callconv::c::compute_stack_locations(&stack_arg_tys, vm)
}
};
let rsp_offset_before_call = -(stack_arg_size_with_padding as i32);
// now, we just put all the args on the stack
{
if stack_arg_size_with_padding != 0 {
let mut index = 0;
let rsp_offset_before_call = -(stack_arg_size_with_padding as i32);
for arg in stack_args {
for arg in stack_vals {
if let Some((base, offset)) = base {
self.emit_store_base_offset(
base,
offset + (stack_arg_offsets[index]) as i32,
&arg,
vm
);
} else {
self.emit_store_base_offset(
&x86_64::RSP,
rsp_offset_before_call + (stack_arg_offsets[index]) as i32,
rsp_offset_before_call + (stack_arg_offsets[index] as i32),
&arg,
vm
);
index += 1;
}
self.backend
.emit_sub_r_imm(&x86_64::RSP, stack_arg_size_with_padding as i32);
index += 1;
}
}
(stack_arg_size_with_padding, reg_args)
} else {
(0, reg_args)
}
stack_arg_size_with_padding
}
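The address arithmetic that emit_store_stack_values performs can be summarised as follows. The helper below is hypothetical and only computes store offsets; the real method emits the stores through emit_store_base_offset, using the offsets returned by compute_stack_locations.

```rust
/// Offsets at which each stack value is stored, relative to the chosen base.
/// * `base_offset` is `Some(off)` when storing relative to an explicit base
///   register, `None` when storing below the current RSP.
/// * `total_size` and `value_offsets` come from compute_stack_locations.
fn stack_store_offsets(
    base_offset: Option<i32>,
    total_size: i32,
    value_offsets: &[i32],
) -> Vec<i32> {
    match base_offset {
        // store at base + off + value_offset, growing towards higher addresses
        Some(off) => value_offsets.iter().map(|o| off + *o).collect(),
        // store starting at RSP - total_size, growing towards higher addresses
        None => value_offsets.iter().map(|o| *o - total_size).collect(),
    }
}
```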
/// emits calling convention after a call instruction
@@ -3896,18 +4096,14 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> Vec<P<Value>> {
use compiler::backend::x86_64::callconv;
use compiler::backend::x86_64::callconv::CallConvResult;
let callconv = {
match conv {
CallConvention::Mu => callconv::mu::compute_return_values(sig),
CallConvention::Foreign(ForeignFFI::C) => callconv::c::compute_return_values(sig)
CallConvention::Mu => callconv::mu::compute_return_values(&sig.ret_tys),
CallConvention::Foreign(ForeignFFI::C) => {
callconv::c::compute_return_values(&sig.ret_tys)
}
}
};
if rets.is_some() {
assert!(callconv.len() == rets.as_ref().unwrap().len());