
Commit 36b2a71c authored by Isaac Oscar Gariano

Update exception handling to fix bugs (and make new ones!)

parent 031f7018
Pipeline #679 passed with stages in 24 minutes and 56 seconds
@@ -34,7 +34,7 @@ gcc = "*"
ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
-rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
+rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
libc="*"
field-offset = "*"
libloading = "*"
@@ -29,6 +29,7 @@ rm -rf $MU_ZEBU/tests/test_jit/emit
#cargo clean
cargo test --release --no-run --color=always 2>&1 | tee build_out.txt
$(exit ${PIPESTATUS[0]}) # this command will exit the shell but only if the above cargo test failed
/usr/bin/time -f "finished in %e secs" -a -o cargo_test_out.txt ./test-release --color=always 2>/dev/null | tee cargo_test_out.txt
@@ -53,4 +54,4 @@ else
git -C ./RPySOM submodule update
fi
-pytest test_*.py -v --color=yes 2>&1 | tee $MU_ZEBU/pytest_out.txt
\ No newline at end of file
+pytest test_*.py -v --color=yes 2>&1 | tee $MU_ZEBU/pytest_out.txt
@@ -25,4 +25,4 @@ utils = {path = "../utils"}
lazy_static = "*"
log = "*"
simple_logger = "*"
-rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
+rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
@@ -131,7 +131,17 @@ pub struct MuFunctionVersion {
pub block_trace: Option<Vec<MuID>> // only available after Trace Generation Pass
}
rodal_struct!(Callsite{name, exception_destination, stack_arg_size});
pub struct Callsite {
pub name: MuName,
pub exception_destination: Option<MuName>,
pub stack_arg_size: usize,
}
impl Callsite {
pub fn new(name: MuName, exception_destination: Option<MuName>, stack_arg_size: usize)->Callsite {
Callsite{name: name, exception_destination: exception_destination, stack_arg_size: stack_arg_size}
}
}
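The `rodal_struct!` line above registers `Callsite` with the rodal dumper so callsite records can be serialized into the boot image. A minimal usage sketch, assuming `MuName` values `name` and `catch` are already in scope:

    // Sketch only: record a callsite that may unwind to `catch`,
    // with 8 bytes of arguments passed on the stack.
    let cs = Callsite::new(name, Some(catch), 8);
    assert_eq!(cs.stack_arg_size, 8);

(The constructor could also use field-init shorthand: `Callsite { name, exception_destination, stack_arg_size }`.)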
impl fmt::Display for MuFunctionVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FuncVer {} of Func #{}", self.hdr, self.func_id)
@@ -793,8 +793,8 @@ pub fn get_return_address(frame_pointer: Address) -> Address {
// Gets the stack pointer before the current frame was created
#[inline(always)]
-pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
-frame_pointer.plus(16)
+pub fn get_previous_stack_pointer(frame_pointer: Address, stack_arg_size: usize) -> Address {
+frame_pointer.plus(16 + stack_arg_size)
}
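The constant 16 encodes the frame layout assumed here: the saved frame pointer and the return address occupy the 16 bytes directly above the frame pointer, with any stack-passed arguments above them. A sketch of the arithmetic under that assumption:

    // frame_pointer + 0  -> caller's saved frame pointer (8 bytes)
    // frame_pointer + 8  -> return address (8 bytes)
    // frame_pointer + 16 -> first stack-passed argument, if any
    // So the caller's stack pointer, before it pushed the arguments, is:
    let sp = frame_pointer.plus(16 + stack_arg_size);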
#[inline(always)]
@@ -125,9 +125,10 @@ pub struct InstructionSelection {
current_block_in_ir: Option<MuName>,
current_func_start: Option<ValueLocation>,
// Technically this is a map in that each Key is unique, but we will never try and add duplicate
-// keys, or look things up, so a list of pairs is faster than a Map.
-// A list of pairs, the first is the name of a callsite, the second the id of its exception destination
-current_callsites: LinkedList<(MuName, MuID)>,
+// keys, or look things up, so a list of tuples is faster than a Map.
+// A list of tuples: the first element is the callsite's name, the second is the callsite's
+// exception destination (a block id, 0 for none), and the last is the size of the arguments
+// pushed on the stack.
+current_callsites: LinkedList<(MuName, MuID, usize)>,
// key: block id, val: block location
current_exn_blocks: HashMap<MuID, MuName>,
@@ -3186,7 +3187,7 @@ impl <'a> InstructionSelection {
self.backend.emit_call_near_rel32(callsite.clone(), func_name, None); // assume ccall won't throw an exception
// TODO: What if there's an exception block?
-self.current_callsites.push_back((callsite, 0));
+self.current_callsites.push_back((callsite, 0, stack_arg_size));
// record exception block (CCall may have an exception block)
if cur_node.is_some() {
@@ -3368,7 +3369,7 @@ impl <'a> InstructionSelection {
let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
let target_block = exn_dest.target;
-self.current_callsites.push_back((callsite.to_relocatable(), target_block));
+self.current_callsites.push_back((callsite.to_relocatable(), target_block, stack_arg_size));
// insert an intermediate block to branch to normal
// the branch is inserted later (because we need to deal with postcall convention)
@@ -3376,7 +3377,7 @@ impl <'a> InstructionSelection {
let fv_id = self.current_fv_id;
self.start_block(format!("normal_cont_for_call_{}_{}", fv_id, cur_node.id()));
} else {
-self.current_callsites.push_back((callsite.to_relocatable(), 0));
+self.current_callsites.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
// deal with ret vals, collapse stack etc.
@@ -4937,14 +4938,14 @@ impl CompilerPass for InstructionSelection {
Some(frame) => frame,
None => panic!("no current_frame for function {} that is being compiled", func_name)
};
-for &(ref callsite, block_id) in self.current_callsites.iter() {
+for &(ref callsite, block_id, stack_arg_size) in self.current_callsites.iter() {
let block_loc = if block_id == 0 {
-String::new()
+None
} else {
-self.current_exn_blocks.get(&block_id).unwrap().clone()
+Some(self.current_exn_blocks.get(&block_id).unwrap().clone())
};
-vm.add_exception_callsite(callsite.clone(), block_loc, self.current_fv_id);
+vm.add_exception_callsite(Callsite::new(callsite.clone(), block_loc, stack_arg_size), self.current_fv_id);
}
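Note the sentinel: `MuID` 0 never names a real block, so `block_id == 0` encodes "no exception destination" and is turned into `None` here. A more self-documenting alternative (a sketch, not the committed code) would carry an `Option` through the list instead:

    // Hypothetical field type avoiding the 0-sentinel:
    current_callsites: LinkedList<(MuName, Option<MuID>, usize)>,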
@@ -481,8 +481,8 @@ pub fn get_return_address(frame_pointer: Address) -> Address {
// Gets the stack pointer before the current frame was created
#[inline(always)]
-pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
-frame_pointer.plus(16)
+pub fn get_previous_stack_pointer(frame_pointer: Address, stack_arg_size: usize) -> Address {
+frame_pointer.plus(16 + stack_arg_size)
}
#[inline(always)]
@@ -43,7 +43,6 @@ pub struct Frame {
pub allocated: HashMap<MuID, FrameSlot>,
// Mapping from callee-saved id (i.e. the position in the list of callee-saved registers) to the offset from the frame pointer
pub callee_saved: HashMap<isize, isize>,
-// (callsite, destination address)
}
impl fmt::Display for Frame {
@@ -18,6 +18,9 @@ use compiler::frame::*;
use runtime::ValueLocation;
use rodal;
use utils::Address;
use std::sync::Arc;
use runtime::resolve_symbol;
use std::ops;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -87,6 +90,27 @@ impl CompiledFunction {
}
}
// Contains information about a callsite (needed for exception handling)
pub struct CompiledCallsite {
pub exceptional_destination: Option<Address>,
pub stack_args_size: usize,
pub callee_saved_registers: Arc<HashMap<isize, isize>>,
pub function_version: MuID
}
impl CompiledCallsite {
pub fn new(callsite: &Callsite, fv: MuID, callee_saved_registers: Arc<HashMap<isize, isize>>) -> CompiledCallsite {
CompiledCallsite {
exceptional_destination: match &callsite.exception_destination {
&Some(ref name) => Some(resolve_symbol(name.clone())),
&None => None
},
stack_args_size: callsite.stack_arg_size,
callee_saved_registers: callee_saved_registers,
function_version: fv,
}
}
}
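`CompiledCallsite` is the runtime-ready form of `Callsite`: the catch block's name is resolved to a machine address up front, so the unwinder never performs symbol lookups while an exception is in flight. A construction sketch mirroring `build_callsite_table` further down (names taken from that code):

    // One callee-saved map is built per function version and shared
    // between all of its callsites via Arc.
    let callee_saved = Arc::new(compiled_func.frame.callee_saved.clone());
    let cc = CompiledCallsite::new(&cs, compiled_func.func_ver_id, callee_saved.clone());
    compiled_callsite_table.insert(resolve_symbol(cs.name.clone()), cc);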
use std::any::Any;
pub trait MachineCode {
@@ -38,4 +38,4 @@ simple_logger = "*"
aligned_alloc = "*"
crossbeam = "*"
field-offset = "*"
-rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
+rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
@@ -15,6 +15,9 @@
use compiler::backend::*;
use utils::Address;
use utils::POINTER_SIZE;
use std::collections::HashMap;
use std::ops::Deref;
use compiler::machine_code::CompiledCallsite;
use runtime::*;
// muentry_throw_exception should call this function,
@@ -50,30 +53,31 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
let mut previous_frame_pointer = get_previous_frame_pointer(current_frame_pointer); // thrower::fp, the starting point of the previous frame
// acquire lock for the callsite table
-let compiled_exception_table = vm.compiled_exception_table.read().unwrap();
+let compiled_callsite_table = vm.compiled_callsite_table.read().unwrap();
loop {
// Lookup the table for the callsite
trace!("Callsite: 0x{:x}", callsite);
trace!("\tprevious_frame_pointer: 0x{:x}", previous_frame_pointer);
trace!("\tcurrent_frame_pointer: 0x{:x}", current_frame_pointer);
-let &(catch_address, compiled_func) = {
-let table_entry = compiled_exception_table.get(&callsite);
+let callsite_info = { // a &CompiledCallsite
+let table_entry = compiled_callsite_table.get(&callsite);
if table_entry.is_none() {
error!("Cannot find Mu callsite (i.e. we have reached a native frame), either there isn't a catch block to catch the exception or your catch block is above a native function call");
-print_backtrace(frame_cursor);
-// The above function will not return
+print_backtrace(frame_cursor, compiled_callsite_table.deref()); // This function may segfault
+panic!("Uncaught Mu Exception");
}
table_entry.unwrap()
};
// Check for a catch block at this callsite (there won't be one on the first iteration of this loop)
-if !catch_address.is_zero() {
+if callsite_info.exceptional_destination.is_some() {
+let catch_address = callsite_info.exceptional_destination.unwrap();
trace!("Found catch block: 0x{:x}", catch_address);
-let sp = get_previous_stack_pointer(current_frame_pointer);
+let sp = get_previous_stack_pointer(current_frame_pointer, callsite_info.stack_args_size);
trace!("\tRestoring SP to: 0x{:x}", sp);
if cfg!(debug_assertions) {
@@ -82,15 +86,13 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
}
// Found a catch block, branch to it
-drop(compiled_exception_table); // drop the lock first
+// drop(compiled_callsite_table); // TODO: Work out how to make the borrow checker let me do this
unsafe { thread::exception_restore(catch_address, frame_cursor.to_ptr(), sp); }
}
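On the commented-out `drop`: the guard cannot be released because `callsite_info` borrows from the table the guard protects. One borrow-checker-friendly workaround (a sketch, not what this commit does, and assuming `Address` is `Copy`) is to copy the needed fields out before releasing the lock:

    // Copy what the unwinder needs; the guard then has no outstanding borrows.
    let (catch_address, stack_args_size, callee_saved) = {
        let info = compiled_callsite_table.get(&callsite).unwrap();
        (info.exceptional_destination, info.stack_args_size, info.callee_saved_registers.clone())
    };
    drop(compiled_callsite_table); // now legal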
// Restore callee saved registers
unsafe {
-let ref cf = *compiled_func;
-let ref callee_saved = cf.frame.callee_saved;
-for (target_offset, source_offset) in callee_saved {
+for (target_offset, source_offset) in callsite_info.callee_saved_registers.iter() {
// *(frame_cursor + target_offset) = *(frame_pointer + source_offset)
let val = previous_frame_pointer.offset(*source_offset).load::<Address>();
frame_cursor.offset(*target_offset).store::<Address>(val);
@@ -121,32 +123,28 @@ fn print_frame(base: Address) {
}
}
-// This function may segfault or panic when it reaches the bottom of the stack
-fn print_backtrace(base: Address) -> ! {
+// This function may segfault when it reaches the bottom of the stack
+// (TODO: Determine where the bottom is without segfaulting)
+fn print_backtrace(base: Address, compiled_callsite_table: &HashMap<Address, CompiledCallsite>) {
error!("BACKTRACE: ");
let cur_thread = thread::MuThread::current();
let ref vm = cur_thread.vm;
// compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>;
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut frame_pointer = base;
let mut frame_count = 0;
-let compiled_exception_table = vm.compiled_exception_table.read().unwrap();
loop {
let callsite = get_return_address(frame_pointer);
-if compiled_exception_table.contains_key(&callsite) {
-let &(_, compiled_func_ptr) = compiled_exception_table.get(&callsite).unwrap();
+if compiled_callsite_table.contains_key(&callsite) {
+let function_version = compiled_callsite_table.get(&callsite).unwrap().function_version;
+let compiled_func = compiled_funcs.get(&function_version).unwrap().read().unwrap();
-unsafe {
-let ref compiled_func = *compiled_func_ptr;
-error!("\tframe {:2}: 0x{:x} - {} (fid: #{}, fvid: #{}) at 0x{:x}", frame_count,
-compiled_func.start.to_address(), vm.name_of(compiled_func.func_id),
-compiled_func.func_id, compiled_func.func_ver_id, callsite);
-}
+error!("\tframe {:2}: 0x{:x} - {} (fid: #{}, fvid: #{}) at 0x{:x}", frame_count,
+compiled_func.start.to_address(), vm.name_of(compiled_func.func_id),
+compiled_func.func_id, compiled_func.func_ver_id, callsite);
} else {
let (func_name, func_start) = get_function_info(callsite);
error!("\tframe {:2}: 0x{:x} - {} at 0x{:x}", frame_count, func_start, func_name, callsite);
@@ -154,7 +152,7 @@ fn print_backtrace(base: Address) -> !{
frame_pointer = get_previous_frame_pointer(frame_pointer);
if frame_pointer.is_zero() {
panic!("Uncaught Mu Exception");
return;
}
frame_count += 1;
}
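The walk in `print_backtrace` relies on the frame-pointer chain: every frame stores its caller's frame pointer at a fixed offset, and a zero saved frame pointer marks the bottom of the stack. Schematically, using the helpers above (a sketch):

    let mut fp = base;
    while !fp.is_zero() {
        let callsite = get_return_address(fp);   // where this frame returns to
        // ... look `callsite` up in compiled_callsite_table and print a frame line ...
        fp = get_previous_frame_pointer(fp);     // follow the saved-FP chain
    }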
@@ -394,8 +394,7 @@ impl MuThread {
pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
use std::usize;
// build exception table
-vm.build_exception_table();
+vm.build_callsite_table();
if ! unsafe{muentry_get_thread_local()}.is_zero() {
warn!("current thread has a thread local (i.e. a MuThread attached to it)");
@@ -24,5 +24,5 @@ crate-type = ["rlib"]
memmap = "*"
memsec = "0.1.9"
byteorder = "*"
-rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
+rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
log = "*"
@@ -23,7 +23,7 @@ use ast::types::*;
use compiler::{Compiler, CompilerPolicy};
use compiler::backend;
use compiler::backend::BackendTypeInfo;
-use compiler::machine_code::CompiledFunction;
+use compiler::machine_code::{CompiledFunction, CompiledCallsite};
use runtime::thread::*;
use runtime::*;
@@ -65,9 +65,9 @@ pub struct VM { // The comments are the offset into the struct
// ---partially serialize---
compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728
-// Maps each callsite to a tuple of the corresponding catch block's label (or "")
-// and the id of the containing function-version
-exception_table: RwLock<HashMap<MuID, HashMap<MuName, MuName>>>, // +784
+// Maps each function version to the list of callsites it contains,
+// each of which records the name of its catch block (if any)
+callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784
is_running: AtomicBool, // +952
// ---do not serialize---
@@ -78,13 +78,10 @@ pub struct VM { // The comments are the offset into the struct
// however the store may happen before we have an actual address to the func (in AOT scenario)
aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,
-// TODO: What should the function version refer to? (It has to refer to something that has callee saved registers...)
-// TODO: probably we should remove the pointer (it's unsafe), that's why we need Sync/Send for VM
-// we can make a copy of the callee_saved_register locations
-pub compiled_exception_table: RwLock<HashMap<Address, (Address, *const CompiledFunction)>> // 896
+pub compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>> // 896
}
unsafe impl rodal::Dump for VM {
-fn dump<D: ?Sized + rodal::Dumper>(&self, dumper: &mut D) {
+fn dump<D: ? Sized + rodal::Dumper>(&self, dumper: &mut D) {
dumper.debug_record("VM", "dump");
dumper.dump_object(&self.next_id);
@@ -99,7 +96,7 @@ unsafe impl rodal::Dump for VM {
dumper.dump_object(&self.primordial);
dumper.dump_object(&self.vm_options);
dumper.dump_object(&self.compiled_funcs);
-dumper.dump_object(&self.exception_table);
+dumper.dump_object(&self.callsite_table);
// Dump empty maps so that we can safely read and modify them once loaded
dumper.dump_padding(&self.global_locations);
@@ -112,17 +109,14 @@ unsafe impl rodal::Dump for VM {
dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<Address, ValueLocation>::new()));
// Dump an empty hashmap for the other hashmaps
-dumper.dump_padding(&self.compiled_exception_table);
-dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<Address, (Address, *const CompiledFunction)>::new()));
+dumper.dump_padding(&self.compiled_callsite_table);
+dumper.dump_object_here(&RwLock::new(rodal::EmptyHashMap::<Address, CompiledCallsite>::new()));
// This field is actually stored at the end of the struct; the others all have the same alignment so are not reordered
dumper.dump_object(&self.is_running);
}
}
unsafe impl Sync for VM {}
unsafe impl Send for VM {}
use std::u64;
const PENDING_FUNCREF : u64 = u64::MAX;
@@ -174,11 +168,11 @@ impl <'a> VM {
func_vers: RwLock::new(HashMap::new()),
funcs: RwLock::new(HashMap::new()),
compiled_funcs: RwLock::new(HashMap::new()),
-exception_table: RwLock::new(HashMap::new()),
+callsite_table: RwLock::new(HashMap::new()),
primordial: RwLock::new(None),
aot_pending_funcref_store: RwLock::new(HashMap::new()),
-compiled_exception_table: RwLock::new(HashMap::new()),
+compiled_callsite_table: RwLock::new(HashMap::new()),
};
// insert all internal types
@@ -262,16 +256,13 @@ impl <'a> VM {
}
}
-pub fn add_exception_callsite(&self, callsite: MuName, catch: MuName, fv: MuID) {
-let mut table = self.exception_table.write().unwrap();
+pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
+let mut table = self.callsite_table.write().unwrap();
if table.contains_key(&fv) {
-let mut map = table.get_mut(&fv).unwrap();
-map.insert(callsite, catch);
+table.get_mut(&fv).unwrap().push(callsite);
} else {
-let mut new_map = HashMap::new();
-new_map.insert(callsite, catch);
-table.insert(fv, new_map);
+table.insert(fv, vec![callsite]);
};
}
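The contains_key / get_mut / insert sequence above could be collapsed with the standard `HashMap` entry API; a behaviour-preserving sketch:

    pub fn add_exception_callsite(&self, callsite: Callsite, fv: MuID) {
        let mut table = self.callsite_table.write().unwrap();
        table.entry(fv).or_insert_with(Vec::new).push(callsite);
    }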
@@ -311,27 +302,21 @@ impl <'a> VM {
}
// construct the callsite table
-vm.build_exception_table();
+vm.build_callsite_table();
vm
}
-pub fn build_exception_table(&self) {
-let exception_table = self.exception_table.read().unwrap();
+pub fn build_callsite_table(&self) {
+let callsite_table = self.callsite_table.read().unwrap();
let compiled_funcs = self.compiled_funcs.read().unwrap();
-let mut compiled_exception_table = self.compiled_exception_table.write().unwrap();
-for (fv, map) in exception_table.iter() {
-let ref compiled_func = *compiled_funcs.get(fv).unwrap().read().unwrap();
-for (callsite, catch_block) in map.iter() {
-let catch_addr = if catch_block.is_empty() {
-unsafe {Address::zero()}
-} else {
-resolve_symbol(catch_block.clone())
-};
-compiled_exception_table.insert(resolve_symbol(callsite.clone()), (catch_addr, &*compiled_func));
+let mut compiled_callsite_table = self.compiled_callsite_table.write().unwrap();
+for (fv, callsite_list) in callsite_table.iter() {
+let compiled_func = compiled_funcs.get(fv).unwrap().read().unwrap();
+let callee_saved_table = Arc::new(compiled_func.frame.callee_saved.clone());
+for callsite in callsite_list.iter() {
+compiled_callsite_table.insert(resolve_symbol(callsite.name.clone()), CompiledCallsite::new(&callsite, compiled_func.func_ver_id, callee_saved_table.clone()));
}
}
}
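Once built, the table is keyed by the resolved machine address of each callsite, which is exactly what the unwinder reads off the stack. A lookup sketch mirroring `throw_exception_internal` above:

    // `callsite` here is a return address recovered from a stack frame.
    let table = vm.compiled_callsite_table.read().unwrap();
    if let Some(info) = table.get(&callsite) {
        // info.exceptional_destination : Option<Address> (catch block, if any)
        // info.stack_args_size         : bytes to pop when restoring SP
        // info.callee_saved_registers  : Arc<HashMap<isize, isize>>
    }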