Commit 36b2a71c authored by Isaac Oscar Gariano

Update exception handling to fix bugs (and make new ones!)

parent 031f7018
......@@ -34,7 +34,7 @@ gcc = "*"
ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
libc="*"
field-offset = "*"
libloading = "*"
......
......@@ -29,6 +29,7 @@ rm -rf $MU_ZEBU/tests/test_jit/emit
#cargo clean
cargo test --release --no-run --color=always 2>&1 | tee build_out.txt
$(exit ${PIPESTATUS[0]}) # this command will exit the shell but only if the above cargo test failed
/usr/bin/time -f "finished in %e secs" -a -o cargo_test_out.txt ./test-release --color=always 2>/dev/null | tee cargo_test_out.txt
......@@ -53,4 +54,4 @@ else
git -C ./RPySOM submodule update
fi
pytest test_*.py -v --color=yes 2>&1 | tee $MU_ZEBU/pytest_out.txt
\ No newline at end of file
pytest test_*.py -v --color=yes 2>&1 | tee $MU_ZEBU/pytest_out.txt
......@@ -25,4 +25,4 @@ utils = {path = "../utils"}
lazy_static = "*"
log = "*"
simple_logger = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
......@@ -131,7 +131,17 @@ pub struct MuFunctionVersion {
pub block_trace: Option<Vec<MuID>> // only available after Trace Generation Pass
}
rodal_struct!(Callsite{name, exception_destination, stack_arg_size});
pub struct Callsite {
pub name: MuName,
pub exception_destination: Option<MuName>,
pub stack_arg_size: usize,
}
impl Callsite {
pub fn new(name: MuName, exception_destination: Option<MuName>, stack_arg_size: usize)->Callsite {
Callsite{name: name, exception_destination: exception_destination, stack_arg_size: stack_arg_size}
}
}
impl fmt::Display for MuFunctionVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "FuncVer {} of Func #{}", self.hdr, self.func_id)
......
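
For orientation, here is a minimal sketch of how the new Callsite record is meant to be used. The values and the surrounding variables (vm, current_fv_id) are illustrative, and MuName is assumed to be a plain String at this point in the codebase; the registration call mirrors the add_exception_callsite calls further down in this diff.

// Hypothetical registration of a callsite that has a catch block and
// 16 bytes of stack-passed arguments.
let cs = Callsite::new(
    String::from("callsite_42"),           // name of the callsite label
    Some(String::from("catch_block_42")),  // exception destination, or None if there is no catch block
    16                                     // stack_arg_size in bytes
);
vm.add_exception_callsite(cs, current_fv_id);
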
......@@ -61,7 +61,7 @@ pub struct InstructionSelection {
// Technically this is a map in that each Key is unique, but we will never try and add duplicate
// keys, or look things up, so a list of pairs is faster than a Map.
current_callsites: LinkedList<(MuName, MuID)>,
current_callsites: LinkedList<(MuName, MuID, usize)>,
// key: block id, val: block location
current_exn_blocks: HashMap<MuID, MuName>,
current_xr_value: Option<P<Value>>, // A temporary that holds the saved XR value (if needed)
......@@ -589,24 +589,79 @@ impl <'a> InstructionSelection {
}
} else {
self.backend.emit_fcvtzu(&tmp_res, &tmp_op);
// We have to emit code to handle the case when the real result
// overflows to_ty_size, but not to_ty_reg_size
let to_ty_reg_size = check_op_len(&tmp_res.ty); // The size of the aarch64 register
if to_ty_size != to_ty_reg_size {
// Compare the bits of the result after the lower
// to_ty_size bits
self.backend.emit_tst_imm(&tmp_res, bits_ones(to_ty_reg_size-to_ty_size) << to_ty_size);
// If the above condition is true, then an overflow occurred
// So set tmp_res to !0 (i.e. all ones, the maximum value)
self.backend.emit_csinv(&tmp_res, &tmp_res, &get_alias_for_length(XZR.id(), from_ty_size), "EQ");
}
}
},
op::ConvOp::FPTOSI => {
if to_ty_size == 128 {
if from_ty_size == 64 {
self.emit_runtime_entry(&entrypoints::FPTOSI_DOUBLE_I128,
vec![tmp_op.clone()],
Some(vec![tmp_res.clone()]),
Some(node), f_context, vm);
self.emit_runtime_entry(&entrypoints::FPTOSI_DOUBLE_I128, vec![tmp_op.clone()],
Some(vec![tmp_res.clone()]), Some(node), f_context, vm);
} else {
self.emit_runtime_entry(&entrypoints::FPTOSI_FLOAT_I128,
vec![tmp_op.clone()],
Some(vec![tmp_res.clone()]),
Some(node), f_context, vm);
self.emit_runtime_entry(&entrypoints::FPTOSI_FLOAT_I128, vec![tmp_op.clone()],
Some(vec![tmp_res.clone()]), Some(node), f_context, vm);
}
} else {
self.backend.emit_fcvtzs(&tmp_res, &tmp_op);
// TODO This code is horrible and inefficient due to branches and duplication
// is there a better way?
// We have to emit code to handle the case when the real result
// overflows to_ty_size, but not to_ty_reg_size
let to_ty_reg_size = check_op_len(&tmp_res.ty); // The size of the aarch64 register
if to_ty_size != to_ty_reg_size {
let blk_positive = format!("{}_positive", node.id());
let blk_negative = format!("{}_negative", node.id());
let blk_end = format!("{}_end", node.id());
let tmp = make_temporary(f_context, to_ty.clone(), vm);
self.backend.emit_tbnz(&tmp_res, (to_ty_size - 1) as u8, blk_negative.clone());
self.finish_block();
self.start_block(blk_positive.clone(), &vec![]);
{
// check to see if the higher bits are the same as the
// sign bit (which is 0), if they're not there's an overflow
self.backend.emit_tst_imm(&tmp_res, bits_ones(to_ty_reg_size - to_ty_size) << to_ty_size);
self.backend.emit_mov_imm(&tmp, bits_ones(to_ty_size - 1));
// if the above test fails (i.e. results in zero)
// then set tmp_res to tmp
self.backend.emit_csel(&tmp_res, &tmp, &tmp_res, "EQ");
self.backend.emit_b(blk_end.clone());
self.finish_block();
}
self.start_block(blk_negative.clone(), &vec![]);
{
self.backend.emit_mvn(&tmp, &tmp_res);
// check to see if the higher bits of tmp are the same as the
// sign bit (which is 1), if they're not there's an overflow
self.backend.emit_tst_imm(&tmp_res, bits_ones(to_ty_reg_size - to_ty_size) << to_ty_size);
// Set just the sign bit (this is the smallest representable signed number)
self.backend.emit_mov_imm(&tmp, 1 << to_ty_size);
// if the above test fails (i.e. results in zero), then set tmp_res to tmp
self.backend.emit_csel(&tmp_res, &tmp, &tmp_res, "EQ");
self.finish_block();
}
self.start_block(blk_end.clone(), &vec![]);
}
}
},
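
The saturation logic above is easier to read as plain Rust. The following is only a model of what the emitted aarch64 instructions do when the destination type is narrower than the register it lives in (fcvtzu/fcvtzs already saturate to the register width, so only the narrower-type case needs extra code); function and helper names are illustrative, not part of the compiler.

// Model of the FPTOUI fix-up: any bit above the lower to_ty_size bits of the
// converted value means the float was too large for the destination type,
// so clamp to the type's maximum (the emitted csinv writes all ones).
fn fptoui_clamp(converted: u64, to_ty_size: u32, to_ty_reg_size: u32) -> u64 {
    let ones = |n: u32| if n >= 64 { u64::MAX } else { (1u64 << n) - 1 };
    if to_ty_size != to_ty_reg_size
        && (converted & (ones(to_ty_reg_size - to_ty_size) << to_ty_size)) != 0 {
        ones(to_ty_size) // maximum unsigned value of the destination type
    } else {
        converted
    }
}

// Model of the FPTOSI fix-up: the bits above the signed result must all equal
// the sign bit; if they do not, clamp to the type's maximum or minimum, which
// is the intent of the two tst/csel blocks on the positive and negative paths.
fn fptosi_clamp(converted: i64, to_ty_size: u32, to_ty_reg_size: u32) -> i64 {
    if to_ty_size == to_ty_reg_size {
        return converted;
    }
    let min = -(1i64 << (to_ty_size - 1));
    let max = (1i64 << (to_ty_size - 1)) - 1;
    if converted < min { min } else if converted > max { max } else { converted }
}
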
......@@ -857,7 +912,6 @@ impl <'a> InstructionSelection {
let res_value = self.get_result_value(node, 0);
let res_success = self.get_result_value(node, 1);
let blk_cmpxchg_start = format!("{}_cmpxchg_start", node.id());
let blk_cmpxchg_failed = format!("{}_cmpxchg_failed", node.id());
let blk_cmpxchg_succeded = format!("{}_cmpxchg_succeded", node.id());
......@@ -1533,13 +1587,13 @@ impl <'a> InstructionSelection {
if output_status {
emit_zext(self.backend.as_mut(), &reg_op1);
if n == 1 {
// adds_ext doesn't support extending 1-bit numbers
emit_zext(self.backend.as_mut(), &reg_op2);
self.backend.emit_adds(&res, &reg_op1, &reg_op2);
} else {
if n == 8 || n == 16 || n == 32 || n == 64 {
// Emit an adds that zero extends op2
self.backend.emit_adds_ext(&res, &reg_op1, &reg_op2, false, 0);
} else {
// adds_ext doesn't support extending other sizes
emit_zext(self.backend.as_mut(), &reg_op2);
self.backend.emit_adds(&res, &reg_op1, &reg_op2);
}
if status.flag_v {
......@@ -1707,13 +1761,13 @@ impl <'a> InstructionSelection {
emit_oext(self.backend.as_mut(), &reg_op2);
self.backend.emit_subs(&res, &reg_op1, &reg_op2);
} else if n == 1 {
} else if n == 8 || n == 16 || n == 32 || n == 64 {
// Emit a subs that zero extends op2
self.backend.emit_subs_ext(&res, &reg_op1, &reg_op2, false, 0);
} else {
// if the carry flag isn't being computed, just zero extend op2
emit_zext(self.backend.as_mut(), &reg_op2);
self.backend.emit_subs(&res, &reg_op1, &reg_op2);
} else {
// Emit a subs that zero extends op2
self.backend.emit_subs_ext(&res, &reg_op1, &reg_op2, false, 0);
}
......@@ -1993,27 +2047,38 @@ impl <'a> InstructionSelection {
emit_zext(self.backend.as_mut(), &reg_op2);
if status.flag_c || status.flag_v {
if n < 32 {
if n <= 16 {
// A normal multiply will give the correct upper 'n' bits
self.backend.emit_mul(&res, &reg_op1, &reg_op2);
// Test the upper 'n' bits of the result
self.backend.emit_tst_imm(&res, (bits_ones(n) << n));
} else if n == 32 {
self.backend.emit_tst_imm(&res, bits_ones(n) << n);
} else if n <= 32 {
// the 64-bit register version of res
let res_64 = cast_value(&res, &UINT64_TYPE);
// Compute the full 64-bit product of reg_op1 and reg_op2
self.backend.emit_umull(&res_64, &reg_op1, &reg_op2);
// Test the upper n bits of the result
self.backend.emit_tst_imm(&res, 0xFFFFFFFF00000000);
} else if n <= 64 {
self.backend.emit_tst_imm(&res, bits_ones(n) << n);
} else if n < 64 {
// Compute the full 2n-bit product
let tmp_upper = make_temporary(f_context, UINT64_TYPE.clone(), vm);
// res = the lower 64-bits of the product
self.backend.emit_mul(&res, &reg_op1, &reg_op2);
// tmp_upper = the upper (2n-64) bits of the product
self.backend.emit_umulh(&tmp_upper, &reg_op1, &reg_op2);
// Get the upper part of the product
// (i.e. set tmp_upper to be the full 128-bit product right shifted by n)
self.backend.emit_extr(&tmp_upper, &tmp_upper, &res, n as u8);
// Test the lower n bits of tmp_upper (i.e. the upper n bits
// of the 2n-bit true product)
self.backend.emit_tst_imm(&tmp_upper, bits_ones(n));
} else if n == 64 {
// Compute the upper 64-bits of the true product
self.backend.emit_umulh(&res, &reg_op1, &reg_op2);
// Test all 64 bits of res, equivalent to TST res, 0xFFFFFFFFFFFFFFFF
if n == 64 {
self.backend.emit_cmp_imm(&res, 0, false);
} else {
self.backend.emit_tst_imm(&res, (bits_ones(n - 32)));
}
self.backend.emit_cmp_imm(&res, 0, false);
// Get the lower 64-bits of the true product
self.backend.emit_mul(&res, &reg_op1, &reg_op2);
} else {
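
All of the branches above are doing the same thing at different widths: widen the multiply and test whether anything lands above the lower n bits of the true product. A plain-Rust model of that check (operands are assumed to already be zero-extended n-bit values, as emit_zext guarantees above; the function name is illustrative):

// Model of the unsigned multiply overflow test used for the C/V status flags.
fn umul_overflows(a: u64, b: u64, n: u32) -> bool {
    assert!(n >= 1 && n <= 64);
    // Stands in for mul / umull / mul+umulh+extr, depending on n.
    let full = (a as u128) * (b as u128);
    // Overflow iff the true product does not fit in n bits, i.e. the bits at
    // and above position n are non-zero (for n == 64 this is the umulh result
    // compared against zero).
    (full >> n) != 0
}
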
......@@ -3282,7 +3347,7 @@ impl <'a> InstructionSelection {
self.backend.emit_bl(callsite.clone(), func_name, None); // assume ccall won't throw an exception
// TODO: What if there's an exception block?
self.current_callsites.push_back((callsite, 0));
self.current_callsites.push_back((callsite, 0, stack_arg_size));
// record exception block (CCall may have an exception block)
if cur_node.is_some() {
......@@ -3449,9 +3514,9 @@ impl <'a> InstructionSelection {
let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
let target_block = exn_dest.target;
self.current_callsites.push_back((callsite.to_relocatable(), target_block));
self.current_callsites.push_back((callsite.to_relocatable(), target_block, stack_arg_size));
} else {
self.current_callsites.push_back((callsite.to_relocatable(), 0));
self.current_callsites.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
// deal with ret vals
......@@ -4303,6 +4368,7 @@ impl <'a> InstructionSelection {
self.backend.end_block(cur_block.clone());
}
// TODO: Do we need live_in?
fn start_block(&mut self, block: String, live_in: &Vec<P<Value>>) {
self.current_block = Some(block.clone());
self.backend.start_block(block.clone());
......@@ -4428,14 +4494,14 @@ impl CompilerPass for InstructionSelection {
None => panic!("no current_frame for function {} that is being compiled", func_name)
};
for &(ref callsite, block_id) in self.current_callsites.iter() {
for &(ref callsite, block_id, stack_arg_size) in self.current_callsites.iter() {
let block_loc = if block_id == 0 {
String::new()
None
} else {
self.current_exn_blocks.get(&block_id).unwrap().clone()
Some(self.current_exn_blocks.get(&block_id).unwrap().clone())
};
vm.add_exception_callsite(callsite.clone(), block_loc, self.current_fv_id);
vm.add_exception_callsite(Callsite::new(callsite.clone(), block_loc, stack_arg_size), self.current_fv_id);
}
let compiled_func = CompiledFunction::new(func.func_id, func.id(), mc,
......
......@@ -793,8 +793,8 @@ pub fn get_return_address(frame_pointer: Address) -> Address {
// Gets the stack pointer before the current frame was created
#[inline(always)]
pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
frame_pointer.plus(16)
pub fn get_previous_stack_pointer(frame_pointer: Address, stack_arg_size: usize) -> Address {
frame_pointer.plus(16 + stack_arg_size)
}
#[inline(always)]
......
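
The extra parameter makes the assumed frame layout explicit. Roughly (an assumption based on this code, for both the aarch64 and x86-64 versions changed in this commit): the frame pointer points at the saved frame pointer, the return address sits 8 bytes above it, and any stack-passed arguments sit above that, so the caller's stack pointer just before the call was:

// Rough model of the layout get_previous_stack_pointer relies on:
//   frame_pointer + 0                    saved frame pointer
//   frame_pointer + 8                    return address
//   frame_pointer + 16                   first stack-passed argument (if any)
//   frame_pointer + 16 + stack_arg_size  caller's stack pointer before the call
fn previous_stack_pointer_model(frame_pointer: usize, stack_arg_size: usize) -> usize {
    frame_pointer + 16 + stack_arg_size
}
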
......@@ -125,9 +125,10 @@ pub struct InstructionSelection {
current_block_in_ir: Option<MuName>,
current_func_start: Option<ValueLocation>,
// Technically this is a map in that each Key is unique, but we will never try and add duplicate
// keys, or look things up, so a list of pairs is faster than a Map.
// A list of pairs, the first is the name of a callsite the second
current_callsites: LinkedList<(MuName, MuID)>,
// keys, or look things up, so a list of tuples is faster than a Map.
// A list of tuples: the first element is the callsite's name, the next is the id of its exception
// destination block, and the last is the size of the arguments pushed on the stack
current_callsites: LinkedList<(MuName, MuID, usize)>,
// key: block id, val: block location
current_exn_blocks: HashMap<MuID, MuName>,
......@@ -3186,7 +3187,7 @@ impl <'a> InstructionSelection {
self.backend.emit_call_near_rel32(callsite.clone(), func_name, None); // assume ccall won't throw an exception
// TODO: What if there's an exception block?
self.current_callsites.push_back((callsite, 0));
self.current_callsites.push_back((callsite, 0, stack_arg_size));
// record exception block (CCall may have an exception block)
if cur_node.is_some() {
......@@ -3368,7 +3369,7 @@ impl <'a> InstructionSelection {
let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
let target_block = exn_dest.target;
self.current_callsites.push_back((callsite.to_relocatable(), target_block));
self.current_callsites.push_back((callsite.to_relocatable(), target_block, stack_arg_size));
// insert an intermediate block to branch to normal
// the branch is inserted later (because we need to deal with postcall convention)
......@@ -3376,7 +3377,7 @@ impl <'a> InstructionSelection {
let fv_id = self.current_fv_id;
self.start_block(format!("normal_cont_for_call_{}_{}", fv_id, cur_node.id()));
} else {
self.current_callsites.push_back((callsite.to_relocatable(), 0));
self.current_callsites.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
// deal with ret vals, collapse stack etc.
......@@ -4937,14 +4938,14 @@ impl CompilerPass for InstructionSelection {
Some(frame) => frame,
None => panic!("no current_frame for function {} that is being compiled", func_name)
};
for &(ref callsite, block_id) in self.current_callsites.iter() {
for &(ref callsite, block_id, stack_arg_size) in self.current_callsites.iter() {
let block_loc = if block_id == 0 {
String::new()
None
} else {
self.current_exn_blocks.get(&block_id).unwrap().clone()
Some(self.current_exn_blocks.get(&block_id).unwrap().clone())
};
vm.add_exception_callsite(callsite.clone(), block_loc, self.current_fv_id);
vm.add_exception_callsite(Callsite::new(callsite.clone(), block_loc, stack_arg_size), self.current_fv_id);
}
......
......@@ -481,8 +481,8 @@ pub fn get_return_address(frame_pointer: Address) -> Address {
// Gets the stack pointer before the current frame was created
#[inline(always)]
pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
frame_pointer.plus(16)
pub fn get_previous_stack_pointer(frame_pointer: Address, stack_arg_size: usize) -> Address {
frame_pointer.plus(16 + stack_arg_size)
}
#[inline(always)]
......
......@@ -43,7 +43,6 @@ pub struct Frame {
pub allocated: HashMap<MuID, FrameSlot>,
// Mapping from callee-saved id (i.e. the position in the list of callee-saved registers) to the offset from the frame pointer
pub callee_saved: HashMap<isize, isize>,
// (callsite, destination address)
}
impl fmt::Display for Frame {
......
......@@ -18,6 +18,9 @@ use compiler::frame::*;
use runtime::ValueLocation;
use rodal;
use utils::Address;
use std::sync::Arc;
use runtime::resolve_symbol;
use std::ops;
use std::collections::HashMap;
use std::collections::HashSet;
......@@ -87,6 +90,27 @@ impl CompiledFunction {
}
}
// Contains information about a callsite (needed for exception handling)
pub struct CompiledCallsite {
pub exceptional_destination: Option<Address>,
pub stack_args_size: usize,
pub callee_saved_registers: Arc<HashMap<isize, isize>>,
pub function_version: MuID
}
impl CompiledCallsite {
pub fn new(callsite: &Callsite, fv: MuID, callee_saved_registers: Arc<HashMap<isize, isize>>) -> CompiledCallsite {
CompiledCallsite {
exceptional_destination: match &callsite.exception_destination {
&Some(ref name) => Some(resolve_symbol(name.clone())),
&None => None
},
stack_args_size: callsite.stack_arg_size,
callee_saved_registers: callee_saved_registers,
function_version: fv,
}
}
}
use std::any::Any;
pub trait MachineCode {
......
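
A sketch of how CompiledCallsite is presumably populated when the VM builds its runtime table. The variable names and the surrounding loop are illustrative; only Callsite, CompiledCallsite::new and resolve_symbol come from this diff.

// Hypothetical table construction: every callsite of one compiled function
// shares that function's callee-saved map through the Arc.
let callee_saved = Arc::new(compiled_func.frame.callee_saved.clone());
for cs in callsites {
    compiled_callsite_table.insert(
        resolve_symbol(cs.name.clone()),    // key: the callsite's resolved address
        CompiledCallsite::new(&cs, compiled_func.func_ver_id, callee_saved.clone()),
    );
}
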
......@@ -38,4 +38,4 @@ simple_logger = "*"
aligned_alloc = "*"
crossbeam = "*"
field-offset = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
......@@ -15,6 +15,9 @@
use compiler::backend::*;
use utils::Address;
use utils::POINTER_SIZE;
use std::collections::HashMap;
use std::ops::Deref;
use compiler::machine_code::CompiledCallsite;
use runtime::*;
// muentry_throw_exception should call this function,
......@@ -50,30 +53,31 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
let mut previous_frame_pointer = get_previous_frame_pointer(current_frame_pointer); // thrower::fp, the starting point of the previous frame
// acquire lock for exception table
let compiled_exception_table = vm.compiled_exception_table.read().unwrap();
let compiled_callsite_table = vm.compiled_callsite_table.read().unwrap();
loop {
// Lookup the table for the callsite
trace!("Callsite: 0x{:x}", callsite);
trace!("\tprevious_frame_pointer: 0x{:x}", previous_frame_pointer);
trace!("\tcurrent_frame_pointer: 0x{:x}", current_frame_pointer);
let &(catch_address, compiled_func) = {
let table_entry = compiled_exception_table.get(&callsite);
//CompiledCallsite
let callsite_info = {
let table_entry = compiled_callsite_table.get(&callsite);
if table_entry.is_none() {
error!("Cannot find Mu callsite (i.e. we have reached a native frame), either there isn't a catch block to catch the exception or your catch block is above a native function call");
print_backtrace(frame_cursor);
// The above function will not return
print_backtrace(frame_cursor, compiled_callsite_table.deref()); // This function may segfault
panic!("Uncaught Mu Exception");
}
table_entry.unwrap()
};
// Check for a catch block at this callsite (there won't be one on the first iteration of this loop)
if !catch_address.is_zero() {
if callsite_info.exceptional_destination.is_some() {
let catch_address = callsite_info.exceptional_destination.unwrap();
trace!("Found catch block: 0x{:x}", catch_address);
let sp = get_previous_stack_pointer(current_frame_pointer);
let sp = get_previous_stack_pointer(current_frame_pointer, callsite_info.stack_args_size);
trace!("\tRestoring SP to: 0x{:x}", sp);
if cfg!(debug_assertions) {
......@@ -82,15 +86,13 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
}
// Found a catch block, branch to it
drop(compiled_exception_table); // drop the lock first
// drop(compiled_callsite_table); // TODO: Work out how to make the borrow checker let me do this
unsafe { thread::exception_restore(catch_address, frame_cursor.to_ptr(), sp); }
}
// Restore callee saved registers
unsafe {
let ref cf = *compiled_func;
let ref callee_saved = cf.frame.callee_saved;
for (target_offset, source_offset) in callee_saved {
for (target_offset, source_offset) in callsite_info.callee_saved_registers.iter() {
// *(frame_cursor + target_offset) = *(frame_pointer + source_offset)
let val = previous_frame_pointer.offset(*source_offset).load::<Address>();
frame_cursor.offset(*target_offset).store::<Address>(val);
......@@ -121,32 +123,28 @@ fn print_frame(base: Address) {
}
}
// This function may segfault or panic when it reaches the bottom of the stack
// This function may segfault when it reaches the bottom of the stack
// (TODO: Determine where the bottom is without segfaulting)
fn print_backtrace(base: Address) -> !{
fn print_backtrace(base: Address, compiled_callsite_table: &HashMap<Address, CompiledCallsite>) {
error!("BACKTRACE: ");
let cur_thread = thread::MuThread::current();
let ref vm = cur_thread.vm;
// compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>;
let compiled_funcs = vm.compiled_funcs().read().unwrap();
let mut frame_pointer = base;
let mut frame_count = 0;
let compiled_exception_table = vm.compiled_exception_table.read().unwrap();
loop {
let callsite = get_return_address(frame_pointer);
if compiled_exception_table.contains_key(&callsite) {
let &(_, compiled_func_ptr) = compiled_exception_table.get(&callsite).unwrap();
if compiled_callsite_table.contains_key(&callsite) {
let function_version = compiled_callsite_table.get(&callsite).unwrap().function_version;
let compiled_func = compiled_funcs.get(&function_version).unwrap().read().unwrap();
unsafe {
let ref compiled_func = *compiled_func_ptr;
error!("\tframe {:2}: 0x{:x} - {} (fid: #{}, fvid: #{}) at 0x{:x}", frame_count,
compiled_func.start.to_address(), vm.name_of(compiled_func.func_id),
compiled_func.func_id, compiled_func.func_ver_id, callsite);
}
error!("\tframe {:2}: 0x{:x} - {} (fid: #{}, fvid: #{}) at 0x{:x}", frame_count,
compiled_func.start.to_address(), vm.name_of(compiled_func.func_id),
compiled_func.func_id, compiled_func.func_ver_id, callsite);
} else {
let (func_name, func_start) = get_function_info(callsite);
error!("\tframe {:2}: 0x{:x} - {} at 0x{:x}", frame_count, func_start, func_name, callsite);
......@@ -154,7 +152,7 @@ fn print_backtrace(base: Address) -> !{
frame_pointer = get_previous_frame_pointer(frame_pointer);
if frame_pointer.is_zero() {
panic!("Uncaught Mu Exception");
return;
}
frame_count += 1;
}
......
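
The reworked print_backtrace (and the unwinding loop above it) both walk the frame-pointer chain; schematically, leaving out the table lookups and logging:

// Schematic frame walk: each frame's saved FP links to the caller's frame,
// and the return address next to it identifies the callsite in the caller.
let mut frame_pointer = base;
loop {
    let callsite = get_return_address(frame_pointer);       // where the caller will resume
    // ... look callsite up in compiled_callsite_table and report the frame ...
    frame_pointer = get_previous_frame_pointer(frame_pointer);
    if frame_pointer.is_zero() {
        break;                                               // (heuristic) bottom of the stack
    }
}
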
......@@ -394,8 +394,7 @@ impl MuThread {
pub unsafe fn current_thread_as_mu_thread(threadlocal: Address, vm: Arc<VM>) -> bool {
use std::usize;
// build exception table
vm.build_exception_table();
vm.build_callsite_table();
if ! unsafe{muentry_get_thread_local()}.is_zero() {
warn!("current thread has a thread local (has a muthread to it)");
......
......@@ -24,5 +24,5 @@ crate-type = ["rlib"]
memmap = "*"
memsec = "0.1.9"
byteorder = "*"
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.2" }
rodal = { git = "https://gitlab.anu.edu.au/mu/rodal", version = "0.0.3" }
log = "*"
......@@ -23,7 +23,7 @@ use ast::types::*;
use compiler::{Compiler, CompilerPolicy};
use compiler::backend;
use compiler::backend::BackendTypeInfo;
use compiler::machine_code::CompiledFunction;
use compiler::machine_code::{CompiledFunction, CompiledCallsite};
use runtime::thread::*;
use runtime::*;
......@@ -65,9 +65,9 @@ pub struct VM { // The comments are the offset into the struct
// ---partially serialize---
compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>, // +728
// Maps each callsite to a tuple of the corresponding catch block's label (or "")
// and the id of the containing function-version
exception_table: RwLock<HashMap<MuID, HashMap<MuName, MuName>>>, // +784
// Maps each function version to the list of callsites it contains,
// each recording the name of its catch block (if any)
callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>, // +784
is_running: AtomicBool, // +952
// ---do not serialize---
......@@ -78,13 +78,10 @@ pub struct VM { // The comments are the offset into the struct
// however the store may happen before we have an actual address to the func (in AOT scenario)
aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>,
// TODO: What should the function version refer to? (It has to refer to something that has callee saved registers...)
// TODO: probably we should remove the pointer (it's unsafe), that's why we need Sync/Send for VM
// we can make a copy of callee_saved_register location
pub compiled_exception_table: RwLock<HashMap<Address, (Address, *const CompiledFunction)>> // 896
pub compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>> // 896
}
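
Taken together, the commit leaves the VM with two callsite tables: a serialisable, name-based one populated at compile time, and an address-based one rebuilt at runtime (by build_callsite_table) for the unwinder. Condensed from the hunks above:

// Per function version, name-based; part of the rodal-serialised boot image.
callsite_table: RwLock<HashMap<MuID, Vec<Callsite>>>,
// Keyed by resolved callsite address; what throw_exception_internal reads.
pub compiled_callsite_table: RwLock<HashMap<Address, CompiledCallsite>>,
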
unsafe impl rodal::Dump for VM {