GitLab will be patched to the latest stable version on 15 July 2020 at 2.00pm (AEDT) to 2.30pm (AEDT) due to Security Patch Availability. During the update, GitLab and Mattermost services will not be available. If you have any concerns with this, please talk to us at N110 (b) CSIT building.

Commit e1844c8a authored by Isaac Oscar Gariano's avatar Isaac Oscar Gariano

Implemented NEWTHREAD on aarch64

parent ae1e02a5
...@@ -23,7 +23,7 @@ extern crate gcc; ...@@ -23,7 +23,7 @@ extern crate gcc;
fn main() { fn main() {
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_x64_sysv.c"]); gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_x64_sysv.c"]);
gcc::Config::new() gcc::Build::new()
.flag("-O3") .flag("-O3")
.flag("-c") .flag("-c")
.file("src/runtime/runtime_asm_x64_sysv.S") .file("src/runtime/runtime_asm_x64_sysv.S")
...@@ -38,7 +38,7 @@ fn main() { ...@@ -38,7 +38,7 @@ fn main() {
fn main() { fn main() {
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_aarch64_sysv.c"]); gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_aarch64_sysv.c"]);
gcc::Config::new() gcc::Build::new()
.flag("-O3") .flag("-O3")
.flag("-c") .flag("-c")
.file("src/runtime/runtime_asm_aarch64_sysv.S") .file("src/runtime/runtime_asm_aarch64_sysv.S")
...@@ -59,19 +59,19 @@ fn main() { ...@@ -59,19 +59,19 @@ fn main() {
use std::path::Path; use std::path::Path;
let mut compiler_name = String::new(); let mut compiler_name = String::new();
compiler_name.push_str("x86_64-rumprun-netbsd-gcc"); compiler_name.push_str("x86_64-rumprun-netbsd-gcc");
gcc::Config::new() gcc::Build::new()
.flag("-O3") .flag("-O3")
.flag("-c") .flag("-c")
.compiler(Path::new(compiler_name.as_str())) .compiler(Path::new(compiler_name.as_str()))
.file("src/runtime/runtime_x64_sel4_rumprun_sysv.c") .file("src/runtime/runtime_x64_sel4_rumprun_sysv.c")
.compile("libruntime_c.a"); .compile("libruntime_c.a");
gcc::Config::new() gcc::Build::new()
.flag("-O3") .flag("-O3")
.flag("-c") .flag("-c")
.compiler(Path::new(compiler_name.as_str())) .compiler(Path::new(compiler_name.as_str()))
.file("src/runtime/runtime_asm_x64_sel4_rumprun_sysv.S") .file("src/runtime/runtime_asm_x64_sel4_rumprun_sysv.S")
.compile("libruntime_asm.a"); .compile("libruntime_asm.a");
gcc::Config::new() gcc::Build::new()
.flag("-O3") .flag("-O3")
.flag("-c") .flag("-c")
.compiler(Path::new(compiler_name.as_str())) .compiler(Path::new(compiler_name.as_str()))
......
...@@ -92,8 +92,7 @@ impl Instruction { ...@@ -92,8 +92,7 @@ impl Instruction {
NewHybrid(_, _) | NewHybrid(_, _) |
AllocAHybrid(_, _) | AllocAHybrid(_, _) |
NewStack(_) | NewStack(_) |
NewThread(_, _) | NewThread{ .. } |
NewThreadExn(_, _) |
NewFrameCursor(_) | NewFrameCursor(_) |
GetIRef(_) | GetIRef(_) |
GetFieldIRef { .. } | GetFieldIRef { .. } |
...@@ -150,8 +149,7 @@ impl Instruction { ...@@ -150,8 +149,7 @@ impl Instruction {
NewHybrid(_, _) | NewHybrid(_, _) |
AllocAHybrid(_, _) | AllocAHybrid(_, _) |
NewStack(_) | NewStack(_) |
NewThread(_, _) | NewThread{ .. } |
NewThreadExn(_, _) |
NewFrameCursor(_) | NewFrameCursor(_) |
Fence(_) | Fence(_) |
Return(_) | Return(_) |
...@@ -228,8 +226,7 @@ impl Instruction { ...@@ -228,8 +226,7 @@ impl Instruction {
NewHybrid(_, _) | NewHybrid(_, _) |
AllocAHybrid(_, _) | AllocAHybrid(_, _) |
NewStack(_) | NewStack(_) |
NewThread(_, _) | NewThread{ .. } |
NewThreadExn(_, _) |
NewFrameCursor(_) | NewFrameCursor(_) |
GetIRef(_) | GetIRef(_) |
GetFieldIRef { .. } | GetFieldIRef { .. } |
...@@ -301,8 +298,7 @@ impl Instruction { ...@@ -301,8 +298,7 @@ impl Instruction {
NewHybrid(_, _) | NewHybrid(_, _) |
AllocAHybrid(_, _) | AllocAHybrid(_, _) |
NewStack(_) | NewStack(_) |
NewThread(_, _) | NewThread{ .. } |
NewThreadExn(_, _) |
NewFrameCursor(_) | NewFrameCursor(_) |
GetIRef(_) | GetIRef(_) |
GetFieldIRef { .. } | GetFieldIRef { .. } |
...@@ -459,11 +455,12 @@ pub enum Instruction_ { ...@@ -459,11 +455,12 @@ pub enum Instruction_ {
/// create a new Mu thread, yields thread reference /// create a new Mu thread, yields thread reference
/// args: stackref of a Mu stack, a list of arguments /// args: stackref of a Mu stack, a list of arguments
NewThread(OpIndex, Vec<OpIndex>), // stack, args NewThread {
stack: OpIndex,
/// create a new Mu thread, yields thread reference (thread resumes with exceptional value) thread_local: Option<OpIndex>,
/// args: stackref of a Mu stack, an exceptional value is_exception: bool,
NewThreadExn(OpIndex, OpIndex), // stack, exception args: Vec<OpIndex>
},
/// create a frame cursor reference /// create a frame cursor reference
/// args: stackref of a Mu stack /// args: stackref of a Mu stack
...@@ -724,16 +721,16 @@ impl Instruction_ { ...@@ -724,16 +721,16 @@ impl Instruction_ {
&Instruction_::NewHybrid(ref ty, len) => format!("NEWHYBRID {} {}", ty, ops[len]), &Instruction_::NewHybrid(ref ty, len) => format!("NEWHYBRID {} {}", ty, ops[len]),
&Instruction_::AllocAHybrid(ref ty, len) => format!("ALLOCAHYBRID {} {}", ty, ops[len]), &Instruction_::AllocAHybrid(ref ty, len) => format!("ALLOCAHYBRID {} {}", ty, ops[len]),
&Instruction_::NewStack(func) => format!("NEW_STACK {}", ops[func]), &Instruction_::NewStack(func) => format!("NEW_STACK {}", ops[func]),
&Instruction_::NewThread(stack, ref args) => { &Instruction_::NewThread{stack, thread_local, is_exception, ref args} => {
let thread_local = thread_local.map(|t| format!("{}", ops[t])).unwrap_or("NULL".to_string());
format!( format!(
"NEWTHREAD {} PASS_VALUES {}", "SWAPSTACK {} THREADLOCAL({}) {} {}",
ops[stack], ops[stack],
op_vector_str(args, ops) thread_local,
is_exception,
op_vector_str(args, ops),
) )
} }
&Instruction_::NewThreadExn(stack, exn) => {
format!("NEWTHREAD {} THROW_EXC {}", ops[stack], ops[exn])
}
&Instruction_::NewFrameCursor(stack) => format!("NEWFRAMECURSOR {}", ops[stack]), &Instruction_::NewFrameCursor(stack) => format!("NEWFRAMECURSOR {}", ops[stack]),
&Instruction_::GetIRef(reference) => format!("GETIREF {}", ops[reference]), &Instruction_::GetIRef(reference) => format!("GETIREF {}", ops[reference]),
&Instruction_::GetFieldIRef { &Instruction_::GetFieldIRef {
......
...@@ -53,6 +53,7 @@ use std::collections::HashMap; ...@@ -53,6 +53,7 @@ use std::collections::HashMap;
// Number of normal callee saved registers (excluding FP and LR, and SP) // Number of normal callee saved registers (excluding FP and LR, and SP)
pub const CALLEE_SAVED_COUNT: usize = 18; pub const CALLEE_SAVED_COUNT: usize = 18;
pub const ARGUMENT_REG_COUNT: usize = 16;
macro_rules! REGISTER { macro_rules! REGISTER {
($id:expr, $name: expr, $ty: ident) => { ($id:expr, $name: expr, $ty: ident) => {
...@@ -885,18 +886,18 @@ pub fn get_callee_saved_offset(reg: MuID) -> isize { ...@@ -885,18 +886,18 @@ pub fn get_callee_saved_offset(reg: MuID) -> isize {
(id as isize + 1) * (-8) (id as isize + 1) * (-8)
} }
// Returns the callee saved register with the id... // Gets the offset of the argument register when passed on the stack
/*pub fn get_callee_saved_register(offset: isize) -> P<Value> { pub fn get_argument_reg_offset(reg: MuID) -> isize {
debug_assert!(offset <= -8 && (-offset) % 8 == 0); let reg = get_color_for_precolored(reg);
let id = ((offset/-8) - 1) as usize;
if id < CALLEE_SAVED_GPRs.len() { let id = if reg >= FPR_ID_START {
CALLEE_SAVED_GPRs[id].clone() (reg - ARGUMENT_FPRS[0].id()) / 2
} else if id - CALLEE_SAVED_GPRs.len() < CALLEE_SAVED_FPRs.len() {
CALLEE_SAVED_FPRs[id - CALLEE_SAVED_GPRs.len()].clone()
} else { } else {
panic!("There is no callee saved register with id {}", offset) (reg - ARGUMENT_GPRS[0].id()) / 2 + ARGUMENT_FPRS.len()
} };
}*/
(id as isize + 1) * (-8)
}
pub fn is_callee_saved(reg_id: MuID) -> bool { pub fn is_callee_saved(reg_id: MuID) -> bool {
for reg in CALLEE_SAVED_GPRS.iter() { for reg in CALLEE_SAVED_GPRS.iter() {
...@@ -959,7 +960,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize { ...@@ -959,7 +960,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime // runtime
New(_) | NewHybrid(_, _) => 10, New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10, NewStack(_) | NewThread { .. } | NewFrameCursor(_) => 10,
ThreadExit => 10, ThreadExit => 10,
CurrentStack => 10, CurrentStack => 10,
KillStack(_) => 10, KillStack(_) => 10,
...@@ -1526,6 +1527,14 @@ pub fn make_value_int_const(val: u64, vm: &VM) -> P<Value> { ...@@ -1526,6 +1527,14 @@ pub fn make_value_int_const(val: u64, vm: &VM) -> P<Value> {
}) })
} }
pub fn make_value_nullref(vm: &VM) -> P<Value> {
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: REF_VOID_TYPE.clone(),
v: Value_::Constant(Constant::NullRef)
})
}
// Replaces the zero register with a temporary whose value is zero (or returns the original register) // Replaces the zero register with a temporary whose value is zero (or returns the original register)
/* TODO use this function for the following arguments: /* TODO use this function for the following arguments:
......
...@@ -657,7 +657,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize { ...@@ -657,7 +657,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime call // runtime call
New(_) | NewHybrid(_, _) => 10, New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10, NewStack(_) | NewThread { .. } | NewFrameCursor(_) => 10,
ThreadExit => 10, ThreadExit => 10,
CurrentStack => 10, CurrentStack => 10,
KillStack(_) => 10, KillStack(_) => 10,
......
...@@ -466,13 +466,6 @@ fn copy_inline_blocks( ...@@ -466,13 +466,6 @@ fn copy_inline_blocks(
let ref ops = inst.ops; let ref ops = inst.ops;
let ref v = inst.v; let ref v = inst.v;
trace!(
"ISAAC: Inlining [{} -> {}] : {} -> {}",
old_block.name(),
block_name,
inst_name,
hdr.name()
);
match v { match v {
&Instruction_::Return(ref vec) => { &Instruction_::Return(ref vec) => {
// change RET to a branch // change RET to a branch
......
...@@ -56,7 +56,7 @@ fn main() { ...@@ -56,7 +56,7 @@ fn main() {
use std::path::Path; use std::path::Path;
let mut compiler_name = String::new(); let mut compiler_name = String::new();
compiler_name.push_str("x86_64-rumprun-netbsd-gcc"); compiler_name.push_str("x86_64-rumprun-netbsd-gcc");
gcc::Config::new().flag("-O3").flag("-c") gcc::Build::new().flag("-O3").flag("-c")
.compiler(Path::new(compiler_name.as_str())) .compiler(Path::new(compiler_name.as_str()))
.file("src/heap/gc/clib_x64_sel4_rumprun.c") .file("src/heap/gc/clib_x64_sel4_rumprun.c")
.compile("libgc_clib_x64.a"); .compile("libgc_clib_x64.a");
......
...@@ -90,6 +90,30 @@ lazy_static! { ...@@ -90,6 +90,30 @@ lazy_static! {
jit: RwLock::new(None), jit: RwLock::new(None),
}; };
// impl/decl: thread.rs
pub static ref NEW_THREAD_NORMAL: RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
arg_tys: vec![STACKREF_TYPE.clone(), REF_VOID_TYPE.clone()],
ret_tys: vec![THREADREF_TYPE.clone()],
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_new_thread_normal")),
jit: RwLock::new(None),
};
// impl/decl: thread.rs
pub static ref NEW_THREAD_EXCEPTIONAL: RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
arg_tys: vec![STACKREF_TYPE.clone(), REF_VOID_TYPE.clone(), REF_VOID_TYPE.clone()],
ret_tys: vec![THREADREF_TYPE.clone()],
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_new_thread_exceptional")),
jit: RwLock::new(None),
};
// impl/decl: gc/lib.rs // impl/decl: gc/lib.rs
pub static ref ALLOC_FAST : RuntimeEntrypoint = RuntimeEntrypoint { pub static ref ALLOC_FAST : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig { sig: P(MuFuncSig {
......
...@@ -299,14 +299,22 @@ pub extern "C" fn mu_main( ...@@ -299,14 +299,22 @@ pub extern "C" fn mu_main(
}; };
// FIXME: currently assumes no user defined thread local - See Issue #48 // FIXME: currently assumes no user defined thread local - See Issue #48
let thread = thread::MuThread::new_thread_normal( thread::MuThread::new_thread_normal(
stack, stack,
unsafe { Address::zero() }, unsafe { Address::zero() },
args, args,
vm.clone() vm.clone()
); );
thread.join().unwrap(); loop {
let thread = vm.pop_join_handle();
if thread.is_none() {
break;
}
thread.unwrap().join().unwrap();
}
trace!("All threads have exited, quiting...");
} }
} }
......
...@@ -60,10 +60,10 @@ begin_func muthread_start_normal ...@@ -60,10 +60,10 @@ begin_func muthread_start_normal
MOV SP, X0 MOV SP, X0
// Pop the argument registers from the new stack // Pop the argument registers from the new stack
LDP D1, D0 , [SP, #14*8] LDP D1, D0, [SP, #14*8 ]
LDP D3, D2, [SP, #12*8] LDP D3, D2, [SP, #12*8 ]
LDP D5, D4, [SP, #10*8] LDP D5, D4, [SP, #10*8 ]
LDP D7, D6, [SP, #8*8] LDP D7, D6, [SP, #8*8 ]
LDP X1, X0, [SP, #6*8] LDP X1, X0, [SP, #6*8]
LDP X3, X2, [SP, #4*8] LDP X3, X2, [SP, #4*8]
LDP X5, X4, [SP, #2*8] LDP X5, X4, [SP, #2*8]
......
...@@ -316,6 +316,8 @@ pub struct MuThread { ...@@ -316,6 +316,8 @@ pub struct MuThread {
/// a pointer to the virtual machine /// a pointer to the virtual machine
pub vm: Arc<VM> pub vm: Arc<VM>
} }
unsafe impl Sync for MuThread {}
unsafe impl Send for MuThread {}
// a few field offsets the compiler uses // a few field offsets the compiler uses
lazy_static! { lazy_static! {
...@@ -383,6 +385,7 @@ extern "C" { ...@@ -383,6 +385,7 @@ extern "C" {
/// new_sp: stack pointer for the mu stack /// new_sp: stack pointer for the mu stack
/// old_sp_loc: the location to store native stack pointer so we can later swap back /// old_sp_loc: the location to store native stack pointer so we can later swap back
fn muthread_start_normal(new_sp: Address, old_sp_loc: Address); fn muthread_start_normal(new_sp: Address, old_sp_loc: Address);
fn muthread_start_exceptional(exception: Address, new_sp: Address, old_sp_loc: Address);
/// gets base pointer for current frame /// gets base pointer for current frame
pub fn get_current_frame_bp() -> Address; pub fn get_current_frame_bp() -> Address;
...@@ -451,30 +454,34 @@ impl MuThread { ...@@ -451,30 +454,34 @@ impl MuThread {
threadlocal: Address, threadlocal: Address,
vals: Vec<ValueLocation>, vals: Vec<ValueLocation>,
vm: Arc<VM> vm: Arc<VM>
) -> JoinHandle<()> { ) {
// set up arguments on stack // set up arguments on stack
stack.setup_args(vals); stack.setup_args(vals);
let (join_handle, _) = MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, None, vm.clone());
MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm) vm.push_join_handle(join_handle);
} }
/// creates and launches a mu thread, returns a JoinHandle /// creates and launches a mu thread, returns a JoinHandle and address to its MuThread structure
#[no_mangle] fn mu_thread_launch(
pub extern "C" fn mu_thread_launch(
id: MuID, id: MuID,
stack: Box<MuStack>, stack: Box<MuStack>,
user_tls: Address, user_tls: Address,
exception: Option<Address>,
vm: Arc<VM> vm: Arc<VM>
) -> JoinHandle<()> { ) -> (JoinHandle<()>, *mut MuThread) {
let new_sp = stack.sp; let new_sp = stack.sp;
match thread::Builder::new() // The conversions between boxes and ptrs are needed here as a `*mut MuThread` can't be sent between threads
// but a Box can be. Also converting a Box to a ptr consumes it.
let muthread_ptr = Box::into_raw(Box::new(MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)));
let muthread = unsafe {Box::from_raw(muthread_ptr)};
(match thread::Builder::new()
.name(format!("Mu Thread #{}", id)) .name(format!("Mu Thread #{}", id))
.spawn(move || { .spawn(move || {
let mut muthread = MuThread::new(id, mm::new_mutator(), stack, user_tls, vm); let muthread = Box::into_raw(muthread);
// set thread local // set thread local
unsafe { set_thread_local(&mut muthread) }; unsafe { set_thread_local(muthread) };
let addr = unsafe { muentry_get_thread_local() }; let addr = unsafe { muentry_get_thread_local() };
let sp_threadlocal_loc = addr + *NATIVE_SP_LOC_OFFSET; let sp_threadlocal_loc = addr + *NATIVE_SP_LOC_OFFSET;
...@@ -482,14 +489,18 @@ impl MuThread { ...@@ -482,14 +489,18 @@ impl MuThread {
debug!("sp_store: 0x{:x}", sp_threadlocal_loc); debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
unsafe { unsafe {
muthread_start_normal(new_sp, sp_threadlocal_loc); match exception {
Some(e) => muthread_start_exceptional(e, new_sp, sp_threadlocal_loc),
None => muthread_start_normal(new_sp, sp_threadlocal_loc)
} }
debug!("returned to Rust stack. Going to quit"); // Thread finished, delete its data
Box::from_raw(muthread);
}
}) { }) {
Ok(handle) => handle, Ok(handle) => handle,
Err(_) => panic!("failed to create a thread") Err(_) => panic!("failed to create a thread")
} }, muthread_ptr)
} }
/// creates metadata for a Mu thread /// creates metadata for a Mu thread
...@@ -644,3 +655,21 @@ pub unsafe extern "C" fn muentry_kill_stack(stack: *mut MuStack) { ...@@ -644,3 +655,21 @@ pub unsafe extern "C" fn muentry_kill_stack(stack: *mut MuStack) {
// This new box will be destroyed upon returning // This new box will be destroyed upon returning
Box::from_raw(stack); Box::from_raw(stack);
} }
// Creates a new thread
#[no_mangle]
pub unsafe extern "C" fn muentry_new_thread_exceptional(stack: *mut MuStack, thread_local: Address, exception: Address) -> *mut MuThread {
let vm = MuThread::current_mut().vm.clone();
let (join_handle, muthread) = MuThread::mu_thread_launch(vm.next_id(), Box::from_raw(stack), thread_local, Some(exception), vm.clone());
vm.push_join_handle(join_handle);
muthread
}
// Creates a new thread
#[no_mangle]
pub unsafe extern "C" fn muentry_new_thread_normal(stack: *mut MuStack, thread_local: Address) -> *mut MuThread {
let vm = MuThread::current_mut().vm.clone();
let (join_handle, muthread) = MuThread::mu_thread_launch(vm.next_id(), Box::from_raw(stack), thread_local, None, vm.clone());
vm.push_join_handle(join_handle);
muthread
}
\ No newline at end of file
...@@ -1269,6 +1269,7 @@ struct BundleLoader<'lb, 'lvm> { ...@@ -1269,6 +1269,7 @@ struct BundleLoader<'lb, 'lvm> {
built_ref_void: Option<P<MuType>>, built_ref_void: Option<P<MuType>>,
built_tagref64: Option<P<MuType>>, built_tagref64: Option<P<MuType>>,
built_stackref: Option<P<MuType>>, built_stackref: Option<P<MuType>>,
built_threadref: Option<P<MuType>>,
built_funcref_of: IdPMap<MuType>, built_funcref_of: IdPMap<MuType>,
built_ref_of: IdPMap<MuType>, built_ref_of: IdPMap<MuType>,
...@@ -1307,6 +1308,7 @@ fn load_bundle(b: &mut MuIRBuilder) { ...@@ -1307,6 +1308,7 @@ fn load_bundle(b: &mut MuIRBuilder) {
built_ref_void: Default::default(), built_ref_void: Default::default(),
built_tagref64: Default::default(), built_tagref64: Default::default(),
built_stackref: Default::default(), built_stackref: Default::default(),
built_threadref: Default::default(),
built_funcref_of: Default::default(), built_funcref_of: Default::default(),
built_ref_of: Default::default(), built_ref_of: Default::default(),
built_iref_of: Default::default(), built_iref_of: Default::default(),
...@@ -1444,6 +1446,25 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -1444,6 +1446,25 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
impl_ty impl_ty
} }
fn ensure_threadref(&mut self) -> P<MuType> {
if let Some(ref impl_ty) = self.built_threadref {
return impl_ty.clone();
}
let id = self.vm.next_id();
let impl_ty = P(MuType {
hdr: MuEntityHeader::unnamed(id),
v: MuType_::ThreadRef
});
trace!("Ensure threadref is defined: {} {:?}", id, impl_ty);
self.built_types.insert(id, impl_ty.clone());
self.built_threadref = Some(impl_ty.clone());
impl_ty
}
fn ensure_i6(&mut self) -> P<MuType> { fn ensure_i6(&mut self) -> P<MuType> {
if let Some(ref impl_ty) = self.built_i6 { if let Some(ref impl_ty) = self.built_i6 {
...@@ -2368,13 +2389,14 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -2368,13 +2389,14 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
.iter() .iter()
.map(|iid| self.build_inst(fcb, *iid, blocks)) .map(|iid| self.build_inst(fcb, *iid, blocks))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
assert_ir!(
res.last() let n = res.len();
.as_ref() for i in 0..(n - 1) {
.unwrap() // None of the internal instruction should be a terminator
.as_inst_ref() assert_ir!(!res[i].as_inst_ref().is_terminal_inst());
.is_terminal_inst() }
); // The last instruction should be a terminator
assert_ir!(res[n - 1].as_inst_ref().is_terminal_inst());
res res
} }
...@@ -3261,7 +3283,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -3261,7 +3283,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
let impl_ord = self.build_mem_ord(ord); let impl_ord = self.build_mem_ord(ord);
let impl_loc = self.get_treenode(fcb, loc); let impl_loc = self.get_treenode(fcb, loc);
let impl_rvtype = self.get_built_type(refty); let impl_rvtype = self.get_built_type(refty);
let impl_rv = self.new_ssa(fcb, result_id, impl_rvtype).clone_value(); let impl_rv = self.new_ssa(fcb, result_id, self.vm.make_strong_type(impl_rvtype)).clone_value();
let impl_refty = self.get_built_type(refty); let impl_refty = self.get_built_type(refty);
assert_ir!(impl_ord != MemoryOrder::Release && impl_ord != MemoryOrder::AcqRel); assert_ir!(impl_ord != MemoryOrder::Release && impl_ord != MemoryOrder::AcqRel);
...@@ -3382,10 +3404,6 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -3382,10 +3404,6 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
exc_clause, //Option<MuExcClause>, exc_clause, //Option<MuExcClause>,
keepalive_clause // Option<MuKeepaliveClause>, keepalive_clause // Option<MuKeepaliveClause>,
} => { } => {
// TODO: Validate IR
/*
validate cur_stack_clause and new_stack_clause
*/
let mut ops: Vec<P<TreeNode>> = vec![self.get_treenode(fcb, swappee)]; let mut ops: Vec<P<TreeNode>> = vec![self.get_treenode(fcb, swappee)];
assert_ir!(ops[0].ty().is_stackref()); assert_ir!(ops[0].ty().is_stackref());
...@@ -3407,30 +3425,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -3407,30 +3425,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let (is_exception, args) = match **new_stack_clause { let (is_exception, args) = self.build_new_stack_clause(new_stack_clause, fcb, &mut ops);
NodeNewStackClause::PassValues {
ref tys, ref vars, ..
} => {
let args_begin_index = ops.len();
let args = self.add_opnds(fcb, &mut ops, vars);
assert_ir!(
args.len() == tys.len() &&
args.iter()
.zip(tys)
.all(|(arg, tid)| arg.ty() == self.get_built_type(*tid))
);
(
false,
(args_begin_index..(vars.len() + 1)).collect::<Vec<_>>()
)
}
NodeNewStackClause::ThrowExc { ref exc, .. } => {
let exc_arg = ops.len();
self.add_opnd(fcb, &mut ops, *exc);
(true, vec![exc_arg])
}
};
match exc_clause { match exc_clause {
Some(ecid) => { Some(ecid) => {
...@@ -3442,9 +3437,8 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -3442,9 +3437,8 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
self.build_destination(fcb, ecnode.exc, &mut ops, &[], blocks); self.build_destination(fcb, ecnode.exc, &mut ops, &[], blocks);
assert_ir!(match **cur_stack_clause { assert_ir!(match **cur_stack_clause {
// Can't have an exception // Can't have an exception clause
NodeCurrentStackClause::KillOld { .. } => false, NodeCurrentStackClause::KillOld { .. } => false,
// clause
_ => true _ => true
}); });
...@@ -3489,6 +3483,52 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> { ...@@ -3489,6 +3483,52 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
} }
} }
} }
NodeInst::NodeNewThread {
id: _,
result_id,
stack,
threadlocal,
new_stack_clause,
exc_clause
} => {
if exc_clause.is_some() {
unimplemented!();
}
let mut ops: Vec<P<TreeNode>> = vec![self.get_treenode(fcb, stack)];
assert_ir!(ops[0].ty().is_stackref());
let new_stack_clause = self.b.bundle.ns_clauses.get(&new_stack_clause).unwrap();
let impl_threadref = self.ensure_threadref();