Commit e1844c8a authored by Isaac Oscar Gariano

Implemented NEWTHREAD on aarch64

parent ae1e02a5
......@@ -23,7 +23,7 @@ extern crate gcc;
fn main() {
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_x64_sysv.c"]);
gcc::Config::new()
gcc::Build::new()
.flag("-O3")
.flag("-c")
.file("src/runtime/runtime_asm_x64_sysv.S")
......@@ -38,7 +38,7 @@ fn main() {
fn main() {
gcc::compile_library("libruntime_c.a", &["src/runtime/runtime_c_aarch64_sysv.c"]);
gcc::Config::new()
gcc::Build::new()
.flag("-O3")
.flag("-c")
.file("src/runtime/runtime_asm_aarch64_sysv.S")
......@@ -59,19 +59,19 @@ fn main() {
use std::path::Path;
let mut compiler_name = String::new();
compiler_name.push_str("x86_64-rumprun-netbsd-gcc");
gcc::Config::new()
gcc::Build::new()
.flag("-O3")
.flag("-c")
.compiler(Path::new(compiler_name.as_str()))
.file("src/runtime/runtime_x64_sel4_rumprun_sysv.c")
.compile("libruntime_c.a");
gcc::Config::new()
gcc::Build::new()
.flag("-O3")
.flag("-c")
.compiler(Path::new(compiler_name.as_str()))
.file("src/runtime/runtime_asm_x64_sel4_rumprun_sysv.S")
.compile("libruntime_asm.a");
gcc::Config::new()
gcc::Build::new()
.flag("-O3")
.flag("-c")
.compiler(Path::new(compiler_name.as_str()))
......
......@@ -92,8 +92,7 @@ impl Instruction {
NewHybrid(_, _) |
AllocAHybrid(_, _) |
NewStack(_) |
NewThread(_, _) |
NewThreadExn(_, _) |
NewThread{ .. } |
NewFrameCursor(_) |
GetIRef(_) |
GetFieldIRef { .. } |
......@@ -150,8 +149,7 @@ impl Instruction {
NewHybrid(_, _) |
AllocAHybrid(_, _) |
NewStack(_) |
NewThread(_, _) |
NewThreadExn(_, _) |
NewThread{ .. } |
NewFrameCursor(_) |
Fence(_) |
Return(_) |
......@@ -228,8 +226,7 @@ impl Instruction {
NewHybrid(_, _) |
AllocAHybrid(_, _) |
NewStack(_) |
NewThread(_, _) |
NewThreadExn(_, _) |
NewThread{ .. } |
NewFrameCursor(_) |
GetIRef(_) |
GetFieldIRef { .. } |
......@@ -301,8 +298,7 @@ impl Instruction {
NewHybrid(_, _) |
AllocAHybrid(_, _) |
NewStack(_) |
NewThread(_, _) |
NewThreadExn(_, _) |
NewThread{ .. } |
NewFrameCursor(_) |
GetIRef(_) |
GetFieldIRef { .. } |
......@@ -459,11 +455,12 @@ pub enum Instruction_ {
/// create a new Mu thread, yields thread reference
/// args: stackref of a Mu stack, a list of arguments
NewThread(OpIndex, Vec<OpIndex>), // stack, args
/// create a new Mu thread, yields thread reference (thread resumes with exceptional value)
/// args: stackref of a Mu stack, an exceptional value
NewThreadExn(OpIndex, OpIndex), // stack, exception
/// create a new Mu thread, yields thread reference
/// args: stackref of a Mu stack, an optional thread-local reference,
/// whether the thread resumes with an exceptional value, and the resume
/// arguments (a single exceptional value when is_exception is set)
NewThread {
stack: OpIndex,
thread_local: Option<OpIndex>,
is_exception: bool,
args: Vec<OpIndex>
},
/// create a frame cursor reference
/// args: stackref of a Mu stack
......@@ -724,16 +721,16 @@ impl Instruction_ {
&Instruction_::NewHybrid(ref ty, len) => format!("NEWHYBRID {} {}", ty, ops[len]),
&Instruction_::AllocAHybrid(ref ty, len) => format!("ALLOCAHYBRID {} {}", ty, ops[len]),
&Instruction_::NewStack(func) => format!("NEW_STACK {}", ops[func]),
&Instruction_::NewThread(stack, ref args) => {
&Instruction_::NewThread{stack, thread_local, is_exception, ref args} => {
let thread_local = thread_local
.map(|t| format!("{}", ops[t]))
.unwrap_or("NULL".to_string());
format!(
"NEWTHREAD {} PASS_VALUES {}",
"SWAPSTACK {} THREADLOCAL({}) {} {}",
ops[stack],
op_vector_str(args, ops)
thread_local,
is_exception,
op_vector_str(args, ops),
)
}
&Instruction_::NewThreadExn(stack, exn) => {
format!("NEWTHREAD {} THROW_EXC {}", ops[stack], ops[exn])
}
&Instruction_::NewFrameCursor(stack) => format!("NEWFRAMECURSOR {}", ops[stack]),
&Instruction_::GetIRef(reference) => format!("GETIREF {}", ops[reference]),
&Instruction_::GetFieldIRef {
......
......@@ -1813,9 +1813,8 @@ impl<'a> InstructionSelection {
CALLER_SAVED_REGS.to_vec(),
true
);
// Record the callsite
self.current_callsites
.push_back((callsite.unwrap().to_relocatable(), 0, 0));
self.record_callsite(None, callsite.unwrap(), 0);
}
// Runtime Entry
......@@ -2142,6 +2141,77 @@ impl<'a> InstructionSelection {
vm
);
}
Instruction_::NewThread{stack, thread_local, is_exception, ref args} => {
trace!("Instruction Selection on NEWTHREAD");
let ref ops = inst.ops;
let res = self.get_result_value(node, 0);
let stack = self.emit_ireg(&*ops[stack], f_content, f_context, vm);
let tl = match thread_local {
Some(op) => self.emit_ireg(&*ops[op], f_content, f_context, vm),
None => make_value_nullref(vm)
};
if is_exception {
let exc = self.emit_ireg(&*ops[args[0]], f_content, f_context, vm);
self.emit_runtime_entry(
&entrypoints::NEW_THREAD_EXCEPTIONAL,
vec![stack, tl, exc],
Some(vec![res.clone()]),
Some(node),
f_context,
vm
);
} else {
// Load the new stack pointer
let new_sp = make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
emit_load_base_offset(
self.backend.as_mut(),
&new_sp,
&stack,
*thread::MUSTACK_SP_OFFSET as i64,
f_context,
vm
);
let arg_values = self.emit_arg_values(&args, ops, f_content, f_context, vm);
// Pass the arguments: stack arguments are placed below the new_sp;
// register arguments are placed above it
self.emit_precall_convention(
&new_sp,
// The frame contains space for the FP and LR
(2 * POINTER_SIZE) as isize,
false,
&arg_values,
&arg_values.iter().map(|a| a.ty.clone()).collect::<Vec<_>>(),
0,
false,
true,
true,
Some(&new_sp),
f_context,
vm
);
emit_sub_u64(
self.backend.as_mut(),
&new_sp,
&new_sp,
(ARGUMENT_REG_COUNT * WORD_SIZE) as u64
);
emit_store_base_offset(
self.backend.as_mut(),
&stack,
*thread::MUSTACK_SP_OFFSET as i64,
&new_sp,
f_context,
vm
);
self.emit_runtime_entry(
&entrypoints::NEW_THREAD_NORMAL,
vec![stack, tl],
Some(vec![res.clone()]),
Some(node),
f_context,
vm
);
}
}
_ => unimplemented!()
} // main switch
}
......@@ -3808,6 +3878,7 @@ impl<'a> InstructionSelection {
modify_arg_base: bool,
reg_args: bool, // Whether to pass register arguments
stack_args: bool, // Whether to pass stack arguments
reg_arg_base: Option<&P<Value>>, // If this is None, pass register arguments in registers; otherwise store them at an offset from reg_arg_base
f_context: &mut FunctionContext,
vm: &VM
) -> (usize, Vec<P<Value>>) {
......@@ -3869,12 +3940,21 @@ impl<'a> InstructionSelection {
let arg_val =
emit_reg_value(self.backend.as_mut(), &arg_val, f_context, vm);
let (val_l, val_h) = split_int128(&arg_val, f_context, vm);
let arg_loc_h = get_register_from_id(arg_loc.id() + 2);
let arg_loc_h_id = arg_loc.id() + 2;
let (arg_loc_l, arg_loc_h) = match reg_arg_base {
Some(ref b) => (
make_value_base_offset(b, get_argument_reg_offset(arg_loc.id()) as i64, &arg_loc.ty, vm),
make_value_base_offset(b, get_argument_reg_offset(arg_loc_h_id) as i64, &arg_loc.ty, vm)
),
None => (arg_loc.clone(), get_register_from_id(arg_loc_h_id))
};
arg_regs.push(arg_loc_h.clone());
emit_move_value_to_value(
self.backend.as_mut(),
&arg_loc,
&arg_loc_l,
&val_l,
f_context,
vm
......@@ -3888,6 +3968,11 @@ impl<'a> InstructionSelection {
);
} else {
if (reg_args && arg_loc.is_reg()) || (stack_args && !arg_loc.is_reg()) {
let arg_loc = match reg_arg_base {
Some(ref b) if arg_loc.is_reg() => make_value_base_offset(b, get_argument_reg_offset(arg_loc.id()) as i64, &arg_loc.ty, vm),
_ => arg_loc.clone(),
};
emit_move_value_to_value(
self.backend.as_mut(),
&arg_loc,
......@@ -4230,6 +4315,7 @@ impl<'a> InstructionSelection {
true,
true,
true,
None,
f_context,
vm
);
......@@ -4274,22 +4360,10 @@ impl<'a> InstructionSelection {
)
}
#[allow(unused_variables)] // resumption not implemented
fn emit_c_call_ir(
&mut self,
inst: &Instruction,
calldata: &CallData,
resumption: Option<&ResumptionData>,
cur_node: &TreeNode,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM
) {
let ref ops = inst.ops;
fn emit_arg_values(
&mut self,
args: &Vec<OpIndex>,
ops: &Vec<P<TreeNode>>,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM
) -> Vec<P<Value>> {
// prepare args (they could be instructions, we need to emit inst and get value)
let mut arg_values = vec![];
for arg_index in calldata.args.iter() {
for arg_index in args {
let ref arg = ops[*arg_index];
if match_node_imm(arg) {
......@@ -4302,7 +4376,23 @@ impl<'a> InstructionSelection {
unimplemented!();
}
}
let arg_values = arg_values;
arg_values
}
#[allow(unused_variables)] // resumption not implemented
fn emit_c_call_ir(
&mut self,
inst: &Instruction,
calldata: &CallData,
resumption: Option<&ResumptionData>,
cur_node: &TreeNode,
f_content: &FunctionContent,
f_context: &mut FunctionContext,
vm: &VM
) {
let ref ops = inst.ops;
let arg_values = self.emit_arg_values(&calldata.args, ops, f_content, f_context, vm);
trace!("generating ccall");
let ref func = ops[calldata.func];
......@@ -4368,18 +4458,7 @@ impl<'a> InstructionSelection {
};
// Compute all the arguments...
let mut arg_values = vec![];
let arg_nodes = args.iter().map(|a| ops[*a].clone()).collect::<Vec<_>>();
for ref arg in &arg_nodes {
if match_node_imm(arg) {
arg_values.push(node_imm_to_value(arg))
} else if self.match_reg(arg) {
arg_values.push(self.emit_reg(arg, f_content, f_context, vm))
} else {
unimplemented!()
};
}
let mut arg_values = self.emit_arg_values(&args, ops, f_content, f_context, vm);
let tl = self.emit_get_threadlocal(f_context, vm);
let cur_stackref = make_temporary(f_context, STACKREF_TYPE.clone(), vm);
......@@ -4461,7 +4540,7 @@ impl<'a> InstructionSelection {
let arg_tys = arg_values.iter().map(|a| a.ty.clone()).collect::<Vec<_>>();
// Pass stack arguments before the old stack is killed
let (stack_arg_size, _) = self.emit_precall_convention(
self.emit_precall_convention(
&SP,
// The frame contains space for the FP and LR
(2 * POINTER_SIZE) as isize,
......@@ -4472,6 +4551,7 @@ impl<'a> InstructionSelection {
false,
false, // don't pass reg args
true, // pass stack args
None,
f_context,
vm
);
......@@ -4500,6 +4580,7 @@ impl<'a> InstructionSelection {
false,
true, // don't pass stack args
false, // pass reg args
None,
f_context,
vm
);
......@@ -4518,14 +4599,7 @@ impl<'a> InstructionSelection {
self.backend.emit_pop_pair(&FP, &LR, &SP);
}
let potentially_excepting = {
if resumption.is_some() {
let target_id = resumption.unwrap().exn_dest.target;
Some(f_content.get_block(target_id).name())
} else {
None
}
};
let potentially_excepting = Self::get_potentially_excepting(resumption, f_content);
// Call the function that swaps the stack
let callsite = {
......@@ -4556,21 +4630,9 @@ impl<'a> InstructionSelection {
}
};
if resumption.is_some() {
let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
let target_block = exn_dest.target;
self.current_callsites.push_back((
callsite.unwrap().to_relocatable(),
target_block,
stack_arg_size
));
} else if !is_kill {
self.current_callsites
.push_back((callsite.unwrap().to_relocatable(), 0, stack_arg_size));
}
if !is_kill {
self.record_callsite(resumption, callsite.unwrap(), res_stack_size);
if resumption.is_some() {
self.finish_block();
let block_name = make_block_name(&node.name(), "stack_resumption");
......@@ -4582,6 +4644,28 @@ impl<'a> InstructionSelection {
}
}
fn get_potentially_excepting(resumption: Option<&ResumptionData>, f_content: &FunctionContent) -> Option<MuName> {
if resumption.is_some() {
let target_id = resumption.unwrap().exn_dest.target;
Some(f_content.get_block(target_id).name())
} else {
None
}
}
fn record_callsite(&mut self, resumption: Option<&ResumptionData>, callsite: ValueLocation, stack_arg_size: usize) {
let target_block = match resumption {
Some(rd) => rd.exn_dest.target,
None => 0
};
self.current_callsites.push_back((
callsite.to_relocatable(),
target_block,
stack_arg_size
));
}
fn emit_mu_call(
&mut self,
is_tail: bool, // For tail calls
......@@ -4625,20 +4709,7 @@ impl<'a> InstructionSelection {
}
// prepare args (they could be instructions, we need to emit inst and get value)
let mut arg_values = vec![];
for arg_index in calldata.args.iter() {
let ref arg = ops[*arg_index];
if match_node_imm(arg) {
let arg = node_imm_to_value(arg);
arg_values.push(arg);
} else if self.match_reg(arg) {
let arg = self.emit_reg(arg, f_content, f_context, vm);
arg_values.push(arg);
} else {
unimplemented!();
}
}
let arg_values = self.emit_arg_values(&calldata.args, ops, f_content, f_context, vm);
let return_type = self.combine_return_types(&func_sig, vm);
let return_size = self.compute_return_allocation(&return_type, &vm);
let (stack_arg_size, arg_regs) = self.emit_precall_convention(
......@@ -4651,19 +4722,13 @@ impl<'a> InstructionSelection {
!is_tail,
true,
true,
None,
f_context,
vm
);
// check if this call has exception clause - need to tell backend about this
let potentially_excepting = {
if resumption.is_some() {
let target_id = resumption.unwrap().exn_dest.target;
Some(f_content.get_block(target_id).name())
} else {
None
}
};
let potentially_excepting = Self::get_potentially_excepting(resumption, f_content);
if is_tail {
// Restore callee saved registers and pop the frame
......@@ -4723,17 +4788,7 @@ impl<'a> InstructionSelection {
}
};
// record exception branch
if resumption.is_some() {
let ref exn_dest = resumption.as_ref().unwrap().exn_dest;
let target_block = exn_dest.target;
self.current_callsites
.push_back((callsite.to_relocatable(), target_block, stack_arg_size));
} else {
self.current_callsites
.push_back((callsite.to_relocatable(), 0, stack_arg_size));
}
self.record_callsite(resumption, callsite, stack_arg_size);
// deal with ret vals
self.emit_postcall_convention(
......
......@@ -53,6 +53,7 @@ use std::collections::HashMap;
// Number of normal callee saved registers (excluding FP, LR, and SP)
pub const CALLEE_SAVED_COUNT: usize = 18;
pub const ARGUMENT_REG_COUNT: usize = 16;
macro_rules! REGISTER {
($id:expr, $name: expr, $ty: ident) => {
......@@ -885,18 +886,18 @@ pub fn get_callee_saved_offset(reg: MuID) -> isize {
(id as isize + 1) * (-8)
}
// Returns the callee saved register with the id...
/*pub fn get_callee_saved_register(offset: isize) -> P<Value> {
debug_assert!(offset <= -8 && (-offset) % 8 == 0);
let id = ((offset/-8) - 1) as usize;
if id < CALLEE_SAVED_GPRs.len() {
CALLEE_SAVED_GPRs[id].clone()
} else if id - CALLEE_SAVED_GPRs.len() < CALLEE_SAVED_FPRs.len() {
CALLEE_SAVED_FPRs[id - CALLEE_SAVED_GPRs.len()].clone()
} else {
panic!("There is no callee saved register with id {}", offset)
}
}*/
// Gets the offset of the argument register when passed on the stack
pub fn get_argument_reg_offset(reg: MuID) -> isize {
let reg = get_color_for_precolored(reg);
let id = if reg >= FPR_ID_START {
(reg - ARGUMENT_FPRS[0].id()) / 2
} else {
(reg - ARGUMENT_GPRS[0].id()) / 2 + ARGUMENT_FPRS.len()
};
(id as isize + 1) * (-8)
}
pub fn is_callee_saved(reg_id: MuID) -> bool {
for reg in CALLEE_SAVED_GPRS.iter() {
......@@ -959,7 +960,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
NewStack(_) | NewThread { .. } | NewFrameCursor(_) => 10,
ThreadExit => 10,
CurrentStack => 10,
KillStack(_) => 10,
......@@ -1526,6 +1527,14 @@ pub fn make_value_int_const(val: u64, vm: &VM) -> P<Value> {
})
}
pub fn make_value_nullref(vm: &VM) -> P<Value> {
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: REF_VOID_TYPE.clone(),
v: Value_::Constant(Constant::NullRef)
})
}
// Replaces the zero register with a temporary whose value is zero (or returns the original register)
/* TODO use this function for the following arguments:
......
......@@ -657,7 +657,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime call
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
NewStack(_) | NewThread { .. } | NewFrameCursor(_) => 10,
ThreadExit => 10,
CurrentStack => 10,
KillStack(_) => 10,
......
......@@ -466,13 +466,6 @@ fn copy_inline_blocks(
let ref ops = inst.ops;
let ref v = inst.v;
trace!(
"ISAAC: Inlining [{} -> {}] : {} -> {}",
old_block.name(),
block_name,
inst_name,
hdr.name()
);
match v {
&Instruction_::Return(ref vec) => {
// change RET to a branch
......
......@@ -56,7 +56,7 @@ fn main() {
use std::path::Path;
let mut compiler_name = String::new();
compiler_name.push_str("x86_64-rumprun-netbsd-gcc");
gcc::Config::new().flag("-O3").flag("-c")
gcc::Build::new().flag("-O3").flag("-c")
.compiler(Path::new(compiler_name.as_str()))
.file("src/heap/gc/clib_x64_sel4_rumprun.c")
.compile("libgc_clib_x64.a");
......
......@@ -90,6 +90,30 @@ lazy_static! {
jit: RwLock::new(None),
};
// impl/decl: thread.rs
pub static ref NEW_THREAD_NORMAL: RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
arg_tys: vec![STACKREF_TYPE.clone(), REF_VOID_TYPE.clone()],
ret_tys: vec![THREADREF_TYPE.clone()],
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_new_thread_normal")),
jit: RwLock::new(None),
};
// impl/decl: thread.rs
pub static ref NEW_THREAD_EXCEPTIONAL: RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
arg_tys: vec![STACKREF_TYPE.clone(), REF_VOID_TYPE.clone(), REF_VOID_TYPE.clone()],
ret_tys: vec![THREADREF_TYPE.clone()],
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_new_thread_exceptional")),
jit: RwLock::new(None),
};
// impl/decl: gc/lib.rs
pub static ref ALLOC_FAST : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
......
......@@ -299,14 +299,22 @@ pub extern "C" fn mu_main(
};
// FIXME: currently assumes no user defined thread local - See Issue #48
let thread = thread::MuThread::new_thread_normal(
thread::MuThread::new_thread_normal(
stack,
unsafe { Address::zero() },
args,
vm.clone()
);
thread.join().unwrap();
while let Some(thread) = vm.pop_join_handle() {
thread.join().unwrap();
}
trace!("All threads have exited, quiting...");
}
}
......
......@@ -60,10 +60,10 @@ begin_func muthread_start_normal
MOV SP, X0
// Pop the argument registers from the new stack
LDP D1, D0 , [SP, #14*8]
LDP D3, D2, [SP, #12*8]
LDP D5, D4, [SP, #10*8]
LDP D7, D6, [SP, #8*8]
LDP D1, D0, [SP, #14*8]
LDP D3, D2, [SP, #12*8]
LDP D5, D4, [SP, #10*8]
LDP D7, D6, [SP, #8*8]
LDP X1, X0, [SP, #6*8]
LDP X3, X2, [SP, #4*8]
LDP X5, X4, [SP, #2*8]
......
......@@ -316,6 +316,8 @@ pub struct MuThread {
/// a pointer to the virtual machine
pub vm: Arc<VM>
}
unsafe impl Sync for MuThread {}
unsafe impl Send for MuThread {}
// a few field offsets the compiler uses
lazy_static! {
......@@ -383,6 +385,7 @@ extern "C" {
/// new_sp: stack pointer for the mu stack
/// old_sp_loc: the location to store native stack pointer so we can later swap back
fn muthread_start_normal(new_sp: Address, old_sp_loc: Address);
fn muthread_start_exceptional(exception: Address, new_sp: Address, old_sp_loc: Address);
/// gets base pointer for current frame
pub fn get_current_frame_bp() -> Address;
......@@ -451,30 +454,34 @@ impl MuThread {
threadlocal: Address,
vals: Vec<ValueLocation>,
vm: Arc<VM>
) -> JoinHandle<()> {
) {
// set up arguments on stack
stack.setup_args(vals);
MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, vm)
let (join_handle, _) = MuThread::mu_thread_launch(vm.next_id(), stack, threadlocal, None, vm.clone());
vm.push_join_handle(join_handle);
}
/// creates and launches a mu thread, returns a JoinHandle
#[no_mangle]
pub extern "C" fn mu_thread_launch(
/// creates and launches a mu thread, returns a JoinHandle and address to its MuThread structure
fn mu_thread_launch(
id: MuID,
stack: Box<MuStack>,
user_tls: Address,
exception: Option<Address>,
vm: Arc<VM>
) -> JoinHandle<()> {
) -> (JoinHandle<()>, *mut MuThread) {
let new_sp = stack.sp;
match thread::Builder::new()
// The conversions between boxes and ptrs are needed here as a `*mut MuThread` can't be sent between threads,
// but a Box can be. Also, converting a Box to a ptr consumes it.
let muthread_ptr = Box::into_raw(Box::new(
MuThread::new(id, mm::new_mutator(), stack, user_tls, vm)
));
let muthread = unsafe { Box::from_raw(muthread_ptr) };
(match thread::Builder::new()
.name(format!("Mu Thread #{}", id))
.spawn(move || {
let mut muthread = MuThread::new(id, mm::new_mutator(), stack, user_tls, vm);