
Commit da783a85 authored by Isaac Oscar Gariano

Reimplemented new thread and stack things for aarch64

parent 7d018fc8
Pipeline #914 canceled
......@@ -96,7 +96,8 @@ impl Instruction {
match self.v {
ExprCall { .. } | ExprCCall { .. } | Load { .. } | Store { .. } | CmpXchg { .. } | AtomicRMW { .. } |
New(_) | AllocA(_) | NewHybrid(_, _) | AllocAHybrid(_, _) | NewStack(_) | NewThread(_, _) |
NewThreadExn(_, _) | NewFrameCursor(_) | Fence(_) | Return(_) | ThreadExit | Throw(_) |
NewThreadExn(_, _) | NewFrameCursor(_) | Fence(_) | Return(_) | ThreadExit | KillStack(_) |
Throw(_) |
TailCall(_) | Branch1(_) | Branch2 { .. } | Watchpoint { .. } | WPBranch { .. } |
Call { .. } | CCall { .. }| SwapStackExpr{..}| SwapStackExc { .. } | SwapStackKill { .. } | Switch { .. } | ExnInstruction { .. } |
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) | CommonInst_Pin(_) | CommonInst_Unpin(_) |
......@@ -246,6 +247,12 @@ pub enum Instruction_ {
/// args: functionref of the entry function
NewStack(OpIndex),
/// Kills the given Mu stack
/// args: stackref to kill
KillStack(OpIndex),
/// yields the stackref of the current stack
CurrentStack,
/// create a new Mu thread, yields thread reference
/// args: stackref of a Mu stack, a list of arguments
NewThread(OpIndex, Vec<OpIndex>), // stack, args
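// For reference, the textual (debug_str) forms implemented further below are:
//   NEW_STACK <func>
//   NEWTHREAD <stack> PASS_VALUES <args...>
//   CURRENT_STACK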
......@@ -511,7 +518,7 @@ impl Instruction_ {
&Instruction_::AllocA(ref ty) => format!("ALLOCA {}", ty),
&Instruction_::NewHybrid(ref ty, len) => format!("NEWHYBRID {} {}", ty, ops[len]),
&Instruction_::AllocAHybrid(ref ty, len) => format!("ALLOCAHYBRID {} {}", ty, ops[len]),
&Instruction_::NewStack(func) => format!("NEWSTACK {}", ops[func]),
&Instruction_::NewStack(func) => format!("NEW_STACK {}", ops[func]),
&Instruction_::NewThread(stack, ref args) => {
format!(
"NEWTHREAD {} PASS_VALUES {}",
......@@ -557,6 +564,8 @@ impl Instruction_ {
&Instruction_::Return(ref vals) => format!("RET {}", op_vector_str(vals, ops)),
&Instruction_::ThreadExit => "THREADEXIT".to_string(),
&Instruction_::CurrentStack => "CURRENT_STACK".to_string(),
&Instruction_::KillStack(s) => format!("KILL_STACK {}", ops[s]),
&Instruction_::Throw(exn_obj) => format!("THROW {}", ops[exn_obj]),
&Instruction_::TailCall(ref call) => format!("TAILCALL {}", call.debug_str(ops)),
&Instruction_::Branch1(ref dest) => format!("BRANCH {}", dest.debug_str(ops)),
......
......@@ -849,7 +849,6 @@ pub fn get_callee_saved_offset(reg: MuID) -> isize {
}*/
pub fn is_callee_saved(reg_id: MuID) -> bool {
for reg in CALLEE_SAVED_GPRS.iter() {
if reg_id == reg.extract_ssa_id().unwrap() {
return true;
......@@ -864,6 +863,10 @@ pub fn is_callee_saved(reg_id: MuID) -> bool {
false
}
// The stack size needed for a call to the given function signature
pub fn call_stack_size(sig: P<MuFuncSig>, vm: &VM) -> usize {
compute_argument_locations(&sig.ret_tys, &SP, 0, &vm).2
}
// TODO: Check that these numbers are reasonable (THEY ARE ONLY AN ESTIMATE)
use ast::inst::*;
pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
......@@ -907,7 +910,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
ThreadExit => 10,
ThreadExit => 10, CurrentStack => 10, KillStack(_) => 10,
Throw(_) => 10,
SwapStackExpr { .. } | SwapStackExc { .. } | SwapStackKill { .. } => 10,
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
......@@ -2830,3 +2833,177 @@ fn is_int_ex_reg(val: &P<Value>) -> bool {
fn is_fp_reg(val: &P<Value>) -> bool {
RegGroup::get_from_value(&val) == RegGroup::FPR && (val.is_reg() || val.is_const())
}
// TODO: Thoroughly test this
// (compare with the code generated by GCC for various types?)
// The algorithm presented here is derived from the ARM AAPCS64 reference.
// Returns a vector indicating whether each argument should be passed as an IRef
// (and not directly), a vector giving the location of each argument
// (in memory or a register), and the amount of stack space used.
// NOTE: It currently does not support vectors/SIMD types (or aggregates of such types)
fn compute_argument_locations(
arg_types: &Vec<P<MuType>>,
stack: &P<Value>,
offset: i64,
vm: &VM
) -> (Vec<bool>, Vec<P<Value>>, usize) {
if arg_types.len() == 0 {
// nothing to do
return (vec![], vec![], 0);
}
let mut ngrn = 0 as usize; // The Next General-purpose Register Number
let mut nsrn = 0 as usize; // The Next SIMD and Floating-point Register Number
let mut nsaa = 0 as usize; // The next stacked argument address (an offset from the SP)
use ast::types::MuType_::*;
// reference[i] = true indicates the argument is passed as an IRef to a location on the stack
let mut reference: Vec<bool> = vec![];
for t in arg_types {
reference.push(
hfa_length(t) == 0 && // HFAs aren't converted to IRefs
match t.v {
// size can't be statically determined
Hybrid(_) => panic!("Hybrid argument not supported"),
// type is too large
Struct(_) | Array(_, _) if vm.get_backend_type_size(t.id()) > 16 => true,
Vector(_, _) => unimplemented!(),
_ => false
}
);
}
// TODO: How does passing arguments by reference affect the stack size??
let mut locations: Vec<P<Value>> = vec![];
for i in 0..arg_types.len() {
let i = i as usize;
let t = if reference[i] {
P(MuType::new(
new_internal_id(),
MuType_::IRef(arg_types[i].clone())
))
} else {
arg_types[i].clone()
};
let size = align_up(vm.get_backend_type_size(t.id()), 8);
let align = get_type_alignment(&t, vm);
match t.v {
Hybrid(_) => panic!("hybrid argument not supported"),
Vector(_, _) => unimplemented!(),
Float | Double => {
if nsrn < 8 {
locations.push(get_alias_for_length(
ARGUMENT_FPRS[nsrn].id(),
get_bit_size(&t, vm)
));
nsrn += 1;
} else {
nsrn = 8;
locations.push(make_value_base_offset(
&stack,
offset + (nsaa as i64),
&t,
vm
));
nsaa += size;
}
}
Struct(_) | Array(_, _) => {
let hfa_n = hfa_length(&t);
if hfa_n > 0 {
if nsrn + hfa_n <= 8 {
// Note: the argument will occupy successive registers
// (one for each element)
locations.push(get_alias_for_length(
ARGUMENT_FPRS[nsrn].id(),
get_bit_size(&t, vm) / hfa_n
));
nsrn += hfa_n;
} else {
nsrn = 8;
locations.push(make_value_base_offset(
&stack,
offset + (nsaa as i64),
&t,
vm
));
nsaa += size;
}
} else {
if align == 16 {
ngrn = align_up(ngrn, 2); // align NGRN to the next even number
}
if size <= 8 * (8 - ngrn) {
// The struct should be packed, starting here
// (note: this may result in multiple struct fields in the same register
// or even floating points in a GPR)
locations.push(ARGUMENT_GPRS[ngrn].clone());
// How many GPRS are taken up by t
ngrn += if size % 8 != 0 {
size / 8 + 1
} else {
size / 8
};
} else {
ngrn = 8;
nsaa = align_up(nsaa, align_up(align, 8));
locations.push(make_value_base_offset(
&stack,
offset + (nsaa as i64),
&t,
vm
));
nsaa += size;
}
}
}
Void => panic!("void argument not supported"),
// Integral or pointer type
_ => {
if size <= 8 {
if ngrn < 8 {
locations.push(get_alias_for_length(
ARGUMENT_GPRS[ngrn].id(),
get_bit_size(&t, vm)
));
ngrn += 1;
} else {
nsaa = align_up(nsaa, align_up(align, 8));
locations.push(make_value_base_offset(
&stack,
offset + (nsaa as i64),
&t,
vm
));
nsaa += size;
}
} else if size == 16 {
ngrn = align_up(ngrn, 2); // align NGRN to the next even number
if ngrn < 7 {
locations.push(ARGUMENT_GPRS[ngrn].clone());
ngrn += 2;
} else {
ngrn = 8;
nsaa = align_up(nsaa, 16);
locations.push(make_value_base_offset(
&stack,
offset + (nsaa as i64),
&t,
vm
));
nsaa += 16;
}
} else {
unimplemented!(); // Integer type is too large
}
}
}
}
(reference, locations, align_up(nsaa, 16) as usize)
}
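For readers less familiar with AAPCS64, here is a standalone sketch (not part of the commit; ArgKind, ArgLoc and locate are made-up illustrative names) of the same NGRN/NSRN/NSAA bookkeeping for a few simplified argument kinds. It ignores the 16-byte register-pair alignment and the IRef type construction handled above:

// Simplified illustration of the AAPCS64 argument-location bookkeeping above.
#[derive(Clone, Copy)]
enum ArgKind {
    Int { size: usize },                  // integral or pointer, size in bytes (<= 16)
    Fp,                                   // Float or Double
    Aggregate { size: usize, hfa: usize } // struct/array; hfa = element count if an HFA, else 0
}

#[derive(Debug)]
enum ArgLoc {
    Gpr(usize),         // X0..X7 (first register if more than one is used)
    Fpr(usize),         // D0..D7 (first register of an HFA)
    GprIndirect(usize), // passed by reference, pointer in X0..X7
    Stack(usize)        // byte offset from SP at the call
}

fn locate(args: &[ArgKind]) -> (Vec<ArgLoc>, usize) {
    let align_up = |x: usize, a: usize| (x + a - 1) / a * a;
    let (mut ngrn, mut nsrn, mut nsaa) = (0usize, 0usize, 0usize);
    let mut locs = vec![];
    for &arg in args {
        match arg {
            ArgKind::Fp => {
                if nsrn < 8 { locs.push(ArgLoc::Fpr(nsrn)); nsrn += 1; }
                else { locs.push(ArgLoc::Stack(nsaa)); nsaa += 8; }
            }
            ArgKind::Aggregate { size, hfa } if hfa > 0 => {
                // homogeneous FP aggregate: one FP register per element if they all fit
                if nsrn + hfa <= 8 { locs.push(ArgLoc::Fpr(nsrn)); nsrn += hfa; }
                else { nsrn = 8; locs.push(ArgLoc::Stack(nsaa)); nsaa += align_up(size, 8); }
            }
            ArgKind::Aggregate { size, .. } if size > 16 => {
                // too large to pass directly: pass a pointer to a copy instead
                if ngrn < 8 { locs.push(ArgLoc::GprIndirect(ngrn)); ngrn += 1; }
                else { locs.push(ArgLoc::Stack(nsaa)); nsaa += 8; }
            }
            ArgKind::Int { size } | ArgKind::Aggregate { size, .. } => {
                let regs = align_up(size, 8) / 8; // one or two GPRs
                if ngrn + regs <= 8 { locs.push(ArgLoc::Gpr(ngrn)); ngrn += regs; }
                else { ngrn = 8; locs.push(ArgLoc::Stack(nsaa)); nsaa += align_up(size, 8); }
            }
        }
    }
    (locs, align_up(nsaa, 16)) // stack usage rounded up to 16 bytes, as above
}

fn main() {
    // e.g. (i64, double, [double; 3], 24-byte struct): the last argument goes by reference
    let (locs, stack) = locate(&[
        ArgKind::Int { size: 8 },
        ArgKind::Fp,
        ArgKind::Aggregate { size: 24, hfa: 3 },
        ArgKind::Aggregate { size: 24, hfa: 0 }
    ]);
    println!("{:?}, stack bytes = {}", locs, stack);
}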
......@@ -654,7 +654,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// runtime call
New(_) | NewHybrid(_, _) => 10,
NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
ThreadExit => 10,
ThreadExit => 10, CurrentStack => 10, KillStack(_) => 10,
Throw(_) => 10,
SwapStackExpr { .. } | SwapStackExc { .. } | SwapStackKill { .. } => 10,
CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
......@@ -668,3 +668,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
_ => unimplemented!()
}
}
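// Stack size needed for a call to the given function signature
// (x86_64 counterpart of the aarch64 version above; currently always returns 0)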
pub fn call_stack_size(sig: P<MuFuncSig>, vm: &VM) -> usize {
0
}
......@@ -116,6 +116,8 @@ pub use compiler::backend::x86_64::spill_rewrite;
pub use compiler::backend::x86_64::ARGUMENT_GPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::ARGUMENT_FPRS;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::call_stack_size;
/// --- aarch64 backend ---
#[cfg(target_arch = "aarch64")]
......@@ -184,6 +186,8 @@ pub use compiler::backend::aarch64::spill_rewrite;
pub use compiler::backend::aarch64::ARGUMENT_GPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::ARGUMENT_FPRS;
#[cfg(target_arch = "aarch64")]
pub use compiler::backend::aarch64::call_stack_size;
use vm::VM;
use ast::types::*;
......
......@@ -94,6 +94,17 @@ pop_pair d3, d2, \stack
pop_pair d1, d0, \stack
.endm
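# Loads the 8 floating-point and 8 general-purpose argument registers from the
# 128 bytes immediately below \stack (D0/D1 come from the highest pair, X6/X7 from the lowest)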
.macro load_arguments stack=SP
LDP D0, D1, [\stack, #-2*8]
LDP D2, D3, [\stack, #-4*8]
LDP D4, D5, [\stack, #-6*8]
LDP D6, D7, [\stack, #-8*8]
LDP X0, X1, [\stack, #-10*8]
LDP X2, X3, [\stack, #-12*8]
LDP X4, X5, [\stack, #-14*8]
LDP X6, X7, [\stack, #-16*8]
.endm
.macro mov_args_to_callee_saved
MOV X19, X0
MOV X20, X1
......
......@@ -57,14 +57,26 @@ lazy_static! {
// impl: runtime_asm_ARCH_OS.s
// decl: thread.rs
pub static ref SWAP_BACK_TO_NATIVE_STACK : RuntimeEntrypoint = RuntimeEntrypoint {
pub static ref MUENTRY_THREAD_EXIT : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![],
arg_tys: vec![ADDRESS_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR,
String::from("muentry_swap_back_to_native_stack")),
String::from("muentry_thread_exit")),
jit: RwLock::new(None),
};
}
lazy_static! {
// impl/decl: thread.rs
pub static ref MUENTRY_NEW_STACK: RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig{
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![STACKREF_TYPE.clone()],
arg_tys: vec![ADDRESS_TYPE.clone(), ADDRESS_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_new_stack")),
jit: RwLock::new(None),
};
......
......@@ -16,75 +16,6 @@
#include "asm_common_aarch64.S.inc"
# swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address)
# X0 X1 X2
begin_func swap_to_mu_stack
# -- on old stack --
enter_frame
# push all callee-saved registers
push_callee_saved
# *old_sp_loc = SP
MOV X9, SP
STR X9, [X2]
# sp = new_sp
MOV SP, X0
# x9 = entry
MOV X9, X1
# -- on new stack --
# arguments (reverse order of thread.rs - runtime_load_args)
pop_arguments
# If entry tries to return (IT SHOULDN'T) it will call entry_returned (which will panic!)
ADR LR, entry_returned
# branch to entry
//MOV FP, 0 // End of call frame
RET X9
end_func swap_to_mu_stack
# The error message to print when entry returns
.type .Lentry_returned_message, @object
.section .rodata.str1.1,"aMS",@progbits,1
.Lentry_returned_message:
.asciz "ERROR: The entry function returned\n"
.size .Lentry_returned_message, 36
# Prints an error message and calls exit(1)
begin_func entry_returned
ADRP x0, .Lentry_returned_message
ADD x0, x0, :lo12:.Lentry_returned_message // Loads x0 with the address of .Lentry_returned_message
MOV X1, #35 // Length of string
MOV X2, #1 // Number of elements
ADRP X3, :got:stderr
LDR X3, [X3, :got_lo12:stderr]
LDR X3, [X3] // Load X3 with the address of stderr
BL fwrite // calls fwrite(x0, x1, x2, x3)
MOV W0, 1
BL exit // call exit(1)
end_func entry_returned
// _swap_back_to_native_stack(sp_loc: Address)
// X0
begin_func muentry_swap_back_to_native_stack
# SP = *sp_loc
LDR X9, [X0]
MOV SP, X9
pop_callee_saved
# Restore the previous frame (and return to its return location)
exit_frame
RET
end_func muentry_swap_back_to_native_stack
# _get_current_frame_bp() -> Address
# X0
begin_func get_current_frame_bp
......@@ -114,40 +45,17 @@ begin_func exception_restore
BR X0
end_func exception_restore
# swap to the new stack whilst passing values and saving the old stack
# muentry_swapstack_ret_pass(new_stack args..., new_sp: Address, old_sp_loc: &mut Address)
# X0 ... X7 [x8] X9 X10
begin_func muentry_swapstack_ret_pass
enter_frame
push_callee_saved
# Save the current stack pointer
MOV X11, SP
STR X11, [X10]
# Swap to new stack
MOV SP, X9
# Swapstack internals
.macro stack_pass new_sp
MOV SP, \new_sp
# On the new stack, reverse the above
pop_callee_saved
exit_frame
RET
end_func muentry_swapstack_ret_pass
# Same as swapstack_ret_pass except will throw an exception to the new stack instead of passing values
# muentry_swapstack_ret_throw(exception: Address, new_sp: Address, old_sp_loc: &mut Address)
# X0 X1 X2
begin_func muentry_swapstack_ret_throw
enter_frame
push_callee_saved
# Save the current stack pointer
MOV X3, SP
STR X3, [X2]
# Swap to new stack
MOV SP, X1
.endm
.macro stack_throw new_sp
MOV SP, \new_sp
# The new stack will have the same layout as the stack when muentry_throw_exception
# calls throw_exception_internal, so we can do that directly here
......@@ -155,47 +63,78 @@ begin_func muentry_swapstack_ret_throw
ADD FP, SP, 144
MOV X1, FP
BL throw_exception_internal
.endm
# won't return
end_func muentry_swapstack_ret_throw
.macro stack_ret old_sp
enter_frame
push_callee_saved
# swap to the new stack whilst passing values and killing the old stack
# muentry_swapstack_kill_pass(new_stack args..., new_sp: Address, old_stack: *mut MuStack)
# X0 ... X7 [x8] X9 X10
begin_func muentry_swapstack_kill_pass
# Swap to new stack
MOV SP, X9
MOV X11, SP
STR X11, [\old_sp]
.endm
.macro stack_kill old_sp
mov_args_to_callee_saved
MOV X0, X10
MOV X0, \old_sp
B muentry_kill_stack
mov_callee_saved_to_args
.endm
# On the new stack, reverse muentry_swapstack_ret_pass
pop_callee_saved
exit_frame
RET
end_func muentry_swapstack_kill_pass
# Starts a Mu thread, passing values to the target
# (the memory immediately below new_sp holds the initial values of the argument registers)
# muthread_start_pass(new_sp: Address, old_sp_loc: Address)
#                     X0               X1
begin_func muthread_start_pass
stack_ret X1
MOV X9, X0 # X1 will be overridden by the next instructions
load_arguments X9
stack_pass X9
end_func muthread_start_pass
# Same as muentry_swapstack_kill_pass except will throw an exception to the new stack instead of passing values
# muentry_swapstack_kill_throw(exception: Address, new_sp: Address, old_stack: *mut MuStack)
# Same as muentry_swapstack_ret_throw
# muthread_start_throw(exception: Address, new_sp: Address, old_sp_loc: &mut Address)
# X0 X1 X2
begin_func muentry_swapstack_kill_throw
# Swap to new stack
MOV SP, X1
begin_func muthread_start_throw
stack_ret X2
stack_throw X1
end_func muthread_start_throw
# Exits the current Mu thread by swapping to the given stack
# muentry_thread_exit(new_sp: Address)
#                     X0
begin_func muentry_thread_exit
# Rust code will be responsible for actually killing the stack
stack_pass X0
end_func muentry_thread_exit
mov_args_to_callee_saved
MOV X0, X9
B muentry_kill_stack
mov_callee_saved_to_args
# swap to the new stack whilst passing values and saving the old stack
# muentry_swapstack_ret_pass(new_stack args..., new_sp: Address, old_sp_loc: &mut Address)
# X0 ... X7 [x8] X9 X10
begin_func muentry_swapstack_ret_pass
stack_ret X10
stack_pass X9
end_func muentry_swapstack_ret_pass
# The new stack will have the same layout as the stack when muentry_throw_exception
# calls throw_exception_internal, so we can do that directly here
# Same as swapstack_ret_pass except will throw an exception to the new stack instead of passing values
# muentry_swapstack_ret_throw(exception: Address, new_sp: Address, old_sp_loc: &mut Address)
# X0 X1 X2
begin_func muentry_swapstack_ret_throw
stack_ret X2
stack_throw X1
end_func muentry_swapstack_ret_throw
# Add the total size pushed by 'push_callee_saved' to get the FP for the new stack
ADD FP, SP, 144
MOV X1, FP
BL throw_exception_internal
# swap to the new stack whilst passing values and killing the old stack
# muentry_swapstack_kill_pass(new_stack args..., new_sp: Address, old_stack: *mut MuStack)
# X0 ... X7 [x8] X9 X10
begin_func muentry_swapstack_kill_pass
stack_kill X10
stack_pass X9
end_func muentry_swapstack_kill_pass
# won't return
end_func muentry_swapstack_kill_throw
\ No newline at end of file
# Same as muentry_swapstack_kill_pass except will throw an exception to the new stack instead of passing values
# muentry_swapstack_kill_throw(exception: Address, new_sp: Address, old_stack: *mut MuStack)
# X0 X1 X2
begin_func muentry_swapstack_kill_throw
stack_kill X2
stack_throw X1
end_func muentry_swapstack_kill_throw
......@@ -19,10 +19,13 @@ use vm::VM;
use runtime;
use runtime::ValueLocation;
use runtime::mm;
use compiler::backend::CALLEE_SAVED_COUNT;
use utils::ByteSize;
use utils::Address;
use utils::Word;
use utils::POINTER_SIZE;
use utils::WORD_SIZE;
use utils::mem::memmap;
use utils::mem::memsec;
......@@ -107,7 +110,7 @@ pub struct MuStack {
impl MuStack {
/// creates a new MuStack for given entry function and function address
pub fn new(id: MuID, func_addr: ValueLocation, func: &MuFunction) -> MuStack {
pub fn new(id: MuID, func_addr: Address, stack_arg_size: usize) -> MuStack {
// allocate memory for the stack
let anon_mmap = {
// reserve two guard pages more than we need for the stack
......@@ -141,17 +144,31 @@ impl MuStack {
);
}
debug!("creating stack {} with entry func {:?}", id, func);
debug!("creating stack {} with entry address {:?}", id, func_addr);
debug!("overflow_guard : {}", overflow_guard);
debug!("lower_bound : {}", lower_bound);
debug!("upper_bound : {}", upper_bound);
debug!("underflow_guard: {}", underflow_guard);
// Set up the stack
let mut sp = upper_bound;
sp -= stack_arg_size; // Allocate space for the arguments
// Push entry as the return address
sp -= POINTER_SIZE;
unsafe { sp.store(func_addr); }
// Push a null frame pointer
sp -= POINTER_SIZE;
unsafe { sp.store(Address::zero()); }
// Reserve space for callee saved registers (they will be loaded with undefined values)
sp -= WORD_SIZE*CALLEE_SAVED_COUNT;
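// Resulting stack layout, from high addresses to low:
//   upper_bound
//   [ stack_arg_size bytes reserved for the entry function's arguments ]
//   [ entry address (popped as the return address) ]
//   [ null frame pointer ]
//   [ CALLEE_SAVED_COUNT words of space for callee-saved registers ]
//   sp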
MuStack {
hdr: MuEntityHeader::unnamed(id),
func: Some((func_addr, func.id())),
state: MuStackState::Ready(func.sig.arg_tys.clone()),
func: None,
state: MuStackState::Unknown,
size: STACK_SIZE,
overflow_guard: overflow_guard,
......@@ -159,7 +176,7 @@ impl MuStack {
upper_bound: upper_bound,
underflow_guard: upper_bound,
sp: upper_bound,
sp: sp,
bp: upper_bound,
ip: unsafe { Address::zero() },
......@@ -230,9 +247,6 @@ impl MuStack {
}
}
// save it back
self.sp = stack_ptr;
if cfg!(debug_assertions) {
self.print_stack(Some(20));
}
......@@ -277,7 +291,8 @@ pub enum MuStackState {
/// running mu code
Active,
/// can be destroyed
Dead
Dead,
Unknown,
}
/// MuThread represents metadata for a Mu thread.
......@@ -374,14 +389,7 @@ extern "C" {
/// new_sp: stack pointer for the mu stack
/// entry : entry function for the mu stack
/// old_sp_loc: the location to store native stack pointer so we can later swap back
fn swap_to_mu_stack(new_sp: Address, entry: Address, old_sp_loc: Address);
/// swaps from a mu stack back to native stack
/// emitted by the compiler for THREADEXIT IR
/// args:
/// sp_loc: the location of native stack pointer
#[allow(dead_code)] // we are not using this function directly, but compiler will emit calls to it
fn muentry_swap_back_to_native_stack(sp_loc: Address);
fn muthread_start_pass(new_sp: Address, old_sp_loc: Address);
/// gets base pointer for the current frame
pub fn get_current_frame_bp() -> Address;
......@@ -466,8 +474,6 @@ impl MuThread {
vm: Arc<VM>
) -> JoinHandle<()> {
let new_sp = stack.sp;
let entry = runtime::resolve_symbol(vm.name_of(stack.func.as_ref().unwrap().1));
debug!("entry : 0x{:x}", entry);
match thread::Builder::new()
.name(format!("Mu Thread #{}", id))
......@@ -483,7 +489,7 @@ impl MuThread {
debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
unsafe {
swap_to_mu_stack(new_sp, entry, sp_threadlocal_loc);
muthread_start_pass(new_sp, sp_threadlocal_loc);
}
debug!("returned to Rust stack. Going to quit");
......@@ -663,6 +669,13 @@ pub unsafe extern "C" fn muentry_prepare_swapstack_kill(new_stack: *mut MuStack)
((*new_stack).sp, cur_stack)
}
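// Allocates a new MuStack that will start executing at `entry`, reserving
// `stack_size` bytes of the new stack for arguments (see MuStack::new above);
// exposed to compiled code through the MUENTRY_NEW_STACK entry point.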
#[no_mangle]
pub unsafe extern "C" fn muentry_new_stack(entry: Address, stack_size: usize) -> *mut MuStack {
let ref vm = MuThread::current_mut().vm;
let stack = Box::new(MuStack::new(vm.next_id(), entry, stack_size));
return Box::into_raw(stack);
}
// Kills the given stack. WARNING! do not call this whilst on the given stack
#[no_mangle]
pub unsafe extern "C" fn muentry_kill_stack(stack: *mut MuStack) {
......
......@@ -1269,6 +1269,7 @@ struct BundleLoader<'lb, 'lvm> {
built_i6: Option<P<MuType>>,
built_ref_void: Option<P<MuType>>,
built_tagref64: Option<P<MuType>>,
built_stackref: Option<P<MuType>>,
built_funcref_of: IdPMap<MuType>,
built_ref_of: IdPMap<MuType>,
......@@ -1306,6 +1307,7 @@ fn load_bundle(b: &mut MuIRBuilder) {
built_i6: Default::default(),
built_ref_void: Default::default(),
built_tagref64: Default::default(),
built_stackref: Default::default(),
built_funcref_of: Default::default(),
built_ref_of: Default::default(),
built_iref_of: Default::default(),
......@@ -1423,6 +1425,27 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
impl_ty
}
fn ensure_stackref(&mut self) -> P<MuType> {
if let Some(ref impl_ty) = self.built_stackref {