
Commit 9c0d8d0a authored by qinsoon

allow storing a funcref in the heap, and calling it via an SSA var

parent 95ee86b9
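In short: under AOT compilation, a funcref that the client stores into heap memory has no concrete address yet, so the VM writes a sentinel value into the slot and records the slot in a pending-store map; when the boot image is built, each recorded slot is emitted as a relocation against the function's symbol. A minimal, self-contained sketch of that idea (the `PendingFuncrefStore` type and the `main` driver are hypothetical stand-ins for the VM's `aot_pending_funcref_store` in the patch below):

```rust
use std::collections::HashMap;

// Sentinel written into the heap slot until the real function address is
// known; mirrors `PENDING_FUNCREF = u64::MAX` in the patch.
const PENDING_FUNCREF: u64 = u64::MAX;

// Hypothetical stand-in for the VM's `aot_pending_funcref_store`:
// maps a heap slot to the symbol the slot should eventually point at.
struct PendingFuncrefStore {
    pending: HashMap<usize, String>,
}

impl PendingFuncrefStore {
    fn new() -> Self {
        PendingFuncrefStore { pending: HashMap::new() }
    }

    // At store time only the function's symbol is known, not its address:
    // write the sentinel and remember the slot.
    fn store_funcref(&mut self, heap: &mut [u64], slot: usize, symbol: &str) {
        heap[slot] = PENDING_FUNCREF;
        self.pending.insert(slot, symbol.to_string());
    }

    // At boot-image time, drain the map into relocation entries
    // (the patch does this in make_boot_image).
    fn drain_relocations(&mut self) -> Vec<(usize, String)> {
        self.pending.drain().collect()
    }
}

fn main() {
    let mut heap = vec![0u64; 4];
    let mut store = PendingFuncrefStore::new();

    store.store_funcref(&mut heap, 2, "test$ret42");
    assert_eq!(heap[2], PENDING_FUNCREF);

    for (slot, symbol) in store.drain_relocations() {
        println!("emit relocation: heap slot {} -> symbol {}", slot, symbol);
    }
}
```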
@@ -1042,6 +1042,18 @@ impl Decodable for MuEntityHeader {
        }
    }

+pub fn name_check(name: MuName) -> MuName {
+    let name = name.replace('.', "$");
+
+    if name.starts_with("@") || name.starts_with("%") {
+        let (_, name) = name.split_at(1);
+        return name.to_string();
+    }
+
+    name
+}

impl MuEntityHeader {
    pub fn unnamed(id: MuID) -> MuEntityHeader {
        MuEntityHeader {
@@ -1053,7 +1065,7 @@ impl MuEntityHeader {
    pub fn named(id: MuID, name: MuName) -> MuEntityHeader {
        MuEntityHeader {
            id: id,
-            name: RwLock::new(Some(MuEntityHeader::name_check(name)))
+            name: RwLock::new(Some(name_check(name)))
        }
    }
@@ -1067,19 +1079,7 @@ impl MuEntityHeader {
    pub fn set_name(&self, name: MuName) {
        let mut name_guard = self.name.write().unwrap();
-        *name_guard = Some(MuEntityHeader::name_check(name));
+        *name_guard = Some(name_check(name));
    }
-
-    pub fn name_check(name: MuName) -> MuName {
-        let name = name.replace('.', "$");
-
-        if name.starts_with("@") || name.starts_with("%") {
-            let (_, name) = name.split_at(1);
-            return name.to_string();
-        }
-
-        name
-    }

    fn abbreviate_name(&self) -> Option<MuName> {
...
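For reference, `name_check` (now a free function, so call sites outside `MuEntityHeader` such as `resolve_symbol` can use it) rewrites a Mu name into a form that is safe as a symbol: '.' becomes '$', and a leading '@' (global name) or '%' (local name) sigil is dropped. A standalone sketch of the same logic, assuming `MuName` is an alias for `String`:

```rust
// Sketch of the name mangling above, with plain String in place of MuName.
fn name_check(name: String) -> String {
    // rewrite '.' so the name is safe to use as a symbol
    let name = name.replace('.', "$");

    // drop the Mu sigil: '@' marks a global name, '%' a local one
    if name.starts_with('@') || name.starts_with('%') {
        let (_, rest) = name.split_at(1);
        return rest.to_string();
    }

    name
}

fn main() {
    assert_eq!(name_check("@test.ret42".to_string()), "test$ret42");
    assert_eq!(name_check("%local".to_string()), "local");
    assert_eq!(name_check("plain".to_string()), "plain");
}
```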
@@ -1008,26 +1008,23 @@ impl ASMCodeGen {
    fn prepare_machine_regs(&self, regs: Iter<P<Value>>) -> Vec<MuID> {
        regs.map(|x| self.prepare_machine_reg(x)).collect()
    }

-    fn add_asm_call(&mut self, code: String, potentially_excepting: Option<MuName>) {
-        // a call instruction will use all the argument registers
-        // do not need
-        let uses : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
-//        for reg in x86_64::ARGUMENT_GPRs.iter() {
-//            uses.insert(reg.id(), vec![]);
-//        }
-//        for reg in x86_64::ARGUMENT_FPRs.iter() {
-//            uses.insert(reg.id(), vec![]);
-//        }
-        // defines: return registers
+    fn add_asm_call_with_extra_uses(&mut self,
+                                    code: String,
+                                    extra_uses: LinkedHashMap<MuID, Vec<ASMLocation>>,
+                                    potentially_excepting: Option<MuName>) {
+        let uses = extra_uses;
+
+        // defines
        let mut defines : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
+        // return registers get defined
        for reg in x86_64::RETURN_GPRs.iter() {
            defines.insert(reg.id(), vec![]);
        }
        for reg in x86_64::RETURN_FPRs.iter() {
            defines.insert(reg.id(), vec![]);
        }
+        // caller saved register will be destroyed
        for reg in x86_64::CALLER_SAVED_GPRs.iter() {
            if !defines.contains_key(&reg.id()) {
                defines.insert(reg.id(), vec![]);
@@ -1038,7 +1035,7 @@ impl ASMCodeGen {
                defines.insert(reg.id(), vec![]);
            }
        }

        self.add_asm_inst_internal(code, defines, uses, false, {
            if potentially_excepting.is_some() {
                ASMBranchTarget::PotentiallyExcepting(potentially_excepting.unwrap())
@@ -1048,6 +1045,10 @@ impl ASMCodeGen {
        }, None)
    }

+    fn add_asm_call(&mut self, code: String, potentially_excepting: Option<MuName>) {
+        self.add_asm_call_with_extra_uses(code, LinkedHashMap::new(), potentially_excepting);
+    }

    fn add_asm_ret(&mut self, code: String) {
        // return instruction does not use anything (not RETURN REGS)
        // otherwise it will keep RETURN REGS alive
@@ -2839,7 +2840,18 @@ impl CodeGenerator for ASMCodeGen {
    fn emit_call_near_r64(&mut self, callsite: String, func: &P<Value>, pe: Option<MuName>) -> ValueLocation {
        trace!("emit: call {}", func);

-        unimplemented!()
+        let (reg, id, loc) = self.prepare_reg(func, 6);
+        let asm = format!("call *{}", reg);
+
+        self.add_asm_call_with_extra_uses(asm, linked_hashmap!{id => vec![loc]}, pe);
+
+        let callsite_symbol = symbol(callsite.clone());
+        self.add_asm_symbolic(directive_globl(callsite_symbol.clone()));
+        self.add_asm_symbolic(format!("{}:", callsite_symbol.clone()));
+
+        ValueLocation::Relocatable(RegGroup::GPR, callsite)
    }

    fn emit_call_near_mem64(&mut self, callsite: String, func: &P<Value>, pe: Option<MuName>) -> ValueLocation {
...
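Why the extra-uses variant exists: for a direct call the callee is a symbol embedded in the instruction text, but the indirect `call *reg` emitted by `emit_call_near_r64` reads the function address out of a register, so that register is passed to `add_asm_call_with_extra_uses` as an extra use of the instruction (the `linked_hashmap!{id => vec![loc]}` argument). Recording the use keeps the value live through register allocation up to the call site; `add_asm_call` itself is now just the wrapper with no extra uses.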
@@ -35,7 +35,7 @@ extern "C" {
pub fn resolve_symbol(symbol: String) -> Address {
    use std::ptr;

-    let symbol = MuEntityHeader::name_check(symbol);
+    let symbol = name_check(symbol);

    let rtld_default = unsafe {dlopen(ptr::null(), 0)};
    let ret = unsafe {dlsym(rtld_default, CString::new(symbol.clone()).unwrap().as_ptr())};
...
@@ -45,7 +45,7 @@ pub enum APIHandleValue {
    Ref (P<MuType>, Address), // referenced type
    IRef(P<MuType>, Address),
    TagRef64(u64),
-    FuncRef,
+    FuncRef(MuID),
    ThreadRef,
    StackRef,
    FCRef, // frame cursor ref
@@ -62,7 +62,6 @@ pub enum APIHandleValue {
    // GenRef->IR->Child->Var->Global
    Global(MuID),
-    Func(MuID),
    ExpFunc,

    // GenRef->IR->Child->Var->Local
@@ -92,7 +91,7 @@ impl fmt::Debug for APIHandleValue {
            &Ref(ref ty, addr) => write!(f, "ref<{}> to {}", ty, addr),
            &IRef(ref ty, addr) => write!(f, "iref<{}> to {}", ty, addr),
            &TagRef64(val) => write!(f, "tagref64 0x{:x}", val),
-            &FuncRef => write!(f, "funcref"),
+            &FuncRef(id) => write!(f, "funcref to #{}", id),
            &ThreadRef => write!(f, "threadref"),
            &StackRef => write!(f, "stackref"),
            &FCRef => write!(f, "framecursorref"),
@@ -103,7 +102,6 @@ impl fmt::Debug for APIHandleValue {
            &BB => write!(f, "IR.BB"),
            &Inst => write!(f, "IR.inst"),
            &Global(id) => write!(f, "IR.global to #{}", id),
-            &Func(id) => write!(f, "IR.func to #{}", id),
            &ExpFunc => write!(f, "IR.expfunc"),
            &NorParam => write!(f, "IR.norparam"),
            &ExcParam => write!(f, "IR.excparam"),
@@ -180,9 +178,9 @@ impl APIHandleValue {
        }
    }

-    pub fn as_func(&self) -> MuID {
+    pub fn as_funcref(&self) -> MuID {
        match self {
-            &APIHandleValue::Func(id) => id,
+            &APIHandleValue::FuncRef(id) => id,
            _ => panic!("expected FuncRef")
        }
    }
...
@@ -32,7 +32,7 @@ use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_BOOL_INIT, ATOMIC_USIZE_
// possibly INTERNAL_ID in ir.rs, internal types, etc
pub struct VM {
-    // serialize
+    // ---serialize---
    // 0
    next_id: AtomicUsize,
    // 1
@@ -61,11 +61,20 @@ pub struct VM {
    // 12
    pub vm_options: VMOptions,

-    // partially serialize
+    // ---partially serialize---
    // 13
    compiled_funcs: RwLock<HashMap<MuID, RwLock<CompiledFunction>>>,

+    // ---do not serialize---
+    // client may try to store funcref to the heap, so that they can load it later, and call it
+    // however the store may happen before we have an actual address to the func (in AOT scenario)
+    aot_pending_funcref_store: RwLock<HashMap<Address, ValueLocation>>
}

+use std::u64;
+const PENDING_FUNCREF : u64 = u64::MAX;

const VM_SERIALIZE_FIELDS : usize = 14;

impl Encodable for VM {
@@ -371,7 +380,8 @@ impl Decodable for VM {
            primordial: RwLock::new(primordial),
            is_running: ATOMIC_BOOL_INIT,
            vm_options: vm_options,
-            compiled_funcs: RwLock::new(compiled_funcs)
+            compiled_funcs: RwLock::new(compiled_funcs),
+            aot_pending_funcref_store: RwLock::new(HashMap::new())
        };

        vm.next_id.store(next_id, Ordering::SeqCst);
@@ -431,7 +441,9 @@ impl <'a> VM {
            funcs: RwLock::new(HashMap::new()),
            compiled_funcs: RwLock::new(HashMap::new()),
-            primordial: RwLock::new(None)
+            primordial: RwLock::new(None),
+            aot_pending_funcref_store: RwLock::new(HashMap::new())
        };

        // insert all intenral types
@@ -1023,7 +1035,7 @@ impl <'a> VM {
            panic!("Zebu doesnt support creating primordial thread through a stack, name a entry function instead")
        } else {
            // extract func id
-            let func_id = primordial_func.unwrap().v.as_func();
+            let func_id = primordial_func.unwrap().v.as_funcref();

            // make primordial thread in vm
            self.make_primordial_thread(func_id, false, vec![]); // do not pass const args, use argc/argv
@@ -1034,7 +1046,7 @@ impl <'a> VM {
            let mut ret = hashmap!{};
            for i in 0..sym_fields.len() {
                let addr = sym_fields[i].v.as_address();
-                ret.insert(addr, sym_strings[i].clone());
+                ret.insert(addr, name_check(sym_strings[i].clone()));
            }
            ret
        };
@@ -1042,10 +1054,21 @@ impl <'a> VM {
        assert_eq!(reloc_fields.len(), reloc_strings.len());
        let fields = {
            let mut ret = hashmap!{};

+            // client supplied relocation fields
            for i in 0..reloc_fields.len() {
                let addr = reloc_fields[i].v.as_address();
-                ret.insert(addr, reloc_strings[i].clone());
+                ret.insert(addr, name_check(reloc_strings[i].clone()));
            }

+            // pending funcrefs - we want to replace them as symbol
+            {
+                let mut pending_funcref = self.aot_pending_funcref_store.write().unwrap();
+                for (addr, vl) in pending_funcref.drain() {
+                    ret.insert(addr, name_check(vl.to_relocatable()));
+                }
+            }

            ret
        };
@@ -1144,7 +1167,7 @@ impl <'a> VM {
                    v : APIHandleValue::IRef(inner_ty, addr)
                })
            },
-            APIHandleValue::FuncRef => unimplemented!(),
+            APIHandleValue::FuncRef(_) => unimplemented!(),
            _ => panic!("unexpected operand for refcast: {:?}", from_op)
        }
@@ -1320,13 +1343,29 @@ impl <'a> VM {
            APIHandleValue::Ref(_, aval)
            | APIHandleValue::IRef(_, aval) => addr.store::<Address>(aval),
-            _ => unimplemented!()
+            // if we are JITing, we can store the address of the function
+            // but if we are doing AOT, we pend the store, and resolve the store when making boot image
+            APIHandleValue::FuncRef(id) => self.store_funcref(addr, id),
+            _ => panic!("unimplemented store for handle {}", val.v)
            }
        }

        trace!("API: store value {:?} to location {:?}", val, loc);
    }

+    #[cfg(feature = "aot")]
+    fn store_funcref(&self, addr: Address, func_id: MuID) {
+        // put a pending funcref in the address
+        unsafe {addr.store::<u64>(PENDING_FUNCREF)};
+
+        // and record this funcref
+        let symbol = self.name_of(func_id);
+        let mut pending_funcref_guard = self.aot_pending_funcref_store.write().unwrap();
+        pending_funcref_guard.insert(addr, ValueLocation::Relocatable(backend::RegGroup::GPR, symbol));
+    }

    // this function and the following two make assumption that GC will not move object
    // they need to be reimplemented if we have a moving GC
    pub fn handle_pin_object(&self, loc: APIHandleArg) -> APIHandleResult {
@@ -1363,7 +1402,7 @@ impl <'a> VM {
        self.new_handle(APIHandle {
            id: handle_id,
-            v : APIHandleValue::Func(id)
+            v : APIHandleValue::FuncRef(id)
        })
    }
...
+#[macro_use]
extern crate mu;
+#[macro_use]
extern crate utils;
#[macro_use]
extern crate log;
+#[macro_use]
extern crate maplit;
#[macro_use]
...
@@ -637,4 +637,141 @@ fn persist_hybrid(vm: &VM) {
    define_func_ver! ((vm) persist_hybrid_v1 (entry: blk_entry) {
        blk_entry, blk_loop_head, blk_loop_body, blk_exit
    });
}

+#[test]
+fn test_persist_funcref() {
+    VM::start_logging_trace();
+
+    let vm = Arc::new(VM::new_with_opts("init_mu --disable-inline"));
+
+    unsafe {
+        MuThread::current_thread_as_mu_thread(Address::zero(), vm.clone());
+    }
+
+    persist_funcref(&vm);
+
+    let compiler = Compiler::new(CompilerPolicy::default(), &vm);
+
+    let func_ret42_id = vm.id_of("ret42");
+    {
+        let funcs = vm.funcs().read().unwrap();
+        let func = funcs.get(&func_ret42_id).unwrap().read().unwrap();
+        let func_vers = vm.func_vers().read().unwrap();
+        let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
+
+        compiler.compile(&mut func_ver);
+    }
+
+    let func_my_main_id = vm.id_of("my_main");
+    {
+        let funcs = vm.funcs().read().unwrap();
+        let func = funcs.get(&func_my_main_id).unwrap().read().unwrap();
+        let func_vers = vm.func_vers().read().unwrap();
+        let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
+
+        compiler.compile(&mut func_ver);
+    }
+
+    // store funcref to ret42 in the global
+    {
+        let global_id = vm.id_of("my_global");
+        let global_handle = vm.handle_from_global(global_id);
+
+        let func_ret42_handle = vm.handle_from_func(func_ret42_id);
+
+        debug!("write {:?} to location {:?}", func_ret42_handle, global_handle);
+        vm.handle_store(MemoryOrder::Relaxed, &global_handle, &func_ret42_handle);
+    }
+
+    let my_main_handle = vm.handle_from_func(func_my_main_id);
+
+    // make boot image
+    vm.make_boot_image(
+        vec![func_ret42_id, func_my_main_id], // whitelist
+        Some(&my_main_handle), None, // primoridal func, stack
+        None, // threadlocal
+        vec![], vec![], // sym fields/strings
+        vec![], vec![], // reloc fields/strings
+        "test_persist_funcref".to_string()
+    );
+
+    // link
+    let executable = {
+        use std::path;
+        let mut path = path::PathBuf::new();
+        path.push(&vm.vm_options.flag_aot_emit_dir);
+        path.push("test_persist_funcref");
+        path
+    };
+    let output = aot::execute_nocheck(executable);
+
+    assert!(output.status.code().is_some());
+
+    let ret_code = output.status.code().unwrap();
+    println!("return code: {}", ret_code);
+    assert!(ret_code == 42);
+}

+fn persist_funcref(vm: &VM) {
+    typedef!    ((vm) int64 = mu_int(64));
+    constdef!   ((vm) <int64> int64_42 = Constant::Int(42));
+
+    funcsig!    ((vm) ret42_sig = () -> (int64));
+    funcdecl!   ((vm) <ret42_sig> ret42);
+    funcdef!    ((vm) <ret42_sig> ret42 VERSION ret42_v1);
+
+    typedef!    ((vm) funcref_to_ret42 = mu_funcref(ret42_sig));
+    globaldef!  ((vm) <funcref_to_ret42> my_global);
+
+    // ---ret42---
+    {
+        // blk entry
+        block!      ((vm, ret42_v1) blk_entry);
+
+        consta!     ((vm, ret42_v1) int64_42_local = int64_42);
+        inst!       ((vm, ret42_v1) blk_entry_ret:
+            RET (int64_42_local)
+        );
+
+        define_block!((vm, ret42_v1) blk_entry() {
+            blk_entry_ret
+        });
+
+        define_func_ver!((vm) ret42_v1 (entry: blk_entry) {blk_entry});
+    }
+
+    // ---my_main---
+    {
+        funcsig!    ((vm) my_main_sig = () -> ());
+        funcdecl!   ((vm) <my_main_sig> my_main);
+        funcdef!    ((vm) <my_main_sig> my_main VERSION my_main_v1);
+
+        // blk entry
+        block!      ((vm, my_main_v1) blk_entry);
+
+        global!     ((vm, my_main_v1) blk_entry_global = my_global);
+        ssa!        ((vm, my_main_v1) <funcref_to_ret42> func);
+        inst!       ((vm, my_main_v1) blk_entry_load:
+            func = LOAD blk_entry_global (is_ptr: false, order: MemoryOrder::SeqCst)
+        );
+
+        ssa!        ((vm, my_main_v1) <int64> blk_entry_res);
+        inst!       ((vm, my_main_v1) blk_entry_call:
+            blk_entry_res = EXPRCALL (CallConvention::Mu, is_abort: false) func ()
+        );
+
+        let blk_entry_exit = gen_ccall_exit(blk_entry_res.clone(), &mut my_main_v1, &vm);
+
+        inst!       ((vm, my_main_v1) blk_entry_ret:
+            RET
+        );
+
+        define_block! ((vm, my_main_v1) blk_entry() {
+            blk_entry_load,
+            blk_entry_call,
+            blk_entry_exit,
+            blk_entry_ret
+        });
+
+        define_func_ver!((vm) my_main_v1 (entry: blk_entry) {
+            blk_entry
+        });
+    }
+}
\ No newline at end of file
@@ -1148,8 +1148,8 @@ def test_linked_list():
    assert res.returncode == 0, res.err
    assert res.out == '1\n'

+@pytest.mark.xfail(reason='unimplemented memory order in API store')
@may_spawn_proc
-@pytest.mark.xfail(reason='segment fault')
def test_rpytarget_richards():
    from rpython.translator.goal.richards import entry_point
    def main(argv):
...