
Commit 6c16d1db authored by qinsoon

implement API for object pin/unpin

internally they do nothing (the GC doesn't move objects in Zebu at the moment)
parent 08a74443
Pipeline #265 failed with stage in 30 minutes and 47 seconds
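The commit message is terse, so the following minimal sketch (illustrative only, not part of the diff; the names are stand-ins for the real mm::GC_MOVES_OBJECT flag introduced below) spells out the semantics being implemented: under a non-moving GC, pinning an object just returns its already-stable address, and unpinning has nothing to undo.

    // Sketch of the pin/unpin contract this commit implements.
    const GC_MOVES_OBJECT: bool = false;

    fn pin(addr: usize) -> usize {
        if !GC_MOVES_OBJECT {
            addr // a non-moving GC never relocates the object, so its address is stable
        } else {
            unimplemented!() // a moving GC would have to register the pin
        }
    }

    fn unpin(_addr: usize) {
        if !GC_MOVES_OBJECT {
            // pin registered nothing, so there is nothing to release
        } else {
            unimplemented!()
        }
    }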
@@ -270,6 +270,10 @@ pub enum Instruction_ {
CommonInst_GetThreadLocal,
CommonInst_SetThreadLocal(OpIndex),
// pin/unpin
CommonInst_Pin (OpIndex),
CommonInst_Unpin(OpIndex),
// internal use: mov from ops[0] to value
Move(OpIndex)
}
@@ -389,6 +393,9 @@ impl Instruction_ {
&Instruction_::CommonInst_GetThreadLocal => format!("COMMONINST GetThreadLocal"),
&Instruction_::CommonInst_SetThreadLocal(op) => format!("COMMONINST SetThreadLocal {}", ops[op]),
&Instruction_::CommonInst_Pin(op) => format!("COMMONINST Pin {}", ops[op]),
&Instruction_::CommonInst_Unpin(op) => format!("COMMONINST Unpin {}", ops[op]),
// move
&Instruction_::Move(from) => format!("MOVE {}", ops[from])
}
......
@@ -29,6 +29,8 @@ pub fn is_terminal_inst(inst: &Instruction_) -> bool {
| &Fence(_)
| &CommonInst_GetThreadLocal
| &CommonInst_SetThreadLocal(_)
| &CommonInst_Pin(_)
| &CommonInst_Unpin(_)
| &Move(_) => false,
&Return(_)
| &ThreadExit
@@ -92,6 +94,8 @@ pub fn has_side_effect(inst: &Instruction_) -> bool {
&ExnInstruction{..} => true,
&CommonInst_GetThreadLocal => true,
&CommonInst_SetThreadLocal(_) => true,
&CommonInst_Pin(_) => true,
&CommonInst_Unpin(_) => true,
&Move(_) => false,
}
}
@@ -61,6 +61,8 @@ pub enum OpCode {
CommonInst_GetThreadLocal,
CommonInst_SetThreadLocal,
CommonInst_Pin,
CommonInst_Unpin,
Move
}
@@ -289,6 +291,8 @@ pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
Instruction_::ExnInstruction{..} => OpCode::ExnInstruction,
Instruction_::CommonInst_GetThreadLocal => OpCode::CommonInst_GetThreadLocal,
Instruction_::CommonInst_SetThreadLocal(_) => OpCode::CommonInst_SetThreadLocal,
Instruction_::CommonInst_Pin(_) => OpCode::CommonInst_Pin,
Instruction_::CommonInst_Unpin(_) => OpCode::CommonInst_Unpin,
Instruction_::Move(_) => OpCode::Move,
}
}
@@ -1300,6 +1300,32 @@ impl <'a> InstructionSelection {
self.emit_store_base_offset(&tl, *thread::USER_TLS_OFFSET as i32, &tmp_op, vm);
}
Instruction_::CommonInst_Pin(op) => {
trace!("instsel on PIN");
if !mm::GC_MOVES_OBJECT {
// non-moving GC: pin is a nop (move from op to result)
let ops = inst.ops.read().unwrap();
let ref op = ops[op];
let tmp_res = self.get_result_value(node);
self.emit_move_node_to_value(&tmp_res, op, f_content, f_context, vm);
} else {
unimplemented!()
}
}
Instruction_::CommonInst_Unpin(_) => {
trace!("instsel on UNPIN");
if !mm::GC_MOVES_OBJECT {
// do nothing
} else {
unimplemented!()
}
}
Instruction_::Move(op) => {
trace!("instsel on MOVE (internal IR)");
......
@@ -480,28 +480,28 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
        // simple
        BinOp(_, _, _) => 1,
        CmpOp(_, _, _) => 1,
        ConvOp{..} => 0,

        // control flow
        Branch1(_) => 1,
        Branch2{..} => 1,
        Select{..} => 2,
        Watchpoint{..} => 1,
        WPBranch{..} => 2,
        Switch{..} => 3,

        // call
        ExprCall{..} | ExprCCall{..} | Call{..} | CCall{..} => 5,
        Return(_) => 1,
        TailCall(_) => 1,

        // memory access
        Load{..} | Store{..} => 1,
        CmpXchg{..} => 1,
        AtomicRMW{..} => 1,
        AllocA(_) => 1,
        AllocAHybrid(_, _) => 1,
        Fence(_) => 1,

        // memory addressing
        GetIRef(_) | GetFieldIRef{..} | GetElementIRef{..} | ShiftIRef{..} | GetVarPartIRef{..} => 0,
@@ -509,10 +509,11 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
        // runtime
        New(_) | NewHybrid(_, _) => 10,
        NewStack(_) | NewThread(_, _) | NewThreadExn(_, _) | NewFrameCursor(_) => 10,
        ThreadExit => 10,
        Throw(_) => 10,
        SwapStack{..} => 10,
        CommonInst_GetThreadLocal | CommonInst_SetThreadLocal(_) => 10,
        CommonInst_Pin(_) | CommonInst_Unpin(_) => 10,

        // others
        Move(_) => 0,
......
@@ -30,6 +30,8 @@ use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
pub const GC_MOVES_OBJECT : bool = false;
pub const LARGE_OBJECT_THRESHOLD : usize = BYTES_IN_LINE;
pub use heap::immix::ImmixMutatorLocal as Mutator;
......
@@ -300,7 +300,6 @@ impl MuThread {
if ! unsafe{muentry_get_thread_local()}.is_zero() {
warn!("current thread has a thread local (has a muthread to it)");
panic!("should not have muthread here");
return false;
}
......
@@ -368,15 +368,15 @@ impl MuCtx {
}
pub fn pin(&mut self, loc: &APIHandle) -> *const APIHandle {
panic!("Not implemented")
prepare_handle(self.get_mvm().vm.handle_pin_object(loc))
}
pub fn unpin(&mut self, loc: &APIHandle) {
panic!("Not implemented")
self.get_mvm().vm.handle_unpin_object(loc)
}
pub fn get_addr(&mut self, loc: &APIHandle) -> *const APIHandle {
panic!("Not implemented")
prepare_handle(self.get_mvm().vm.handle_get_addr(loc))
}
pub fn expose(&mut self, func: &APIHandle, call_conv: CMuCallConv, cookie: &APIHandle) -> *const APIHandle {
......
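A hypothetical client-side flow through the three MuCtx methods above (`ctx` and `obj` are assumed to come from a running VM; this is a sketch against the signatures in the diff, not a compilable test):

    // Pin a ref to obtain a uptr handle, read its address, then unpin.
    // With the current non-moving GC, all three are just handle bookkeeping.
    fn pin_and_read(ctx: &mut MuCtx, obj: &APIHandle) {
        let pinned = ctx.pin(obj);      // returns a uptr handle to the same address
        let addr   = ctx.get_addr(obj); // valid as long as obj stays pinned
        // ... hand the address to native code ...
        ctx.unpin(obj);                 // currently a no-op internally
    }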
@@ -551,12 +551,18 @@ struct BundleLoader<'lb, 'lvm> {
built_funcs: IdBMap<MuFunction>,
built_funcvers: IdBMap<MuFunctionVersion>,
struct_hybrid_id_tags: Vec<(MuID, MuName)>,
built_void: Option<P<MuType>>,
built_refvoid: Option<P<MuType>>,
built_refi64: Option<P<MuType>>,
built_i1: Option<P<MuType>>,
built_i64: Option<P<MuType>>,
built_funcref_of: IdPMap<MuType>,
built_ref_of: IdPMap<MuType>,
built_iref_of: IdPMap<MuType>,
built_uptr_of: IdPMap<MuType>,
built_constint_of: HashMap<u64, P<Value>>,
}
@@ -577,12 +583,15 @@ fn load_bundle(b: &mut MuIRBuilder) {
built_funcs: Default::default(),
built_funcvers: Default::default(),
struct_hybrid_id_tags: Default::default(),
built_void: Default::default(),
built_refvoid: Default::default(),
built_refi64: Default::default(),
built_i1: Default::default(),
built_i64: Default::default(),
built_funcref_of: Default::default(),
built_ref_of: Default::default(),
built_iref_of: Default::default(),
built_uptr_of: Default::default(),
built_constint_of: Default::default(),
};
@@ -604,6 +613,44 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
self.add_everything_to_vm();
}
fn ensure_void(&mut self) -> P<MuType> {
if let Some(ref void) = self.built_void {
return void.clone();
}
let id_void = self.vm.next_id();
let impl_void = P(MuType {
hdr: MuEntityHeader::unnamed(id_void),
v: MuType_::Void
});
trace!("Ensure void is defined: {} {:?}", id_void, impl_void);
self.built_types.insert(id_void, impl_void.clone());
self.built_void = Some(impl_void.clone());
impl_void
}
fn ensure_refvoid(&mut self) -> P<MuType> {
if let Some(ref refvoid) = self.built_refvoid {
return refvoid.clone();
}
let id_refvoid = self.vm.next_id();
let id_void = self.ensure_void().id();
let impl_refvoid = self.ensure_ref(id_void);
trace!("Ensure refvoid is defined: {} {:?}", id_refvoid, impl_refvoid);
self.built_types.insert(id_refvoid, impl_refvoid.clone());
self.built_refvoid = Some(impl_refvoid.clone());
impl_refvoid
}
fn ensure_refi64(&mut self) -> P<MuType> {
if let Some(ref refi64) = self.built_refi64 {
return refi64.clone();
@@ -1715,6 +1762,14 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
exc_clause, keepalive_clause,
true, CallConvention::Foreign(ForeignFFI::C))
},
NodeInst::NodeCommInst {
id, ref result_ids, opcode,
ref flags, ref tys, ref sigs, ref args,
ref exc_clause, ref keepalive_clause
} => {
self.build_comm_inst(fcb, hdr, result_ids, opcode, flags, tys, sigs, args, exc_clause, keepalive_clause)
},
ref i => panic!("{:?} not implemented", i),
};
@@ -1853,6 +1908,80 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
}
}
#[allow(unused_variables)]
fn build_comm_inst(&mut self, fcb: &mut FuncCtxBuilder, hdr: MuEntityHeader,
result_ids: &Vec<MuID>, opcode: MuCommInst, flags: &Vec<Flag>,
tys: &Vec<MuTypeNode>, sigs: &Vec<MuFuncSigNode>, args: &Vec<MuVarNode>,
exc_clause: &Option<MuExcClause>, keepalives: &Option<MuKeepaliveClause>) -> Instruction {
match opcode {
CMU_CI_UVM_GET_THREADLOCAL => {
assert!(result_ids.len() == 1);
let rv_ty = self.ensure_refvoid();
let rv = self.new_ssa(fcb, result_ids[0], rv_ty).clone_value();
Instruction {
hdr: hdr,
value: Some(vec![rv]),
ops: RwLock::new(vec![]),
v: Instruction_::CommonInst_GetThreadLocal
}
}
CMU_CI_UVM_SET_THREADLOCAL => {
assert!(args.len() == 1);
let op_ty = self.ensure_type_rec(tys[0]);
let op = self.get_treenode(fcb, args[0]);
Instruction {
hdr: hdr,
value: None,
ops: RwLock::new(vec![op]),
v: Instruction_::CommonInst_SetThreadLocal(0)
}
}
CMU_CI_UVM_NATIVE_PIN => {
assert!(result_ids.len() == 1);
assert!(args.len() == 1);
assert!(tys.len() == 1);
let op_ty = self.ensure_type_rec(tys[0]);
let op = self.get_treenode(fcb, args[0]);
let referent_ty = match op_ty.get_referenced_ty() {
Some(ty) => ty,
_ => panic!("expected ty in PIN to be ref/iref, found {}", op_ty)
};
let rv_ty = self.ensure_uptr(referent_ty.id());
let rv = self.new_ssa(fcb, result_ids[0], rv_ty).clone_value();
Instruction {
hdr: hdr,
value: Some(vec![rv]),
ops: RwLock::new(vec![op]),
v: Instruction_::CommonInst_Pin(0)
}
}
CMU_CI_UVM_NATIVE_UNPIN => {
assert!(args.len() == 1);
assert!(tys.len() == 1);
let op_ty = self.ensure_type_rec(tys[0]);
let op = self.get_treenode(fcb, args[0]);
Instruction {
hdr: hdr,
value: None,
ops: RwLock::new(vec![op]),
v: Instruction_::CommonInst_Unpin(0)
}
}
_ => unimplemented!()
}
}
fn build_mem_ord(&self, ord: MuMemoryOrder) -> MemoryOrder {
match ord {
CMU_ORD_NOT_ATOMIC => MemoryOrder::NotAtomic,
......
@@ -58,6 +58,14 @@ pub enum APIHandleValue {
}
impl APIHandleValue {
pub fn as_ref_or_iref(&self) -> (P<MuType>, Address) {
match self {
&APIHandleValue::Ref(ref ty, addr)
| &APIHandleValue::IRef(ref ty, addr) => (ty.clone(), addr),
_ => panic!("expected Ref or IRef handle")
}
}
pub fn as_ref(&self) -> (P<MuType>, Address) {
match self {
&APIHandleValue::Ref(ref ty, addr) => (ty.clone(), addr),
......
@@ -1113,6 +1113,37 @@ impl <'a> VM {
}
}
// This function and the following two assume that the GC will not move objects.
// They need to be reimplemented if we ever get a moving GC.
pub fn handle_pin_object(&self, loc: APIHandleArg) -> APIHandleResult {
assert!(!gc::GC_MOVES_OBJECT);
// the GC will not move the object, so we can simply expose the ref as a uptr
let (ty, addr) = loc.v.as_ref_or_iref();
self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::UPtr(ty, addr)
})
}
#[allow(unused_variables)]
pub fn handle_unpin_object(&self, loc: APIHandleArg) {
assert!(!gc::GC_MOVES_OBJECT);
// the GC will not move objects, so there is no need to unpin: do nothing
}
pub fn handle_get_addr(&self, loc: APIHandleArg) -> APIHandleResult {
assert!(!gc::GC_MOVES_OBJECT);
// loc must already be pinned - we don't check, since pinning is currently a no-op
let (ty, addr) = loc.v.as_ref_or_iref();
self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::UPtr(ty, addr)
})
}
pub fn handle_from_global(&self, id: MuID) -> APIHandleResult {
let global_iref = {
let global_locs = self.global_locations.read().unwrap();
......
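The comments above note that these functions must be reimplemented once the GC can move objects. One common approach (purely an assumption here, not a stated plan for Zebu) is a per-address pin count that the collector consults before relocating anything:

    use std::collections::HashMap;
    use std::sync::Mutex;

    // Hypothetical pin table for a future moving GC: objects with a
    // non-zero count must not be relocated by the collector.
    struct PinTable {
        pins: Mutex<HashMap<usize, usize>>, // object address -> pin count
    }

    impl PinTable {
        fn pin(&self, addr: usize) -> usize {
            let mut pins = self.pins.lock().unwrap();
            *pins.entry(addr).or_insert(0) += 1;
            addr // stable until the count drops back to zero
        }

        fn unpin(&self, addr: usize) {
            let mut pins = self.pins.lock().unwrap();
            if let Some(count) = pins.get_mut(&addr) {
                *count -= 1;
                if *count == 0 {
                    pins.remove(&addr);
                }
            }
        }
    }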
from util import fncptr_from_c_script
from util import fncptr_from_c_script, preload_libmu
import ctypes
def test_select():
fnp, _ = fncptr_from_c_script('test_select.c', 'test_fnc', [ctypes.c_byte])
assert fnp(0) == 20
......