WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Federal Crimes Act 1914 Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO has recommended that users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

Commit f83b014b authored by Javad Ebrahimian Amiri
Browse files

The thesis tested version (needs clean-up)

parent 3aef3b51
......@@ -102,6 +102,10 @@ impl Instruction {
| AllocAUHybrid(_,_)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| Intrinsic_FindReg(_)
| Intrinsic_GetAllocatedReg(_)
| Intrinsic_GetFreeReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
| eAlloc(_)
......@@ -201,6 +205,7 @@ impl Instruction {
| AllocAUHybrid(_,_)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| rAlloc(_, _)
| rAllocHybrid(_,_,_)
| eAlloc(_)
......@@ -302,6 +307,9 @@ impl Instruction {
| AttrGetPriority(_)
| RandF(_,_)
| RandI(_,_)
| Intrinsic_GetAllocatedReg(_)
| Intrinsic_GetFreeReg(_)
| Intrinsic_FindReg(_)
| ThreadIssetCPU(_, _) => false,
// | AffinityEqual(_, _)
}
......@@ -337,6 +345,10 @@ impl Instruction {
| AllocAUHybrid(_, _)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| Intrinsic_FindReg(_)
| Intrinsic_GetAllocatedReg(_)
| Intrinsic_GetFreeReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
| eAlloc(_)
......@@ -347,7 +359,7 @@ impl Instruction {
| NewFutex(_)
| DeleteFutex(_)
| LockFutex(_, _)
| UnlockFutex(_,_)
| UnlockFutex(_, _)
| CondVarNew
| CondVarDelete(_)
| CondVarAcqLock(_)
......@@ -463,6 +475,10 @@ impl Instruction {
| AllocAUHybrid(_,_)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| Intrinsic_FindReg(_)
| Intrinsic_GetAllocatedReg(_)
| Intrinsic_GetFreeReg(_)
| rAlloc(_, _)
| rAllocHybrid(_,_,_)
// | rAllocT(_)
......@@ -782,6 +798,18 @@ impl Instruction {
&Instruction_::DeleteReg(regref) => {
format!("COMMINST @uvm.delete_region({})", ops[regref])
}
&Instruction_::Intrinsic_CollectReg(regref) => {
format!("INTRINSIC @uvm.collect_region({})", ops[regref])
}
&Instruction_::Intrinsic_FindReg(regref) => {
format!("INTRINSIC @uvm.find_region({})", ops[regref])
}
&Instruction_::Intrinsic_GetAllocatedReg(regref) => {
format!("INTRINSIC @uvm.get_allocated_region({})", ops[regref])
}
&Instruction_::Intrinsic_GetFreeReg(regref) => {
format!("INTRINSIC @uvm.get_free_region({})", ops[regref])
}
&Instruction_::AllocAU(ref ty) => {
format!("COMMINST @uvm.AllocAU({})", ty.id())
}
......@@ -840,7 +868,9 @@ impl Instruction {
)
}
&Instruction_::NewFutex(init_val) => format!("NEWFUTEX {}", ops[init_val]),
&Instruction_::NewFutex(init_val) => {
format!("NEWFUTEX {}", ops[init_val])
}
&Instruction_::DeleteFutex(futexref) => {
format!("DELETEFUTEX {}", ops[futexref])
}
......@@ -851,9 +881,7 @@ impl Instruction {
format!("UNLOCKFUTEX {}, {}", ops[futexref], ops[count])
}
&Instruction_::CondVarNew => {
format!("CONDVAR_NEW")
}
&Instruction_::CondVarNew => format!("CONDVAR_NEW"),
&Instruction_::CondVarDelete(addr) => {
format!("CONDVAR_DELETE {}", ops[addr])
}
......@@ -1423,6 +1451,11 @@ pub enum Instruction_ {
/// args: regionref to the target region
DeleteReg(OpIndex),
Intrinsic_CollectReg(OpIndex),
Intrinsic_FindReg(OpIndex),
Intrinsic_GetFreeReg(OpIndex),
Intrinsic_GetAllocatedReg(OpIndex),
BindRegion(OpIndex),
UnbindRegion(OpIndex),
......
......@@ -2079,11 +2079,26 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_GetThreadLocal => {
trace!("instsel on GETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
trace!(
"thread.ALLOCATOR_OFFSET = {}",
*thread::ALLOCATOR_OFFSET as i32
);
trace!(
"thread.NATIVE_SP_LOC_OFFSET = {}",
*thread::NATIVE_SP_LOC_OFFSET as i32
);
trace!(
"thread.USER_TLS_OFFSET = {}",
*thread::USER_TLS_OFFSET as i32
);
trace!(
"thread.STACK_OFFSET = {}",
*thread::STACK_OFFSET as i32
);
trace!(
"thread.EXCEPTION_OBJ_OFFSET = {}",
*thread::EXCEPTION_OBJ_OFFSET as i32
);
// get thread local
let tl = self.emit_get_threadlocal(
......@@ -2104,11 +2119,26 @@ impl<'a> InstructionSelection {
}
Instruction_::CommonInst_SetThreadLocal(op) => {
trace!("instsel on SETTHREADLOCAL");
trace!("thread.ALLOCATOR_OFFSET = {}", *thread::ALLOCATOR_OFFSET as i32);
trace!("thread.NATIVE_SP_LOC_OFFSET = {}", *thread::NATIVE_SP_LOC_OFFSET as i32);
trace!("thread.USER_TLS_OFFSET = {}", *thread::USER_TLS_OFFSET as i32);
trace!("thread.STACK_OFFSET = {}", *thread::STACK_OFFSET as i32);
trace!("thread.EXCEPTION_OBJ_OFFSET = {}", *thread::EXCEPTION_OBJ_OFFSET as i32);
trace!(
"thread.ALLOCATOR_OFFSET = {}",
*thread::ALLOCATOR_OFFSET as i32
);
trace!(
"thread.NATIVE_SP_LOC_OFFSET = {}",
*thread::NATIVE_SP_LOC_OFFSET as i32
);
trace!(
"thread.USER_TLS_OFFSET = {}",
*thread::USER_TLS_OFFSET as i32
);
trace!(
"thread.STACK_OFFSET = {}",
*thread::STACK_OFFSET as i32
);
trace!(
"thread.EXCEPTION_OBJ_OFFSET = {}",
*thread::EXCEPTION_OBJ_OFFSET as i32
);
let ref ops = inst.ops;
let ref op = ops[op];
......@@ -2367,6 +2397,89 @@ impl<'a> InstructionSelection {
);
}
#[cfg(feature = "realtime")]
Instruction_::Intrinsic_CollectReg(regionref) => {
trace!("instsel on CollectReg");
let ref ops = inst.ops;
let ref region = ops[regionref];
let tmp_region =
self.emit_ireg(region, f_content, f_context, vm);
self.emit_runtime_entry(
&entrypoints::COLLECT_REG,
vec![tmp_region],
Some(vec![]),
Some(node),
f_content,
f_context,
vm
);
}
#[cfg(feature = "realtime")]
Instruction_::Intrinsic_FindReg(regionref) => {
trace!("instsel on FindReg");
let ref ops = inst.ops;
let ref region = ops[regionref];
let tmp_res = self.get_result_value(node);
let tmp_region =
self.emit_ireg(region, f_content, f_context, vm);
self.emit_runtime_entry(
&entrypoints::FIND_REG,
vec![tmp_region],
Some(vec![tmp_res]),
Some(node),
f_content,
f_context,
vm
);
}
#[cfg(feature = "realtime")]
Instruction_::Intrinsic_GetAllocatedReg(regionref) => {
trace!("instsel on GetAllocatedReg");
let ref ops = inst.ops;
let ref region = ops[regionref];
let tmp_res = self.get_result_value(node);
let tmp_region =
self.emit_ireg(region, f_content, f_context, vm);
self.emit_runtime_entry(
&entrypoints::GET_ALLOCATED_REG,
vec![tmp_region],
Some(vec![tmp_res]),
Some(node),
f_content,
f_context,
vm
);
}
#[cfg(feature = "realtime")]
Instruction_::Intrinsic_GetFreeReg(regionref) => {
    // BUGFIX: the trace label previously said "GetAllocatedReg"
    // (copy-pasted from the arm above); it now names this instruction.
    trace!("instsel on GetFreeReg");
    let ref ops = inst.ops;
    let ref region = ops[regionref];
    // Destination register for the runtime call's result
    // (GET_FREE_REG is declared to return a single UINT64 —
    // the number of free bytes in the region).
    let tmp_res = self.get_result_value(node);
    // Materialise the regionref operand into an integer register.
    let tmp_region = self.emit_ireg(region, f_content, f_context, vm);
    // Emit a call to the runtime entrypoint `muentry_get_free_reg`
    // with the regionref as the sole argument.
    self.emit_runtime_entry(
        &entrypoints::GET_FREE_REG,
        vec![tmp_region],
        Some(vec![tmp_res]),
        Some(node),
        f_content,
        f_context,
        vm
    );
}
#[cfg(feature = "realtime")]
Instruction_::rAlloc(regionref, ref ty) => {
trace!(
"instsel on rAlloc: {}, {}",
......@@ -2382,21 +2495,24 @@ impl<'a> InstructionSelection {
let tmp_region =
self.emit_ireg(region, f_content, f_context, vm);
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
// self.make_int64_const(ty_id as u64, vm);
//
// self.emit_runtime_entry(
// &entrypoints::RALLOC_TRACED,
// vec![tmp_region, tmp_id],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
//
// self.make_int64_const(ty_id as u64, vm);
//
// self.emit_runtime_entry(
//
// &entrypoints::RALLOC_TRACED,
// vec![tmp_region,
// tmp_id],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
{
let ty_info = vm.get_backend_type_info(ty.id());
let size = ty_info.size;
......@@ -2443,35 +2559,43 @@ impl<'a> InstructionSelection {
self.emit_ireg(region, f_content, f_context, vm);
// if the type contains iref to heap objects
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
// self.make_int64_const(ty_id as u64, vm);
//
// let tmp_var_len = if self
// .match_iconst_any(op_var_len)
// {
// let const_var_len = op_var_len
// .as_value()
// .extract_int_const()
// .unwrap();
//
// self.make_int64_const(const_var_len as u64, vm)
// } else {
// debug_assert!(self.match_ireg(op_var_len));
// op_var_len.as_value().clone()
// };
//
// self.emit_runtime_entry(
// &entrypoints::RALLOC_HYBRID_TRACED,
// vec![tmp_region, tmp_id, tmp_var_len],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
//
// self.make_int64_const(ty_id as u64, vm);
//
// let tmp_var_len = if self
//
// .match_iconst_any(op_var_len)
// {
// let const_var_len =
// op_var_len
// .as_value()
// .extract_int_const()
// .unwrap();
//
//
// self.make_int64_const(const_var_len as u64, vm)
// } else {
//
// debug_assert!(self.match_ireg(op_var_len));
//
// op_var_len.as_value().clone()
// };
//
// self.emit_runtime_entry(
//
// &entrypoints::RALLOC_HYBRID_TRACED,
// vec![tmp_region,
// tmp_id, tmp_var_len],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
{
let tmp_fixed_size =
self.make_int64_const(fix_part_size as u64, vm);
......@@ -2519,21 +2643,23 @@ impl<'a> InstructionSelection {
// let ty_info = vm.get_backend_type_info(ty.id());
let tmp_res = self.get_result_value(node);
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
// self.make_int64_const(ty_id as u64, vm);
//
// self.emit_runtime_entry(
// &entrypoints::EALLOC_TRACED,
// vec![tmp_id],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
//
// self.make_int64_const(ty_id as u64, vm);
//
// self.emit_runtime_entry(
//
// &entrypoints::EALLOC_TRACED,
// vec![tmp_id],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
{
let ty_info = vm.get_backend_type_info(ty.id());
let size = ty_info.size;
......@@ -2575,35 +2701,43 @@ impl<'a> InstructionSelection {
let tmp_res = self.get_result_value(node);
// if the type contains iref to heap objects
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
// self.make_int64_const(ty_id as u64, vm);
//
// let tmp_var_len = if self
// .match_iconst_any(op_var_len)
// {
// let const_var_len = op_var_len
// .as_value()
// .extract_int_const()
// .unwrap();
//
// self.make_int64_const(const_var_len as u64, vm)
// } else {
// debug_assert!(self.match_ireg(op_var_len));
// op_var_len.as_value().clone()
// };
//
// self.emit_runtime_entry(
// &entrypoints::EALLOC_HYBRID_TRACED,
// vec![tmp_id, tmp_var_len],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
// if ty.contains_iref() {
// let ty_id: MuID = ty.id();
// let tmp_id =
//
// self.make_int64_const(ty_id as u64, vm);
//
// let tmp_var_len = if self
//
// .match_iconst_any(op_var_len)
// {
// let const_var_len =
// op_var_len
// .as_value()
// .extract_int_const()
// .unwrap();
//
//
// self.make_int64_const(const_var_len as u64, vm)
// } else {
//
// debug_assert!(self.match_ireg(op_var_len));
//
// op_var_len.as_value().clone()
// };
//
// self.emit_runtime_entry(
//
// &entrypoints::EALLOC_HYBRID_TRACED,
// vec![tmp_id,
// tmp_var_len],
// Some(vec![tmp_res]),
// Some(node),
// f_content,
// f_context,
// vm
// );
// } else
{
let tmp_fixed_size =
self.make_int64_const(fix_part_size as u64, vm);
......@@ -2798,7 +2932,8 @@ impl<'a> InstructionSelection {
}
}
}
// FIXME. to finish it faster, I'm using rust atomic ops, may need fix
// FIXME. to finish it faster, I'm using rust atomic ops,
// may need fix
Instruction_::CmpXchg {
is_weak,
success_order,
......@@ -2811,11 +2946,12 @@ impl<'a> InstructionSelection {
let res = self.get_result_value(node);
let resty = &res.ty.v;
match resty {
MuType_::Int(64) => {
;
}
MuType_::Int(64) => {}
_ => {
unimplemented!("Atomic compare and exchange for type: {}", resty);
unimplemented!(
"Atomic compare and exchange for type: {}",
resty
);
}
}
}
......@@ -2919,7 +3055,8 @@ impl<'a> InstructionSelection {
trace!("instsel on NewFutex");
let ref val = inst.ops[init_val];
let tmp_val = self.emit_ireg(val, f_content, f_context, vm);
let tmp_val =
self.emit_ireg(val, f_content, f_context, vm);
let res = self.get_result_value(node);
......
......@@ -708,6 +708,10 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
| NewHybrid(_, _)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| Intrinsic_FindReg(_)
| Intrinsic_GetFreeReg(_)
| Intrinsic_GetAllocatedReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
| eAlloc(_)
......@@ -726,7 +730,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
| CondVarBroadcast(_)
| DeleteFutex(_)
| LockFutex(_, _)
| UnlockFutex(_,_)
| UnlockFutex(_, _)
| NotifyThread(_)
| ThreadSetPriority(_, _)
| ThreadGetPriority(_)
......
......@@ -83,6 +83,10 @@ fn is_suitable_child(inst: &Instruction) -> bool {
| AllocAUHybrid(_, _)
| NewReg(_)
| DeleteReg(_)
| Intrinsic_CollectReg(_)
| Intrinsic_FindReg(_)
| Intrinsic_GetFreeReg(_)
| Intrinsic_GetAllocatedReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
| eAlloc(_)
......@@ -94,7 +98,7 @@ fn is_suitable_child(inst: &Instruction) -> bool {
| NewFutex(_)
| DeleteFutex(_)
| LockFutex(_, _)
| UnlockFutex(_,_)
| UnlockFutex(_, _)
| CondVarNew
| CondVarDelete(_)
| CondVarAcqLock(_)
......
......@@ -38,6 +38,7 @@ pub extern crate mu_ast as ast;
#[macro_use]
pub extern crate mu_utils as utils;
extern crate core;
extern crate memsec;
pub extern crate mu_gc as gc;
extern crate proc_macro;
extern crate rand;
......
......@@ -23,3 +23,5 @@ pub const MM_RTMU_INFO: bool = true;
pub const MM_MU_TRACE: bool = false;
pub const MM_MU_DEBUG: bool = false;
pub const MM_MU_INFO: bool = false;
pub const IRBLDR_TRACE: bool = true;
......@@ -168,6 +168,30 @@ lazy_static! {
vec![ADDRESS_TYPE.clone()], // (region_ref)
vec![] // returns nothing
);
/// Resets an EMM region
pub static ref COLLECT_REG: RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_collect_reg",
vec![ADDRESS_TYPE.clone()], // (region_ref)
vec![] // returns nothing
);
/// Returns the regionref for an object, or null if the object is not in any region
pub static ref FIND_REG: RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_find_reg",
vec![ADDRESS_TYPE.clone()], // (ref<T>)
vec![ADDRESS_TYPE.clone()] // returns regionref
);
/// Returns the number of free bytes in an EMM region
pub static ref GET_FREE_REG: RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_get_free_reg",
vec![ADDRESS_TYPE.clone()], // (region_ref)
vec![UINT64_TYPE.clone()] // returns usize
);
/// Returns the number of allocated bytes in an EMM region
pub static ref GET_ALLOCATED_REG: RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_get_allocated_reg",
vec![ADDRESS_TYPE.clone()], // (region_ref)
vec![UINT64_TYPE.clone()] // returns usize
);
/// Untraced rAlloc for non-hybrid types
pub static ref RALLOC: RuntimeEntrypoint = RuntimeEntrypoint::new(
"muentry_ralloc",
......