Commit cb0e5f33 authored by qinsoon

fix last commit. Not using memory order for API store(), Issue #51

parent 8728d082
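
In effect, the API-level store still accepts a memory order but no longer uses it: the write now goes through the plain Address::store rather than the ordered Address::store_order. Below is a minimal sketch of the resulting behaviour, using simplified stand-in types rather than the real Zebu MemoryOrder, Address and handle types; it ignores the order argument exactly as the patched handle_store in the diff does, and restoring the ordered path is what Issue #51 tracks.

// Stand-ins for illustration only; not the real Zebu types.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum MemoryOrder { NotAtomic, Relaxed, Release, SeqCst }

struct Addr(*mut u64);

impl Addr {
    // Plain, unordered store: the path the API store takes after this commit.
    unsafe fn store(&self, v: u64) { *self.0 = v; }
}

// The MemoryOrder argument is still accepted, but ignored for now.
unsafe fn handle_store(ord: MemoryOrder, addr: &Addr, val: u64) {
    let _ = ord; // FIXME: map to std::sync::atomic::Ordering and do an ordered store (Issue #51)
    addr.store(val);
}

fn main() {
    let mut slot = 0u64;
    let addr = Addr(&mut slot as *mut u64);
    unsafe { handle_store(MemoryOrder::SeqCst, &addr, 42) };
    assert_eq!(slot, 42);
}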
@@ -16,7 +16,7 @@ use std;
 use std::cmp;
 use std::fmt;
 use std::mem;
-use std::sync::atomic::{AtomicPtr, Ordering};
+use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 use std::ops::*;
 use {ByteOffset, ByteSize};
@@ -924,15 +924,9 @@ impl <'a> VM {
            unimplemented!()
        }
        // make sure only one of primordial_func or primoridial_stack is set
        let has_primordial_func = primordial_func.is_some();
        let has_primordial_stack = primordial_stack.is_some();
        assert!(
            (has_primordial_func && !has_primordial_stack)
                || (!has_primordial_func && has_primordial_stack)
        );
        // we assume client will start with a function (instead of a stack)
        if has_primordial_stack {
            panic!("Zebu doesnt support creating primordial thread from a stack, name a entry function instead")
@@ -1255,10 +1249,12 @@ impl <'a> VM {
    }

    /// performs STORE
    #[allow(unused_variables)]
    pub fn handle_store(&self, ord: MemoryOrder, loc: APIHandleArg, val: APIHandleArg) {
        // get address
        let (_, addr) = loc.v.as_iref();
        // FIXME: not using memory order for store at the moment - See Issue #51
        let rust_memord = match ord {
            MemoryOrder::Relaxed => Ordering::Relaxed,
            MemoryOrder::Release => Ordering::Release,
@@ -1274,25 +1270,25 @@ impl <'a> VM {
             APIHandleValue::Int(ival, bits) => {
                 let trunc: u64 = ival & bits_ones(bits);
                 match bits {
-                    1 ... 8 => addr.store_order::<u8> (trunc as u8 , rust_memord),
-                    9 ... 16 => addr.store_order::<u16>(trunc as u16, rust_memord),
-                    17 ... 32 => addr.store_order::<u32>(trunc as u32, rust_memord),
-                    33 ... 64 => addr.store_order::<u64>(trunc as u64, rust_memord),
+                    1 ... 8 => addr.store::<u8> (trunc as u8 ),
+                    9 ... 16 => addr.store::<u16>(trunc as u16),
+                    17 ... 32 => addr.store::<u32>(trunc as u32),
+                    33 ... 64 => addr.store::<u64>(trunc as u64),
                     _ => panic!("unimplemented int length")
                 }
             },
-            APIHandleValue::TagRef64(val) => addr.store_order::<u64>(val , rust_memord),
-            APIHandleValue::Float(fval) => addr.store_order::<f32>(fval, rust_memord),
-            APIHandleValue::Double(fval) => addr.store_order::<f64>(fval, rust_memord),
-            APIHandleValue::UPtr(_, aval) => addr.store_order::<Address>(aval, rust_memord),
-            APIHandleValue::UFP(_, aval) => addr.store_order::<Address>(aval, rust_memord),
+            APIHandleValue::TagRef64(val) => addr.store::<u64>(val ),
+            APIHandleValue::Float(fval) => addr.store::<f32>(fval),
+            APIHandleValue::Double(fval) => addr.store::<f64>(fval),
+            APIHandleValue::UPtr(_, aval) => addr.store::<Address>(aval),
+            APIHandleValue::UFP(_, aval) => addr.store::<Address>(aval),
             APIHandleValue::Struct(_)
             | APIHandleValue::Array(_)
             | APIHandleValue::Vector(_) => unimplemented!(),
             APIHandleValue::Ref(_, aval)
-            | APIHandleValue::IRef(_, aval) => addr.store_order::<Address>(aval, rust_memord),
+            | APIHandleValue::IRef(_, aval) => addr.store::<Address>(aval),
             // if we are JITing, we can store the address of the function
             // but if we are doing AOT, we pend the store, and resolve the store when making boot image
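
For reference, the ordered store that is dropped here (and that Issue #51 is expected to bring back) is commonly implemented by viewing the target memory as the matching atomic type; the AtomicUsize import added in the first hunk is likely related to how the ordered helpers are implemented in that file. A hedged sketch of the technique using std atomics; the actual utils::Address store_order/load_order in Zebu may be implemented differently.

use std::sync::atomic::{AtomicUsize, Ordering};

// Store `val` at `ptr` with the given ordering by treating the location as an AtomicUsize.
// The caller must guarantee the pointer is valid and suitably aligned.
unsafe fn store_order_usize(ptr: *mut usize, val: usize, ord: Ordering) {
    (*(ptr as *const AtomicUsize)).store(val, ord);
}

// Ordered counterpart for loads.
unsafe fn load_order_usize(ptr: *const usize, ord: Ordering) -> usize {
    (*(ptr as *const AtomicUsize)).load(ord)
}

fn main() {
    let mut cell: usize = 0;
    unsafe {
        store_order_usize(&mut cell as *mut usize, 42, Ordering::Release);
        assert_eq!(load_order_usize(&cell as *const usize, Ordering::Acquire), 42);
    }
}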
@@ -14,4 +14,5 @@
 mod test_threadlocal;
 mod test_entry_offset;
-mod test_mem_layout;
\ No newline at end of file
+mod test_mem_layout;
+mod test_address;
\ No newline at end of file
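
The rest of the diff is the new test module itself (test_address, registered above), which exercises the alignment helpers and the ordered load/store API on utils::Address: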
use utils::Address;
use std::sync::atomic::Ordering;

#[test]
fn test_align_up() {
    let addr = unsafe {Address::from_usize(0)};
    let aligned = addr.align_up(8);
    assert_eq!(aligned, addr);
    let addr = unsafe {Address::from_usize(1)};
    let aligned = addr.align_up(8);
    assert_eq!(aligned, unsafe {Address::from_usize(8)});
}

#[test]
fn test_is_aligned() {
    let addr = unsafe {Address::from_usize(0)};
    assert!(addr.is_aligned_to(8));
    let addr = unsafe {Address::from_usize(1)};
    assert!(!addr.is_aligned_to(8));
    let addr = unsafe {Address::from_usize(8)};
    assert!(addr.is_aligned_to(8));
}

#[test]
fn test_load_order_u64() {
    let mem = Box::new(42u64);
    let ptr = Box::into_raw(mem);
    let addr = Address::from_mut_ptr(ptr);
    unsafe {
        let value_relaxed : u64 = addr.load_order(Ordering::Relaxed);
        assert_eq!(value_relaxed, 42);
        let value_seqcst : u64 = addr.load_order(Ordering::SeqCst);
        assert_eq!(value_seqcst, 42);
        let value_acquire : u64 = addr.load_order(Ordering::Acquire);
        assert_eq!(value_acquire, 42);
    }
}

#[ignore]
#[test]
fn test_store_order_u64() {
    let mem = Box::new(0u64);
    let ptr = Box::into_raw(mem);
    let addr = Address::from_mut_ptr(ptr);
    unsafe {
        let expect : u64 = 42;
        addr.store_order(expect, Ordering::Relaxed);
        let val : u64 = addr.load();
        assert_eq!(val, expect);
        let expect : u64 = 21;
        addr.store_order(expect, Ordering::Release);
        let val : u64 = addr.load();
        assert_eq!(val, expect);
        let expect : u64 = 10;
        addr.store_order(expect, Ordering::SeqCst);
        let val : u64 = addr.load();
        assert_eq!(val, expect);
    }
}

#[test]
fn test_load_order_u32() {
    let mem = Box::new(-1isize);
    let ptr = Box::into_raw(mem);
    let addr = Address::from_mut_ptr(ptr);
    unsafe {
        addr.store(42u32);
        let value_relaxed : u32 = addr.load_order(Ordering::Relaxed);
        assert_eq!(value_relaxed, 42);
        let value_seqcst : u32 = addr.load_order(Ordering::SeqCst);
        assert_eq!(value_seqcst, 42);
        let value_acquire : u32 = addr.load_order(Ordering::Acquire);
        assert_eq!(value_acquire, 42);
    }
}

#[test]
fn test_load_order_f64() {
    let mem = Box::new(42.0f64);
    let ptr = Box::into_raw(mem);
    let addr = Address::from_mut_ptr(ptr);
    unsafe {
        let value_relaxed : f64 = addr.load_order(Ordering::Relaxed);
        assert_eq!(value_relaxed, 42f64);
        let value_seqcst : f64 = addr.load_order(Ordering::SeqCst);
        assert_eq!(value_seqcst, 42f64);
        let value_acquire : f64 = addr.load_order(Ordering::Acquire);
        assert_eq!(value_acquire, 42f64);
    }
}

#[test]
fn test_load_order_f32() {
    let mem = Box::new(42.0f64);
    let ptr = Box::into_raw(mem);
    let addr = Address::from_mut_ptr(ptr);
    unsafe {
        addr.store(10f32);
        let value_relaxed : f32 = addr.load_order(Ordering::Relaxed);
        assert_eq!(value_relaxed, 10f32);
        let value_seqcst : f32 = addr.load_order(Ordering::SeqCst);
        assert_eq!(value_seqcst, 10f32);
        let value_acquire : f32 = addr.load_order(Ordering::Acquire);
        assert_eq!(value_acquire, 10f32);
    }
}
\ No newline at end of file
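
Note that test_store_order_u64 is marked #[ignore], so a plain cargo test skips it; it can still be run explicitly with cargo test -- --ignored.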