
Commit b6a1ffc3 authored by qinsoon

refactoring Address, supporting add/sub ops

parent 27df3a88
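For context: this commit replaces the old method-based address arithmetic on `Address` (`plus`, `sub`, `offset`, `diff`) with `std::ops` operator overloads, so call sites read as ordinary `+`/`-` expressions. The following is a minimal sketch of the operator impls the rewritten call sites appear to assume; treating `Address` as a wrapper around a raw `usize`, and the exact `ByteSize`/`ByteOffset` aliases, are assumptions here, and the real definitions in the `utils` crate may differ.

```rust
use std::ops::{Add, Sub, SubAssign};

pub type ByteSize = usize;   // unsigned byte count (assumed alias)
pub type ByteOffset = isize; // signed byte offset (assumed alias)

// Assumed representation: a word-sized raw address.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Address(usize);

// Address + ByteSize -> Address (replaces addr.plus(n))
impl Add<ByteSize> for Address {
    type Output = Address;
    fn add(self, rhs: ByteSize) -> Address { Address(self.0 + rhs) }
}

// Address + ByteOffset -> Address (replaces addr.offset(n); allows
// negative offsets such as -OBJECT_HEADER_OFFSET)
impl Add<ByteOffset> for Address {
    type Output = Address;
    fn add(self, rhs: ByteOffset) -> Address {
        Address((self.0 as isize + rhs) as usize)
    }
}

// Address - ByteSize -> Address (replaces addr.sub(n))
impl Sub<ByteSize> for Address {
    type Output = Address;
    fn sub(self, rhs: ByteSize) -> Address { Address(self.0 - rhs) }
}

// Address - Address -> ByteSize (replaces a.diff(b))
impl Sub<Address> for Address {
    type Output = ByteSize;
    fn sub(self, rhs: Address) -> ByteSize {
        debug_assert!(self.0 >= rhs.0);
        self.0 - rhs.0
    }
}

// Address -= ByteSize (used below as `stack_ptr -= WORD_SIZE`)
impl SubAssign<ByteSize> for Address {
    fn sub_assign(&mut self, rhs: ByteSize) { self.0 -= rhs; }
}
```

With both `Add<ByteSize>` and `Add<ByteOffset>` in scope, a bare integer literal would be ambiguous, which is presumably why call sites below write `cursor + 1 as ByteSize` rather than `cursor + 1`; `as` binds tighter than `+`, so this parses as `cursor + (1 as ByteSize)` and selects the unsigned overload.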
@@ -3649,13 +3649,13 @@ pub fn emit_context_with_reloc(vm: &VM,
// get ready to go through from the object start (not mem_start) to the end
let base = obj_dump.reference_addr;
-let end = obj_dump.mem_start.plus(obj_dump.mem_size);
+let end = obj_dump.mem_start + obj_dump.mem_size;
assert!(base.is_aligned_to(POINTER_SIZE));
// offset as cursor
let mut offset = 0;
while offset < obj_dump.mem_size {
-let cur_addr = base.plus(offset);
+let cur_addr = base + offset;
if obj_dump.reference_offsets.contains(&offset) {
// if this offset is a reference field, we put a relocatable label generated by the GC
@@ -3683,7 +3683,7 @@ pub fn emit_context_with_reloc(vm: &VM,
// otherwise this offset is plain data
// write plain word (as bytes)
-let next_word_addr = cur_addr.plus(POINTER_SIZE);
+let next_word_addr = cur_addr + POINTER_SIZE;
if next_word_addr <= end {
write_data_bytes(&mut file, cur_addr, next_word_addr);
} else {
@@ -3731,7 +3731,7 @@ fn write_data_bytes(f: &mut File, from: Address, to: Address) {
let byte = unsafe {cursor.load::<u8>()};
f.write_fmt(format_args!("0x{:x}", byte)).unwrap();
-cursor = cursor.plus(1);
+cursor = cursor + 1 as ByteSize;
if cursor != to {
f.write(",".as_bytes()).unwrap();
}
@@ -35,8 +35,9 @@ pub use compiler::backend::x86_64::asm_backend::emit_context;
pub use compiler::backend::x86_64::asm_backend::emit_context_with_reloc;
#[cfg(feature = "aot")]
pub use compiler::backend::x86_64::asm_backend::spill_rewrite;
use utils::Address;
+use utils::ByteSize;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
@@ -526,13 +527,13 @@ pub fn get_previous_frame_pointer(frame_pointer: Address) -> Address {
/// gets the return address for the current frame pointer
#[inline(always)]
pub fn get_return_address(frame_pointer: Address) -> Address {
-unsafe { frame_pointer.plus(8).load::<Address>() }
+unsafe { (frame_pointer + 8 as ByteSize).load::<Address>() }
}
/// gets the stack pointer before the current frame was created
#[inline(always)]
pub fn get_previous_stack_pointer(frame_pointer: Address) -> Address {
-frame_pointer.plus(16)
+frame_pointer + 16 as ByteSize
}
/// sets the stack point
@@ -544,7 +545,7 @@ pub fn set_previous_frame_pointer(frame_pointer: Address, value: Address) {
/// gets the return address for the current frame pointer
#[inline(always)]
pub fn set_return_address(frame_pointer: Address, value: Address) {
-unsafe { frame_pointer.plus(8).store::<Address>(value) }
+unsafe { (frame_pointer + 8 as ByteSize).store::<Address>(value) }
}
/// returns offset of callee saved register
@@ -26,7 +26,7 @@ pub struct AddressBitmap {
impl AddressBitmap {
pub fn new(start: Address, end: Address) -> AddressBitmap {
-let bitmap_len = end.diff(start) >> LOG_POINTER_SIZE;
+let bitmap_len = (end - start) >> LOG_POINTER_SIZE;
let bitmap = Bitmap::new(bitmap_len);
AddressBitmap{start: start, end: end, bitmap: bitmap}
@@ -37,7 +37,7 @@ impl AddressBitmap {
pub unsafe fn set_bit(&self, addr: Address) {
use std::mem;
let mutable_bitmap : &mut Bitmap = mem::transmute(&self.bitmap);
-mutable_bitmap.set_bit(addr.diff(self.start) >> LOG_POINTER_SIZE);
+mutable_bitmap.set_bit((addr - self.start) >> LOG_POINTER_SIZE);
}
#[inline(always)]
@@ -45,17 +45,17 @@ impl AddressBitmap {
pub unsafe fn clear_bit(&self, addr: Address) {
use std::mem;
let mutable_bitmap : &mut Bitmap = mem::transmute(&self.bitmap);
-mutable_bitmap.clear_bit(addr.diff(self.start) >> LOG_POINTER_SIZE);
+mutable_bitmap.clear_bit((addr - self.start) >> LOG_POINTER_SIZE);
}
#[inline(always)]
pub fn test_bit(&self, addr: Address) -> bool {
-self.bitmap.test_bit(addr.diff(self.start) >> LOG_POINTER_SIZE)
+self.bitmap.test_bit((addr - self.start) >> LOG_POINTER_SIZE)
}
#[inline(always)]
pub fn length_until_next_bit(&self, addr: Address) -> usize {
-self.bitmap.length_until_next_bit(addr.diff(self.start) >> LOG_POINTER_SIZE)
+self.bitmap.length_until_next_bit((addr - self.start) >> LOG_POINTER_SIZE)
}
#[inline(always)]
@@ -67,7 +67,7 @@ impl AddressBitmap {
assert!(addr >= self.start && addr <= self.end);
}
-let index = addr.diff(self.start) >> LOG_POINTER_SIZE;
+let index = (addr - self.start) >> LOG_POINTER_SIZE;
let mutable_bitmap : &mut Bitmap = mem::transmute(&self.bitmap);
mutable_bitmap.set(index, value, length);
}
@@ -78,7 +78,7 @@ impl AddressBitmap {
assert!(addr >= self.start && addr <= self.end);
}
-let index = addr.diff(self.start) >> LOG_POINTER_SIZE;
+let index = (addr - self.start) >> LOG_POINTER_SIZE;
self.bitmap.get(index, length)
}
@@ -29,7 +29,7 @@ pub struct AddressMap<T: Copy> {
impl <T> AddressMap<T> where T: Copy{
pub fn new(start: Address, end: Address) -> AddressMap<T> {
-let len = end.diff(start) >> LOG_POINTER_SIZE;
+let len = (end - start) >> LOG_POINTER_SIZE;
let ptr = unsafe{malloc_zero(mem::size_of::<T>() * len)} as *mut T;
AddressMap{start: start, end: end, ptr: ptr, len: len}
@@ -40,19 +40,19 @@ impl <T> AddressMap<T> where T: Copy{
while cursor < self.end {
self.set(cursor, init);
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
}
}
#[inline(always)]
pub fn set(&self, addr: Address, value: T) {
-let index = (addr.diff(self.start) >> LOG_POINTER_SIZE) as isize;
+let index = ((addr - self.start) >> LOG_POINTER_SIZE) as isize;
unsafe{*self.ptr.offset(index) = value};
}
#[inline(always)]
pub fn get(&self, addr: Address) -> T {
-let index = (addr.diff(self.start) >> LOG_POINTER_SIZE) as isize;
+let index = ((addr - self.start) >> LOG_POINTER_SIZE) as isize;
unsafe {*self.ptr.offset(index)}
}
}
@@ -64,7 +64,7 @@ impl HeapDump {
fn persist_object(&self, obj: Address) -> ObjectDump {
trace!("dump object: {}", obj);
-let hdr_addr = obj.offset(objectmodel::OBJECT_HEADER_OFFSET);
+let hdr_addr = obj + objectmodel::OBJECT_HEADER_OFFSET;
let hdr = unsafe {hdr_addr.load::<u64>()};
if objectmodel::header_is_fix_size(hdr) {
@@ -132,7 +132,7 @@ impl HeapDump {
let base = obj_dump.reference_addr;
for offset in obj_dump.reference_offsets.iter() {
-let field_addr = base.plus(*offset);
+let field_addr = base + *offset;
let edge = unsafe {field_addr.load::<Address>()};
if !edge.is_zero() && !self.objects.contains_key(&edge) {
@@ -50,7 +50,7 @@ impl FreeListSpace {
Err(_) => panic!("failed to call mmap")
};
let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
-let end : Address = start.plus(space_size);
+let end : Address = start + space_size;
let trace_map = AddressMap::new(start, end);
let alloc_map = AddressMap::new(start, end);
@@ -102,7 +102,7 @@ impl FreeListSpace {
if res.is_zero() {
res
} else {
-res.offset(-objectmodel::OBJECT_HEADER_OFFSET)
+res + (-objectmodel::OBJECT_HEADER_OFFSET)
}
}
@@ -259,7 +259,7 @@ struct Treadmill{
impl Treadmill {
fn new(start: Address, end: Address) -> Treadmill {
-let half_space = start.plus(end.diff(start) / 2);
+let half_space = start + ((end - start) / 2);
let mut from_space = vec![];
let mut to_space = vec![];
@@ -268,12 +268,12 @@ impl Treadmill {
while addr < half_space {
from_space.push(TreadmillNode::new(addr));
-addr = addr.plus(BLOCK_SIZE);
+addr = addr + BLOCK_SIZE;
}
while addr < end {
to_space.push(TreadmillNode::new(addr));
-addr = addr.plus(BLOCK_SIZE);
+addr = addr + BLOCK_SIZE;
}
Treadmill {
@@ -392,7 +392,7 @@ impl Treadmill {
// we need to check if 7&8, 8&9 (cursor is 7, and 8)
let mut cursor = start;
while cursor < start + n_blocks - 1 {
-if from_space[cursor].payload.plus(BLOCK_SIZE) != from_space[cursor + 1].payload {
+if from_space[cursor].payload + BLOCK_SIZE != from_space[cursor + 1].payload {
return false;
}
@@ -117,7 +117,7 @@ pub fn stack_scan() -> Vec<ObjectReference> {
ret.push(unsafe {value.to_object_reference()});
}
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
}
let roots_from_stack = ret.len();
@@ -473,7 +473,7 @@ pub fn steal_trace_object(obj: ObjectReference, local_queue: &mut Vec<ObjectRefe
// this part of code has some duplication with code in objectdump
// FIXME: remove the duplicate code - use 'Tracer' trait
-let hdr = unsafe {addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
+let hdr = unsafe {(addr + objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
if objectmodel::header_is_fix_size(hdr) {
// fix sized type
@@ -589,7 +589,7 @@ pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<Obj
#[inline(always)]
#[cfg(not(feature = "use-sidemap"))]
pub fn steal_process_edge(base: Address, offset: usize, local_queue:&mut Vec<ObjectReference>, job_sender: &mpsc::Sender<ObjectReference>, mark_state: u8, immix_space: &ImmixSpace, lo_space: &FreeListSpace) {
-let field_addr = base.plus(offset);
+let field_addr = base + offset;
let edge = unsafe {field_addr.load::<ObjectReference>()};
if cfg!(debug_assertions) {
@@ -18,6 +18,7 @@ use heap::immix::immix_space::ImmixBlock;
use heap::gc;
use objectmodel;
use utils::Address;
+use utils::ByteSize;
use std::*;
use std::sync::Arc;
@@ -161,7 +162,7 @@ impl ImmixMutatorLocal {
}
let start = self.cursor.align_up(align);
-let end = start.plus(size);
+let end = start + size;
if TRACE_ALLOC_FASTPATH {
trace!("Mutator{}: fastpath alloc: start=0x{:x}, end=0x{:x}", self.id, start, end);
@@ -182,7 +183,7 @@ impl ImmixMutatorLocal {
}
// this offset should be removed as well (for performance)
-ret.offset(-objectmodel::OBJECT_HEADER_OFFSET)
+ret + (-objectmodel::OBJECT_HEADER_OFFSET)
} else {
if cfg!(debug_assertions) {
if !start.is_aligned_to(align) {
@@ -193,7 +194,7 @@ impl ImmixMutatorLocal {
}
self.cursor = end;
-start.offset(-objectmodel::OBJECT_HEADER_OFFSET)
+start + (-objectmodel::OBJECT_HEADER_OFFSET)
}
}
@@ -211,7 +212,7 @@ impl ImmixMutatorLocal {
#[cfg(not(feature = "use-sidemap"))]
pub fn init_object(&mut self, addr: Address, encode: u64) {
unsafe {
-addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
+(addr + objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
@@ -225,7 +226,7 @@ impl ImmixMutatorLocal {
pub fn init_hybrid(&mut self, addr: Address, encode: u64, len: u64) {
let encode = encode | ((len << objectmodel::SHR_HYBRID_LENGTH) & objectmodel::MASK_HYBRID_LENGTH);
unsafe {
-addr.offset(objectmodel::OBJECT_HEADER_OFFSET).store(encode);
+(addr + objectmodel::OBJECT_HEADER_OFFSET).store(encode);
}
}
@@ -242,11 +243,11 @@ impl ImmixMutatorLocal {
// we can alloc from local blocks
let end_line = self.block().get_next_unavailable_line(next_available_line);
-self.cursor = self.block().start().plus(next_available_line << immix::LOG_BYTES_IN_LINE);
-self.limit = self.block().start().plus(end_line << immix::LOG_BYTES_IN_LINE);
+self.cursor = self.block().start() + (next_available_line << immix::LOG_BYTES_IN_LINE);
+self.limit = self.block().start() + (end_line << immix::LOG_BYTES_IN_LINE);
self.line = end_line;
-self.cursor.memset(0, self.limit.diff(self.cursor));
+unsafe {self.cursor.memset(0, self.limit - self.cursor);}
for line in next_available_line..end_line {
self.block().line_mark_table_mut().set(line, immix::LineMark::FreshAlloc);
@@ -254,7 +255,7 @@ impl ImmixMutatorLocal {
// allocate fast path
let start = self.cursor.align_up(align);
-let end = start.plus(size);
+let end = start + size;
self.cursor = end;
start
@@ -344,9 +345,9 @@ impl ImmixMutatorLocal {
pub fn print_object_static(obj: Address, length: usize) {
debug!("===Object {:#X} size: {} bytes===", obj, length);
let mut cur_addr = obj;
-while cur_addr < obj.plus(length) {
+while cur_addr < obj + length {
debug!("Address: {:#X} {:#X}", cur_addr, unsafe {cur_addr.load::<u64>()});
-cur_addr = cur_addr.plus(8);
+cur_addr = cur_addr + 8 as ByteSize;
}
debug!("----");
debug!("=========");
@@ -45,7 +45,7 @@ pub struct LineMarkTableSlice {
impl LineMarkTable {
pub fn new(space_start: Address, space_end: Address) -> LineMarkTable {
-let line_mark_table_len = space_end.diff(space_start) / immix::BYTES_IN_LINE;
+let line_mark_table_len = (space_end - space_start) / immix::BYTES_IN_LINE;
let line_mark_table = {
let ret = unsafe {malloc_zero(mem::size_of::<immix::LineMark>() * line_mark_table_len)} as *mut immix::LineMark;
let mut cursor = ret;
@@ -79,12 +79,12 @@ impl LineMarkTable {
}
pub fn index_to_address(&self, index: usize) -> Address {
-self.space_start.plus(index << immix::LOG_BYTES_IN_LINE)
+self.space_start + (index << immix::LOG_BYTES_IN_LINE)
}
#[inline(always)]
pub fn mark_line_live(&self, addr: Address) {
-let line_table_index = addr.diff(self.space_start) >> immix::LOG_BYTES_IN_LINE;
+let line_table_index = (addr - self.space_start) >> immix::LOG_BYTES_IN_LINE;
self.set(line_table_index, immix::LineMark::Live);
@@ -95,7 +95,7 @@ impl LineMarkTable {
#[inline(always)]
pub fn mark_line_live2(&self, space_start: Address, addr: Address) {
-let line_table_index = addr.diff(space_start) >> immix::LOG_BYTES_IN_LINE;
+let line_table_index = (addr - space_start) >> immix::LOG_BYTES_IN_LINE;
self.set(line_table_index, immix::LineMark::Live);
@@ -165,7 +165,7 @@ impl ImmixSpace {
Err(_) => panic!("failed to call mmap"),
};
let start : Address = Address::from_ptr::<u8>(anon_mmap.ptr()).align_up(SPACE_ALIGN);
-let end : Address = start.plus(space_size);
+let end : Address = start + space_size;
let line_mark_table = LineMarkTable::new(start, end);
@@ -204,7 +204,7 @@ impl ImmixSpace {
let mut usable_blocks_lock = self.usable_blocks.lock().unwrap();
-while block_start.plus(immix::BYTES_IN_BLOCK) <= self.end {
+while block_start + immix::BYTES_IN_BLOCK <= self.end {
usable_blocks_lock.push_back(Box::new(ImmixBlock {
id : id,
state: immix::BlockMark::Usable,
@@ -213,7 +213,7 @@ impl ImmixSpace {
}));
id += 1;
-block_start = block_start.plus(immix::BYTES_IN_BLOCK);
+block_start = block_start + immix::BYTES_IN_BLOCK;
line += immix::LINES_IN_BLOCK;
}
@@ -372,7 +372,7 @@ impl ImmixBlock {
let line_mark_table = self.line_mark_table();
for i in 0..line_mark_table.len {
if line_mark_table.get(i) == immix::LineMark::Free {
-let line_start : Address = self.start.plus(i << immix::LOG_BYTES_IN_LINE);
+let line_start : Address = self.start + (i << immix::LOG_BYTES_IN_LINE);
// zero the line
unsafe {
@@ -79,7 +79,7 @@ pub trait Space {
}
// use header
-let hdr = unsafe {addr.offset(objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
+let hdr = unsafe {(addr + objectmodel::OBJECT_HEADER_OFFSET).load::<u64>()};
if !objectmodel::header_is_object_start(hdr) {
return false;
}
@@ -100,5 +100,5 @@ pub trait Space {
#[inline(always)]
pub fn fill_alignment_gap(start : Address, end : Address) -> () {
debug_assert!(end >= start);
-start.memset(ALIGNMENT_VALUE, end.diff(start));
+unsafe {start.memset(ALIGNMENT_VALUE, end - start);}
}
@@ -120,7 +120,7 @@ pub fn print_object(obj: Address) {
let mut cursor = obj;
trace!("OBJECT 0x{:x}", obj);
-let hdr = unsafe {cursor.offset(OBJECT_HEADER_OFFSET).load::<u64>()};
+let hdr = unsafe {(cursor + OBJECT_HEADER_OFFSET).load::<u64>()};
trace!("- is object start? {}", header_is_object_start(hdr));
trace!("- is traced? {}", header_is_traced(hdr, objectmodel::load_mark_state()));
@@ -137,23 +137,23 @@ pub fn print_object(obj: Address) {
trace!("0x{:x} | val: 0x{:15x} | hdr: {:b}",
cursor, unsafe{cursor.load::<u64>()}, hdr);
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
-cursor = cursor.plus(POINTER_SIZE);
+cursor = cursor + POINTER_SIZE;
trace!("0x{:x} | val: 0x{:15x}",
cursor, unsafe{cursor.load::<u64>()});
}
@@ -161,7 +161,7 @@ pub fn print_object(obj: Address) {
#[inline(always)]
pub fn mark_as_traced(obj: ObjectReference, mark_state: u8) {
unsafe {
-let hdr_addr = obj.to_address().offset(OBJECT_HEADER_OFFSET);
+let hdr_addr = obj.to_address() + OBJECT_HEADER_OFFSET;
hdr_addr.store(bit_utils::set_nth_bit_u64(hdr_addr.load::<u64>(), BIT_IS_TRACED, mark_state));
}
}
@@ -169,7 +169,7 @@ pub fn mark_as_traced(obj: ObjectReference, mark_state: u8) {
#[inline(always)]
pub fn mark_as_untraced(addr: Address, mark_state: u8) {
unsafe {
-let hdr_addr = addr.offset(OBJECT_HEADER_OFFSET);
+let hdr_addr = addr + OBJECT_HEADER_OFFSET;
hdr_addr.store(bit_utils::set_nth_bit_u64(hdr_addr.load::<u64>(), BIT_IS_TRACED, mark_state ^ 1));
}
}
@@ -177,7 +177,7 @@ pub fn mark_as_untraced(addr: Address, mark_state: u8) {
#[inline(always)]
pub fn is_traced(obj: ObjectReference, mark_state: u8) -> bool {
unsafe {
-let hdr = obj.to_address().offset(OBJECT_HEADER_OFFSET).load::<u64>();
+let hdr = (obj.to_address() + OBJECT_HEADER_OFFSET).load::<u64>();
bit_utils::test_nth_bit_u64(hdr, BIT_IS_TRACED, mark_state)
}
}
@@ -102,8 +102,8 @@ pub extern fn throw_exception_internal(exception_obj: Address, frame_cursor: Add
let ref callee_saved = cf.frame.callee_saved;
for (target_offset, source_offset) in callee_saved {
// *(frame_cursor + target_offset) = *(frame_pointer + source_offset)
-let val = previous_frame_pointer.offset(*source_offset).load::<Address>();
-frame_cursor.offset(*target_offset).store::<Address>(val);
+let val = (previous_frame_pointer + *source_offset).load::<Address>();
+(frame_cursor + *target_offset).store::<Address>(val);
}
}
@@ -124,7 +124,7 @@ fn print_frame(cursor: Address) {
let bottom = -(CALLEE_SAVED_COUNT as isize);
for i in (bottom .. top).rev() {
unsafe {
-let addr = cursor.offset(i * POINTER_SIZE as isize);
+let addr = cursor + (i * POINTER_SIZE as isize);
let val = addr.load::<Word>();
trace!("\taddr: 0x{:x} | val: 0x{:x} {}", addr, val, {if addr == cursor {"<- cursor"} else {""}});
}
@@ -118,8 +118,8 @@ impl MuStack {
// calculate the addresses
let overflow_guard = mmap_start;
-let lower_bound = mmap_start.plus(PAGE_SIZE);
-let upper_bound = lower_bound.plus(STACK_SIZE);
+let lower_bound = mmap_start + PAGE_SIZE;
+let upper_bound = lower_bound + STACK_SIZE;
let underflow_guard = upper_bound;
// protect the guard pages
@@ -185,7 +185,7 @@ impl MuStack {
// store floating point argument registers
let mut stack_ptr = self.sp;
for i in 0..ARGUMENT_FPRS.len() {
-stack_ptr = stack_ptr.sub(WORD_SIZE);
+stack_ptr -= WORD_SIZE;
let val = {
if i < fpr_used.len() {
fpr_used[i]
@@ -200,7 +200,7 @@ impl MuStack {
// store general purpose argument registers
for i in 0..ARGUMENT_GPRS.len() {
-stack_ptr = stack_ptr.sub(WORD_SIZE);
+stack_ptr -= WORD_SIZE;
let val = {
if i < gpr_used.len() {
gpr_used[i]
@@ -227,7 +227,7 @@ impl MuStack {
use utils::Word;
use utils::WORD_SIZE;
-let mut cursor = self.upper_bound.sub(WORD_SIZE);
+let mut cursor = self.upper_bound - WORD_SIZE;
let mut count = 0;
debug!("0x{:x} | UPPER_BOUND", self.upper_bound);
@@ -240,7 +240,7 @@ impl MuStack {
debug!("0x{:x} | 0x{:x} ({})", cursor, val, val);
}
-cursor = cursor.sub(WORD_SIZE);
+cursor -= WORD_SIZE;
count += 1;
if n_entries.is_some() && count > n_entries.unwrap() {
@@ -382,7 +382,7 @@ impl MuThread {
unsafe {set_thread_local(muthread)};
let addr = unsafe {muentry_get_thread_local()};
-let sp_threadlocal_loc = addr.plus(*NATIVE_SP_LOC_OFFSET);
+let sp_threadlocal_loc = addr + *NATIVE_SP_LOC_OFFSET;
debug!("new sp: 0x{:x}", new_sp);
debug!("sp_store: 0x{:x}", sp_threadlocal_loc);
This diff is collapsed.
@@ -1279,7 +1279,7 @@ impl <'a> VM {
let offset_addr = {
let backend_ty = self.get_backend_type_info(ty.id());
-addr.plus(backend_ty.size * (offset as usize))
+addr + (backend_ty.size * (offset as usize))
};
let ret = self.new_handle(APIHandle {
@@ -1303,7 +1303,7 @@ impl <'a> VM {
};
let elem_addr = {
let backend_ty = self.get_backend_type_info(ele_ty.id());
-addr.plus(backend_ty.size * (index as usize))
+addr + (backend_ty.size * (index as usize))
};
let ret = self.new_handle(APIHandle {
@@ -1322,7 +1322,7 @@ impl <'a> VM {
let varpart_addr = {
let backend_ty = self.get_backend_type_info(ty.id());
-addr.plus(backend_ty.size)
+addr + backend_ty.size
};
let varpart_ty = match ty.get_hybrid_varpart_ty() {
@@ -1354,7 +1354,7 @@ impl <'a> VM {
let field_addr = {
let backend_ty = self.get_backend_type_info(ty.id());
let field_offset = backend_ty.get_field_offset(field);
-addr.plus(field_offset)
+addr + field_offset
};
let ret = self.new_handle(APIHandle {
@@ -35,18 +35,18 @@ fn test_muthread_entry_offset() {
assert_eq!(tl_addr, Address::from_ptr(tl_ptr));
let allocator_ptr = &tl.allocator as *const mm::Mutator;
-let allocator_addr = tl_addr.plus(*thread::ALLOCATOR_OFFSET);
+let allocator_addr = tl_addr + *thread::ALLOCATOR_OFFSET;
assert_eq!(allocator_addr, Address::from_ptr(allocator_ptr));
let native_sp_ptr = &tl.native_sp_loc as *const Address;
-let native_sp_addr = tl_addr.plus(*thread::NATIVE_SP_LOC_OFFSET);
+let native_sp_addr = tl_addr + *thread::NATIVE_SP_LOC_OFFSET;
assert_eq!(native_sp_addr, Address::from_ptr(native_sp_ptr));
let user_tls_ptr = &tl.user_tls as *const Address;
-let user_tls_addr = tl_addr.plus(*thread::USER_TLS_OFFSET);
+let user_tls_addr = tl_addr + *thread::USER_TLS_OFFSET;
assert_eq!(user_tls_addr, Address::from_ptr(user_tls_ptr));
let exc_obj_ptr = &tl.exception_obj as *const Address;
-let exc_obj_addr = tl_addr.plus(*thread::EXCEPTION_OBJ_OFFSET);
+let exc_obj_addr = tl_addr + *thread::EXCEPTION_OBJ_OFFSET;
assert_eq!(exc_obj_addr, Address::from_ptr(exc_obj_ptr));
}
\ No newline at end of file
@@ -36,7 +36,7 @@ fn test_access_exception_obj() {
// set exception obj using offset
let tl_addr = unsafe {thread::muentry_get_thread_local()};
-let exc_obj_addr = tl_addr.plus(*thread::EXCEPTION_OBJ_OFFSET);
+let exc_obj_addr = tl_addr + *thread::EXCEPTION_OBJ_OFFSET;
println!("storing exception obj Address::max() to {}", exc_obj_addr);
unsafe {exc_obj_addr.store(usize::MAX)};