EMM works now; physical memory access remains

parent 1eae9d8d
Pipeline #3913 failed in 2 minutes and 36 seconds
@@ -6,8 +6,8 @@ stages:
 before_script:
   - export MU_ZEBU=$CI_PROJECT_DIR
-  - export ZEBU_BUILD=debug
-  - export ZEBU_CARGO_ARG=""
+  - export ZEBU_BUILD=release
+  - export ZEBU_CARGO_ARG="--release"
   - export CARGO_HOME=.cargo
   - export CC=clang
   - export CXX=clang++
@@ -47,7 +47,8 @@ num = "*"
 hprof = "*"
 memmap = "*"
 memsec = "0.1.9"
-serde = "*"
+serde = { version = "*", features = ["derive"]}
+bincode = "*"
 serde_derive = "*"
 time = "*"
 maplit = "*"
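Reviewer note (not part of the diff): with serde's `derive` feature enabled, the `#[derive(Serialize, Deserialize)]` macros come from serde itself, which makes the separate `serde_derive` dependency redundant, and `bincode` provides a compact binary codec that the serialized data presumably goes through. A minimal sketch (the `Header` type is made up; bincode 1.x API assumed):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Header {
        size: usize
    }

    fn main() {
        let bytes = bincode::serialize(&Header { size: 64 }).unwrap();
        let back: Header = bincode::deserialize(&bytes).unwrap();
        assert_eq!(back, Header { size: 64 });
    }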
@@ -14,7 +14,8 @@
 //! # MuIR AST crate
 //!
-//! This crate provides data structures to allow construct MuIR in Rust code, including:
+//! This crate provides data structures to allow construct MuIR in Rust code,
+//! including:
 //!
 //! * types
 //! * ir
@@ -36,7 +36,7 @@ pub enum BinOp {
     FSub,
     FMul,
     FDiv,
-    FRem,
+    FRem
 }

 impl fmt::Display for BinOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -49,7 +49,7 @@ impl BinOp {
         use op::BinOp::*;
         match self {
             FAdd | FSub | FMul | FDiv | FRem => true,
-            _ => false,
+            _ => false
         }
     }
 }
@@ -83,7 +83,7 @@ pub enum CmpOp {
     FULT,
     FULE,
     FUNE,
-    FUNO,
+    FUNO
 }

 impl fmt::Display for CmpOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -92,7 +92,8 @@ impl fmt::Display for CmpOp {
 }

 impl CmpOp {
-    /// returns the CmpOp X for CmpOp Y, such that (a Y b) is equivalent to (b X a)
+    /// returns the CmpOp X for CmpOp Y, such that (a Y b) is equivalent to (b X
+    /// a)
     pub fn swap_operands(self) -> CmpOp {
         use op::CmpOp::*;
         match self {
@@ -116,11 +117,12 @@ impl CmpOp {
             FUGT => FULT,
             FULT => FUGT,
-            _ => self, // all other comparisons are symmetric
+            _ => self // all other comparisons are symmetric
         }
     }

-    /// returns the CmpOp X for CmpOp Y, such that (a Y b) is equivalent to NOT(a X b)
+    /// returns the CmpOp X for CmpOp Y, such that (a Y b) is equivalent to
+    /// NOT(a X b)
     pub fn invert(self) -> CmpOp {
         use op::CmpOp::*;
         match self {
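Reviewer note (not part of the diff): the two rewrapped doc comments encode different identities that are easy to conflate. Using only arms visible in this diff, a hedged illustration:

    // swap_operands: (a FUGT b) == (b FULT a)
    assert_eq!(CmpOp::FUGT.swap_operands(), CmpOp::FULT);
    // invert: (a Y b) == !(a X b) for X = Y.invert()
    assert_eq!(CmpOp::FTRUE.invert(), CmpOp::FFALSE);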
@@ -161,7 +163,7 @@ impl CmpOp {
             FONE => FUEQ,
             FFALSE => FTRUE,
-            FTRUE => FFALSE,
+            FTRUE => FFALSE
         }
     }
@@ -173,7 +175,7 @@ impl CmpOp {
             SLT => ULT,
             SGT => UGT,
             SLE => ULE,
-            _ => self,
+            _ => self
         }
     }
@@ -181,7 +183,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             SGE | SLT | SGT | SLE => true,
-            _ => false,
+            _ => false
         }
     }
@@ -189,7 +191,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE | SGE | SGT | SLE | SLT | UGE | UGT | ULE | ULT => true,
-            _ => false,
+            _ => false
         }
     }
@@ -200,14 +202,14 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE => true,
-            _ => false,
+            _ => false
         }
     }

     pub fn is_ult_cmp(self) -> bool {
         use op::CmpOp::*;
         match self {
             UGE | UGT | ULE | ULT => true,
-            _ => false,
+            _ => false
         }
     }
@@ -215,7 +217,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE | FORD | FUNO | FUNE | FUEQ | FONE | FOEQ => true,
-            _ => false,
+            _ => false
         }
     }
 }
@@ -233,7 +235,7 @@ pub enum ConvOp {
     SITOFP,
     BITCAST,
     REFCAST,
-    PTRCAST,
+    PTRCAST
 }

 impl fmt::Display for ConvOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -253,7 +255,7 @@ pub enum AtomicRMWOp {
     MAX,
     MIN,
     UMAX,
-    UMIN,
+    UMIN
 }

 impl fmt::Display for AtomicRMWOp {
@@ -14,7 +14,8 @@
 use std::sync::Arc;

-/// P<T> is alias type for sharable Mu AST components (such as Value, MuFuncSig, MuType, etc)
+/// P<T> is alias type for sharable Mu AST components (such as Value, MuFuncSig,
+/// MuType, etc)
 // This design is similar to P<T> in rustc compiler.
 // However, instead of using Box<T>, we use Arc<T> to encourage sharing
 pub type P<T> = Arc<T>;
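Reviewer note (not part of the diff): since `P<T>` is just `Arc<T>`, cloning a `P<T>` copies a pointer and bumps a reference count rather than deep-copying the AST node; a minimal sketch:

    use std::sync::Arc;

    pub type P<T> = Arc<T>;

    fn main() {
        let ty: P<String> = Arc::new("int<64>".to_string());
        let alias = ty.clone(); // same allocation, refcount now 2
        assert!(Arc::ptr_eq(&ty, &alias));
        assert_eq!(Arc::strong_count(&ty), 2);
    }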
@@ -573,7 +573,7 @@ impl MuType {
         for _type in _types {
             if _type.is_iref() || _type.containsIRef() {
-                return true
+                return true;
             }
         }
         false
@@ -587,12 +587,12 @@ impl MuType {
         for _type in fix_tys.iter() {
             if _type.is_iref() || _type.containsIRef() {
-                return true
+                return true;
             }
         }

         if var_ty.is_iref() || var_ty.containsIRef() {
-            return true
+            return true;
         }
         false
@@ -752,7 +752,7 @@ impl fmt::Debug for STRUCT_TAG_MAP {
         writeln!(f, "STRUCT_TAG_MAP_LOC").unwrap();
         let lock = match STRUCT_TAG_MAP.read() {
             Ok(map) => map,
-            Err(err) => panic!("STRUCT_TAG_MAP reading error: {:#?}", err),
+            Err(err) => panic!("STRUCT_TAG_MAP reading error: {:#?}", err)
         };
         writeln!(f, "STRUCT_TAG_MAP content:").unwrap();
         write!(f, "{:#?}", lock)
@@ -764,7 +764,7 @@ impl fmt::Debug for HYBRID_TAG_MAP {
         writeln!(f, "HYBRID_TAG_MAP_LOC").unwrap();
         let lock = match HYBRID_TAG_MAP.read() {
             Ok(map) => map,
-            Err(err) => panic!("HYBRID_TAG_MAP reading error: {:#?}", err),
+            Err(err) => panic!("HYBRID_TAG_MAP reading error: {:#?}", err)
         };
         writeln!(f, "HYBRID_TAG_MAP content:").unwrap();
         write!(f, "{:#?}", lock)
@@ -391,8 +391,18 @@ pub trait CodeGenerator {
     // Other binaries
     fn emit_adc(&mut self, dest: Reg, src1: Reg, src2: Reg);
     fn emit_adcs(&mut self, dest: Reg, src1: Reg, src2: Reg);
-    fn emit_add(&mut self, dest: Reg, src1: Reg /* GPR or SP */, src2: Reg);
-    fn emit_adds(&mut self, dest: Reg, src1: Reg /* GPR or SP */, src2: Reg);
+    fn emit_add(
+        &mut self,
+        dest: Reg,
+        src1: Reg, /* GPR or SP */
+        src2: Reg
+    );
+    fn emit_adds(
+        &mut self,
+        dest: Reg,
+        src1: Reg, /* GPR or SP */
+        src2: Reg
+    );
     fn emit_sbc(&mut self, dest: Reg, src1: Reg, src2: Reg);
     fn emit_sbcs(&mut self, dest: Reg, src1: Reg, src2: Reg);
     fn emit_sub(&mut self, dest: Reg, src1: Reg, src2: Reg);
@@ -4626,9 +4626,9 @@ impl<'a> InstructionSelection {
             match t.v {
                 Vector(_, _) => unimplemented!(),
                 Float | Double => 0, // Can return in FPR
-                Hybrid(_) => panic!("cant return a hybrid"), /* don't know how
-                                                              * much space to
-                                                              * reserve */
+                Hybrid(_) => panic!("cant return a hybrid"), /* don't know how */
+                // much space to
+                // reserve
                 Struct(_) | Array(_, _) => {
                     if hfa_length(t) > 0 || size <= 16 {
                         0 // Can return in register (or multiple registers)
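Reviewer note (not part of the diff): the untouched logic around the rewrapped comment appears to follow the AArch64 AAPCS rule that an aggregate of at most 16 bytes, or a homogeneous floating-point aggregate, comes back in registers, while anything larger needs caller-reserved memory. A toy check of the size test alone (the HFA part needs type info this sketch does not model):

    fn needs_return_memory(size: usize, hfa_length: usize) -> bool {
        !(hfa_length > 0 || size <= 16)
    }

    fn main() {
        assert!(!needs_return_memory(16, 0)); // two words: registers
        assert!(needs_return_memory(24, 0)); // three words: memory
    }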
@@ -5877,8 +5877,8 @@ impl<'a> InstructionSelection {
             //     },
             //     _ => {
-            //         trace!("nested: compute arg
-            //                 for branch");
+            //         // nested: compute arg
+            //         for branch");
             //         // nested: compute arg
             //         self.instruction_select(arg, cur_func);
             //
             //         self.emit_get_result(arg);
@@ -2372,6 +2372,8 @@ impl<'a> InstructionSelection {
             let tmp_region =
                 self.emit_ireg(region, f_content, f_context, vm);
+            let ty_info = vm.get_backend_type_info(ty.id());
+            if ty.containsIRef() {
             let ty_id: MuID = ty.id();
             let tmp_id =
@@ -2504,6 +2506,7 @@ impl<'a> InstructionSelection {
             trace!("instsel on eAlloc: {}", ty.print_details());
             assert!(!ty.is_hybrid());
             let ty_info = vm.get_backend_type_info(ty.id());
             let tmp_res = self.get_result_value(node);
+            if ty.containsIRef() {
@@ -7250,16 +7253,13 @@ impl<'a> InstructionSelection {
             Instruction_::GetVarPartIRef { base, .. } => {
                 trace!("MEM from GETVARPARTIREF: {}", op);
                 let ref base = ops[base];
-                let struct_ty = match base
-                    .as_value()
-                    .ty
-                    .get_referent_ty()
-                {
-                    Some(ty) => ty,
-                    None => panic!(
+                let struct_ty =
+                    match base.as_value().ty.get_referent_ty() {
+                        Some(ty) => ty,
+                        None => panic!(
                             "expecting an iref or uptr in GetVarPartIRef"
                         )
-                };
+                    };
                 let fix_part_size =
                     vm.get_backend_type_size(struct_ty.id());
@@ -737,13 +737,13 @@ impl BackendType {
         &self,
         vm: &Arc<VM>,
         var_len: Option<usize>
-    ) -> Vec<ByteOffset> {
-        let mut addr_list: Vec<ByteOffset> = vec![];
+    ) -> Vec<ByteSize> {
+        let mut addr_list: Vec<ByteSize> = vec![];

         match &self.ty.v {
             MuType_::IRef(_type) => {
                 trace!("get_iref_offsets(IRef({:?}))", _type);
-                let mut cur_offset: ByteOffset = 0;
+                let mut cur_offset: ByteSize = 0;
                 BackendType::append_iref_offsets_internal(
                     vm,
                     &self.ty,
@@ -753,7 +753,7 @@
             }
             MuType_::Array(_type, len) => {
                 trace!("get_iref_offsets(Array({:?},{:?}))", _type, len);
-                let mut cur_offset: ByteOffset = 0;
+                let mut cur_offset: ByteSize = 0;
                 for i in 0..*len {
                     BackendType::append_iref_offsets_internal(
                         vm,
@@ -765,70 +765,91 @@
             }
             MuType_::Struct(ref name) => {
                 trace!("get_iref_offsets(Struct({}))", name);
-                let _struct_tag_map = STRUCT_TAG_MAP.read().unwrap();
-                let _struct = _struct_tag_map.get(&*name).unwrap();
-                let _types = _struct.get_tys();
-                let mut cur_offset: ByteOffset = 0;
-                for _type in _types.iter() {
-                    BackendType::append_iref_offsets_internal(
-                        vm,
-                        _type,
-                        &mut addr_list,
-                        &mut cur_offset
-                    );
-                }
+                let mut cur_offset: ByteSize = 0;
+                let t_encode = match &self.gc_type {
+                    Some(enc) => enc,
+                    None => panic!("struct must have gc_type.TypeEncode")
+                };
+                match t_encode {
+                    TypeEncode::Short(short_enc) => {
+                        assert_eq!(short_enc.var_len(), 0);
+                        for i in 0..short_enc.fix_len() {
+                            if short_enc.fix_ty(i) == WordType::Ref {
+                                addr_list.push(cur_offset);
+                            }
+                            cur_offset += WORD_SIZE;
+                        }
+                    }
+                    TypeEncode::Full(full_enc) => {
+                        assert_eq!(full_enc.var.len(), 0);
+                        for word_type in full_enc.fix.iter() {
+                            if *word_type == WordType::Ref {
+                                addr_list.push(cur_offset);
+                            }
+                            cur_offset += WORD_SIZE;
+                        }
+                    }
+                }
             }
             MuType_::Hybrid(ref name) => {
                 trace!("get_iref_offsets(Hybrid({}))", name);
+                trace!("{:#?}", HYBRID_TAG_MAP);
                 let _hybrid_tag_map = HYBRID_TAG_MAP.read();
                 let _hybrid_tag_map = match _hybrid_tag_map {
                     Ok(lock) => {
                         trace!("HYBRID_TAG_MAP lock aquired");
                         lock
                     }
                     Err(err) => {
                         panic!("Unable to aquire HYBRID_TAG_MAP lock with error: {:#?}", err);
                     }
                 };
                 let _hybrid = _hybrid_tag_map.get(name);
                 let _hybrid = match _hybrid {
                     Some(item) => item,
                     None => {
                         panic!("Hybrid tag not found!");
                     }
                 };
                 trace!("get_iref_offsets.Hybrid_type = {:#?}", _hybrid);
                 let fix_tys = _hybrid.get_fix_tys();
                 let var_ty = _hybrid.get_var_ty();
-                let var_len = match var_len {
-                    Some(len) => len,
-                    None => panic!(
-                        "Variable part length must be specified for hybrids!"
-                    )
-                };
-                let mut cur_offset: ByteOffset = 0;
+                assert!(var_len.is_some());
+                assert!(self.elem_size.is_some());
-                for _type in fix_tys.iter() {
-                    BackendType::append_iref_offsets_internal(
-                        vm,
-                        _type,
-                        &mut addr_list,
-                        &mut cur_offset
-                    );
-                }
+                let obj_size =
+                    self.size + self.elem_size.unwrap() * var_len.unwrap();
-                for i in 0..var_len {
-                    // continue from here
-                    BackendType::append_iref_offsets_internal(
-                        vm,
-                        var_ty,
-                        &mut addr_list,
-                        &mut cur_offset
-                    );
-                }
+                let mut cur_offset: ByteSize = 0;
+                let t_encode = match &self.gc_type {
+                    Some(enc) => enc,
+                    None => panic!("hybrid must have gc_type.TypeEncode")
+                };
+                match t_encode {
+                    TypeEncode::Short(short_enc) => {
+                        assert!(short_enc.var_len() > 0);
+                        for i in 0..short_enc.fix_len() {
+                            if short_enc.fix_ty(i) == WordType::Ref {
+                                addr_list.push(cur_offset);
+                            }
+                            cur_offset += WORD_SIZE;
+                        }
+                        while cur_offset < obj_size {
+                            for i in 0..short_enc.var_len() {
+                                if short_enc.var_ty(i) == WordType::Ref {
+                                    addr_list.push(cur_offset);
+                                }
+                                cur_offset += WORD_SIZE;
+                            }
+                        }
+                    }
+                    TypeEncode::Full(full_enc) => {
+                        assert!(full_enc.var.len() > 0);
+                        for word_type in full_enc.fix.iter() {
+                            if *word_type == WordType::Ref {
+                                addr_list.push(cur_offset);
+                            }
+                            cur_offset += WORD_SIZE;
+                        }
+                        while cur_offset < obj_size {
+                            for word_type in full_enc.var.iter() {
+                                if *word_type == WordType::Ref {
+                                    addr_list.push(cur_offset);
+                                }
+                                cur_offset += WORD_SIZE;
+                            }
+                        }
+                    }
+                }
             }
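Reviewer note (not part of the diff): the removed recursion over MuTypes and the new code compute the same thing, byte offsets of reference words, but the new version reads them straight off the GC's per-type word encoding. A self-contained sketch of that scan, with `WordType` reduced to the one distinction that matters here (all names are stand-ins for the diff's `TypeEncode`/`WordType`):

    const WORD_SIZE: usize = 8;

    #[derive(Clone, Copy, PartialEq)]
    enum WordType {
        Ref,
        NonRef
    }

    // Offsets of Ref words in the fixed part, mirroring the Full-encode loop.
    fn ref_offsets(fix: &[WordType]) -> Vec<usize> {
        let mut addr_list = vec![];
        let mut cur_offset = 0;
        for word_type in fix {
            if *word_type == WordType::Ref {
                addr_list.push(cur_offset);
            }
            cur_offset += WORD_SIZE;
        }
        addr_list
    }

    fn main() {
        use WordType::*;
        // struct { ref, i64, ref } -> refs at byte offsets 0 and 16
        assert_eq!(ref_offsets(&[Ref, NonRef, Ref]), vec![0, 16]);
    }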
@@ -840,8 +861,8 @@
     fn append_iref_offsets_internal(
         vm: &Arc<VM>,
         mutype: &MuType,
-        addr_list: &mut Vec<ByteOffset>,
-        cur_offset: &mut ByteOffset
+        addr_list: &mut Vec<ByteSize>,
+        cur_offset: &mut ByteSize
     ) {
         trace!(
             "append_iref_offsets_internal({:?}) @offset({})",
@@ -854,21 +875,21 @@ impl BackendType {
                 let addr_sub_list = type_info.get_iref_offsets(vm, None);
                 *cur_offset =
                     align_up(*cur_offset as ByteSize, type_info.alignment)
-                        as ByteOffset;
+                        as ByteSize;
                 for addr in addr_sub_list {
                     addr_list.push(addr + *cur_offset)
                 }
-                *cur_offset += (type_info.size as ByteOffset);
+                *cur_offset += (type_info.size as ByteSize);
             }
             MuType_::IRef(_) => {
                 let type_info = vm.get_backend_type_info(mutype.id());
                 *cur_offset =
                     align_up(*cur_offset as ByteSize, type_info.alignment)
-                        as ByteOffset;
+                        as ByteSize;
                 unsafe {
                     addr_list.push(*cur_offset);
                 }
-                *cur_offset += (type_info.size as ByteOffset);
+                *cur_offset += (type_info.size as ByteSize);
             }
             MuType_::Hybrid(_) => {
                 panic!("Hybrid must not be contained inside any types!");
@@ -877,8 +898,8 @@ impl BackendType {
                 let type_info = vm.get_backend_type_info(mutype.id());
                 *cur_offset =
                     align_up(*cur_offset as ByteSize, type_info.alignment)
-                        as ByteOffset;
-                *cur_offset += (type_info.size as ByteOffset);
+                        as ByteSize;
+                *cur_offset += (type_info.size as ByteSize);
             }
         }
     }
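Reviewer note (not part of the diff): every arm above leans on the same pattern, round `cur_offset` up to the field's alignment, record it, then advance by the field's size. A minimal sketch of `align_up` as it is used here (assuming power-of-two alignments; the real helper lives elsewhere in the repo):

    fn align_up(offset: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        (offset + align - 1) & !(align - 1)
    }

    fn main() {
        assert_eq!(align_up(12, 8), 16); // a u64 field after 12 bytes lands at 16
        assert_eq!(align_up(16, 8), 16); // already-aligned offsets are unchanged
    }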
@@ -937,7 +937,7 @@ impl<'a> GraphColoring<'a> {
         self.frozen_moves.insert(m);

         // if !self.precolored.contains(&v) &&
-        //    self.node_moves(v).is_empty() &&
+        // self.node_moves(v).is_empty() &&
         // self.ig.get_degree_of(v) < self.n_regs_for_node(v)
         if self.worklist_freeze.contains(&v)
             && self.node_moves(v).is_empty()
@@ -293,9 +293,9 @@ impl Inlining {
             keepalives: None
         });

-        //
+        //
         // dbg!(&cur_block);
-        //
+        //
         // dbg!(&new_blocks);
         debug!(
             "cur_block befor copy_inline_blocks: {:?}",
@@ -318,9 +318,9 @@ impl Inlining {
             f_context,
             &inlined_fv_guard.context
         );

-        //
+        //
         // dbg!(&cur_block);
-        //
+        //
         // dbg!(&new_blocks);
         debug!(
             "cur_block after copy_inline_blocks: {:?}",
@@ -21,7 +21,7 @@ pub struct AddressBitmap {
     start: Address,
     end: Address,
-    bitmap: Bitmap,
+    bitmap: Bitmap
 }

 impl AddressBitmap {
@@ -32,7 +32,7 @@ impl AddressBitmap {
         AddressBitmap {
             start: start,
             end: end,
-            bitmap: bitmap,
+            bitmap: bitmap
         }
     }
@@ -25,12 +25,12 @@ pub struct AddressMap<T: Copy> {
     end: Address,
     pub ptr: *mut T,
-    len: usize,
+    len: usize
 }

 impl<T> AddressMap<T>
 where
-    T: Copy,
+    T: Copy
 {
     pub fn new(start: Address, end: Address) -> AddressMap<T> {
         let len = (end - start) >> LOG_POINTER_SIZE;
@@ -40,7 +40,7 @@ where
         AddressMap {
             start: start,
             end: end,
             ptr: ptr,
-            len: len,
+            len: len
         }
     }
@@ -68,7 +68,7 @@ where
 impl<T> Drop for AddressMap<T>
 where
-    T: Copy,
+    T: Copy
 {
     fn drop(&mut self) {
         unsafe { free(self.ptr) }
@@ -19,7 +19,7 @@ use utils::mem::memsec::free;
 #[derive(Clone)]
 pub struct Bitmap {
     bitmap: *mut u64,
-    bitmap_len: usize,
+    bitmap_len: usize
 }

 impl Bitmap {
@@ -27,12 +27,13 @@ impl Bitmap {
         let bitmap_len = length;
         let bitmap = unsafe {
             // secretly reserve one more word
-            malloc_zero(mem::size_of::<u64>() * ((bitmap_len >> 6) + 1)) as *mut u64
+            malloc_zero(mem::size_of::<u64>() * ((bitmap_len >> 6) + 1))
+                as *mut u64
         };

         Bitmap {
             bitmap: bitmap,
-            bitmap_len: bitmap_len,
+            bitmap_len: bitmap_len
         }
     }
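Reviewer note (not part of the diff): the reformatted `malloc_zero` call sizes the map at one `u64` per 64 bits plus the "secretly" reserved extra word, which also covers lengths that are not multiples of 64. Checking the arithmetic:

    fn main() {
        let bitmap_len = 200; // bits
        let words = (bitmap_len >> 6) + 1; // 200/64 = 3, plus the spare word
        assert_eq!(words, 4);
        assert!(words * 64 >= bitmap_len);
    }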
@@ -112,7 +113,8 @@ impl Bitmap {
             let next_word = self.bitmap.offset(nth_u64 as isize + 1);
             let part1 = *word >> nth_bit;
-            let part2 = (*next_word & ((1 << (nth_bit + length - 64)) - 1)) << (64 - nth_bit);
+            let part2 = (*next_word & ((1 << (nth_bit + length - 64)) - 1))
+                << (64 - nth_bit);
             part1 | part2
         }
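Reviewer note (not part of the diff): the rewrapped `part2` expression is the high half of a bit-field read that straddles two `u64` words. A worked instance (only valid when `nth_bit + length > 64`, as in the original branch):

    fn main() {
        let (word, next_word): (u64, u64) = (0xABCD << 48, 0x12);
        let (nth_bit, length) = (48u64, 24u64);
        let part1 = word >> nth_bit; // top 16 bits of the first word
        let part2 = (next_word & ((1 << (nth_bit + length - 64)) - 1))
            << (64 - nth_bit);
        assert_eq!(part1 | part2, 0x12_ABCD); // 24 bits reassembled
    }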
@@ -23,7 +23,7 @@ use std::mem::transmute;
 pub struct HeapDump {
     pub objects: HashMap<Address, ObjectDump>,
-    pub relocatable_refs: HashMap<Address, String>,
+    pub relocatable_refs: HashMap<Address, String>
 }

 pub struct ObjectDump {
@@ -31,7 +31,7 @@ pub struct ObjectDump {
     pub size: ByteSize,
     pub align: ByteSize,
     pub encode: ObjectEncode,
-    pub reference_offsets: Vec<ByteSize>, // based on reference_addr
+    pub reference_offsets: Vec<ByteSize> // based on reference_addr
 }

 impl HeapDump {
@@ -40,7 +40,7 @@ impl HeapDump {
         let mut work_queue: Vec<Address> = roots;
         let mut heap: HeapDump = HeapDump {
             objects: HashMap::new(),
-            relocatable_refs: HashMap::new(),
+            relocatable_refs: HashMap::new()
         };

         while !work_queue.is_empty() {
@@ -51,7 +51,10 @@ impl HeapDump {
                 let obj_dump = heap.persist_object(obj);
                 heap.objects.insert(obj, obj_dump);

-                heap.keep_tracing(heap.objects.get(&obj).unwrap(), &mut work_queue);
+                heap.keep_tracing(
+                    heap.objects.get(&obj).unwrap(),
+                    &mut work_queue
+                );
             }
         }
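Reviewer note (not part of the diff): the rewrapped call sits in a standard worklist trace: pop an address, persist the object, then push every outgoing reference that has not been seen yet. A minimal sketch of that loop shape (names and the `refs_of` table are invented):

    use std::collections::{HashMap, HashSet};

    fn trace(roots: Vec<u64>, refs_of: &HashMap<u64, Vec<u64>>) -> HashSet<u64> {
        let mut seen = HashSet::new();
        let mut work_queue = roots;
        while let Some(obj) = work_queue.pop() {
            if seen.insert(obj) {
                // persist_object(obj) would happen here
                if let Some(rs) = refs_of.get(&obj) {
                    work_queue.extend(rs); // keep_tracing
                }
            }
        }
        seen
    }

    fn main() {
        let refs: HashMap<u64, Vec<u64>> = [(1, vec![2]), (2, vec![])].into();
        assert_eq!(trace(vec![1], &refs).len(), 2);
    }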
@@ -84,7 +87,7 @@ impl HeapDump {
                     size: encode.size(),
                     align: MINIMAL_ALIGNMENT,
                     encode: ObjectEncode::Tiny(encode),
-                    reference_offsets: ref_offsets,
+                    reference_offsets: ref_offsets
                 }
             }
             SpaceDescriptor::ImmixNormal => {
@@ -94,7 +97,8 @@ impl HeapDump {
                         .get_type_byte_slot(space.get_word_index(obj))
                         .load::<MediumObjectEncode>()
                 };
-                let small_encode: &SmallObjectEncode = unsafe { transmute(&encode) };
+                let small_encode: &SmallObjectEncode =
+                    unsafe { transmute(&encode) };

                 // get type id
                 let (type_id, type_size) = if small_encode.is_small() {
@@ -104,7 +108,8 @@ impl HeapDump {
                 };

                 // get type encode, and find all references
-                let type_encode: &ShortTypeEncode = &GlobalTypeTable::table()[type_id];
+                let type_encode: &ShortTypeEncode =
+                    &GlobalTypeTable::table()[type_id];
                 let mut ref_offsets = vec![];
                 let mut offset = 0;
                 for i in 0..type_encode.fix_len() {
@@ -132,13 +137,16 @@ impl HeapDump {
                 } else {
                     ObjectEncode::Medium(encode)