Commit 99de258f authored by Isaac Oscar Gariano

Implemented GC allocator for AArch64

parent e71b64fa
......@@ -18,6 +18,7 @@ use utils::ByteSize;
use utils::Address;
use utils::POINTER_SIZE;
use compiler::backend::aarch64::*;
use runtime::mm::*;
use compiler::backend::{Reg, Mem};
use compiler::machine_code::MachineCode;
......@@ -37,6 +38,7 @@ use std::usize;
use std::ops;
use std::collections::HashSet;
use std::sync::RwLock;
use std::io::Write;
macro_rules! trace_emit {
($arg1:tt $($arg:tt)*) => {
......@@ -3662,30 +3664,30 @@ pub fn emit_context_with_reloc(
// data
writeln!(file, ".data").unwrap();
// persist heap - we traverse the heap from globals
{
use runtime::mm;
// persist globals
let global_locs_lock = vm.global_locations().read().unwrap();
let global_lock = vm.globals().read().unwrap();
// a map from address to ID
let global_addr_id_map = {
let mut map: LinkedHashMap<Address, MuID> = LinkedHashMap::new();
for (id, global_loc) in global_locs_lock.iter() {
map.insert(global_loc.to_address(), *id);
}
map
};
// dump heap from globals
// get address of all globals so we can traverse heap from them
let global_addrs: Vec<Address> =
global_locs_lock.values().map(|x| x.to_address()).collect();
debug!("going to dump these globals: {:?}", global_addrs);
// heap dump
let mut global_dump = mm::persist_heap(global_addrs);
debug!("Heap Dump from GC: {:?}", global_dump);
let ref objects = global_dump.objects;
let ref mut relocatable_refs = global_dump.relocatable_refs;
......@@ -3694,15 +3696,18 @@ pub fn emit_context_with_reloc(
relocatable_refs.insert(addr, mangle_name(str));
}
// for all the reachable object, we write them to the boot image
for obj_dump in objects.values() {
// write object metadata
write_align(&mut file, 8);
write_obj_header(&mut file, &obj_dump.encode);
// .bytes xx,xx,xx,xx (between mem_start to reference_addr)
write_data_bytes(&mut file, obj_dump.mem_start, obj_dump.reference_addr);
if global_addr_id_map.contains_key(&obj_dump.reference_addr) {
let global_id = global_addr_id_map.get(&obj_dump.reference_addr).unwrap();
// write alignment for the object
write_align(&mut file, obj_dump.align);
// if this object is a global cell, we add labels so it can be accessed
if global_addr_id_map.contains_key(&obj_dump.addr) {
let global_id = global_addr_id_map.get(&obj_dump.addr).unwrap();
let global_value = global_lock.get(global_id).unwrap();
// .globl global_cell_name
......@@ -3712,6 +3717,7 @@ pub fn emit_context_with_reloc(
writeln!(file, "\t{}", directive_globl(global_cell_name.clone())).unwrap();
writeln!(file, "{}:", global_cell_name.clone()).unwrap();
// .equiv global_cell_name_if_its_valid_c_ident
if is_valid_c_identifier(&demangled_name) {
let demangled_name = (*demangled_name).clone();
writeln!(file, "\t{}", directive_globl(demangled_name.clone())).unwrap();
......@@ -3723,51 +3729,56 @@ pub fn emit_context_with_reloc(
}
}
// dump_label:
let dump_label = relocatable_refs
.get(&obj_dump.reference_addr)
.unwrap()
.clone();
writeln!(file, "{}:", dump_label).unwrap();
// put dump_label for this object (so it can be referred to from other dumped objects)
let dump_label = relocatable_refs.get(&obj_dump.addr).unwrap().clone();
file.write_fmt(format_args!("{}:\n", dump_label)).unwrap();
let base = obj_dump.reference_addr;
let end = obj_dump.mem_start + obj_dump.mem_size;
// get ready to go through from the object start (not mem_start) to the end
let base = obj_dump.addr;
let end = obj_dump.addr + obj_dump.size;
assert!(base.is_aligned_to(POINTER_SIZE));
// offset as cursor
let mut offset = 0;
while offset < obj_dump.mem_size {
while offset < obj_dump.size {
let cur_addr = base + offset;
if obj_dump.reference_offsets.contains(&offset) {
// write ref with label
// if this offset is a reference field, we put a relocatable label
// generated by the GC instead of address value
let load_ref = unsafe { cur_addr.load::<Address>() };
if load_ref.is_zero() {
// write 0
writeln!(file, ".xword 0").unwrap();
// null reference, write 0
file.write("\t.xword 0\n".as_bytes()).unwrap();
} else {
// get the relocatable label
let label = match relocatable_refs.get(&load_ref) {
Some(label) => label,
None => {
panic!(
"cannot find label for address {}, \
it is not dumped by GC (why GC didn't trace to it)",
"cannot find label for address {}, it is not dumped by GC \
(why GC didn't trace to it?)",
load_ref
)
}
};
writeln!(file, ".xword {}", label.clone()).unwrap();
file.write_fmt(format_args!("\t.xword {}\n", label.clone()))
.unwrap();
}
} else if fields.contains_key(&cur_addr) {
// write uptr (or other relocatable value) with label
// if this offset is a field named by the client to relocatable,
// we put the relocatable label given by the client
let label = fields.get(&cur_addr).unwrap();
writeln!(file, ".xword {}", mangle_name(label.clone())).unwrap();
file.write_fmt(format_args!("\t.xword {}\n", mangle_name(label.clone())))
.unwrap();
} else {
// otherwise this offset is plain data
// write plain word (as bytes)
let next_word_addr = cur_addr + POINTER_SIZE;
if next_word_addr <= end {
write_data_bytes(&mut file, cur_addr, next_word_addr);
} else {
......@@ -3809,6 +3820,15 @@ pub fn emit_context_with_reloc(
debug!("---finish---");
}
/// Writes the GC object header for `obj` into the assembly file `f`.
/// The header is 8-byte aligned and occupies 24 bytes, emitted as three
/// `.xword` (64-bit) values taken from the raw encoding.
fn write_obj_header(f: &mut File, obj: &ObjectEncode) {
    // header is 8 bytes aligned, and takes 24 bytes
    write_align(f, 8);
    let raw_header = obj.as_raw();
    // emit the three 64-bit words of the header; indexing preserves the
    // original panic behavior if the raw encoding is shorter than expected
    for i in 0..3 {
        writeln!(f, "\t.xword {}", raw_header[i]).unwrap();
    }
}
/// Emits the VM context (the `.data` section of the boot image) with no
/// client-supplied relocation symbols or relocatable fields.
/// Convenience wrapper that delegates to `emit_context_with_reloc` with
/// two empty maps.
pub fn emit_context(vm: &VM) {
    emit_context_with_reloc(vm, hashmap!{}, hashmap!{});
}
......
......@@ -964,7 +964,8 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
PrintHex(_) => 10,
SetRetval(_) => 10,
ExnInstruction { ref inner, .. } => estimate_insts_for_ir(&inner),
_ => unimplemented!()
GetVMThreadLocal => 10,
_ => 1
}
}
......
......@@ -25,4 +25,4 @@ pub use self::address_map::AddressMap;
pub const SIZE_1KB: ByteSize = 1 << 10;
pub const SIZE_1MB: ByteSize = 1 << 20;
pub const SIZE_1GB: ByteSize = 1 << 30;
\ No newline at end of file
pub const SIZE_1GB: ByteSize = 1 << 30;
......@@ -61,4 +61,4 @@ impl FreelistAllocator {
slot.store(encode);
}
}
}
\ No newline at end of file
}
......@@ -15,4 +15,4 @@ mod freelist_space;
mod freelist_mutator;
pub use self::freelist_space::FreelistSpace;
pub use self::freelist_mutator::FreelistAllocator;
\ No newline at end of file
pub use self::freelist_mutator::FreelistAllocator;
......@@ -23,4 +23,4 @@ pub const IMMORTAL_OBJECT_HEADER_SIZE: ByteSize = 32;
pub struct ImmortalObjectHeader {
pub encode: ObjectEncode,
pub gc_byte: u8
}
\ No newline at end of file
}
......@@ -47,4 +47,4 @@ pub fn check_size(size: ByteSize) -> ByteSize {
} else {
size
}
}
\ No newline at end of file
}
......@@ -97,4 +97,4 @@ mod global_type_table;
pub use objectmodel::sidemap::object_encode::*;
pub use objectmodel::sidemap::type_encode::*;
pub use objectmodel::sidemap::global_type_table::*;
\ No newline at end of file
pub use objectmodel::sidemap::global_type_table::*;
......@@ -148,4 +148,4 @@ impl<K: Hash + Eq + Debug, V: Debug> Debug for LinkedRepeatableMultiMap<K, V> {
}
Ok(())
}
}
\ No newline at end of file
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment