Commit b89f2e04 authored by qinsoon

can persist linked list

parent 86819838
Pipeline #248 failed in 24 minutes and 26 seconds
......@@ -65,6 +65,24 @@ impl MuType {
}
}
pub fn get_field_ty(&self, index: usize) -> Option<P<MuType>> {
match self.v {
MuType_::Struct(ref tag) => {
let map_lock = STRUCT_TAG_MAP.read().unwrap();
let struct_inner = map_lock.get(tag).unwrap();
Some(struct_inner.tys[index].clone())
},
MuType_::Hybrid(ref tag) => {
let map_lock = HYBRID_TAG_MAP.read().unwrap();
let hybrid_inner = map_lock.get(tag).unwrap();
Some(hybrid_inner.fix_tys[index].clone())
},
_ => None
}
}
pub fn get_referenced_ty(&self) -> Option<P<MuType>> {
use types::MuType_::*;
match self.v {
......
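Note: get_field_ty returns Some(..) only for Struct types and for the fixed part of Hybrid types, and None otherwise, so callers must handle the None case. A minimal usage sketch, assuming the `node` struct declared in the new test below (field 0: ref_node, field 1: int64):
// Sketch: resolve the type of the payload field (index 1) of `node`.
let payload_ty = node_ty.get_field_ty(1)
    .expect("node is a struct, so field 1 should resolve");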
......@@ -2741,34 +2741,47 @@ pub fn emit_context(vm: &VM) {
use runtime::mm;
use runtime::mm::common::objectdump::*;
// presist globals
// persist globals
let global_locs_lock = vm.global_locations.read().unwrap();
let global_lock = vm.globals().read().unwrap();
let global_addr_id_map = {
let mut map : LinkedHashMap<Address, MuID> = LinkedHashMap::new();
for (id, global_loc) in global_locs_lock.iter() {
map.insert(global_loc.to_address(), *id);
}
map
};
// dump heap from globals
let global_addrs : Vec<Address> = global_locs_lock.values().map(|x| x.to_address()).collect();
debug!("going to dump these globals: {:?}", global_addrs);
let global_dump = mm::persist_heap(global_addrs);
debug!("Heap Dump from GC: {:?}", global_dump);
for id in global_locs_lock.keys() {
let global_value = global_lock.get(id).unwrap();
let global_addr = global_locs_lock.get(id).unwrap().to_address();
let obj_dump = global_dump.objects.get(&global_addr).unwrap();
let ref objects = global_dump.objects;
let ref relocatable_refs = global_dump.relocatable_refs;
for obj_dump in objects.values() {
// .bytes xx,xx,xx,xx (from mem_start to reference_addr)
write_data_bytes(&mut file, obj_dump.mem_start, obj_dump.reference_addr);
// .globl global_cell_name
// global_cell_name:
let global_cell_name = symbol(global_value.name().unwrap());
file.write_fmt(format_args!("\t{}\n", directive_globl(global_cell_name.clone()))).unwrap();
file.write_fmt(format_args!("{}:\n", global_cell_name)).unwrap();
// .globl dump_label
if global_addr_id_map.contains_key(&obj_dump.reference_addr) {
let global_id = global_addr_id_map.get(&obj_dump.reference_addr).unwrap();
let global_value = global_lock.get(global_id).unwrap();
// .globl global_cell_name
// global_cell_name:
let global_cell_name = symbol(global_value.name().unwrap());
file.write_fmt(format_args!("\t{}\n", directive_globl(global_cell_name.clone()))).unwrap();
file.write_fmt(format_args!("{}:\n", global_cell_name)).unwrap();
}
// dump_label:
let dump_label = symbol(global_dump.relocatable_refs.get(&obj_dump.reference_addr).unwrap().clone());
file.write_fmt(format_args!("\t{}\n", directive_globl(dump_label.clone()))).unwrap();
file.write_fmt(format_args!("{}:\n", dump_label)).unwrap();
let base = obj_dump.reference_addr;
......@@ -2782,7 +2795,17 @@ pub fn emit_context(vm: &VM) {
}
// write ref with label
file.write_fmt(format_args!("\t.quad {}\n", symbol(global_dump.relocatable_refs.get(&cur_ref_addr).unwrap().clone()))).unwrap();
let load_ref = unsafe {cur_ref_addr.load::<Address>()};
if load_ref.is_zero() {
file.write("\t.quad 0\n".as_bytes());
} else {
let label = match global_dump.relocatable_refs.get(&load_ref) {
Some(label) => label,
None => panic!("cannot find label for address {}, it is not dumped by GC (why GC didn't trace to it)", load_ref)
};
file.write_fmt(format_args!("\t.quad {}\n", symbol(label.clone()))).unwrap();
}
cursor = cur_ref_addr.plus(POINTER_SIZE);
}
......
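The scheme above is effectively a relocation pass: the GC dump assigns each dumped object a label (relocatable_refs), object payloads are emitted as raw .bytes, and every reference word is re-emitted symbolically as `.quad <label>` (or `.quad 0` for null) so the linker rebuilds the pointer graph at load time. A minimal self-contained sketch of the per-word decision, with hypothetical names:
use std::collections::HashMap;
// Sketch: emit one 8-byte word; reference words become symbolic labels.
fn emit_ref_word(labels: &HashMap<u64, String>, stored: u64) -> String {
    if stored == 0 {
        "\t.quad 0".to_string()         // a null reference stays null
    } else if let Some(label) = labels.get(&stored) {
        format!("\t.quad {}", label)    // a live reference is relocated via its label
    } else {
        panic!("address {:#x} was not dumped by the GC", stored)
    }
}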
......@@ -2741,21 +2741,18 @@ impl <'a> InstructionSelection {
}
}
fn emit_get_mem_from_inst_inner(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> MemoryLocation {
let header_size = mm::objectmodel::OBJECT_HEADER_SIZE as u64;
match op.v {
fn emit_get_mem_from_inst_inner(&mut self, op: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> MemoryLocation {
match op.v {
TreeNode_::Instruction(ref inst) => {
let ref ops = inst.ops.read().unwrap();
match inst.v {
// GETIREF -> [base + HDR_SIZE]
// GETIREF -> [base]
Instruction_::GetIRef(op_index) => {
let ref ref_op = ops[op_index];
let ret = MemoryLocation::Address {
base: ref_op.clone_value(),
offset: Some(self.make_value_int_const(header_size, vm)),
offset: None,
index: None,
scale: None
};
......
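The dropped offset reflects that the reference address and the iref to the first field now coincide, presumably because the object header no longer sits between them. A sketch of the new lowering, reusing the diff's names:
// Sketch: GETIREF is now an identity on the base address.
let ret = MemoryLocation::Address {
    base:   ref_op.clone_value(),
    offset: None,   // was Some(OBJECT_HEADER_SIZE) before this commit
    index:  None,
    scale:  None
};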
......@@ -276,6 +276,17 @@ pub struct BackendTypeInfo {
pub gc_type: P<GCType>
}
impl BackendTypeInfo {
pub fn get_field_offset(&self, index: usize) -> ByteSize {
if self.struct_layout.is_some() {
let layout = self.struct_layout.as_ref().unwrap();
layout[index]
} else {
panic!("trying to get field offset on a non-struct type")
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum RegGroup {GPR, FPR}
......
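Paired with MuType::get_field_ty above, this provides both pieces a field iref needs: the field's Mu type and its byte offset in the backend layout. A hedged sketch of the combination (VM::handle_get_field_iref below does exactly this):
// Sketch: address and type of field `i` of a struct whose iref is `addr`.
let field_ty     = ty.get_field_ty(i).expect("not a struct/hybrid type");
let backend_ty   = vm.get_backend_type_info(ty.id());
let field_offset = backend_ty.get_field_offset(i);   // panics if struct_layout is None
let field_addr   = addr.plus(field_offset);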
......@@ -72,6 +72,9 @@ pub fn gen_gctype_encode(ty: &GCType) -> u64 {
// encode ref map?
if ty.size < REF_MAP_LENGTH * POINTER_SIZE {
// has ref map
ret = ret | (1 << BIT_HAS_REF_MAP);
// encode ref map
let offsets = ty.gen_ref_offsets();
let mut ref_map = 0;
......@@ -286,6 +289,7 @@ mod tests {
println!("encode: {:64b}", encode);
assert!(header_is_fix_size(encode));
assert!(header_has_ref_map(encode));
assert_eq!(header_get_object_size(encode), 16);
assert_eq!(header_get_ref_map(encode), 0b1);
}
......@@ -307,6 +311,7 @@ mod tests {
println!("encode: {:64b}", encode);
assert!(header_is_fix_size(encode));
assert!(header_has_ref_map(encode));
assert_eq!(header_get_object_size(encode), 32);
assert_eq!(header_get_ref_map(encode), 0b11);
}
......@@ -330,6 +335,7 @@ mod tests {
println!("encode: {:64b}", encode);
assert!(header_is_fix_size(encode));
assert!(!header_has_ref_map(encode));
assert_eq!(header_get_gctype_id(encode), 999);
}
......
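The ref map is one bit per pointer-sized word: bit k is set when word k of the object holds a reference, which is why the updated tests expect 0b1 for a 16-byte object with a reference at offset 0, and 0b11 for a 32-byte object with references at offsets 0 and 8. A self-contained sketch of the packing (function name hypothetical; gen_ref_offsets supplies the byte offsets):
const POINTER_SIZE: usize = 8;
// Sketch: pack byte offsets of reference fields into a per-word bitmap.
fn ref_map_from_offsets(offsets: &[usize]) -> u64 {
    let mut map = 0u64;
    for &off in offsets {
        map |= 1u64 << (off / POINTER_SIZE);   // byte offset -> word index -> bit
    }
    map
}
// ref_map_from_offsets(&[0])    == 0b1
// ref_map_from_offsets(&[0, 8]) == 0b11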
......@@ -6,6 +6,8 @@ use utils::ByteSize;
use utils::ObjectReference;
use ast::ir::*;
use ast::ptr::*;
use ast::types::*;
use utils::Address;
use compiler::backend::RegGroup;
use vm::VM;
use runtime::ValueLocation;
......@@ -25,6 +27,18 @@ fn allocate(size: ByteSize, align: ByteSize, encode: u64) -> ObjectReference {
ret
}
pub fn allocate_fixed(ty: P<MuType>, vm: &VM) -> Address {
let backendtype = vm.get_backend_type_info(ty.id());
let gctype = backendtype.gc_type.clone();
let encode = get_gc_type_encode(gctype.id);
trace!("API: allocate fixed ty: {}", ty);
trace!("API: gc ty : {:?}", gctype);
trace!("API: encode : {:b}", encode);
allocate(backendtype.size, backendtype.alignment, encode).to_address()
}
pub fn allocate_global(value: P<Value>, vm: &VM) -> ValueLocation {
let tyid = value.ty.id();
......@@ -38,7 +52,10 @@ pub fn allocate_global(value: P<Value>, vm: &VM) -> ValueLocation {
let gctype_id = gctype.id;
let encode = get_gc_type_encode(gctype_id);
trace!("allocating global as gctype {:?}", gctype);
trace!("API: allocate global ty: {}", referenced_type);
trace!("API: gc ty : {:?}", gctype);
trace!("API: encode : {:b}", encode);
let addr = allocate(backendtype.size, backendtype.alignment, encode).to_address();
ValueLocation::Direct(RegGroup::GPR, addr)
......
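allocate_fixed and allocate_global share the same pipeline (backend type info, GC type, header encoding, then allocate) and differ only in what they return: a raw Address for fixed allocations versus a ValueLocation for globals. A hedged sketch of how the API layer drives the fixed path, mirroring VM::new_fixed below:
// Sketch: allocate one cell of type `ty` and get its address.
let ty   = types_lock.get(&tyid).unwrap().clone();   // P<MuType> from the VM's type table
let addr = gc::allocate_fixed(ty, &vm);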
use ast::ir::*;
use ast::inst::*;
use ast::ptr::*;
use ast::types::*;
use utils::BitSize;
use utils::Address;
......@@ -25,8 +27,8 @@ pub enum APIHandleValue {
Vector(Vec<APIHandleValue>),
// GenRef
Ref(Address),
IRef(Address),
Ref (P<MuType>, Address), // referenced type
IRef(P<MuType>, Address),
TagRef64(u64),
FuncRef,
ThreadRef,
......@@ -56,9 +58,16 @@ pub enum APIHandleValue {
}
impl APIHandleValue {
pub fn as_iref(&self) -> Address {
pub fn as_ref(&self) -> (P<MuType>, Address) {
match self {
&APIHandleValue::IRef(addr) => addr,
&APIHandleValue::Ref(ref ty, addr) => (ty.clone(), addr),
_ => panic!("expected Ref")
}
}
pub fn as_iref(&self) -> (P<MuType>, Address) {
match self {
&APIHandleValue::IRef(ref ty, addr) => (ty.clone(), addr),
_ => panic!("expected IRef")
}
}
......@@ -68,7 +77,7 @@ pub fn store(ord: MemoryOrder, loc: Arc<APIHandle>, val: Arc<APIHandle>) {
// FIXME: take memory order into consideration
// get address
let addr = loc.v.as_iref();
let (_, addr) = loc.v.as_iref();
// get value and store
// we will store here (its unsafe)
......@@ -92,8 +101,8 @@ pub fn store(ord: MemoryOrder, loc: Arc<APIHandle>, val: Arc<APIHandle>) {
| APIHandleValue::Array(_)
| APIHandleValue::Vector(_) => panic!("cannot store an aggregated value to an address"),
APIHandleValue::Ref(aval)
| APIHandleValue::IRef(aval) => addr.store::<Address>(aval),
APIHandleValue::Ref(_, aval)
| APIHandleValue::IRef(_, aval) => addr.store::<Address>(aval),
_ => unimplemented!()
}
......
use std::collections::HashMap;
use ast::ptr::P;
use ast::ptr::*;
use ast::ir::*;
use ast::types;
use ast::types::*;
......@@ -844,36 +844,86 @@ impl <'a> VM {
unimplemented!()
}
// --- API ---
fn new_handle(&self, handle: APIHandle) -> Arc<APIHandle> {
let ret = Arc::new(handle);
let mut handles = self.active_handles.write().unwrap();
handles.insert(ret.id, ret.clone());
ret
}
pub fn new_fixed(&self, tyid: MuID) -> Arc<APIHandle> {
let ty = {
let types_lock = self.types.read().unwrap();
types_lock.get(&tyid).unwrap().clone()
};
let addr = gc::allocate_fixed(ty.clone(), self);
trace!("API: allocated fixed type {} at {}", ty, addr);
self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::Ref(ty, addr)
})
}
pub fn handle_get_iref(&self, handle_ref: Arc<APIHandle>) -> Arc<APIHandle> {
let (ty, addr) = handle_ref.v.as_ref();
// iref has the same address as ref
self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::IRef(ty, addr)
})
}
pub fn handle_get_field_iref(&self, handle_iref: Arc<APIHandle>, field: usize) -> Arc<APIHandle> {
let (ty, addr) = handle_iref.v.as_iref();
let field_ty = match ty.get_field_ty(field) {
Some(ty) => ty,
None => panic!("ty is not struct ty: {}", ty)
};
let field_addr = {
let backend_ty = self.get_backend_type_info(ty.id());
let field_offset = backend_ty.get_field_offset(field);
addr.plus(field_offset)
};
self.new_handle(APIHandle {
id: self.next_id(),
v : APIHandleValue::IRef(field_ty, field_addr)
})
}
pub fn handle_from_global(&self, id: MuID) -> Arc<APIHandle> {
let global_iref = {
let global_locs = self.global_locations.read().unwrap();
global_locs.get(&id).unwrap().to_address()
};
let global_inner_ty = {
let global_lock = self.globals.read().unwrap();
global_lock.get(&id).unwrap().ty.get_referenced_ty().unwrap()
};
let handle_id = self.next_id();
let ret = Arc::new(APIHandle {
self.new_handle(APIHandle {
id: handle_id,
v : APIHandleValue::IRef(global_iref)
});
let mut handles = self.active_handles.write().unwrap();
handles.insert(handle_id, ret.clone());
ret
v : APIHandleValue::IRef(global_inner_ty, global_iref)
})
}
pub fn handle_from_uint64(&self, num: u64, len: BitSize) -> Arc<APIHandle> {
let handle_id = self.next_id();
let ret = Arc::new(APIHandle {
self.new_handle(APIHandle {
id: handle_id,
v : APIHandleValue::Int(num, len)
});
let mut handles = self.active_handles.write().unwrap();
handles.insert(handle_id, ret.clone());
ret
})
}
}
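End to end, a client builds and initializes a heap object through these calls alone: allocate, take an iref, narrow to a field iref, store. This is precisely the sequence the new test below performs:
// Sketch of the client-side flow (node_tyid as in the test below).
let node_ref   = vm.new_fixed(node_tyid);                  // Ref handle to a fresh `node`
let node_iref  = vm.handle_get_iref(node_ref.clone());     // Ref -> IRef, same address
let field_iref = vm.handle_get_field_iref(node_iref, 1);   // IRef to field 1 (the payload)
let value      = vm.handle_from_uint64(42, 64);
handle::store(MemoryOrder::Relaxed, field_iref, value);    // write the payload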
......@@ -29,6 +29,13 @@ macro_rules! typedef {
let $name = $vm.declare_type($vm.next_id(), MuType_::mustruct(Mu(stringify!($name)), vec![]));
$vm.set_name($name.as_entity(), Mu(stringify!($name)));
};
(($vm: expr) $name: ident = mu_struct_placeholder()) => {
let $name = $vm.declare_type($vm.next_id(), MuType_::mustruct_empty(Mu(stringify!($name))));
$vm.set_name($name.as_entity(), Mu(stringify!($name)));
};
(($vm: expr) mu_struct_put($name: ident, $($ty: ident), *)) => {
MuType_::mustruct_put(&Mu(stringify!($name)), vec![$($ty.clone()), *])
};
(($vm: expr) $name: ident = mu_hybrid($($ty: ident), *); $var_ty: ident) => {
let $name = $vm.declare_type($vm.next_id(), MuType_::hybrid(Mu(stringify!($name)), vec![$($ty.clone()), *], $var_ty.clone()));
......
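The placeholder/put pair is what makes recursive types declarable: declare an empty struct under a tag, build references to it, then back-fill the fields. The new test uses it for the self-referential list node:
typedef! ((vm) node     = mu_struct_placeholder());   // forward-declare the struct tag
typedef! ((vm) ref_node = mu_ref(node));              // references to it are already legal
typedef! ((vm) mu_struct_put(node, ref_node, int64)); // node = { next: ref_node, payload: int64 }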
......@@ -125,3 +125,214 @@ fn set_global_by_api(vm: &VM) {
blk_entry
});
}
#[test]
fn test_persist_linked_list() {
VM::start_logging_trace();
let vm = Arc::new(VM::new());
unsafe {
MuThread::current_thread_as_mu_thread(Address::zero(), vm.clone());
}
persist_linked_list(&vm);
let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
let func_id = vm.id_of("persist_linked_list");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
}
// create a linked list by api
const LINKED_LIST_SIZE : usize = 5;
{
let mut i = 0;
let mut last_node : Option<Arc<handle::APIHandle>> = None;
let node_tyid = vm.id_of("node");
while i < LINKED_LIST_SIZE {
// new node
let node_ref = vm.new_fixed(node_tyid);
let node_iref = vm.handle_get_iref(node_ref.clone());
// store i as payload
let payload_iref = vm.handle_get_field_iref(node_iref.clone(), 1); // payload is the 2nd field
let int_handle = vm.handle_from_uint64(i as u64, 64);
handle::store(MemoryOrder::Relaxed, payload_iref, int_handle);
// store last_node as next
let next_iref = vm.handle_get_field_iref(node_iref, 0);
if last_node.is_some() {
let handle = last_node.take().unwrap();
handle::store(MemoryOrder::Relaxed, next_iref, handle);
}
last_node = Some(node_ref);
i += 1;
}
// store last_node in global
let global_id = vm.id_of("my_global");
let global_handle = vm.handle_from_global(global_id);
handle::store(MemoryOrder::Relaxed, global_handle, last_node.unwrap());
}
// then emit context (the global will be put into context.s)
vm.make_primordial_thread(func_id, vec![]);
backend::emit_context(&vm);
// link
let executable = aot::link_primordial(vec![Mu("persist_linked_list")], "persist_linked_list_test", &vm);
let output = aot::execute_nocheck(executable);
assert!(output.status.code().is_some());
let ret_code = output.status.code().unwrap();
println!("return code: {} (i.e. the value set before)", ret_code);
assert!(ret_code == 10);
}
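Note on the expected exit code: the loop above stores payloads 0 through 4 and links each new node in front of the previous one, and blk_loop_body in the function below sums every payload while walking the next pointers, so the program exits with 0 + 1 + 2 + 3 + 4 = 10, matching the assert above.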
fn persist_linked_list(vm: &VM) {
typedef! ((vm) int1 = mu_int(1));
typedef! ((vm) int64 = mu_int(64));
typedef! ((vm) node = mu_struct_placeholder());
typedef! ((vm) iref_node = mu_iref(node));
typedef! ((vm) iref_int64 = mu_iref(int64));
typedef! ((vm) ref_node = mu_ref(node));
typedef! ((vm) iref_ref_node = mu_iref(ref_node));
typedef! ((vm) mu_struct_put(node, ref_node, int64));
globaldef! ((vm) <ref_node> my_global);
constdef! ((vm) <int64> int64_0 = Constant::Int(0));
constdef! ((vm) <ref_node> ref_node_null = Constant::NullRef);
funcsig! ((vm) sig = () -> (int64));
funcdecl! ((vm) <sig> persist_linked_list);
funcdef! ((vm) <sig> persist_linked_list VERSION persist_linked_list_v1);
// --- blk entry ---
block! ((vm, persist_linked_list_v1) blk_entry);
consta! ((vm, persist_linked_list_v1) int64_0_local = int64_0);
global! ((vm, persist_linked_list_v1) blk_entry_my_global = my_global);
// %head = LOAD %blk_entry_my_global
ssa! ((vm, persist_linked_list_v1) <ref_node> head);
inst! ((vm, persist_linked_list_v1) blk_entry_load:
head = LOAD blk_entry_my_global (is_ptr: false, order: MemoryOrder::SeqCst)
);
// branch blk_loop_head (%head, 0)
block! ((vm, persist_linked_list_v1) blk_loop_head);
inst! ((vm, persist_linked_list_v1) blk_entry_branch:
BRANCH blk_loop_head (head, int64_0_local)
);
define_block! ((vm, persist_linked_list_v1) blk_entry() {
blk_entry_load, blk_entry_branch
});
// --- blk loop_head ---
ssa! ((vm, persist_linked_list_v1) <ref_node> cursor);
ssa! ((vm, persist_linked_list_v1) <int64> sum);
// %cond = CMP EQ cursor NULLREF
ssa! ((vm, persist_linked_list_v1) <int1> cond);
consta! ((vm, persist_linked_list_v1) ref_node_null_local = ref_node_null);
inst! ((vm, persist_linked_list_v1) blk_loop_head_cmp:
cond = CMPOP (CmpOp::EQ) cursor ref_node_null_local
);
// BRANCH2 exit[sum] loop_body[cursor, sum]
block! ((vm, persist_linked_list_v1) blk_exit);
block! ((vm, persist_linked_list_v1) blk_loop_body);
inst! ((vm, persist_linked_list_v1) blk_loop_head_branch2:
BRANCH2 (cond, sum, cursor)
IF (OP 0)
THEN blk_exit (vec![1]) WITH 0.1f32,
ELSE blk_loop_body (vec![2, 1])
);
define_block! ((vm, persist_linked_list_v1) blk_loop_head(cursor, sum) {
blk_loop_head_cmp, blk_loop_head_branch2
});
// --- blk loop_body ---
ssa! ((vm, persist_linked_list_v1) <ref_node> body_cursor);
ssa! ((vm, persist_linked_list_v1) <int64> body_sum);
// %iref_cursor = GETIREF %body_cursor
ssa! ((vm, persist_linked_list_v1) <iref_node> iref_cursor);
inst! ((vm, persist_linked_list_v1) blk_loop_body_getiref:
iref_cursor = GETIREF body_cursor
);
// %iref_payload = GETFIELDIREF iref_cursor 1
ssa! ((vm, persist_linked_list_v1) <iref_int64> iref_payload);
inst! ((vm, persist_linked_list_v1) blk_loop_body_getfieldiref:
iref_payload = GETFIELDIREF iref_cursor (is_ptr: false, index: 1)
);
// %payload = LOAD %iref_payload
ssa! ((vm, persist_linked_list_v1) <int64> payload);
inst! ((vm, persist_linked_list_v1) blk_loop_body_load:
payload = LOAD iref_payload (is_ptr: false, order: MemoryOrder::SeqCst)
);
// %body_sum2 = BINOP ADD body_sum payload
ssa! ((vm, persist_linked_list_v1) <int64> body_sum2);
inst! ((vm, persist_linked_list_v1) blk_loop_body_add:
body_sum2 = BINOP (BinOp::Add) body_sum payload
);
// %iref_next = GETFIELDIREF iref_cursor 0
ssa! ((vm, persist_linked_list_v1) <iref_ref_node> iref_next);
inst! ((vm, persist_linked_list_v1) blk_loop_body_getfieldiref2:
iref_next = GETFIELDIREF iref_cursor (is_ptr: false, index: 0)
);
// %next = LOAD %iref_next
ssa! ((vm, persist_linked_list_v1) <ref_node> next);
inst! ((vm, persist_linked_list_v1) blk_loop_body_load2:
next = LOAD iref_next (is_ptr: false, order: MemoryOrder::SeqCst)
);
// BRANCH blk_loop_head (next, body_sum2)
inst! ((vm, persist_linked_list_v1) blk_loop_body_branch:
BRANCH blk_loop_head (next, body_sum2)
);
define_block! ((vm, persist_linked_list_v1) blk_loop_body(body_cursor, body_sum){
blk_loop_body_getiref,
blk_loop_body_getfieldiref,
blk_loop_body_load,
blk_loop_body_add,
blk_loop_body_getfieldiref2,
blk_loop_body_load2,
blk_loop_body_branch
});
// --- blk exit ---
ssa! ((vm, persist_linked_list_v1) <int64> res);
let blk_exit_exit = gen_ccall_exit(res.clone(), &mut persist_linked_list_v1, &vm);
inst! ((vm, persist_linked_list_v1) blk_exit_ret:
RET (res)
);
define_block! ((vm, persist_linked_list_v1) blk_exit(res) {
blk_exit_exit, blk_exit_ret
});
define_func_ver!((vm) persist_linked_list_v1 (entry: blk_entry) {
blk_entry, blk_loop_head, blk_loop_body, blk_exit
});
}
\ No newline at end of file