
Commit 43eb1d7a authored by qinsoon

[wip] need a global liveness analysis

(for blocks created during instruction selection)
parent f6bac8b3
#!/bin/sh
RUST_BACKTRACE=1 RUST_TEST_THREADS=1 cargo test "$@"
@@ -19,11 +19,16 @@ pub trait CodeGenerator {
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
-fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
+fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
-fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
+fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>); // store
+fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: u32);
+fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
+fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: u32);
+fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32);
@@ -35,17 +40,17 @@ pub trait CodeGenerator {
fn emit_mul_r64(&mut self, src: &P<Value>);
fn emit_mul_mem64(&mut self, src: &P<Value>);
-fn emit_jmp(&mut self, dest: &Block);
-fn emit_je(&mut self, dest: &Block);
-fn emit_jne(&mut self, dest: &Block);
-fn emit_ja(&mut self, dest: &Block);
-fn emit_jae(&mut self, dest: &Block);
-fn emit_jb(&mut self, dest: &Block);
-fn emit_jbe(&mut self, dest: &Block);
-fn emit_jg(&mut self, dest: &Block);
-fn emit_jge(&mut self, dest: &Block);
-fn emit_jl(&mut self, dest: &Block);
-fn emit_jle(&mut self, dest: &Block);
+fn emit_jmp(&mut self, dest: MuName);
+fn emit_je(&mut self, dest: MuName);
+fn emit_jne(&mut self, dest: MuName);
+fn emit_ja(&mut self, dest: MuName);
+fn emit_jae(&mut self, dest: MuName);
+fn emit_jb(&mut self, dest: MuName);
+fn emit_jbe(&mut self, dest: MuName);
+fn emit_jg(&mut self, dest: MuName);
+fn emit_jge(&mut self, dest: MuName);
+fn emit_jl(&mut self, dest: MuName);
+fn emit_jle(&mut self, dest: MuName);
fn emit_call_near_rel32(&mut self, func: MuName);
fn emit_call_near_r64(&mut self, func: &P<Value>);
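Consistent with the commit message, the jump emitters switch from `&Block` to `MuName` (a block name): blocks created during instruction selection have no corresponding IR `Block` structure yet, so the code generator can only refer to them by name.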
@@ -260,8 +260,14 @@ pub fn is_machine_reg(reg: MuID) -> bool {
}
}
+fn build_live_set(cf: &CompiledFunction, func: &MuFunctionVersion) {
+}
// from Tailoring Graph-coloring Register Allocation For Runtime Compilation, Figure 4
-pub fn build_chaitin_briggs (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGraph {
+pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion) -> InterferenceGraph {
+build_live_set(cf, func);
let mut ig = InterferenceGraph::new();
// precolor machine register nodes
@@ -285,7 +291,7 @@ pub fn build_chaitin_briggs (cf: &CompiledFunction, func: &MuFunctionVersion) ->
}
// all nodes have been added, we init the graph (create the adjacency matrix)
-ig.init_graph();
+ig.init_graph();
for block in cf.mc.get_all_blocks() {
// Current_Live(B) = LiveOut(B)
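The empty `build_live_set` stub and the new `set_ir_block_livein`/`set_ir_block_liveout` setters on `MachineCode` (further down) point at a standard global liveness analysis: iterate the backward dataflow equations live_out(B) = ∪ live_in(S) over successors S, and live_in(B) = use(B) ∪ (live_out(B) \ def(B)), until a fixpoint. A minimal self-contained sketch of that fixpoint, using a hypothetical `BlockInfo` input rather than Zebu's real `CompiledFunction`/`MuFunctionVersion` types:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical per-block facts; the real analysis would derive these
// from the generated machine code.
struct BlockInfo {
    uses: HashSet<usize>,  // IDs read before any write in the block
    defs: HashSet<usize>,  // IDs written in the block
    succs: Vec<String>,    // successor block names
}

// Classic iterative liveness:
//   live_out(B) = union of live_in(S) over successors S
//   live_in(B)  = uses(B) | (live_out(B) - defs(B))
// repeated until nothing changes.
fn build_live_set(
    blocks: &HashMap<String, BlockInfo>,
) -> HashMap<String, (HashSet<usize>, HashSet<usize>)> {
    // (live_in, live_out) per block, all initially empty.
    let mut live: HashMap<String, (HashSet<usize>, HashSet<usize>)> = blocks
        .keys()
        .map(|name| (name.clone(), (HashSet::new(), HashSet::new())))
        .collect();
    let mut changed = true;
    while changed {
        changed = false;
        for (name, info) in blocks.iter() {
            // live_out = union of live_in of all successors
            let mut out: HashSet<usize> = HashSet::new();
            for succ in &info.succs {
                if let Some((succ_in, _)) = live.get(succ) {
                    out.extend(succ_in.iter().cloned());
                }
            }
            // live_in = uses | (live_out - defs)
            let mut inn: HashSet<usize> = info.uses.clone();
            inn.extend(out.difference(&info.defs).cloned());
            let entry = live.get_mut(name).unwrap();
            if entry.0 != inn || entry.1 != out {
                *entry = (inn, out);
                changed = true;
            }
        }
    }
    live
}
```

The per-block results would then be recorded through the new `set_ir_block_livein`/`set_ir_block_liveout` hooks so that `build_chaitin_briggs` can seed Current_Live(B) = LiveOut(B) for every block.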
@@ -25,6 +25,7 @@ lazy_static! {
}
#[repr(C)]
+// do not change the layout (unless change the offset of fields correspondingly)
pub struct ImmixMutatorLocal {
id : usize,
@@ -47,6 +48,15 @@ pub struct ImmixMutatorLocal {
block : Option<Box<ImmixBlock>>,
}
+lazy_static! {
+    pub static ref CURSOR_OFFSET : usize = mem::size_of::<usize>()
+        + mem::size_of::<*mut u8>()
+        + mem::size_of::<Address>();
+    pub static ref LIMIT_OFFSET : usize = *CURSOR_OFFSET
+        + mem::size_of::<Address>();
+}
pub struct ImmixMutatorGlobal {
take_yield : AtomicBool,
still_blocked : AtomicBool
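The new `CURSOR_OFFSET`/`LIMIT_OFFSET` constants hand-compute field offsets by summing the sizes of the preceding fields, which is only sound because the struct is `#[repr(C)]` with no padding between these fields (hence the "do not change the layout" warning): generated code can then load the bump-allocation cursor and limit straight off the mutator. A small illustration of the technique, with a hypothetical `#[repr(C)]` struct standing in for `ImmixMutatorLocal`:

```rust
use std::mem;

// Hypothetical stand-in mirroring the leading fields of ImmixMutatorLocal:
// an id, a raw pointer, then an Address-sized field, then cursor and limit.
#[repr(C)] // field order and offsets are fixed, as in the real struct
struct MutatorPrefix {
    id: usize,
    mark_table: *mut u8,
    space_start: usize, // stands in for an Address field
    cursor: usize,      // stands in for Address
    limit: usize,       // stands in for Address
}

fn main() {
    // Same recipe as CURSOR_OFFSET / LIMIT_OFFSET: the offset of a field
    // is the sum of the sizes of everything laid out before it.
    let cursor_offset = mem::size_of::<usize>()
        + mem::size_of::<*mut u8>()
        + mem::size_of::<usize>();
    let limit_offset = cursor_offset + mem::size_of::<usize>();
    // On a 64-bit target every field here is 8 bytes and 8-aligned, so
    // #[repr(C)] inserts no padding: cursor sits at 24, limit at 32.
    assert_eq!(cursor_offset, 24);
    assert_eq!(limit_offset, 32);
}
```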
@@ -7,6 +7,8 @@ pub use self::immix_mutator::ImmixMutatorGlobal;
pub use self::immix_space::LineMarkTable as ImmixLineMarkTable;
pub use self::immix_mutator::MUTATORS;
pub use self::immix_mutator::N_MUTATORS;
+pub use self::immix_mutator::CURSOR_OFFSET;
+pub use self::immix_mutator::LIMIT_OFFSET;
use std::sync::Arc;
use std::sync::RwLock;
@@ -13,7 +13,6 @@ pub mod common;
pub mod objectmodel;
pub mod heap;
-pub use heap::immix::ImmixMutatorLocal as Mutator;
use utils::ObjectReference;
use heap::immix::BYTES_IN_LINE;
use heap::immix::ImmixSpace;
@@ -24,10 +23,13 @@ use heap::freelist::FreeListSpace;
use std::fmt;
use std::sync::Arc;
use std::sync::RwLock;
use std::boxed::Box;
pub const LARGE_OBJECT_THRESHOLD : usize = BYTES_IN_LINE;
+pub use heap::immix::ImmixMutatorLocal as Mutator;
+pub use heap::immix::CURSOR_OFFSET as ALLOCATOR_CURSOR_OFFSET;
+pub use heap::immix::LIMIT_OFFSET as ALLOCATOR_LIMIT_OFFSET;
#[repr(C)]
pub struct GC {
immix_space: Arc<ImmixSpace>,
@@ -91,14 +93,14 @@ pub extern fn gc_init(immix_size: usize, lo_size: usize, n_gcthreads: usize) {
}
#[no_mangle]
-pub extern fn new_mutator() -> Box<ImmixMutatorLocal> {
-Box::new(ImmixMutatorLocal::new(MY_GC.read().unwrap().as_ref().unwrap().immix_space.clone()))
+pub extern fn new_mutator() -> ImmixMutatorLocal {
+ImmixMutatorLocal::new(MY_GC.read().unwrap().as_ref().unwrap().immix_space.clone())
}
#[no_mangle]
#[allow(unused_variables)]
-pub extern fn drop_mutator(mut mutator: Box<ImmixMutatorLocal>) {
-mutator.destroy();
+pub extern fn drop_mutator(mutator: *mut ImmixMutatorLocal) {
+unsafe {mutator.as_mut().unwrap()}.destroy();
// rust will reclaim the boxed mutator
}
@@ -111,32 +113,32 @@ extern "C" {
#[no_mangle]
#[inline(always)]
-pub extern fn yieldpoint(mutator: &mut Box<ImmixMutatorLocal>) {
-mutator.yieldpoint();
+pub extern fn yieldpoint(mutator: *mut ImmixMutatorLocal) {
+unsafe {mutator.as_mut().unwrap()}.yieldpoint();
}
#[no_mangle]
#[inline(never)]
-pub extern fn yieldpoint_slow(mutator: &mut Box<ImmixMutatorLocal>) {
-mutator.yieldpoint_slow()
+pub extern fn yieldpoint_slow(mutator: *mut ImmixMutatorLocal) {
+unsafe {mutator.as_mut().unwrap()}.yieldpoint_slow()
}
#[no_mangle]
#[inline(always)]
-pub extern fn alloc(mutator: &mut Box<ImmixMutatorLocal>, size: usize, align: usize) -> ObjectReference {
-let addr = mutator.alloc(size, align);
+pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
+let addr = unsafe {mutator.as_mut().unwrap()}.alloc(size, align);
unsafe {addr.to_object_reference()}
}
#[no_mangle]
#[inline(never)]
-pub extern fn alloc_slow(mutator: &mut Box<ImmixMutatorLocal>, size: usize, align: usize) -> ObjectReference {
-let ret = mutator.try_alloc_from_local(size, align);
+pub extern fn alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
+let ret = unsafe {mutator.as_mut().unwrap()}.try_alloc_from_local(size, align);
unsafe {ret.to_object_reference()}
}
#[no_mangle]
-pub extern fn alloc_large(mutator: &mut Box<ImmixMutatorLocal>, size: usize, align: usize) -> ObjectReference {
-let ret = freelist::alloc_large(size, align, mutator, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
+pub extern fn alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
+let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
unsafe {ret.to_object_reference()}
}
\ No newline at end of file
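The exported GC functions switch from `Box<ImmixMutatorLocal>` / `&mut Box<ImmixMutatorLocal>` parameters to plain `*mut ImmixMutatorLocal`, a more conventional C ABI: the callee no longer assumes Rust ownership and just dereferences with `as_mut()`. In the commit the mutator is instead returned by value and embedded in `MuThread` (see `allocator: mm::Mutator` below), which is presumably why the structs gain `#[repr(C)]` layout warnings. For heap-allocated objects, the usual companion to this pattern is the `Box::into_raw`/`Box::from_raw` pairing; a minimal sketch with a hypothetical `Widget` type (names are illustrative, not Zebu's API):

```rust
pub struct Widget {
    counter: u64,
}

#[no_mangle]
pub extern "C" fn widget_new() -> *mut Widget {
    // Box::into_raw hands ownership to the caller as a stable raw pointer.
    Box::into_raw(Box::new(Widget { counter: 0 }))
}

#[no_mangle]
pub extern "C" fn widget_bump(w: *mut Widget) {
    // The same shape as `unsafe {mutator.as_mut().unwrap()}` in the diff:
    // the caller promises the pointer is valid, aligned, and non-null.
    unsafe { w.as_mut().unwrap() }.counter += 1;
}

#[no_mangle]
pub extern "C" fn widget_drop(w: *mut Widget) {
    // Box::from_raw takes ownership back so Rust frees the allocation.
    unsafe {
        drop(Box::from_raw(w));
    }
}
```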
@@ -37,4 +37,14 @@ lazy_static! {
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("swap_back_to_native_stack")),
jit: RwLock::new(None),
};
+pub static ref ALLOC_SLOW : RuntimeEntrypoint = RuntimeEntrypoint {
+sig: P(MuFuncSig {
+hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
+ret_tys: vec![runtime::ADDRESS_TYPE.clone()],
+arg_tys: vec![runtime::UINT64_TYPE.clone(), runtime::UINT64_TYPE.clone()]
+}),
+aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("alloc_slow")),
+jit: RwLock::new(None),
+};
}
\ No newline at end of file
@@ -25,6 +25,14 @@ lazy_static! {
pub static ref ADDRESS_TYPE : P<MuType> = P(
MuType::new(ir::new_internal_id(), MuType_::int(64))
);
+pub static ref UINT32_TYPE : P<MuType> = P(
+MuType::new(ir::new_internal_id(), MuType_::int(32))
+);
+pub static ref UINT64_TYPE : P<MuType> = P(
+MuType::new(ir::new_internal_id(), MuType_::int(64))
+);
}
// consider using the libloading crate instead of the raw C functions for dynamic libraries
@@ -202,9 +202,10 @@ pub enum MuStackState {
#[repr(C)]
#[allow(improper_ctypes)]
+// do not change the layout (unless change the offset of fields correspondingly)
pub struct MuThread {
pub hdr: MuEntityHeader,
-allocator: Box<mm::Mutator>,
+allocator: mm::Mutator,
stack: Option<Box<MuStack>>,
native_sp_loc: Address,
@@ -237,7 +238,7 @@ extern "C" {
}
impl MuThread {
-pub fn new(id: MuID, allocator: Box<mm::Mutator>, stack: Box<MuStack>, user_tls: Option<Address>) -> MuThread {
+pub fn new(id: MuID, allocator: mm::Mutator, stack: Box<MuStack>, user_tls: Option<Address>) -> MuThread {
MuThread {
hdr: MuEntityHeader::unnamed(id),
allocator: allocator,
@@ -247,7 +248,7 @@ impl MuThread {
}
}
-pub fn fake_thread(id: MuID, allocator: Box<mm::Mutator>) -> MuThread {
+pub fn fake_thread(id: MuID, allocator: mm::Mutator) -> MuThread {
MuThread {
hdr: MuEntityHeader::unnamed(id),
allocator: allocator,
@@ -28,6 +28,8 @@ pub trait MachineCode {
fn get_ir_block_livein(&self, block: &str) -> Option<&Vec<MuID>>;
fn get_ir_block_liveout(&self, block: &str) -> Option<&Vec<MuID>>;
+fn set_ir_block_livein(&mut self, block: &str, set: Vec<MuID>);
+fn set_ir_block_liveout(&mut self, block: &str, set: Vec<MuID>);
fn get_all_blocks(&self) -> &Vec<MuName>;
fn get_block_range(&self, block: &str) -> Option<ops::Range<usize>>;
@@ -29,5 +29,5 @@ fn test_gc_no_alive() {
mutator.init_object(res, 0b1100_0011);
}
-mm::drop_mutator(mutator);
+mutator.destroy();
}
\ No newline at end of file
@@ -29,7 +29,7 @@ fn test_exhaust_alloc() {
mutator.init_object(res, 0b1100_0011);
}
-mm::drop_mutator(mutator);
+mutator.destroy();
}
const LARGE_OBJECT_SIZE : usize = 256;
@@ -47,7 +47,7 @@ fn test_exhaust_alloc_large() {
let res = mm::alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
}
-mm::drop_mutator(mutator);
+mutator.destroy();
}
#[test]
@@ -90,7 +90,7 @@ fn test_alloc_mark() {
}
}
-mm::drop_mutator(mutator);
+mutator.destroy();
}
#[allow(dead_code)]
@@ -132,5 +132,5 @@ fn test_alloc_trace() {
heap::gc::start_trace(&mut roots, shared_space, lo_space);
-mm::drop_mutator(mutator);
+mutator.destroy();
}
\ No newline at end of file
@@ -167,5 +167,5 @@ fn start() {
println!("Completed in {} msec", tElapsed);
println!("Finished with {} collections", heap::gc::GC_COUNT.load(Ordering::SeqCst));
-mm::drop_mutator(mutator);
+mutator.destroy();
}
\ No newline at end of file