
Commit 83c457ab authored by qinsoon

fix bugs about allocation (wrong field offsets, forgot to add header size, etc). add PRINTHEX instruction to print a number (for debugging use)
parent d152c68c
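
The allocation half of this commit is about making the compiled fast path agree with the GC's own alloc(): the bump-pointer end has to include the object header, and the returned reference has to be displaced from the raw allocation start by the header offset. Below is a minimal sketch of that arithmetic only, not the project's code; the constant values (and their signs) are assumptions for illustration, the real ones live in runtime::mm::objectmodel.

```rust
// Sketch only: illustrative constants standing in for the objectmodel ones.
const OBJECT_HEADER_SIZE: usize = 8;    // assumed header size in bytes
const OBJECT_HEADER_OFFSET: isize = -8; // assumed: header sits just before the reference

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

/// Bump-allocate `size` payload bytes plus the header; on success, return the
/// object reference, displaced from the raw start the same way the backend
/// change does with `lea -HEADER_OFFSET(%start) -> %result`.
fn fastpath_alloc(cursor: &mut usize, limit: usize, size: usize, align: usize) -> Option<usize> {
    let start = align_up(*cursor, align);
    let end = start + size + OBJECT_HEADER_SIZE; // the "forgot to add header size" fix
    if end > limit {
        return None; // fall through to the slow path instead
    }
    *cursor = end;
    Some((start as isize - OBJECT_HEADER_OFFSET) as usize)
}

fn main() {
    let mut cursor = 0x1000;
    let r = fastpath_alloc(&mut cursor, 0x2000, 24, 8).unwrap();
    println!("ref = 0x{:x}, new cursor = 0x{:x}", r, cursor); // cursor advances by 32, not 24
}
```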
@@ -277,7 +277,9 @@ pub enum Instruction_ {
CommonInst_Unpin(OpIndex),
// internal use: mov from ops[0] to value
Move(OpIndex)
Move(OpIndex),
// internal use: print op as hex value
PrintHex(OpIndex)
}
impl Instruction_ {
@@ -402,7 +404,9 @@ impl Instruction_ {
&Instruction_::CommonInst_Unpin(op) => format!("COMMONINST Unpin {}", ops[op]),
// move
&Instruction_::Move(from) => format!("MOVE {}", ops[from])
&Instruction_::Move(from) => format!("MOVE {}", ops[from]),
// print hex
&Instruction_::PrintHex(i) => format!("PRINTHEX {}", ops[i])
}
}
}
......
@@ -32,7 +32,8 @@ pub fn is_terminal_inst(inst: &Instruction_) -> bool {
| &CommonInst_SetThreadLocal(_)
| &CommonInst_Pin(_)
| &CommonInst_Unpin(_)
| &Move(_) => false,
| &Move(_)
| &PrintHex(_) => false,
&Return(_)
| &ThreadExit
| &Throw(_)
@@ -99,5 +100,6 @@ pub fn has_side_effect(inst: &Instruction_) -> bool {
&CommonInst_Pin(_) => true,
&CommonInst_Unpin(_) => true,
&Move(_) => false,
&PrintHex(_) => true
}
}
@@ -65,7 +65,8 @@ pub enum OpCode {
CommonInst_Pin,
CommonInst_Unpin,
Move
Move,
PrintHex
}
pub fn pick_op_code_for_ssa(ty: &P<MuType>) -> OpCode {
@@ -296,5 +297,6 @@ pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
Instruction_::CommonInst_Pin(_) => OpCode::CommonInst_Pin,
Instruction_::CommonInst_Unpin(_) => OpCode::CommonInst_Unpin,
Instruction_::Move(_) => OpCode::Move,
Instruction_::PrintHex(_) => OpCode::PrintHex,
}
}
@@ -7,6 +7,8 @@ use ast::types;
use ast::types::*;
use vm::VM;
use runtime::mm;
use runtime::mm::objectmodel::OBJECT_HEADER_SIZE;
use runtime::mm::objectmodel::OBJECT_HEADER_OFFSET;
use runtime::ValueLocation;
use runtime::thread;
use runtime::entrypoints;
@@ -1153,6 +1155,20 @@ impl <'a> InstructionSelection {
None,
Some(node), f_content, f_context, vm);
}
Instruction_::PrintHex(index) => {
trace!("instsel on PRINTHEX");
let ops = inst.ops.read().unwrap();
let ref op = ops[index];
self.emit_runtime_entry(
&entrypoints::PRINT_HEX,
vec![op.clone_value()],
None,
Some(node), f_content, f_context, vm
);
}
_ => unimplemented!()
} // main switch
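
PRINTHEX lowers to an ordinary runtime call: instruction selection passes the single operand to the PRINT_HEX entrypoint declared further down in this commit, whose Rust side is muentry_print_hex. A toy sketch of that end-to-end behaviour; the function body mirrors the one added in mod.rs, while the standalone name and call site here are only for illustration.

```rust
// What a PRINTHEX ultimately does at run time: print the 64-bit operand as hex.
// The exported symbol in this commit is muentry_print_hex; this copy is a stand-in.
pub extern "C" fn print_hex(x: u64) {
    println!("0x{:x}", x);
}

fn main() {
    // e.g. each object reference allocated in the new test shows up as one hex address
    print_hex(0xdead_beef); // prints "0xdeadbeef"
}
```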
@@ -1859,6 +1875,8 @@ impl <'a> InstructionSelection {
tmp_res
}
// this function needs to generate exactly the same code as alloc() in immix_mutator.rs in the GC
// FIXME: currently it is slightly different
fn emit_alloc_sequence_small (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// emit immix allocation fast path
@@ -1883,10 +1901,19 @@
// or lea size(%start) -> %end
let tmp_end = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
if size.is_int_const() {
let offset = size.extract_int_const() as i32;
let mut offset = size.extract_int_const() as i32;
if OBJECT_HEADER_SIZE != 0 {
offset += OBJECT_HEADER_SIZE as i32;
}
self.emit_lea_base_immoffset(&tmp_end, &tmp_start, offset, vm);
} else {
self.backend.emit_mov_r_r(&tmp_end, &tmp_start);
if OBJECT_HEADER_SIZE != 0 {
// ASM: add %size, HEADER_SIZE -> %size
self.backend.emit_add_r_imm(&size, OBJECT_HEADER_SIZE as i32);
}
self.backend.emit_add_r_r(&tmp_end, &size);
}
@@ -1910,9 +1937,14 @@
self.emit_store_base_offset(&tmp_tl, cursor_offset as i32, &tmp_end, vm);
// put start as result
// ASM: mov %start -> %result
let tmp_res = self.get_result_value(node);
self.backend.emit_mov_r_r(&tmp_res, &tmp_start);
if OBJECT_HEADER_OFFSET != 0 {
// ASM: lea -HEADER_OFFSET(%start) -> %result
self.emit_lea_base_immoffset(&tmp_res, &tmp_start, - OBJECT_HEADER_OFFSET as i32, vm);
} else {
// ASM: mov %start -> %result
self.backend.emit_mov_r_r(&tmp_res, &tmp_start);
}
// ASM jmp alloc_end
let allocend = format!("{}_alloc_small_end", node.id());
......
@@ -519,6 +519,7 @@ pub fn estimate_insts_for_ir(inst: &Instruction) -> usize {
// others
Move(_) => 0,
PrintHex(_) => 10,
ExnInstruction{ref inner, ..} => estimate_insts_for_ir(&inner)
}
}
@@ -23,6 +23,8 @@ lazy_static! {
pub static ref N_MUTATORS : RwLock<usize> = RwLock::new(0);
}
const TRACE_ALLOC_FASTPATH : bool = true;
#[repr(C)]
// do not change the layout (unless the field offsets below are updated correspondingly)
pub struct ImmixMutatorLocal {
@@ -52,7 +54,7 @@ pub struct ImmixMutatorLocal {
lazy_static! {
pub static ref CURSOR_OFFSET : usize = mem::size_of::<usize>()
+ mem::size_of::<*mut u8>()
+ mem::size_of::<*mut u8>() * 2
+ mem::size_of::<Address>();
pub static ref LIMIT_OFFSET : usize = *CURSOR_OFFSET
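
The "wrong field offsets" part of the commit message refers to this constant: the compiled fast path loads ImmixMutatorLocal.cursor through CURSOR_OFFSET, so the sum has to track the struct's real layout (hence the #[repr(C)] and the do-not-change-the-layout warning added above). Below is a sketch of how such an offset can be sanity-checked; the field names are made up and the layout is a stand-in whose leading fields merely match the sum (a usize, two raw pointers, one address-sized word), and mem::offset_of! needs Rust 1.77+, so this is only an illustration, not something the original crate could have used.

```rust
use std::mem;

// Stand-in layout; only the sizes of the fields ahead of `cursor` matter here.
#[repr(C)]
struct MutatorSketch {
    id: usize,          // mem::size_of::<usize>()
    alloc_map: *mut u8, // mem::size_of::<*mut u8>() ...
    trace_map: *mut u8, // ... * 2 -- the second pointer the fixed offset now accounts for
    space_start: usize, // stands in for an Address
    cursor: usize,      // the field CURSOR_OFFSET must reach
    limit: usize,
}

fn main() {
    let cursor_offset = mem::size_of::<usize>()
        + mem::size_of::<*mut u8>() * 2
        + mem::size_of::<usize>();
    // With #[repr(C)] and word-sized fields there is no padding, so the sum and
    // the real offset agree (32 on a 64-bit target).
    assert_eq!(mem::offset_of!(MutatorSketch, cursor), cursor_offset);
    println!("cursor offset = {}", cursor_offset);
}
```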
@@ -145,11 +147,22 @@ impl ImmixMutatorLocal {
let size = size + objectmodel::OBJECT_HEADER_SIZE;
// end
if TRACE_ALLOC_FASTPATH {
trace!("Mutator{}: fastpath alloc: size={}, align={}", self.id, size, align);
}
let start = self.cursor.align_up(align);
let end = start.plus(size);
if TRACE_ALLOC_FASTPATH {
trace!("Mutator{}: fastpath alloc: start=0x{:x}, end=0x{:x}", self.id, start, end);
}
if end > self.limit {
let ret = self.try_alloc_from_local(size, align);
if TRACE_ALLOC_FASTPATH {
trace!("Mutator{}: fastpath alloc: try_alloc_from_local()=0x{:x}", self.id, ret);
}
if cfg!(debug_assertions) {
if !ret.is_aligned_to(align) {
@@ -229,8 +242,13 @@ impl ImmixMutatorLocal {
for line in next_available_line..end_line {
self.block().line_mark_table_mut().set(line, immix::LineMark::FreshAlloc);
}
self.alloc(size, align)
// allocate fast path
let start = self.cursor.align_up(align);
let end = start.plus(size);
self.cursor = end;
start
},
None => {
self.alloc_from_global(size, align)
@@ -263,6 +281,8 @@ impl ImmixMutatorLocal {
self.cursor = self.block().start();
self.limit = self.block().start();
self.line = 0;
trace!("Mutator{}: slowpath: new block starting from 0x{:x}", self.id, self.cursor);
return self.alloc(size, align);
},
......
@@ -106,4 +106,15 @@ lazy_static! {
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_frem")),
jit: RwLock::new(None)
};
// impl/decl: mod.rs
pub static ref PRINT_HEX : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![],
arg_tys: vec![UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_print_hex")),
jit: RwLock::new(None)
};
}
@@ -234,3 +234,9 @@ pub extern fn mu_main(serialized_vm : *const c_char, argc: c_int, argv: *const *
thread.join().unwrap();
}
}
#[no_mangle]
#[allow(unreachable_code)]
pub extern fn muentry_print_hex(x: u64) {
println!("0x{:x}", x);
}
\ No newline at end of file
@@ -139,6 +139,28 @@ impl fmt::Debug for Address {
}
}
#[cfg(test)]
mod addr_tests {
use super::*;
#[test]
fn test_align_up() {
let addr = Address(0);
let aligned = addr.align_up(8);
assert!(addr == aligned);
}
#[test]
fn test_is_aligned() {
let addr = Address(0);
assert!(addr.is_aligned_to(8));
let addr = Address(8);
assert!(addr.is_aligned_to(8));
}
}
#[derive(Copy, Clone, Eq, Hash)]
pub struct ObjectReference (usize);
......
@@ -30,7 +30,7 @@ mod tests {
pub fn test_u8_bits() {
let value : u8 = 0b1100_0011;
assert_eq!(test_nth_bit_u8(value, 6), true);
assert_eq!(test_nth_bit_u8(value, 6, 1), true);
assert_eq!(lower_bits_u8(value, 6), 0b00_0011);
}
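
The updated assertion passes an extra argument to test_nth_bit_u8, suggesting the helper now takes the expected bit value rather than returning a bool compared against true. A plausible implementation consistent with both assertions (not the project's actual code):

```rust
// Hypothetical versions of the two helpers, reconstructed from the asserts only.
fn test_nth_bit_u8(value: u8, index: usize, expect: u8) -> bool {
    ((value >> index) & 1) == expect
}

fn lower_bits_u8(value: u8, len: usize) -> u8 {
    value & ((1u8 << len) - 1)
}

fn main() {
    let value: u8 = 0b1100_0011;
    assert!(test_nth_bit_u8(value, 6, 1));          // bit 6 of 0b1100_0011 is 1
    assert_eq!(lower_bits_u8(value, 6), 0b00_0011); // low 6 bits
}
```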
......
@@ -436,4 +436,24 @@ macro_rules! inst {
v: Instruction_::Return(vec![])
});
};
// THREADEXIT
(($vm: expr, $fv: ident) $name: ident: THREADEXIT) => {
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: None,
ops: RwLock::new(vec![]),
v: Instruction_::ThreadExit
});
};
// PRINTHEX
(($vm: expr, $fv: ident) $name: ident: PRINTHEX $val: ident) => {
let $name = $fv.new_inst(Instruction{
hdr: MuEntityHeader::unnamed($vm.next_id()),
value: None,
ops: RwLock::new(vec![$val.clone()]),
v: Instruction_::PrintHex(0)
});
}
}
@@ -16,6 +16,80 @@ use std::sync::RwLock;
use self::mu::testutil;
use self::mu::testutil::aot;
#[test]
fn test_allocation_fastpath() {
VM::start_logging_trace();
let vm = Arc::new(allocation_fastpath());
let compiler = Compiler::new(CompilerPolicy::default(), &vm);
let func_id = vm.id_of("allocation_fastpath");
{
let funcs = vm.funcs().read().unwrap();
let func = funcs.get(&func_id).unwrap().read().unwrap();
let func_vers = vm.func_vers().read().unwrap();
let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();
compiler.compile(&mut func_ver);
}
vm.make_primordial_thread(func_id, true, vec![]);
backend::emit_context(&vm);
let executable = aot::link_primordial(vec!["allocation_fastpath".to_string()], "allocation_fastpath_test", &vm);
aot::execute(executable);
}
fn allocation_fastpath() -> VM {
let vm = VM::new();
typedef! ((vm) int1 = mu_int(1));
typedef! ((vm) int64 = mu_int(64));
typedef! ((vm) ref_int64 = mu_ref(int64));
typedef! ((vm) struct_t = mu_struct(int64, int64, ref_int64));
typedef! ((vm) ref_struct_t = mu_ref(struct_t));
funcsig! ((vm) sig = () -> ());
funcdecl! ((vm) <sig> allocation_fastpath);
funcdef! ((vm) <sig> allocation_fastpath VERSION allocation_fastpath_v1);
block! ((vm, allocation_fastpath_v1) blk_entry);
// a = NEW <struct_t>
ssa! ((vm, allocation_fastpath_v1) <ref_struct_t> a);
inst! ((vm, allocation_fastpath_v1) blk_entry_new1:
a = NEW <struct_t>
);
inst! ((vm, allocation_fastpath_v1) blk_entry_print1:
PRINTHEX a
);
ssa! ((vm, allocation_fastpath_v1) <ref_struct_t> b);
inst! ((vm, allocation_fastpath_v1) blk_entry_new2:
b = NEW <struct_t>
);
inst! ((vm, allocation_fastpath_v1) blk_entry_print2:
PRINTHEX b
);
inst! ((vm, allocation_fastpath_v1) blk_entry_threadexit:
THREADEXIT
);
define_block! ((vm, allocation_fastpath_v1) blk_entry() {
blk_entry_new1, blk_entry_print1,
blk_entry_new2, blk_entry_print2,
blk_entry_threadexit
});
define_func_ver!((vm) allocation_fastpath_v1 (entry: blk_entry) {blk_entry});
vm
}
#[test]
fn test_instruction_new() {
VM::start_logging_trace();
......
@@ -997,7 +997,12 @@ def run_boot_image(entry, output, has_c_main_sig = False, args = [], impl=os.get
t.driver.standalone = True # force standalone
t.driver.exe_name = output
#t.backendopt(inline=True, mallocs=True)
#t.view()
#t.mutype()
#t.view()
db, mugen, epf_name = t.compile_mu()
exe = py.path.local(output)
......