Commit cfa05199 authored by John Zhang's avatar John Zhang

Merge branch 'master' into jit-test

parents 6b06e94b abefe290
# This file is a template, and might need editing before it works on your project.
# Unofficial language image. Look for the different tagged releases at:
# https://hub.docker.com/r/scorpil/rust/tags/
image: "scorpil/rust:stable"
# Optional: Pick zero or more services to be used on all builds.
# Only needed when using a docker container to run your tests in.
# Check out: http://docs.gitlab.com/ce/ci/docker/using_docker_images.html#what-is-service
#services:
# - mysql:latest
# - redis:latest
# - postgres:latest
# Optional: Install a C compiler, cmake and git into the container.
# You will often need this when you (or any of your dependencies) depend on C code.
#before_script:
#- apt-get update -yqq
#- apt-get install -yqq --no-install-recommends build-essential
# Use cargo to test the project
test:cargo:
script:
- rustc --version && cargo --version # Print version info for debugging
- cargo test --verbose --jobs 1 # Don't parallelize, to make errors more readable
......@@ -954,4 +954,4 @@ pub fn op_vector_str(vec: &Vec<OpIndex>, ops: &Vec<P<TreeNode>>) -> String {
}
}
ret
}
\ No newline at end of file
}
......@@ -443,6 +443,56 @@ impl MachineCode for ASMCode {
self.code.remove(index);
self.code.insert(index, ASMInst::nop());
}
/// Turns push/pop instructions for unused callee-saved registers into nops.
///
/// `used_callee_saved` lists the callee-saved registers that are actually
/// assigned in this function. Any push/pop whose operand is a callee-saved
/// register not in that list is replaced with a nop via `set_inst_nop`.
/// rbp is always saved and is never removed; rsp is implicitly used/defined
/// by every push/pop and is ignored when identifying the operand.
///
/// Returns the IDs of the registers whose save/restore instructions were
/// removed, so the caller can drop their frame records.
fn remove_unnecessary_callee_saved(&mut self, used_callee_saved: Vec<MuID>) -> Vec<MuID> {
    // we always save rbp
    let rbp = x86_64::RBP.extract_ssa_id().unwrap();
    // every push/pop will use/define rsp
    let rsp = x86_64::RSP.extract_ssa_id().unwrap();

    // The operand of a push/pop, i.e. the first defined or used register
    // that is neither rsp nor rbp.
    let find_op_other_than_rsp = |inst: &ASMInst| -> Option<MuID> {
        inst.defines.keys()
            .chain(inst.uses.keys())
            .find(|id| **id != rsp && **id != rbp)
            .map(|id| *id)
    };

    let mut inst_to_remove = vec![];
    let mut regs_to_remove = vec![];

    for i in 0..self.number_of_insts() {
        let inst = &self.code[i];
        // NOTE(review): substring matching on the emitted assembly text is
        // fragile -- it assumes "push"/"pop" only ever appear in save/restore
        // instructions. Confirm no other mnemonic or operand text can match.
        if inst.code.contains("push") || inst.code.contains("pop") {
            if let Some(op) = find_op_other_than_rsp(inst) {
                // if this push/pop instruction is about a callee saved register
                // and the register is not used, we set the instruction as nop
                if x86_64::is_callee_saved(op) && !used_callee_saved.contains(&op) {
                    trace!("removing instruction {:?} for save/restore unnecessary callee saved regs", inst);
                    regs_to_remove.push(op);
                    inst_to_remove.push(i);
                }
            }
        }
    }

    // Mutate after scanning so the immutable borrow of `self.code` has ended.
    for i in inst_to_remove {
        self.set_inst_nop(i);
    }

    regs_to_remove
}
fn emit(&self) -> Vec<u8> {
let mut ret = vec![];
......
......@@ -65,7 +65,7 @@ pub trait CodeGenerator {
fn emit_call_near_mem64(&mut self, callsite: String, func: &P<Value>) -> ValueLocation;
fn emit_ret(&mut self);
fn emit_push_r64(&mut self, src: &P<Value>);
fn emit_push_imm32(&mut self, src: i32);
fn emit_pop_r64(&mut self, dest: &P<Value>);
......
......@@ -14,6 +14,7 @@ use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass;
use compiler::backend;
use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
use compiler::backend::x86_64::ASMCodeGen;
......@@ -898,7 +899,7 @@ impl <'a> InstructionSelection {
}
fn emit_common_prologue(&mut self, args: &Vec<P<Value>>, vm: &VM) {
let block_name = "prologue".to_string();
let block_name = PROLOGUE_BLOCK_NAME.to_string();
self.backend.start_block(block_name.clone());
// no livein
......@@ -914,10 +915,12 @@ impl <'a> InstructionSelection {
// push all callee-saved registers
{
let frame = self.current_frame.as_mut().unwrap();
let rbp = x86_64::RBP.extract_ssa_id().unwrap();
for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
// not pushing rbp (as we have done taht)
if reg.extract_ssa_id().unwrap() != x86_64::RBP.extract_ssa_id().unwrap() {
// not pushing rbp (as we have done that)
if reg.extract_ssa_id().unwrap() != rbp {
trace!("allocate frame slot for reg {}", reg);
self.backend.emit_push_r64(&reg);
frame.alloc_slot_for_callee_saved_reg(reg.clone(), vm);
}
......
......@@ -11,6 +11,10 @@ pub const WORD_SIZE : ByteSize = 8;
pub const AOT_EMIT_DIR : &'static str = "emit";
pub const AOT_EMIT_CONTEXT_FILE : &'static str = "context.s";
// this is not full name, but pro/epilogue name is generated from this
pub const PROLOGUE_BLOCK_NAME: &'static str = "prologue";
pub const EPILOGUE_BLOCK_NAME: &'static str = "epilogue";
// X86_64
#[cfg(target_arch = "x86_64")]
......
......@@ -9,6 +9,7 @@ pub use compiler::backend::reg_alloc::graph_coloring::coloring::GraphColoring;
use ast::ir::*;
use vm::VM;
use compiler::CompilerPass;
use compiler::backend::is_callee_saved;
use compiler::backend::init_machine_regs_for_func;
use std::any::Any;
......@@ -58,6 +59,24 @@ impl RegisterAllocation {
}
}
// find out what callee saved registers are used
{
use std::collections::HashSet;
let used_callee_saved: HashSet<MuID> =
coloring.cf.temps.values()
.map(|x| *x)
.filter(|x| is_callee_saved(*x))
.collect();
let used_callee_saved: Vec<MuID> = used_callee_saved.into_iter().collect();
let removed_callee_saved = coloring.cf.mc_mut().remove_unnecessary_callee_saved(used_callee_saved);
for reg in removed_callee_saved {
coloring.cf.frame.remove_record_for_callee_saved_reg(reg);
}
}
coloring.cf.mc().trace_mc();
}
}
......
......@@ -60,6 +60,10 @@ impl Frame {
let slot = self.alloc_slot(&reg, vm);
slot.make_memory_op(reg.ty.clone(), vm)
}
/// Forgets the frame record kept for a callee-saved register, used when the
/// register's save/restore instructions have been removed as unnecessary.
pub fn remove_record_for_callee_saved_reg(&mut self, reg: MuID) {
    // Presumably `allocated` maps register IDs to their reserved frame
    // slots; dropping the entry releases the bookkeeping for this register
    // (the slot itself is not reused here) -- confirm against alloc_slot.
    self.allocated.remove(&reg);
}
pub fn alloc_slot_for_spilling(&mut self, reg: P<Value>, vm: &VM) -> P<Value> {
let slot = self.alloc_slot(&reg, vm);
......
......@@ -10,7 +10,9 @@ use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
pub struct CompiledFunction {
pub func_id: MuID,
pub func_ver_id: MuID,
pub temps: HashMap<MuID, MuID>, // assumes one temporary maps to one register
// assumes one temporary maps to one register
pub temps: HashMap<MuID, MuID>,
// not emitting this
pub mc: Option<Box<MachineCode + Send + Sync>>,
......@@ -122,10 +124,15 @@ pub trait MachineCode {
// functions for rewrite
/// replace a temp with a machine register (to_reg must be a machine register)
fn replace_reg(&mut self, from: MuID, to: MuID);
/// replace a temp with another temp
/// replace a temp that is defined in the inst with another temp
fn replace_define_tmp_for_inst(&mut self, from: MuID, to: MuID, inst: usize);
/// replace a temp that is used in the inst with another temp
fn replace_use_tmp_for_inst(&mut self, from: MuID, to: MuID, inst: usize);
/// set an instruction as nop
fn set_inst_nop(&mut self, index: usize);
/// remove unnecessary push/pop if the callee saved register is not used
/// returns what registers push/pop have been deleted
fn remove_unnecessary_callee_saved(&mut self, used_callee_saved: Vec<MuID>) -> Vec<MuID>;
fn as_any(&self) -> &Any;
}
......
use super::common::*;
use ast::op::*;
pub struct MuIRBuilder {
/// ref to MuVM
mvm: *const MuVM,
......@@ -105,17 +107,6 @@ impl MuIRBuilder {
/// Records an integer type of bit-length `len` under `id` in the bundle
/// currently being built. The node is only registered here; converting it
/// into an implementation-level type happens when the bundle is loaded.
pub fn new_type_int(&mut self, id: MuID, len: c_int) {
    self.bundle.types.insert(id, Box::new(NodeType::TypeInt { id: id, len: len }));
}
pub fn new_type_float(&mut self, id: MuID) {
......@@ -451,7 +442,10 @@ struct BundleLoader<'lb, 'lvm> {
built_types: IdPMap<MuType>,
built_sigs: IdPMap<MuFuncSig>,
built_values: IdPMap<Value>,
built_funcs: IdPMap<MuFunction>,
built_funcvers: IdPMap<MuFunctionVersion>,
struct_id_tags: Vec<(MuID, MuName)>,
built_refi64: P<MuType>,
}
fn load_bundle(b: &mut MuIRBuilder) {
......@@ -459,20 +453,50 @@ fn load_bundle(b: &mut MuIRBuilder) {
let new_map = b.id_name_map.drain().collect::<HashMap<_,_>>();
let mut built_types: IdPMap<MuType> = Default::default();
let refi64: P<MuType> = ensure_refi64(vm, &mut built_types);
let mut bl = BundleLoader {
b: b,
vm: vm,
id_name_map: new_map,
visited: Default::default(),
built_types: Default::default(),
built_types: built_types,
built_sigs: Default::default(),
built_values: Default::default(),
built_funcs: Default::default(),
built_funcvers: Default::default(),
struct_id_tags: Default::default(),
built_refi64: refi64,
};
bl.load_bundle();
}
/// Builds an unnamed `i64` type and an unnamed `ref<i64>` type, registers
/// both in `built_types`, and returns the `ref<i64>` type.
fn ensure_refi64(vm: &VM, built_types: &mut IdPMap<MuType>) -> P<MuType> {
    // Allocate fresh IDs from the VM: the i64 first, then the ref to it.
    let int_id = vm.next_id();
    let ref_id = vm.next_id();

    let int_ty = P(MuType {
        hdr: MuEntityHeader::unnamed(int_id),
        v: MuType_::Int(64),
    });
    let ref_ty = P(MuType {
        hdr: MuEntityHeader::unnamed(ref_id),
        v: MuType_::Ref(int_ty.clone()),
    });

    trace!("Ensure i64 is defined: {} {:?}", int_id, int_ty);
    trace!("Ensure ref is defined: {} {:?}", ref_id, ref_ty);

    built_types.insert(int_id, int_ty);
    built_types.insert(ref_id, ref_ty.clone());

    ref_ty
}
impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
fn load_bundle(&mut self) {
self.ensure_names();
......@@ -480,7 +504,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
}
fn name_from_id(id: MuID, hint: &str) -> String {
format!("@uvm.unnamed{}{}", hint, id)
format!("@uvm.unnamed.{}{}", hint, id)
}
fn ensure_name(&mut self, id: MuID, hint: &str) {
......@@ -502,6 +526,14 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
_ => {}
}
}
for id in self.b.bundle.funcvers.keys() {
self.ensure_name(*id, "funcver");
}
for id in self.b.bundle.bbs.keys() {
self.ensure_name(*id, "funcver");
}
}
fn get_name(&self, id: MuID) -> String {
......@@ -542,6 +574,16 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
self.build_const(*id)
}
}
for id in self.b.bundle.funcs.keys() {
if !self.visited.contains(id) {
self.build_func(*id)
}
}
for id in self.b.bundle.funcvers.keys() {
self.build_funcver(*id)
}
}
fn build_type(&mut self, id: MuID) {
......@@ -596,6 +638,13 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
}
}
/// Resolves a type by ID, preferring types built as part of this bundle and
/// falling back to types the VM already knows about.
fn get_built_type(&self, id: MuID) -> P<MuType> {
    if let Some(ty) = self.built_types.get(&id) {
        ty.clone()
    } else {
        self.vm.get_type(id)
    }
}
fn fill_struct(&mut self, id: MuID, tag: &MuName) {
let ty = self.b.bundle.types.get(&id).unwrap();
......@@ -691,5 +740,116 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
self.built_values.insert(id, P(impl_val));
}
/// Materializes the declared function `id` from the bundle into a
/// `MuFunction` and records it in `built_funcs`. The ID is marked visited
/// up front so the top-level build loop does not process it twice.
fn build_func(&mut self, id: MuID) {
    self.visited.insert(id);

    let decl = self.b.bundle.funcs.get(&id).unwrap();
    trace!("Building function {} {:?}", id, decl);

    let header = self.make_mu_entity_header(id);
    let sig = self.ensure_sig_rec(decl.sig);

    let built = MuFunction {
        hdr: header,
        sig: sig,
        cur_ver: None,
        all_vers: Default::default(),
    };
    trace!("Function built: {} {:?}", id, built);

    self.built_funcs.insert(id, P(built));
}
/// Builds a `MuFunctionVersion` from bundle node `id`: takes the signature
/// from the function it belongs to, materializes all of its basic blocks,
/// and records the result in `built_funcvers`.
fn build_funcver(&mut self, id: MuID) {
    let fv = self.b.bundle.funcvers.get(&id).unwrap();
    trace!("Building function version {} {:?}", id, fv);

    let hdr = self.make_mu_entity_header(id);

    // The owning function must already be in `built_funcs` (the top-level
    // build loop builds all funcs before any funcver).
    let fun = self.built_funcs.get(&fv.func).unwrap();
    let impl_sig = fun.sig.clone();

    // SSA variables created while building blocks accumulate in this context.
    let mut ctx = FunctionContext { values: Default::default() };

    let blocks = fv.bbs.iter().map(|bbid| {
        let block = self.build_block(*bbid, &mut ctx);
        (*bbid, block)
    }).collect::<HashMap<MuID, Block>>();

    // The first listed basic block serves as the entry block.
    let entry_id = *fv.bbs.first().unwrap();
    let ctn = FunctionContent {
        entry: entry_id,
        blocks: blocks,
    };

    let impl_fv = MuFunctionVersion {
        hdr: hdr,
        // NOTE(review): `func_id` is set to the funcver's own id, not to
        // `fv.func` (the id of the function being versioned) -- confirm
        // this is intended.
        func_id: id,
        sig: impl_sig,
        content: Some(ctn),
        context: ctx,
        block_trace: None,
    };
    trace!("Function version built {} {:?}", id, impl_fv);

    self.built_funcvers.insert(id, P(impl_fv));
}
/// Creates a new SSA variable of type `ty`, registers it in `ctx`, and
/// returns it wrapped in a `TreeNode`.
///
/// Copied from ast::ir::*, which was written for the previous (mutable)
/// API; once migrated, the AST can be treated as almost fully immutable and
/// built in a functional recursive-descent style.
fn new_ssa(&self, id: MuID, ty: P<MuType>, ctx: &mut FunctionContext) -> P<TreeNode> {
    let value = P(Value {
        hdr: self.make_mu_entity_header(id),
        ty: ty,
        v: Value_::SSAVar(id),
    });

    // Track the variable in the function context before handing out the node.
    ctx.values.insert(id, SSAVarEntry::new(value.clone()));

    let opcode = pick_op_code_for_ssa(&value.ty);
    P(TreeNode {
        op: opcode,
        v: TreeNode_::Value(value),
    })
}
/// Builds one basic block from bundle node `id`: creates SSA variables for
/// the normal parameters (and the optional exception parameter) and
/// assembles a `Block` with an empty body. Every created SSA variable is
/// registered in `ctx`.
fn build_block(&self, id: MuID, ctx: &mut FunctionContext) -> Block {
    let bb = self.b.bundle.bbs.get(&id).unwrap();
    trace!("Building basic block {} {:?}", id, bb);

    // Normal parameter ids pair positionally with their type ids.
    let nor_ids = &bb.norParamIDs;
    let nor_tys = &bb.norParamTys;

    let args = nor_ids.iter().zip(nor_tys).map(|(arg_id, arg_ty_id)| {
        let arg_ty = self.get_built_type(*arg_ty_id);
        self.new_ssa(*arg_id, arg_ty, ctx).clone_value()
    }).collect::<Vec<_>>();

    // The exception parameter, if present, is typed as ref<i64>
    // (the type prebuilt by ensure_refi64).
    let exn_arg = bb.excParamID.map(|arg_id| {
        let arg_ty = self.built_refi64.clone();
        self.new_ssa(arg_id, arg_ty, ctx).clone_value()
    });

    let hdr = self.make_mu_entity_header(id);
    let ctn = BlockContent {
        args: args,
        exn_arg: exn_arg,
        body: Default::default(), // FIXME: actually build body.
        keepalives: None,
    };

    Block {
        hdr: hdr,
        content: Some(ctn),
        control_flow: Default::default(),
    }
}
}
......@@ -150,3 +150,75 @@ fn test_consts_loading() {
}
}
/// Exercises function/funcver loading through the C API: declares two
/// integer types, a signature, a function, and a two-block function
/// version, then loads the bundle.
#[test]
#[allow(unused_variables)]
fn test_function_loading() {
    let mut csp: CStringPool = Default::default();

    unsafe {
        simple_logger::init_with_level(log::LogLevel::Trace).ok();

        info!("Starting micro VM...");

        // Set up the VM, a client context, and an IR builder via the C API.
        let mvm = mu_fastimpl_new();
        let ctx = ((*mvm).new_context)(mvm);
        let b = ((*ctx).new_ir_builder)(ctx);

        // Symbols for the two int types, the signature, and the function.
        let id1 = ((*b).gen_sym)(b, csp.get("@i32"));
        let id2 = ((*b).gen_sym)(b, csp.get("@i64"));
        let id3 = ((*b).gen_sym)(b, csp.get("@sig"));
        let id4 = ((*b).gen_sym)(b, csp.get("@func"));

        ((*b).new_type_int)(b, id1, 32);
        ((*b).new_type_int)(b, id2, 64);

        // @sig = (@i32) -> (@i64)
        let mut ptys = vec![id1];
        let mut rtys = vec![id2];
        ((*b).new_funcsig)(b, id3,
                           ptys.as_mut_ptr(), ptys.len(),
                           rtys.as_mut_ptr(), rtys.len());

        ((*b).new_func)(b, id4, id3);

        // Symbols for the function version, its two basic blocks, the entry
        // block's normal parameter, and bb1's exception parameter.
        let id5 = ((*b).gen_sym)(b, csp.get("@func.v1"));
        let id6 = ((*b).gen_sym)(b, csp.get("@func.v1.entry"));
        let id7 = ((*b).gen_sym)(b, csp.get("@func.v1.entry.x"));
        let id8 = ((*b).gen_sym)(b, csp.get("@func.v1.bb1"));
        let id9 = ((*b).gen_sym)(b, csp.get("@func.v1.bb1.exc"));
        //let id4 = ((*b).gen_sym)(b, csp.get("@func"));

        let mut bbs = vec![id6, id8];
        ((*b).new_func_ver)(b, id5, id4, bbs.as_mut_ptr(), bbs.len());

        {
            // Entry block: one normal parameter of type @i32, no exception
            // parameter (0), empty body.
            let mut args = vec![id7];
            let mut argtys = vec![id1];
            let mut insts = vec![];
            ((*b).new_bb)(b, id6,
                          args.as_mut_ptr(), argtys.as_mut_ptr(), args.len(),
                          0,
                          insts.as_mut_ptr(), insts.len());
        }

        {
            // bb1: no normal parameters, exception parameter id9, empty body.
            let mut args = vec![];
            let mut argtys = vec![];
            let mut insts = vec![];
            ((*b).new_bb)(b, id8,
                          args.as_mut_ptr(), argtys.as_mut_ptr(), args.len(),
                          id9,
                          insts.as_mut_ptr(), insts.len());
        }

        // Load the accumulated bundle into the VM and tear down the context.
        ((*b).load)(b);

        ((*ctx).close_context)(ctx);

        info!("Finished.");
    }
}
......@@ -5,10 +5,10 @@ use std::sync::atomic::Ordering;
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 10000000;
const WORK_LOAD : usize = 5000000;
const IMMIX_SPACE_SIZE : usize = 40 << 20;
const LO_SPACE_SIZE : usize = 40 << 20;
const IMMIX_SPACE_SIZE : usize = 20 << 20;
const LO_SPACE_SIZE : usize = 20 << 20;
#[test]
fn test_gc_no_alive() {
......
......@@ -7,7 +7,7 @@ use std::sync::atomic::Ordering;
const OBJECT_SIZE : usize = 24;
const OBJECT_ALIGN: usize = 8;
const WORK_LOAD : usize = 250000;
const WORK_LOAD : usize = 10000;
const IMMIX_SPACE_SIZE : usize = 500 << 20;
const LO_SPACE_SIZE : usize = 500 << 20;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment