Commit 5d126206 authored by John Zhang

get master update

parents df046e4d 4a9a7ff9
......@@ -7,6 +7,11 @@ build = "build.rs"
[lib]
crate-type = ["staticlib", "rlib"]
[features]
default = ["aot"]
aot = []
jit = []
[build-dependencies]
gcc = "0.3"
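The new [features] section makes "aot" the default build and adds an alternative "jit" feature; the backend code later in this commit selects implementations with #[cfg(feature = ...)]. A minimal sketch of that mechanism, with a hypothetical function name that is not part of this crate:

#[cfg(feature = "aot")]
fn backend_name() -> &'static str { "ahead-of-time backend" }

#[cfg(feature = "jit")]
fn backend_name() -> &'static str { "just-in-time backend" }

fn main() {
    // With default = ["aot"], a plain `cargo build` picks the AOT variant;
    // `cargo build --no-default-features --features jit` picks the JIT one.
    println!("{}", backend_name());
}

Enabling both features at once would leave two conflicting definitions of the same function, which is also why the duplicated InstructionSelection::new() further down assumes the features are mutually exclusive.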
......@@ -24,4 +29,5 @@ hprof = "0.1.3"
memmap = "0.4.0"
memsec = "0.1.9"
rustc-serialize = "*"
time = "0.1.34"
\ No newline at end of file
time = "0.1.34"
maplit = "0.1.4"
\ No newline at end of file
......@@ -11,7 +11,7 @@
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Cargo &lt;mu&gt;" level="project" />
<orderEntry type="library" name="Rust &lt;mu&gt;" level="project" />
<orderEntry type="library" name="Cargo &lt;mu&gt;" level="project" />
</component>
</module>
\ No newline at end of file
......@@ -102,6 +102,7 @@ pub struct MuFunctionVersion {
pub func_id: MuID,
pub sig: P<MuFuncSig>,
pub original_ir: Option<FunctionContent>,
pub content: Option<FunctionContent>,
pub context: FunctionContext,
......@@ -138,26 +139,31 @@ impl MuFunctionVersion {
hdr: MuEntityHeader::unnamed(id),
func_id: func,
sig: sig,
original_ir: None,
content: None,
context: FunctionContext::new(),
block_trace: None}
block_trace: None
}
}
pub fn define(&mut self, content: FunctionContent) {
self.content = Some(content)
self.original_ir = Some(content.clone());
self.content = Some(content);
}
pub fn new_ssa(&mut self, id: MuID, ty: P<MuType>) -> P<TreeNode> {
self.context.values.insert(id, SSAVarEntry::new(id, ty.clone()));
let val = P(Value{
hdr: MuEntityHeader::unnamed(id),
ty: ty,
v: Value_::SSAVar(id)
});
self.context.values.insert(id, SSAVarEntry::new(val.clone()));
P(TreeNode {
hdr: MuEntityHeader::unnamed(id),
op: pick_op_code_for_ssa(&ty),
v: TreeNode_::Value(P(Value{
hdr: MuEntityHeader::unnamed(id),
ty: ty,
v: Value_::SSAVar(id)
}))
op: pick_op_code_for_ssa(&val.ty),
v: TreeNode_::Value(val)
})
}
......@@ -186,7 +192,7 @@ impl MuFunctionVersion {
}
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct FunctionContent {
pub entry: MuID,
pub blocks: HashMap<MuID, Block>
......@@ -245,16 +251,18 @@ impl FunctionContext {
}
pub fn make_temporary(&mut self, id: MuID, ty: P<MuType>) -> P<TreeNode> {
self.values.insert(id, SSAVarEntry::new(id, ty.clone()));
let val = P(Value{
hdr: MuEntityHeader::unnamed(id),
ty: ty,
v: Value_::SSAVar(id)
});
self.values.insert(id, SSAVarEntry::new(val.clone()));
P(TreeNode {
hdr: MuEntityHeader::unnamed(id),
op: pick_op_code_for_ssa(&ty),
v: TreeNode_::Value(P(Value{
hdr: MuEntityHeader::unnamed(id),
ty: ty,
v: Value_::SSAVar(id)
}))
op: pick_op_code_for_ssa(&val.ty),
v: TreeNode_::Value(val)
})
}
......@@ -267,7 +275,7 @@ impl FunctionContext {
}
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct Block {
pub hdr: MuEntityHeader,
pub content: Option<BlockContent>,
......@@ -298,7 +306,7 @@ impl Block {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
pub struct ControlFlow {
pub preds : Vec<MuID>,
pub succs : Vec<BlockEdge>
......@@ -356,7 +364,7 @@ pub enum EdgeKind {
Forward, Backward
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct BlockContent {
pub args: Vec<P<Value>>,
pub exn_arg: Option<P<Value>>,
......@@ -447,7 +455,7 @@ impl BlockContent {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
/// always use with P<TreeNode>
pub struct TreeNode {
pub hdr: MuEntityHeader,
......@@ -511,7 +519,7 @@ impl fmt::Display for TreeNode {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
pub enum TreeNode_ {
Value(P<Value>),
Instruction(Instruction)
......@@ -629,25 +637,23 @@ pub enum Value_ {
#[derive(Debug)]
pub struct SSAVarEntry {
id: MuID,
pub ty: P<MuType>,
val: P<Value>,
// how many times this entry is used
// available after the DefUse pass
pub use_count: AtomicUsize,
use_count: AtomicUsize,
// this field is only used during TreeGeneration pass
pub expr: Option<Instruction>
expr: Option<Instruction>
}
impl Encodable for SSAVarEntry {
fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
s.emit_struct("SSAVarEntry", 4, |s| {
try!(s.emit_struct_field("id", 0, |s| self.id.encode(s)));
try!(s.emit_struct_field("ty", 1, |s| self.ty.encode(s)));
s.emit_struct("SSAVarEntry", 3, |s| {
try!(s.emit_struct_field("val", 0, |s| self.val.encode(s)));
let count = self.use_count.load(Ordering::SeqCst);
try!(s.emit_struct_field("use_count", 2, |s| s.emit_usize(count)));
try!(s.emit_struct_field("expr", 3, |s| self.expr.encode(s)));
try!(s.emit_struct_field("use_count", 1, |s| s.emit_usize(count)));
try!(s.emit_struct_field("expr", 2, |s| self.expr.encode(s)));
Ok(())
})
}
......@@ -655,15 +661,13 @@ impl Encodable for SSAVarEntry {
impl Decodable for SSAVarEntry {
fn decode<D: Decoder>(d: &mut D) -> Result<SSAVarEntry, D::Error> {
d.read_struct("SSAVarEntry", 4, |d| {
let id = try!(d.read_struct_field("id", 0, |d| Decodable::decode(d)));
let ty = try!(d.read_struct_field("ty", 1, |d| Decodable::decode(d)));
let count = try!(d.read_struct_field("use_count", 2, |d| d.read_usize()));
let expr = try!(d.read_struct_field("expr", 3, |d| Decodable::decode(d)));
d.read_struct("SSAVarEntry", 3, |d| {
let val = try!(d.read_struct_field("val", 0, |d| Decodable::decode(d)));
let count = try!(d.read_struct_field("use_count", 1, |d| d.read_usize()));
let expr = try!(d.read_struct_field("expr", 2, |d| Decodable::decode(d)));
let ret = SSAVarEntry {
id: id,
ty: ty,
val: val,
use_count: ATOMIC_USIZE_INIT,
expr: expr
};
......@@ -676,10 +680,9 @@ impl Decodable for SSAVarEntry {
}
impl SSAVarEntry {
pub fn new(id: MuID, ty: P<MuType>) -> SSAVarEntry {
pub fn new(val: P<Value>) -> SSAVarEntry {
let ret = SSAVarEntry {
id: id,
ty: ty,
val: val,
use_count: ATOMIC_USIZE_INIT,
expr: None
};
......@@ -688,14 +691,37 @@ impl SSAVarEntry {
ret
}
pub fn ty(&self) -> &P<MuType> {
&self.val.ty
}
pub fn value(&self) -> &P<Value> {
&self.val
}
pub fn use_count(&self) -> usize {
self.use_count.load(Ordering::SeqCst)
}
pub fn increase_use_count(&self) {
self.use_count.fetch_add(1, Ordering::SeqCst);
}
pub fn has_expr(&self) -> bool {
self.expr.is_some()
}
pub fn assign_expr(&mut self, expr: Instruction) {
self.expr = Some(expr)
}
pub fn take_expr(&mut self) -> Instruction {
debug_assert!(self.has_expr());
self.expr.take().unwrap()
}
}
impl fmt::Display for SSAVarEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} #{}", self.ty, self.id)
write!(f, "{}", self.val)
}
}
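SSAVarEntry now owns the whole P<Value> rather than a separate id/ty pair, and use_count/expr are reached only through the accessor methods above, so new_ssa and make_temporary can build the Value once and share it. A simplified, self-contained sketch of the same shape; the types here are stand-ins, not the real ast::ir ones:

use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for P<Value>: just an id and a type name.
#[derive(Debug)]
struct Val { id: usize, ty: String }

struct SsaEntry {
    val: Val,                 // the entry now owns the whole value
    use_count: AtomicUsize,   // atomic, so &self methods are enough to bump it
    expr: Option<String>,     // stand-in for Option<Instruction>
}

impl SsaEntry {
    fn new(val: Val) -> SsaEntry {
        SsaEntry { val: val, use_count: AtomicUsize::new(0), expr: None }
    }
    fn ty(&self) -> &str { &self.val.ty }
    fn value(&self) -> &Val { &self.val }
    fn use_count(&self) -> usize { self.use_count.load(Ordering::SeqCst) }
    fn increase_use_count(&self) { self.use_count.fetch_add(1, Ordering::SeqCst); }
    fn has_expr(&self) -> bool { self.expr.is_some() }
    fn assign_expr(&mut self, e: String) { self.expr = Some(e); }
    fn take_expr(&mut self) -> String { self.expr.take().unwrap() }
}

fn main() {
    let mut entry = SsaEntry::new(Val { id: 1, ty: "int<64>".to_string() });
    entry.increase_use_count();
    entry.assign_expr("ADD %a %b".to_string());
    assert_eq!(entry.use_count(), 1);
    assert_eq!(entry.ty(), "int<64>");
    assert_eq!(entry.value().id, 1);
    if entry.has_expr() {
        println!("folded expr: {}", entry.take_expr());
    }
}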
......@@ -775,12 +801,21 @@ impl fmt::Display for MemoryLocation {
}
#[repr(C)]
#[derive(Debug)] // Display, PartialEq
#[derive(Debug)] // Display, PartialEq, Clone
pub struct MuEntityHeader {
pub id: MuID,
pub name: RwLock<Option<MuName>>
}
impl Clone for MuEntityHeader {
fn clone(&self) -> Self {
MuEntityHeader {
id: self.id,
name: RwLock::new(self.name.read().unwrap().clone())
}
}
}
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
impl Encodable for MuEntityHeader {
fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
......
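MuEntityHeader keeps its name behind a RwLock, and RwLock does not implement Clone, which is why Clone cannot simply be derived and is written by hand above: take a read lock, clone the contents, and wrap them in a fresh lock. A minimal standalone illustration of the pattern:

use std::sync::RwLock;

// Minimal stand-in for MuEntityHeader: RwLock<T> is not Clone,
// so Clone must be hand-written, snapshotting the current name.
struct Header {
    id: usize,
    name: RwLock<Option<String>>,
}

impl Clone for Header {
    fn clone(&self) -> Self {
        Header {
            id: self.id,
            // take a read lock, clone the Option<String>, wrap it in a fresh lock
            name: RwLock::new(self.name.read().unwrap().clone()),
        }
    }
}

fn main() {
    let h = Header { id: 42, name: RwLock::new(Some("entry_block".to_string())) };
    let h2 = h.clone();
    assert_eq!(*h2.name.read().unwrap(), Some("entry_block".to_string()));
}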
......@@ -3,10 +3,15 @@ use ast::ir::*;
use runtime::ValueLocation;
use compiler::machine_code::MachineCode;
use compiler::backend::x86_64::ASMCodeGen;
pub trait CodeGenerator {
fn start_code(&mut self, func_name: MuName) -> ValueLocation;
fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode + Sync + Send>, ValueLocation);
// generate unnamed sequence of linear code (no branch)
fn start_code_sequence(&mut self);
fn finish_code_sequence(&mut self) -> Box<MachineCode + Sync + Send>;
fn print_cur_code(&self);
......@@ -65,4 +70,4 @@ pub trait CodeGenerator {
fn emit_push_r64(&mut self, src: &P<Value>);
fn emit_push_imm32(&mut self, src: i32);
fn emit_pop_r64(&mut self, dest: &P<Value>);
}
}
\ No newline at end of file
......@@ -37,6 +37,7 @@ pub struct InstructionSelection {
}
impl <'a> InstructionSelection {
#[cfg(feature = "aot")]
pub fn new() -> InstructionSelection {
InstructionSelection{
name: "Instruction Selection (x64)",
......@@ -51,6 +52,11 @@ impl <'a> InstructionSelection {
current_exn_blocks: HashMap::new()
}
}
#[cfg(feature = "jit")]
pub fn new() -> InstructionSelection {
unimplemented!()
}
// in this pass, we assume that
// 1. all temporaries will use 64bit registers
......
......@@ -10,6 +10,8 @@ mod asm_backend;
pub use compiler::backend::x86_64::asm_backend::ASMCodeGen;
pub use compiler::backend::x86_64::asm_backend::emit_code;
pub use compiler::backend::x86_64::asm_backend::emit_context;
#[cfg(feature = "aot")]
pub use compiler::backend::x86_64::asm_backend::spill_rewrite;
use ast::ptr::P;
use ast::ir::*;
......@@ -85,6 +87,18 @@ lazy_static! {
R14.clone(),
R15.clone()
];
pub static ref CALLER_SAVED_GPRs : [P<Value>; 9] = [
RAX.clone(),
RCX.clone(),
RDX.clone(),
RSI.clone(),
RDI.clone(),
R8.clone(),
R9.clone(),
R10.clone(),
R11.clone()
];
pub static ref ALL_GPRs : [P<Value>; 15] = [
RAX.clone(),
......@@ -139,6 +153,25 @@ lazy_static!{
];
pub static ref CALLEE_SAVED_FPRs : [P<Value>; 0] = [];
pub static ref CALLER_SAVED_FPRs : [P<Value>; 16] = [
XMM0.clone(),
XMM1.clone(),
XMM2.clone(),
XMM3.clone(),
XMM4.clone(),
XMM5.clone(),
XMM6.clone(),
XMM7.clone(),
XMM8.clone(),
XMM9.clone(),
XMM10.clone(),
XMM11.clone(),
XMM12.clone(),
XMM13.clone(),
XMM14.clone(),
XMM15.clone(),
];
pub static ref ALL_FPRs : [P<Value>; 16] = [
XMM0.clone(),
......@@ -242,7 +275,7 @@ lazy_static! {
pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
for reg in ALL_MACHINE_REGs.values() {
let reg_id = reg.extract_ssa_id().unwrap();
let entry = SSAVarEntry::new(reg_id, reg.ty.clone());
let entry = SSAVarEntry::new(reg.clone());
func_context.values.insert(reg_id, entry);
}
......
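The new CALLER_SAVED_GPRs and CALLER_SAVED_FPRs lists name the registers a callee may clobber, so any of them that are live across a call must be preserved by the caller. A toy sketch of how such a list is typically consulted when emitting a call; register names are plain strings here, not the backend's P<Value> operands, and the helper name is illustrative:

fn emit_call_with_saves(live: &[&str], caller_saved: &[&str], out: &mut Vec<String>) {
    // only the live caller-saved registers need to be preserved by the caller
    let to_save: Vec<&str> = caller_saved.iter().cloned()
                                         .filter(|r| live.contains(r))
                                         .collect();
    for r in &to_save {
        out.push(format!("push {}", r));
    }
    out.push("call callee".to_string());
    for r in to_save.iter().rev() {
        out.push(format!("pop {}", r));
    }
}

fn main() {
    let caller_saved = ["rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11"];
    let live = ["rcx", "r10", "rbx"]; // rbx is callee-saved, so it is not pushed here
    let mut out = Vec::new();
    emit_call_with_saves(&live, &caller_saved, &mut out);
    for line in &out {
        println!("{}", line);
    }
}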
......@@ -36,6 +36,8 @@ pub use compiler::backend::x86_64::is_callee_saved;
pub use compiler::backend::x86_64::emit_code;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::emit_context;
#[cfg(target_arch = "x86_64")]
pub use compiler::backend::x86_64::spill_rewrite;
// ARM
......
......@@ -20,7 +20,7 @@ impl PeepholeOptimization {
let src : MuID = {
let uses = cf.mc().get_inst_reg_uses(inst);
if uses.len() != 1 {
if uses.len() == 0 {
// moving an immediate to a register; it's not redundant
return;
}
......
......@@ -55,7 +55,7 @@ impl InterferenceGraph {
// add node property
let group = {
let ref ty = entry.ty;
let ref ty = entry.ty();
if types::is_scalar(ty) {
if types::is_fp(ty) {
backend::RegGroup::FPR
......@@ -284,7 +284,7 @@ fn build_live_set(cf: &mut CompiledFunction, func: &MuFunctionVersion) {
// (2) diff = out[n] - def[n]
let mut diff = liveout[n].to_vec();
for def in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(&mut diff, *def);
vec_utils::remove_value(&mut diff, def);
}
// (3) in[n] = in[n] + diff
vec_utils::append_unique(&mut in_set_new, &mut diff);
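The numbered comments above are the usual backward liveness equations: out[n] is the union of in[s] over the successors of n, and in[n] = use[n] + (out[n] - def[n]). A self-contained sketch of that fixpoint iteration over a toy instruction list (integer register ids, explicit successor indices), not the project's MachineCode-based implementation:

use std::collections::HashSet;

// One toy instruction: which register ids it defines and uses, and its successors.
struct Inst { defs: Vec<u32>, uses: Vec<u32>, succs: Vec<usize> }

// Iterate in[n] = use[n] + (out[n] - def[n]) and out[n] = union of in[s] to a fixpoint.
fn liveness(insts: &[Inst]) -> (Vec<HashSet<u32>>, Vec<HashSet<u32>>) {
    let n = insts.len();
    let mut live_in = vec![HashSet::new(); n];
    let mut live_out = vec![HashSet::new(); n];
    let mut changed = true;
    while changed {
        changed = false;
        for i in (0..n).rev() {
            // out[i] = union of in[s] over successors
            let mut out: HashSet<u32> = HashSet::new();
            for &s in &insts[i].succs {
                out.extend(live_in[s].iter().cloned());
            }
            // in[i] = use[i] + (out[i] - def[i])
            let mut inn: HashSet<u32> = insts[i].uses.iter().cloned().collect();
            for r in out.iter() {
                if !insts[i].defs.contains(r) { inn.insert(*r); }
            }
            if inn != live_in[i] || out != live_out[i] {
                live_in[i] = inn;
                live_out[i] = out;
                changed = true;
            }
        }
    }
    (live_in, live_out)
}

fn main() {
    // r1 = ...; r2 = ...; r3 = r1 + r2; return r3
    let insts = vec![
        Inst { defs: vec![1], uses: vec![],     succs: vec![1] },
        Inst { defs: vec![2], uses: vec![],     succs: vec![2] },
        Inst { defs: vec![3], uses: vec![1, 2], succs: vec![3] },
        Inst { defs: vec![],  uses: vec![3],    succs: vec![]  },
    ];
    let (live_in, _) = liveness(&insts);
    println!("{:?}", live_in);
}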
......@@ -338,12 +338,10 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// Initialize and creates nodes for all the involved temps/regs
for i in 0..cf.mc().number_of_insts() {
for reg_id in cf.mc().get_inst_reg_defines(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
for reg_id in cf.mc().get_inst_reg_uses(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
}
......@@ -397,7 +395,7 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// creating nodes if necessary
for e in current_live.iter() {
if src.is_none() || (src.is_some() && *e != src.unwrap()) {
let from = ig.get_node(*d);
let from = ig.get_node(d);
let to = ig.get_node(*e);
if !ig.is_same_node(from, to) && !ig.is_adj(from, to) {
......@@ -415,13 +413,13 @@ pub fn build_chaitin_briggs (cf: &mut CompiledFunction, func: &MuFunctionVersion
// for every definition D in I
for d in cf.mc().get_inst_reg_defines(i) {
// remove D from Current_Live
current_live.remove(d);
current_live.remove(&d);
}
// for every use U in I
for u in cf.mc().get_inst_reg_uses(i) {
// add U to Current_live
current_live.insert(*u);
current_live.insert(u);
}
}
}
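The loop above is the classic interference construction: scan the instructions backwards, make every definition interfere with whatever is currently live (skipping the source of a move), then remove the defs from and add the uses to the live set. A stripped-down sketch of the same scan over straight-line code; it omits the move-source exception and uses plain integer ids:

use std::collections::HashSet;

struct Inst { defs: Vec<u32>, uses: Vec<u32> }

// Reverse scan: each definition interferes with everything currently live,
// then defs leave the live set and uses enter it.
fn build_interference(insts: &[Inst], live_out: &HashSet<u32>) -> HashSet<(u32, u32)> {
    let mut edges = HashSet::new();
    let mut current_live = live_out.clone();
    for inst in insts.iter().rev() {
        for &d in &inst.defs {
            for &e in current_live.iter() {
                if e != d {
                    edges.insert((d.min(e), d.max(e)));
                }
            }
        }
        for &d in &inst.defs { current_live.remove(&d); }
        for &u in &inst.uses { current_live.insert(u); }
    }
    edges
}

fn main() {
    // r1 = ...; r2 = ...; r3 = r1 + r2, with r3 live out of the block
    let insts = vec![
        Inst { defs: vec![1], uses: vec![] },
        Inst { defs: vec![2], uses: vec![] },
        Inst { defs: vec![3], uses: vec![1, 2] },
    ];
    let live_out: HashSet<u32> = [3].iter().cloned().collect();
    // r1 and r2 interfere; r3 dies nowhere that r1/r2 are still live
    println!("{:?}", build_interference(&insts, &live_out));
}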
......@@ -454,12 +452,10 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
let ref mut in_set = live_in[i];
for reg_id in cf.mc().get_inst_reg_defines(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
}
for reg_id in cf.mc().get_inst_reg_uses(i) {
let reg_id = *reg_id;
ig.new_node(reg_id, &func.context);
in_set.push(reg_id);
......@@ -487,8 +483,8 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
// in = use(i.e. live_in) + (out - def)
let mut diff = out_set.clone();
for def in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(&mut diff, *def);
trace!("removing def: {}", *def);
vec_utils::remove_value(&mut diff, def);
trace!("removing def: {}", def);
trace!("diff = {:?}", diff);
}
trace!("out - def = {:?}", diff);
......@@ -545,7 +541,7 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
for d in cf.mc().get_inst_reg_defines(n) {
for t in live.iter() {
if src.is_none() || (src.is_some() && *t != src.unwrap()) {
let from = ig.get_node(*d);
let from = ig.get_node(d);
let to = ig.get_node(*t);
if !ig.is_same_node(from, to) && !ig.is_adj(from, to) {
......@@ -561,11 +557,11 @@ pub fn build (cf: &CompiledFunction, func: &MuFunctionVersion) -> InterferenceGr
}
for d in cf.mc().get_inst_reg_defines(n) {
vec_utils::remove_value(live, *d);
vec_utils::remove_value(live, d);
}
for u in cf.mc().get_inst_reg_uses(n) {
live.push(*u);
live.push(u);
}
}
......
#![allow(dead_code)]
use ast::ir::*;
use ast::ptr::*;
use vm::VM;
use compiler;
use compiler::CompilerPass;
use compiler::machine_code::CompiledFunction;
use compiler::PassExecutionResult;
use ast::ir::*;
use vm::VM;
use compiler::backend::init_machine_regs_for_func;
use compiler::backend;
use std::collections::HashMap;
mod graph_coloring;
......@@ -49,9 +53,22 @@ impl RegisterAllocation {
let spills = coloring.spills();
if !spills.is_empty() {
unimplemented!();
let mut spilled_mem = HashMap::new();
// return Err(RegAllocFailure::FailedForSpilling);
// allocating frame slots for every spilled temp
for reg_id in spills.iter() {
let ssa_entry = match func.context.get_value(*reg_id) {
Some(entry) => entry,
None => panic!("The spilled register {} is not in func context", reg_id)
};
let mem = cf.frame.alloc_slot_for_spilling(ssa_entry.value().clone(), vm);
spilled_mem.insert(*reg_id, mem);
}
backend::spill_rewrite(&spilled_mem, func, &mut cf, vm);
return Err(RegAllocFailure::FailedForSpilling);
}
// replace regs
......@@ -76,7 +93,7 @@ impl RegisterAllocation {
cf.mc().trace_mc();
Ok(())
}
}
}
impl CompilerPass for RegisterAllocation {
......
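Instead of bailing out with unimplemented!(), the pass now allocates a frame slot for every spilled temporary, lets backend::spill_rewrite patch the code, and returns Err(RegAllocFailure::FailedForSpilling), presumably so allocation can be re-run on the rewritten code. A toy sketch of that spill-and-retry shape; try_allocate and rewrite_spills are illustrative stand-ins, not this crate's driver:

#[derive(Debug)]
enum RegAllocFailure { FailedForSpilling }

// hypothetical: allocation succeeds once the register pressure fits the machine
fn try_allocate(pressure: usize) -> Result<(), RegAllocFailure> {
    if pressure > 2 { Err(RegAllocFailure::FailedForSpilling) } else { Ok(()) }
}

// hypothetical stand-in for backend::spill_rewrite: spilling one temp to a
// frame slot inserts loads/stores and lowers the number of live temps
fn rewrite_spills(code: &mut Vec<String>, pressure: &mut usize) {
    code.push("store/load around spilled temp".to_string());
    *pressure -= 1;
}

fn main() {
    let mut code = vec!["add".to_string(), "mul".to_string()];
    let mut pressure = 4;
    loop {
        match try_allocate(pressure) {
            Ok(()) => break,
            Err(RegAllocFailure::FailedForSpilling) => rewrite_spills(&mut code, &mut pressure),
        }
    }
    println!("allocated after rewrites: {:?}", code);
}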
......@@ -10,7 +10,7 @@ use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
pub struct CompiledFunction {
pub func_id: MuID,
pub func_ver_id: MuID,
pub temps: HashMap<MuID, MuID>, // assumes one temperary maps to one register
pub temps: HashMap<MuID, MuID>, // assumes one temporary maps to one register
// not emitting this
pub mc: Option<Box<MachineCode + Send + Sync>>,
......@@ -91,6 +91,8 @@ impl CompiledFunction {
}
}
use std::any::Any;
pub trait MachineCode {
fn trace_mc(&self);
fn trace_inst(&self, index: usize);
......@@ -105,8 +107,8 @@ pub trait MachineCode {
fn get_succs(&self, index: usize) -> &Vec<usize>;
fn get_preds(&self, index: usize) -> &Vec<usize>;
fn get_inst_reg_uses(&self, index: usize) -> &Vec<MuID>;
fn get_inst_reg_defines(&self, index: usize) -> &Vec<MuID>;
fn get_inst_reg_uses(&self, index: usize) -> Vec<MuID>;
fn get_inst_reg_defines(&self, index: usize) -> Vec<MuID>;
fn get_ir_block_livein(&self, block: &str) -> Option<&Vec<MuID>>;
fn get_ir_block_liveout(&self, block: &str) -> Option<&Vec<MuID>>;
......@@ -115,7 +117,17 @@ pub trait MachineCode {
fn get_all_blocks(&self) -> &Vec<MuName>;
fn get_block_range(&self, block: &str) -> Option<ops::Range<usize>>;
// functions for rewrite
/// replace a temp with a machine register (`to` must be a machine register)
fn replace_reg(&mut self, from: MuID, to: MuID);
/// replace a temp with another temp
fn replace_tmp_for_inst(&mut self, from: MuID, to: MuID, inst: usize);
fn set_inst_nop(&mut self, index: usize);
fn as_any(&self) -> &Any;
}
pub trait MachineInst {
}
\ No newline at end of file
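The trait now also exposes fn as_any(&self) -> &Any, the standard hook for downcasting a boxed trait object back to its concrete type (for example, to reach the assembly backend's concrete machine-code type behind Box<MachineCode + Send + Sync>). A minimal standalone sketch of the idiom, written in the pre-2018 trait-object syntax used throughout this codebase; the type names are illustrative:

use std::any::Any;

trait MachineCodeLike {
    fn trace(&self);
    // expose the concrete type so callers can downcast when they must
    fn as_any(&self) -> &Any;
}

struct AsmCode { insts: Vec<String> }

impl MachineCodeLike for AsmCode {
    fn trace(&self) { println!("{} instructions", self.insts.len()); }
    fn as_any(&self) -> &Any { self }
}

fn main() {
    let mc: Box<MachineCodeLike> = Box::new(AsmCode { insts: vec!["mov rax, 1".to_string()] });
    mc.trace();
    // recover the concrete AsmCode when backend-specific access is required
    if let Some(asm) = mc.as_any().downcast_ref::<AsmCode>() {
        println!("first inst: {}", asm.insts[0]);
    }
}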
......@@ -28,7 +28,7 @@ fn use_value(val: &P<Value>, func_context: &mut FunctionContext) {
match val.v {
Value_::SSAVar(ref id) => {
let entry = func_context.values.get_mut(id).unwrap();
entry.use_count.fetch_add(1, Ordering::SeqCst);
entry.increase_use_count();
},
_ => {} // don't worry about constants
}
......@@ -68,7 +68,7 @@ impl CompilerPass for DefUse {
debug!("check use count for variables");
for entry in func.context.values.values() {
debug!("{}: {}", entry, entry.use_count.load(Ordering::SeqCst))
debug!("{}: {}", entry, entry.use_count())
}
}
}
......@@ -57,9 +57,9 @@ impl CompilerPass for TreeGen {
if possible_ssa_id.is_some() {
let entry_value = context.get_value_mut(possible_ssa_id.unwrap()).unwrap();
if entry_value.expr.is_some() {
if entry_value.has_expr() {
// replace the node with its expr
let expr = entry_value.expr.take().unwrap();
let expr = entry_value.take_expr();
trace!("{} replaced by {}", ops[index], expr);
ops[index] = TreeNode::new_inst(vm.next_id(), expr);
......@@ -79,9 +79,9 @@ impl CompilerPass for TreeGen {
// we can put the expression as a child node to its use
if left.len() == 1 {
let lhs = context.get_value_mut(left[0].extract_ssa_id().unwrap()).unwrap();
if lhs.use_count.load(Ordering::SeqCst) == 1{
if lhs.use_count() == 1{
if is_movable(&inst.v) {
lhs.expr = Some(inst.clone()); // FIXME: should be able to move the inst here
lhs.assign_expr(inst.clone()); // FIXME: should be able to move the inst here
trace!("yes");
trace!("");
......
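Taken together, the two hunks above implement expression-tree building: when a variable has exactly one use and its defining instruction is movable, the instruction is stashed on the variable's entry (assign_expr), and at the single use site it is taken back (take_expr) and substituted as a child node. A toy model of that substitution step, with simplified node and entry types that only stand in for the real IR:

use std::collections::HashMap;

// Toy expression node: either a variable reference or an inlined instruction.
#[derive(Debug)]
enum Node {
    Var(u32),
    Inst { op: String, operands: Vec<Node> },
}

fn main() {
    // per-variable state from a DefUse-style pass: (use count, optional defining instruction)
    let mut entries: HashMap<u32, (usize, Option<Node>)> = HashMap::new();

    // %1 = ADD %10 %11  -- %1 has exactly one use and ADD is movable,
    // so its defining instruction is remembered on the entry (assign_expr)
    entries.insert(1, (1, Some(Node::Inst {
        op: "ADD".to_string(),
        operands: vec![Node::Var(10), Node::Var(11)],
    })));

    // later, at %2 = MUL %1 %12: the single-use operand %1 is replaced by
    // its remembered expression (take_expr), building a deeper tree
    let mut mul_operands = vec![Node::Var(1), Node::Var(12)];
    for opnd in mul_operands.iter_mut() {
        if let Node::Var(id) = *opnd {
            if let Some(entry) = entries.get_mut(&id) {
                if entry.0 == 1 && entry.1.is_some() {
                    *opnd = entry.1.take().unwrap();
                }
            }
        }
    }
    println!("{:?}", Node::Inst { op: "MUL".to_string(), operands: mul_operands });
}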
......@@ -4,6 +4,8 @@ extern crate lazy_static;
extern crate log;
extern crate rustc_serialize;
extern crate simple_logger;
#[macro_use]
extern crate maplit;
#[macro_use]
pub extern crate ast;
......@@ -11,4 +13,4 @@ pub extern crate ast;
pub extern crate utils;
pub mod vm;
pub mod compiler;
pub mod runtime;
pub mod runtime;
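maplit (added to Cargo.toml above) provides literal-style constructors for the standard collections; with #[macro_use] extern crate maplit, macros such as hashmap! become available. A tiny example, assuming the hashmap! macro from the 0.1 release:

#[macro_use]
extern crate maplit;

fn main() {
    // hashmap! builds a std::collections::HashMap from key => value pairs
    let callee_saved = hashmap!{
        "rbx" => true,
        "rax" => false
    };
    assert!(callee_saved["rbx"]);
    assert!(!callee_saved["rax"]);
}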
use std::fmt;

pub fn is_identical_to_str_ignore_order<T: Ord + fmt::Display + Clone, Q: Ord + fmt::Display + Clone> (vec: &Vec<T>, mut expect: Vec<Q>) -> bool {
    let mut vec_copy = vec.to_vec();
    vec_copy.sort();
    expect.sort();

    let a = as_str(&vec_copy);
    let b = as_str(&expect);

    a == b
}

pub fn is_identical_ignore_order<T: Ord + Clone> (vec: &Vec<T>, vec2: &Vec<T>) -> bool {
    if vec.len() != vec2.len() {
        return false;
    }

    let mut vec = vec.to_vec();
    let mut vec2 = vec2.to_vec();
    vec.sort();
    vec2.sort();

    for i in 0..vec.len() {
        if vec[i] != vec2[i] {
            return false;
        }