GitLab will be upgraded from 11.4.5-ce.0 on November 25th 2019, from 4:00pm (AEDT) to 5:00pm (AEDT), due to the availability of a critical security patch. During the update, GitLab and Mattermost services will not be available.

Commit 855383c3 authored by qinsoon

[wip] some refactoring

parent ac147763
......@@ -7,6 +7,11 @@ build = "build.rs"
[lib]
crate-type = ["staticlib", "rlib"]
[features]
default = ["aot"]
aot = []
jit = []
[build-dependencies]
gcc = "0.3"
......
......@@ -11,7 +11,7 @@
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Cargo &lt;mu&gt;" level="project" />
<orderEntry type="library" name="Rust &lt;mu&gt;" level="project" />
<orderEntry type="library" name="Cargo &lt;mu&gt;" level="project" />
</component>
</module>
\ No newline at end of file
......@@ -102,6 +102,7 @@ pub struct MuFunctionVersion {
pub func_id: MuID,
pub sig: P<MuFuncSig>,
pub original_ir: Option<FunctionContent>,
pub content: Option<FunctionContent>,
pub context: FunctionContext,
......@@ -138,26 +139,31 @@ impl MuFunctionVersion {
hdr: MuEntityHeader::unnamed(id),
func_id: func,
sig: sig,
original_ir: None,
content: None,
context: FunctionContext::new(),
block_trace: None}
block_trace: None
}
}
/// Installs the function body for this function version.
///
/// Keeps a pristine clone of the IR as supplied by the client in
/// `original_ir` before handing ownership of `content` to `content`,
/// so later compiler passes can mutate `content` freely without
/// losing the original definition.
pub fn define(&mut self, content: FunctionContent) {
    // clone BEFORE the move into self.content
    self.original_ir = Some(content.clone());
    self.content = Some(content);
}
/// Creates a new SSA variable with the given `id` and type `ty`,
/// registers it in this function version's context, and returns it
/// wrapped in a `TreeNode`.
///
/// The `P<Value>` is built once and shared (via `clone` of the smart
/// pointer) between the context's `SSAVarEntry` and the returned tree
/// node, instead of constructing two identical `Value` literals.
pub fn new_ssa(&mut self, id: MuID, ty: P<MuType>) -> P<TreeNode> {
    let val = P(Value{
        hdr: MuEntityHeader::unnamed(id),
        ty: ty,
        v: Value_::SSAVar(id)
    });

    self.context.values.insert(id, SSAVarEntry::new(val.clone()));

    P(TreeNode {
        hdr: MuEntityHeader::unnamed(id),
        // opcode is derived from the variable's type
        op: pick_op_code_for_ssa(&val.ty),
        v: TreeNode_::Value(val)
    })
}
......@@ -186,7 +192,7 @@ impl MuFunctionVersion {
}
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct FunctionContent {
pub entry: MuID,
pub blocks: HashMap<MuID, Block>
......@@ -245,16 +251,18 @@ impl FunctionContext {
}
/// Creates a fresh SSA temporary with the given `id` and type `ty`,
/// records it in this context's value table, and returns it wrapped
/// in a `TreeNode`.
///
/// Mirrors `MuFunctionVersion::new_ssa`: the `P<Value>` is built once
/// and shared between the `SSAVarEntry` and the returned node.
pub fn make_temporary(&mut self, id: MuID, ty: P<MuType>) -> P<TreeNode> {
    let val = P(Value{
        hdr: MuEntityHeader::unnamed(id),
        ty: ty,
        v: Value_::SSAVar(id)
    });

    self.values.insert(id, SSAVarEntry::new(val.clone()));

    P(TreeNode {
        hdr: MuEntityHeader::unnamed(id),
        // opcode is derived from the variable's type
        op: pick_op_code_for_ssa(&val.ty),
        v: TreeNode_::Value(val)
    })
}
......@@ -267,7 +275,7 @@ impl FunctionContext {
}
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct Block {
pub hdr: MuEntityHeader,
pub content: Option<BlockContent>,
......@@ -298,7 +306,7 @@ impl Block {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
pub struct ControlFlow {
pub preds : Vec<MuID>,
pub succs : Vec<BlockEdge>
......@@ -356,7 +364,7 @@ pub enum EdgeKind {
Forward, Backward
}
#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable, RustcDecodable, Clone)]
pub struct BlockContent {
pub args: Vec<P<Value>>,
pub exn_arg: Option<P<Value>>,
......@@ -447,7 +455,7 @@ impl BlockContent {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
/// always use with P<TreeNode>
pub struct TreeNode {
pub hdr: MuEntityHeader,
......@@ -511,7 +519,7 @@ impl fmt::Display for TreeNode {
}
}
#[derive(Debug, RustcEncodable, RustcDecodable)]
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
pub enum TreeNode_ {
Value(P<Value>),
Instruction(Instruction)
......@@ -629,25 +637,23 @@ pub enum Value_ {
#[derive(Debug)]
pub struct SSAVarEntry {
id: MuID,
pub ty: P<MuType>,
val: P<Value>,
// how many times this entry is used
// availalbe after DefUse pass
pub use_count: AtomicUsize,
use_count: AtomicUsize,
// this field is only used during TreeGeneration pass
pub expr: Option<Instruction>
expr: Option<Instruction>
}
impl Encodable for SSAVarEntry {
fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
s.emit_struct("SSAVarEntry", 4, |s| {
try!(s.emit_struct_field("id", 0, |s| self.id.encode(s)));
try!(s.emit_struct_field("ty", 1, |s| self.ty.encode(s)));
s.emit_struct("SSAVarEntry", 3, |s| {
try!(s.emit_struct_field("val", 0, |s| self.val.encode(s)));
let count = self.use_count.load(Ordering::SeqCst);
try!(s.emit_struct_field("use_count", 2, |s| s.emit_usize(count)));
try!(s.emit_struct_field("expr", 3, |s| self.expr.encode(s)));
try!(s.emit_struct_field("use_count", 1, |s| s.emit_usize(count)));
try!(s.emit_struct_field("expr", 2, |s| self.expr.encode(s)));
Ok(())
})
}
......@@ -655,15 +661,13 @@ impl Encodable for SSAVarEntry {
impl Decodable for SSAVarEntry {
fn decode<D: Decoder>(d: &mut D) -> Result<SSAVarEntry, D::Error> {
d.read_struct("SSAVarEntry", 4, |d| {
let id = try!(d.read_struct_field("id", 0, |d| Decodable::decode(d)));
let ty = try!(d.read_struct_field("ty", 1, |d| Decodable::decode(d)));
let count = try!(d.read_struct_field("use_count", 2, |d| d.read_usize()));
let expr = try!(d.read_struct_field("expr", 3, |d| Decodable::decode(d)));
d.read_struct("SSAVarEntry", 3, |d| {
let val = try!(d.read_struct_field("val", 0, |d| Decodable::decode(d)));
let count = try!(d.read_struct_field("use_count", 1, |d| d.read_usize()));
let expr = try!(d.read_struct_field("expr", 2, |d| Decodable::decode(d)));
let ret = SSAVarEntry {
id: id,
ty: ty,
val: val,
use_count: ATOMIC_USIZE_INIT,
expr: expr
};
......@@ -676,10 +680,9 @@ impl Decodable for SSAVarEntry {
}
impl SSAVarEntry {
pub fn new(id: MuID, ty: P<MuType>) -> SSAVarEntry {
pub fn new(val: P<Value>) -> SSAVarEntry {
let ret = SSAVarEntry {
id: id,
ty: ty,
val: val,
use_count: ATOMIC_USIZE_INIT,
expr: None
};
......@@ -688,14 +691,37 @@ impl SSAVarEntry {
ret
}
/// Returns the type of the underlying SSA value.
pub fn ty(&self) -> &P<MuType> {
&self.val.ty
}

/// Returns the SSA value this entry describes.
pub fn value(&self) -> &P<Value> {
&self.val
}

/// Number of recorded uses of this variable (filled in by the DefUse pass).
pub fn use_count(&self) -> usize {
self.use_count.load(Ordering::SeqCst)
}

/// Atomically bumps the use counter by one.
pub fn increase_use_count(&self) {
self.use_count.fetch_add(1, Ordering::SeqCst);
}

/// True if a defining expression has been attached (TreeGeneration pass).
pub fn has_expr(&self) -> bool {
self.expr.is_some()
}

/// Attaches the defining instruction, replacing any previous one.
pub fn assign_expr(&mut self, expr: Instruction) {
self.expr = Some(expr)
}

/// Removes and returns the attached expression.
/// Callers must check `has_expr()` first; panics (debug-asserts) otherwise.
pub fn take_expr(&mut self) -> Instruction {
debug_assert!(self.has_expr());
self.expr.take().unwrap()
}
}
impl fmt::Display for SSAVarEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} #{}", self.ty, self.id)
write!(f, "{}", self.val)
}
}
......@@ -775,12 +801,21 @@ impl fmt::Display for MemoryLocation {
}
#[repr(C)]
#[derive(Debug)] // Display, PartialEq
#[derive(Debug)] // Display, PartialEq, Clone
pub struct MuEntityHeader {
pub id: MuID,
pub name: RwLock<Option<MuName>>
}
impl Clone for MuEntityHeader {
    /// Hand-written because `RwLock` is not `Clone`: the name is read
    /// out from under the original's lock and wrapped in a fresh lock,
    /// so the clone is fully independent of the source header.
    fn clone(&self) -> Self {
        let name_snapshot = self.name.read().unwrap().clone();
        MuEntityHeader {
            id: self.id,
            name: RwLock::new(name_snapshot)
        }
    }
}
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
impl Encodable for MuEntityHeader {
fn encode<S: Encoder> (&self, s: &mut S) -> Result<(), S::Error> {
......
......@@ -11,6 +11,7 @@ use compiler::machine_code::MachineCode;
use vm::VM;
use runtime::ValueLocation;
use utils::vec_utils;
use utils::string_utils;
use ast::ptr::P;
......@@ -112,6 +113,9 @@ impl MachineCode for ASMCode {
}
/// Replaces the instruction at `index` with a nop.
fn set_inst_nop(&mut self, index: usize) {
    // FIXME: need to make sure it is fine that
    // we do not update any information about this instruction
    // e.g. uses, defines, etc.

    // Overwrite in place: equivalent to remove(index) + insert(index, ..)
    // but without shifting the tail of the vector twice.
    self.code[index] = ASM::nop();
}
......@@ -286,11 +290,16 @@ impl ASMCodeGen {
}
/// Emits a call instruction, recording the registers it implicitly
/// uses and defines.
fn add_asm_call(&mut self, code: String) {
    // a call instruction will use all the argument registers
    let mut reg_uses : Vec<MuID> = self.prepare_machine_regs(x86_64::ARGUMENT_GPRs.iter());
    let mut arg_fprs = self.prepare_machine_regs(x86_64::ARGUMENT_FPRs.iter());
    reg_uses.append(&mut arg_fprs);

    // defines: return registers ...
    let mut reg_defines : Vec<MuID> = self.prepare_machine_regs(x86_64::RETURN_GPRs.iter());
    let mut ret_fprs = self.prepare_machine_regs(x86_64::RETURN_FPRs.iter());
    reg_defines.append(&mut ret_fprs);

    // ... plus every caller-saved register (deduplicated, since return
    // registers are themselves caller-saved)
    vec_utils::append_unique(&mut reg_defines, &mut self.prepare_machine_regs(x86_64::CALLER_SAVED_GPRs.iter()));
    vec_utils::append_unique(&mut reg_defines, &mut self.prepare_machine_regs(x86_64::CALLER_SAVED_FPRs.iter()));

    self.add_asm_inst(code, reg_defines, vec![], reg_uses, vec![], false);
}
......
......@@ -37,6 +37,7 @@ pub struct InstructionSelection {
}
impl <'a> InstructionSelection {
#[cfg(feature = "aot")]
pub fn new() -> InstructionSelection {
InstructionSelection{
name: "Instruction Selection (x64)",
......@@ -51,6 +52,11 @@ impl <'a> InstructionSelection {
current_exn_blocks: HashMap::new()
}
}
// JIT-mode constructor: compiled in instead of the AOT `new` when the
// "jit" feature is selected. Not implemented yet — calling it panics.
#[cfg(feature = "jit")]
pub fn new() -> InstructionSelection {
unimplemented!()
}
// in this pass, we assume that
// 1. all temporaries will use 64bit registers
......
......@@ -85,6 +85,18 @@ lazy_static! {
R14.clone(),
R15.clone()
];
pub static ref CALLER_SAVED_GPRs : [P<Value>; 9] = [
RAX.clone(),
RCX.clone(),
RDX.clone(),
RSI.clone(),
RDI.clone(),
R8.clone(),
R9.clone(),
R10.clone(),
R11.clone()
];
pub static ref ALL_GPRs : [P<Value>; 15] = [
RAX.clone(),
......@@ -139,6 +151,25 @@ lazy_static!{
];
pub static ref CALLEE_SAVED_FPRs : [P<Value>; 0] = [];
pub static ref CALLER_SAVED_FPRs : [P<Value>; 16] = [
XMM0.clone(),
XMM1.clone(),
XMM2.clone(),
XMM3.clone(),
XMM4.clone(),
XMM5.clone(),
XMM6.clone(),
XMM7.clone(),
XMM8.clone(),
XMM9.clone(),
XMM10.clone(),
XMM11.clone(),
XMM12.clone(),
XMM13.clone(),
XMM14.clone(),
XMM15.clone(),
];
pub static ref ALL_FPRs : [P<Value>; 16] = [
XMM0.clone(),
......@@ -242,7 +273,7 @@ lazy_static! {
pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
for reg in ALL_MACHINE_REGs.values() {
let reg_id = reg.extract_ssa_id().unwrap();
let entry = SSAVarEntry::new(reg_id, reg.ty.clone());
let entry = SSAVarEntry::new(reg.clone());
func_context.values.insert(reg_id, entry);
}
......
......@@ -20,7 +20,7 @@ impl PeepholeOptimization {
let src : MuID = {
let uses = cf.mc().get_inst_reg_uses(inst);
if uses.len() != 1 {
if uses.len() == 0 {
// moving immediate to register, its not redundant
return;
}
......
......@@ -55,7 +55,7 @@ impl InterferenceGraph {
// add node property
let group = {
let ref ty = entry.ty;
let ref ty = entry.ty();
if types::is_scalar(ty) {
if types::is_fp(ty) {
backend::RegGroup::FPR
......
#![allow(dead_code)]
use ast::ir::*;
use ast::ptr::*;
use vm::VM;
use compiler;
use compiler::CompilerPass;
use compiler::machine_code::CompiledFunction;
use compiler::PassExecutionResult;
use ast::ir::*;
use vm::VM;
use compiler::backend::init_machine_regs_for_func;
use std::collections::HashMap;
mod graph_coloring;
pub enum RegAllocFailure {
......@@ -49,9 +52,22 @@ impl RegisterAllocation {
let spills = coloring.spills();
if !spills.is_empty() {
unimplemented!();
let mut spilled_mem = HashMap::new();
// allocating frame slots for every spilled temp
for reg_id in spills.iter() {
let ssa_entry = match func.context.get_value(*reg_id) {
Some(entry) => entry,
None => panic!("The spilled register {} is not in func context", reg_id)
};
let mem = cf.frame.alloc_slot_for_spilling(ssa_entry.value().clone(), vm);
// return Err(RegAllocFailure::FailedForSpilling);
spilled_mem.insert(*reg_id, mem);
}
self.spill_rewrite(&spilled_mem, func, &mut cf, vm);
return Err(RegAllocFailure::FailedForSpilling);
}
// replace regs
......@@ -76,7 +92,18 @@ impl RegisterAllocation {
cf.mc().trace_mc();
Ok(())
}
}
// Rewrites the machine code of `compiled_func` so every spilled
// temporary in `spills` is loaded from / stored to its assigned frame
// slot (the P<Value> memory location) around each use and definition.
// NOTE(review): contract inferred from the caller, which allocates a
// frame slot per spilled MuID before invoking this — confirm once
// implemented. Currently a stub: calling it panics.
#[cfg(feature = "aot")]
fn spill_rewrite(
&mut self,
spills: &HashMap<MuID, P<Value>>,
func: &mut MuFunctionVersion,
compiled_func: &mut CompiledFunction,
vm: &VM)
{
unimplemented!()
}
}
impl CompilerPass for RegisterAllocation {
......
......@@ -115,7 +115,8 @@ pub trait MachineCode {
fn get_all_blocks(&self) -> &Vec<MuName>;
fn get_block_range(&self, block: &str) -> Option<ops::Range<usize>>;
// functions for rewrite
fn replace_reg(&mut self, from: MuID, to: MuID);
fn set_inst_nop(&mut self, index: usize);
}
......@@ -28,7 +28,7 @@ fn use_value(val: &P<Value>, func_context: &mut FunctionContext) {
match val.v {
Value_::SSAVar(ref id) => {
let entry = func_context.values.get_mut(id).unwrap();
entry.use_count.fetch_add(1, Ordering::SeqCst);
entry.increase_use_count();
},
_ => {} // dont worry about constants
}
......@@ -68,7 +68,7 @@ impl CompilerPass for DefUse {
debug!("check use count for variables");
for entry in func.context.values.values() {
debug!("{}: {}", entry, entry.use_count.load(Ordering::SeqCst))
debug!("{}: {}", entry, entry.use_count())
}
}
}
......@@ -57,9 +57,9 @@ impl CompilerPass for TreeGen {
if possible_ssa_id.is_some() {
let entry_value = context.get_value_mut(possible_ssa_id.unwrap()).unwrap();
if entry_value.expr.is_some() {
if entry_value.has_expr() {
// replace the node with its expr
let expr = entry_value.expr.take().unwrap();
let expr = entry_value.take_expr();
trace!("{} replaced by {}", ops[index], expr);
ops[index] = TreeNode::new_inst(vm.next_id(), expr);
......@@ -79,9 +79,9 @@ impl CompilerPass for TreeGen {
// we can put the expression as a child node to its use
if left.len() == 1 {
let lhs = context.get_value_mut(left[0].extract_ssa_id().unwrap()).unwrap();
if lhs.use_count.load(Ordering::SeqCst) == 1{
if lhs.use_count() == 1{
if is_movable(&inst.v) {
lhs.expr = Some(inst.clone()); // FIXME: should be able to move the inst here
lhs.assign_expr(inst.clone()); // FIXME: should be able to move the inst here
trace!("yes");
trace!("");
......
......@@ -9,7 +9,6 @@ use self::mu::ast::ir::*;
use self::mu::compiler::*;
use std::sync::Arc;
use std::sync::atomic::Ordering;
#[test]
fn test_use_count() {
......@@ -28,13 +27,13 @@ fn test_use_count() {
compiler.compile(&mut func_ver);
assert!(func_ver.context.get_value(vm.id_of("blk_0_n_3")).unwrap().use_count.load(Ordering::SeqCst) == 2, "blk_0_n_3 use should be 2");
assert!(func_ver.context.get_value(vm.id_of("blk_0_v48")).unwrap().use_count.load(Ordering::SeqCst) == 1, "blk_0_v48 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_2_v53")).unwrap().use_count.load(Ordering::SeqCst) == 1, "blk_2_v53 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_n_3")).unwrap().use_count.load(Ordering::SeqCst) == 2, "blk_1_n_3 use should be 2");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v50")).unwrap().use_count.load(Ordering::SeqCst) == 1, "blk_1_v50 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v51")).unwrap().use_count.load(Ordering::SeqCst) == 1, "blk_1_v51 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v52")).unwrap().use_count.load(Ordering::SeqCst) == 1, "blk_1_v52 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_0_n_3")).unwrap().use_count() == 2, "blk_0_n_3 use should be 2");
assert!(func_ver.context.get_value(vm.id_of("blk_0_v48")).unwrap().use_count() == 1, "blk_0_v48 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_2_v53")).unwrap().use_count() == 1, "blk_2_v53 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_n_3")).unwrap().use_count() == 2, "blk_1_n_3 use should be 2");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v50")).unwrap().use_count() == 1, "blk_1_v50 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v51")).unwrap().use_count() == 1, "blk_1_v51 use should be 1");
assert!(func_ver.context.get_value(vm.id_of("blk_1_v52")).unwrap().use_count() == 1, "blk_1_v52 use should be 1");
}
#[test]
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment