GitLab will be upgraded from 11.4.5-ce.0 on November 25th 2019, between 4.00pm (AEDT) and 5.00pm (AEDT), due to the availability of a critical security patch. During the update, GitLab and Mattermost services will not be available.

Commit 2d836b52 authored by qinsoon's avatar qinsoon

[wip] keep working

parent 8038fb6d
......@@ -9,7 +9,7 @@ use std::cell::RefCell;
#[derive(Debug, Clone)]
pub struct Instruction {
pub value : Option<Vec<P<TreeNode>>>,
pub value : Option<Vec<P<Value>>>,
pub ops : RefCell<Vec<P<TreeNode>>>,
pub v: Instruction_
}
......
......@@ -35,10 +35,13 @@ impl MuFunction {
self.content = Some(content)
}
pub fn new_ssa(&mut self, id: MuID, tag: MuTag, ty: P<MuType>) -> P<TreeNode> {
pub fn new_ssa(&mut self, tag: MuTag, ty: P<MuType>) -> P<TreeNode> {
let id = TreeNode::get_id();
self.context.values.insert(id, ValueEntry{id: id, tag: tag, ty: ty.clone(), use_count: Cell::new(0), expr: None});
P(TreeNode {
id: id,
op: pick_op_code_for_ssa(&ty),
v: TreeNode_::Value(P(Value{
tag: tag,
......@@ -50,6 +53,7 @@ impl MuFunction {
pub fn new_constant(&mut self, v: P<Value>) -> P<TreeNode> {
P(TreeNode{
id: TreeNode::get_id(),
op: pick_op_code_for_const(&v.ty),
v: TreeNode_::Value(v)
})
......@@ -190,13 +194,27 @@ pub struct BlockContent {
#[derive(Debug, Clone)]
/// always use with P<TreeNode>
pub struct TreeNode {
    // unique id handed out by TreeNode::get_id()
    pub id: MuID,
    // op code derived from the payload (see pick_op_code_for_* helpers)
    pub op: OpCode,
    // payload: either an Instruction or a Value
    pub v: TreeNode_,
}
use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
static CUR_ID : AtomicUsize = ATOMIC_USIZE_INIT;
impl TreeNode {
/// Returns a fresh, process-unique node id.
///
/// Uses a single atomic `fetch_add` so two concurrent callers can never
/// observe the same id. The previous `load` + `store` pair was not
/// atomic as a whole: two threads could both load the same value before
/// either stored, handing out duplicate ids.
pub fn get_id() -> MuID {
    CUR_ID.fetch_add(1, Ordering::SeqCst)
}
/// Wraps an `Instruction` in a `TreeNode`, assigning a fresh id and the
/// op code derived from the instruction.
/// (The stale one-line pre-diff body that omitted `id` has been dropped.)
pub fn new_inst(v: Instruction) -> P<TreeNode> {
    P(TreeNode{
        id: TreeNode::get_id(),
        op: pick_op_code_for_inst(&v),
        v: TreeNode_::Instruction(v),
    })
}
pub fn extract_ssa_id(&self) -> Option<MuID> {
......@@ -210,6 +228,20 @@ impl TreeNode {
_ => None
}
}
/// Returns a clone of the wrapped value when this node holds a
/// `TreeNode_::Value`; `None` for instruction nodes.
pub fn clone_value(&self) -> Option<P<Value>> {
    if let TreeNode_::Value(ref val) = self.v {
        Some(val.clone())
    } else {
        None
    }
}
/// Consumes the node and returns its value when it holds a
/// `TreeNode_::Value`; `None` (dropping the node) otherwise.
pub fn into_value(self) -> Option<P<Value>> {
    if let TreeNode_::Value(val) = self.v {
        Some(val)
    } else {
        None
    }
}
}
/// use +() to display a node
......@@ -272,7 +304,27 @@ impl Value {
}
_ => false
}
}
}
/// If this value is an SSA variable, returns its id; `None` for constants.
pub fn extract_ssa_id(&self) -> Option<MuID> {
    if let Value_::SSAVar(id) = self.v {
        Some(id)
    } else {
        None
    }
}
}
/// Displays a value in the "+()" node notation:
/// SSA vars as `+(<ty> %<tag>#<id>)`, constants as `+(<ty> <const>)`.
impl fmt::Display for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.v {
            Value_::SSAVar(id) => write!(f, "+({} %{}#{})", self.ty, self.tag, id),
            Value_::Constant(ref c) => write!(f, "+({} {})", self.ty, c),
        }
    }
}
#[derive(Debug, Clone)]
......
......@@ -186,6 +186,22 @@ pub enum AtomicRMWOp {
UMIN
}
/// Returns true when `op` is an integer comparison: equality (EQ/NE) or
/// any signed/unsigned ordering compare.
pub fn is_int_cmp(op: CmpOp) -> bool {
    match op {
        CmpOp::EQ | CmpOp::NE
        | CmpOp::SGE | CmpOp::SGT | CmpOp::SLE | CmpOp::SLT
        | CmpOp::UGE | CmpOp::UGT | CmpOp::ULE | CmpOp::ULT => true,
        _ => false
    }
}
pub fn pick_op_code_for_inst(inst: &Instruction) -> OpCode {
match inst.v {
Instruction_::BinOp(op, _, _) => OpCode::Binary(op),
......
pub mod inst_sel;
mod temp;
pub use compiler::backend::temp::Temporary;
#[cfg(target_arch = "x86_64")]
mod x86_64;
......
use ast::types::*;
use ast::ir::*;
/// A backend temporary tied back to a high-level IR value.
pub struct Temporary {
    // id of the HLL (Mu IR) SSA value this temporary corresponds to
    pub hll_id: MuID
}
\ No newline at end of file
......@@ -5,6 +5,7 @@ use compiler::backend::x86_64::CodeGenerator;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
use ast::inst::*;
pub struct ASMCodeGen {
foo: usize
......@@ -18,22 +19,86 @@ impl ASMCodeGen {
impl CodeGenerator for ASMCodeGen {
fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
}
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: &P<Value>) {
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32) {
trace!("emit: cmp {} {}", op1, op2);
}
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
}
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: &P<Value>) {
fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: mov {} -> {}", src, dest);
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32) {
trace!("emit: sub {}, {} -> {}", dest, src, dest);
}
fn emit_jmp(&mut self, dest: &Destination) {
trace!("emit: jmp {}", dest.target);
}
fn emit_je(&mut self, dest: &Destination) {
trace!("emit: je {}", dest.target);
}
fn emit_jne(&mut self, dest: &Destination) {
trace!("emit: jne {}", dest.target);
}
fn emit_ja(&mut self, dest: &Destination) {
trace!("emit: ja {}", dest.target);
}
fn emit_jae(&mut self, dest: &Destination) {
trace!("emit: jae {}", dest.target);
}
fn emit_jb(&mut self, dest: &Destination) {
trace!("emit: jb {}", dest.target);
}
fn emit_jbe(&mut self, dest: &Destination) {
trace!("emit: jbe {}", dest.target);
}
fn emit_call(&mut self, func: &P<Value>) {
trace!("emit: call {}", func);
}
fn emit_ret(&mut self) {
trace!("emit: ret");
}
}
\ No newline at end of file
use ast::ptr::P;
use ast::ir::*;
use ast::inst::*;
/// Interface a machine-code backend must implement. Naming convention:
/// `emit_<mnemonic>_<dest form>_<src form>` where `r64` is a 64-bit
/// register operand, `mem64` a 64-bit memory operand, and `imm32` a
/// 32-bit immediate (passed as `u32`). The stale pre-diff signatures
/// that typed imm32 operands as `&P<Value>` have been dropped.
pub trait CodeGenerator {
    // compare
    fn emit_cmp_r64_r64(&mut self, op1: &P<Value>, op2: &P<Value>);
    fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: u32);
    fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);

    // data movement
    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: u32);
    fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);

    // arithmetic
    fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: u32);
    fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
    fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: u32);

    // control flow (unconditional and conditional jumps)
    fn emit_jmp(&mut self, dest: &Destination);
    fn emit_je(&mut self, dest: &Destination);
    fn emit_jne(&mut self, dest: &Destination);
    fn emit_ja(&mut self, dest: &Destination);
    fn emit_jae(&mut self, dest: &Destination);
    fn emit_jb(&mut self, dest: &Destination);
    fn emit_jbe(&mut self, dest: &Destination);

    // calls
    fn emit_call(&mut self, func: &P<Value>);
    fn emit_ret(&mut self);
}
\ No newline at end of file
This diff is collapsed.
......@@ -8,11 +8,13 @@ pub use compiler::backend::x86_64::asm_backend::ASMCodeGen;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
pub fn is_valid_x86_imm(op: &P<Value>) -> bool {
let ty : &MuType_ = &op.ty;
match ty {
&MuType_::Int(len) if len <= 32 => true,
use std::u32;
match op.v {
Value_::Constant(Constant::Int(val)) if val <= u32::MAX as usize => {
true
},
_ => false
}
}
\ No newline at end of file
use ast::ir::*;
use compiler::backend::Temporary;
use std::collections::HashMap;
/// Backend artifact for one compiled Mu function.
pub struct CompiledFunction {
    // tag of the IR function this was compiled from
    pub fn_name: MuTag,
    // backend temporaries, keyed by the HLL value id they map back to
    pub temps: HashMap<MuID, Temporary>
}
\ No newline at end of file
......@@ -3,6 +3,7 @@ use std::collections::HashMap;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
use vm::CompiledFunction;
use std::cell::RefCell;
......@@ -10,7 +11,9 @@ pub struct VMContext {
constants: HashMap<MuTag, P<Value>>,
types: HashMap<MuTag, P<MuType>>,
func_sigs: HashMap<MuTag, P<MuFuncSig>>,
funcs: HashMap<MuTag, RefCell<MuFunction>>
funcs: HashMap<MuTag, RefCell<MuFunction>>,
compiled_funcs: HashMap<MuTag, RefCell<CompiledFunction>>
}
impl VMContext {
......@@ -19,7 +22,8 @@ impl VMContext {
constants: HashMap::new(),
types: HashMap::new(),
func_sigs: HashMap::new(),
funcs: HashMap::new()
funcs: HashMap::new(),
compiled_funcs: HashMap::new()
}
}
......@@ -55,6 +59,12 @@ impl VMContext {
self.funcs.insert(func.fn_name, RefCell::new(func));
}
/// Registers the compiled form of a function under its tag.
/// Replaces any previous compiled version with the same tag.
pub fn add_compiled_func (&mut self, func: CompiledFunction) {
    // a compiled function must correspond to an already-registered IR function
    debug_assert!(self.funcs.contains_key(func.fn_name));
    self.compiled_funcs.insert(func.fn_name, RefCell::new(func));
}
/// Looks up a registered IR function by tag; `None` if absent.
pub fn get_func(&self, fn_name: MuTag) -> Option<&RefCell<MuFunction>> {
    self.funcs.get(fn_name)
}
......
pub mod context;
\ No newline at end of file
pub mod context;
mod compiled_func;
pub use vm::compiled_func::CompiledFunction;
\ No newline at end of file
......@@ -42,7 +42,7 @@ pub fn sum() -> VMContext {
// %entry(<@int_64> %n):
let mut blk_entry = Block::new("entry");
let blk_entry_n = func.new_ssa(0, "blk_entry_n", type_def_int64.clone());
let blk_entry_n = func.new_ssa("blk_entry_n", type_def_int64.clone());
let const_def_int64_0_local = func.new_constant(const_def_int64_0.clone()); // FIXME: why we need a local version?
let const_def_int64_1_local = func.new_constant(const_def_int64_1.clone());
......@@ -65,30 +65,30 @@ pub fn sum() -> VMContext {
// %head(<@int_64> %n, <@int_64> %s, <@int_64> %i):
let mut blk_head = Block::new("head");
let blk_head_n = func.new_ssa(1, "blk_head_n", type_def_int64.clone());
let blk_head_s = func.new_ssa(2, "blk_head_s", type_def_int64.clone());
let blk_head_i = func.new_ssa(3, "blk_head_i", type_def_int64.clone());
let blk_head_n = func.new_ssa("blk_head_n", type_def_int64.clone());
let blk_head_s = func.new_ssa("blk_head_s", type_def_int64.clone());
let blk_head_i = func.new_ssa("blk_head_i", type_def_int64.clone());
// %s2 = ADD %s %i
let blk_head_s2 = func.new_ssa(4, "blk_head_s2", type_def_int64.clone());
let blk_head_s2 = func.new_ssa("blk_head_s2", type_def_int64.clone());
let blk_head_inst0 = TreeNode::new_inst(Instruction {
value: Some(vec![blk_head_s2.clone()]),
value: Some(vec![blk_head_s2.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_head_s.clone(), blk_head_i.clone()]),
v: Instruction_::BinOp(BinOp::Add, 0, 1)
});
// %i2 = ADD %i 1
let blk_head_i2 = func.new_ssa(5, "blk_head_i2", type_def_int64.clone());
let blk_head_i2 = func.new_ssa("blk_head_i2", type_def_int64.clone());
let blk_head_inst1 = TreeNode::new_inst(Instruction {
value: Some(vec![blk_head_i2.clone()]),
value: Some(vec![blk_head_i2.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_head_i.clone(), const_def_int64_1_local.clone()]),
v: Instruction_::BinOp(BinOp::Add, 0, 1)
});
// %cond = UGT %i %n
let blk_head_cond = func.new_ssa(6, "blk_head_cond", type_def_int1.clone());
let blk_head_cond = func.new_ssa("blk_head_cond", type_def_int1.clone());
let blk_head_inst2 = TreeNode::new_inst(Instruction {
value: Some(vec![blk_head_cond.clone()]),
value: Some(vec![blk_head_cond.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_head_i.clone(), blk_head_n.clone()]),
v: Instruction_::CmpOp(CmpOp::UGT, 0, 1)
});
......@@ -120,7 +120,7 @@ pub fn sum() -> VMContext {
// %ret(<@int_64> %s):
let mut blk_ret = Block::new("ret");
let blk_ret_s = func.new_ssa(7, "blk_ret_s", type_def_int64.clone());
let blk_ret_s = func.new_ssa("blk_ret_s", type_def_int64.clone());
// RET %s
let blk_ret_term = TreeNode::new_inst(Instruction{
......@@ -183,13 +183,13 @@ pub fn factorial() -> VMContext {
// %blk_0(<@int_64> %n_3):
let mut blk_0 = Block::new("blk_0");
let blk_0_n_3 = func.new_ssa(0, "blk_0_n_3", type_def_int64.clone());
let blk_0_n_3 = func.new_ssa("blk_0_n_3", type_def_int64.clone());
let const_def_int64_1_local = func.new_constant(const_def_int64_1.clone());
// %v48 = EQ <@int_64> %n_3 @int_64_1
let blk_0_v48 = func.new_ssa(1, "blk_0_v48", type_def_int64.clone());
let blk_0_v48 = func.new_ssa("blk_0_v48", type_def_int64.clone());
let blk_0_inst0 = TreeNode::new_inst(Instruction {
value: Some(vec![blk_0_v48.clone()]),
value: Some(vec![blk_0_v48.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_0_n_3.clone(), const_def_int64_1_local.clone()]),
v: Instruction_::CmpOp(CmpOp::EQ, 0, 1)
});
......@@ -221,7 +221,7 @@ pub fn factorial() -> VMContext {
// %blk_2(<@int_64> %v53):
let mut blk_2 = Block::new("blk_2");
let blk_2_v53 = func.new_ssa(2, "blk_2_v53", type_def_int64.clone());
let blk_2_v53 = func.new_ssa("blk_2_v53", type_def_int64.clone());
// RET %v53
let blk_2_term = TreeNode::new_inst(Instruction{
......@@ -239,21 +239,21 @@ pub fn factorial() -> VMContext {
// %blk_1(<@int_64> %n_3):
let mut blk_1 = Block::new("blk_1");
let blk_1_n_3 = func.new_ssa(3, "blk_1_n_3", type_def_int64.clone());
let blk_1_n_3 = func.new_ssa("blk_1_n_3", type_def_int64.clone());
// %v50 = SUB <@int_64> %n_3 @int_64_1
let blk_1_v50 = func.new_ssa(4, "blk_1_v50", type_def_int64.clone());
let blk_1_v50 = func.new_ssa("blk_1_v50", type_def_int64.clone());
let blk_1_inst0 = TreeNode::new_inst(Instruction{
value: Some(vec![blk_1_v50.clone()]),
value: Some(vec![blk_1_v50.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_1_n_3.clone(), const_def_int64_1_local.clone()]),
v: Instruction_::BinOp(BinOp::Sub, 0, 1)
});
// %v51 = CALL <@fac_sig> @fac (%v50)
let blk_1_v51 = func.new_ssa(5, "blk_1_v51", type_def_int64.clone());
let blk_1_v51 = func.new_ssa("blk_1_v51", type_def_int64.clone());
let blk_1_inst1 = TreeNode::new_inst(Instruction{
value: Some(vec![blk_1_v51.clone()]),
ops: RefCell::new(vec![func.new_ssa(6, "blk_1_fac", P(MuType::funcref(fac_sig.clone()))), blk_1_v50.clone()]),
value: Some(vec![blk_1_v51.clone_value().unwrap()]),
ops: RefCell::new(vec![func.new_ssa("blk_1_fac", P(MuType::funcref(fac_sig.clone()))), blk_1_v50.clone()]),
v: Instruction_::ExprCall {
data: CallData {
func: 0,
......@@ -265,9 +265,9 @@ pub fn factorial() -> VMContext {
});
// %v52 = MUL <@int_64> %n_3 %v51
let blk_1_v52 = func.new_ssa(7, "blk_1_v52", type_def_int64.clone());
let blk_1_v52 = func.new_ssa("blk_1_v52", type_def_int64.clone());
let blk_1_inst2 = TreeNode::new_inst(Instruction{
value: Some(vec![blk_1_v52.clone()]),
value: Some(vec![blk_1_v52.clone_value().unwrap()]),
ops: RefCell::new(vec![blk_1_n_3.clone(), blk_1_v51.clone()]),
v: Instruction_::BinOp(BinOp::Mul, 0, 1)
});
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment