@@ -1031,6 +1031,16 @@ impl Value {
        }
    }

+    pub fn is_const_one(&self) -> bool {
+        match self.v {
+            Value_::Constant(Constant::Int(val)) if val == 1 => true,
+            Value_::Constant(Constant::IntEx(ref vec)) => {
+                vec[0] == 1 && vec[1..].iter().all(|x| *x == 0)
+            }
+            _ => false
+        }
+    }
    /// disguises a value as another type.
    /// This is usually used to treat an integer type as an integer of a different length.
    /// This method is unsafe
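
For review context, a standalone sketch of the IntEx arm above (a toy function, not the crate's real types): the vector is read as little-endian 64-bit words, so a 128-bit constant 1 is [1, 0]. Like `vec[0]` in the patch, this panics on an empty word vector.

    // Toy analogue of is_const_one's IntEx arm: the lowest word must be 1
    // and every higher word must be 0.
    fn words_are_one(words: &[u64]) -> bool {
        words[0] == 1 && words[1..].iter().all(|x| *x == 0)
    }

    fn main() {
        assert!(words_are_one(&[1]));     // 64-bit 1
        assert!(words_are_one(&[1, 0]));  // 128-bit 1
        assert!(!words_are_one(&[1, 1])); // 2^64 + 1
        assert!(!words_are_one(&[0, 0])); // zero
    }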
@@ -1073,6 +1083,13 @@ impl Value {
        }
    }

+    pub fn extract_func_const(&self) -> Option<MuID> {
+        match self.v {
+            Value_::Constant(Constant::FuncRef(id)) => Some(id),
+            _ => None
+        }
+    }

    pub fn extract_int_const(&self) -> Option<u64> {
        match self.v {
            Value_::Constant(Constant::Int(val)) => Some(val),
......
@@ -296,6 +296,13 @@ impl MuType {
        }
    }

+    pub fn is_uptr(&self) -> bool {
+        match self.v {
+            MuType_::UPtr(_) => true,
+            _ => false
+        }
+    }

    /// is this type an aggregate type (composed of other types)?
    pub fn is_aggregate(&self) -> bool {
        match self.v {
@@ -433,6 +440,15 @@ impl MuType {
        }
    }

+    /// gets the function signature for a FuncRef or UFuncPtr type,
+    /// or None if the type is neither of those two
+    pub fn get_func_sig(&self) -> Option<P<MuFuncSig>> {
+        match self.v {
+            MuType_::FuncRef(ref sig) | MuType_::UFuncPtr(ref sig) => Some(sig.clone()),
+            _ => None
+        }
+    }

    /// gets the length (in bits) of an integer/pointer type (assumes pointer types are always 64 bits)
    // FIXME: should deprecate this function, and get the length from BackendType
    pub fn get_int_length(&self) -> Option<usize> {
......
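
A quick standalone analogue of the two new MuType helpers (a toy enum, not the crate's real types), showing that both the predicate and the signature extraction are a single match on the type tag:

    #[derive(Clone)]
    struct FuncSig; // stand-in for P<MuFuncSig>

    enum Ty {
        UPtr(Box<Ty>),
        FuncRef(FuncSig),
        UFuncPtr(FuncSig),
        Int(usize),
    }

    impl Ty {
        // mirrors MuType::is_uptr
        fn is_uptr(&self) -> bool {
            matches!(self, Ty::UPtr(_))
        }
        // mirrors MuType::get_func_sig: FuncRef and UFuncPtr share one arm
        fn get_func_sig(&self) -> Option<FuncSig> {
            match self {
                Ty::FuncRef(sig) | Ty::UFuncPtr(sig) => Some(sig.clone()),
                _ => None,
            }
        }
    }

    fn main() {
        assert!(Ty::UPtr(Box::new(Ty::Int(64))).is_uptr());
        assert!(Ty::FuncRef(FuncSig).get_func_sig().is_some());
        assert!(Ty::Int(64).get_func_sig().is_none());
    }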
@@ -1415,7 +1415,7 @@ impl ASMCodeGen {
        op: &P<Value>,
        loc: usize
    ) -> (String, LinkedHashMap<MuID, Vec<ASMLocation>>) {
-        debug_assert!(op.is_mem());
+        debug_assert!(op.is_mem(), "op is not mem: {}", op);

        // temps/regs used
        let mut ids: Vec<MuID> = vec![];
@@ -1731,8 +1731,8 @@ impl ASMCodeGen {
        let inst = inst.to_string() + &op_postfix(len);
        trace!("emit: {} {} {}", inst, op1, op2);

-        let (mem, mut uses) = self.prepare_mem(op2, inst.len() + 1);
-        let (reg, id1, loc1) = self.prepare_reg(op1, inst.len() + 1 + mem.len() + 1);
+        let (reg, id1, loc1) = self.prepare_reg(op1, inst.len() + 1);
+        let (mem, mut uses) = self.prepare_mem(op2, inst.len() + 1 + reg.len() + 1);

        if uses.contains_key(&id1) {
            let mut locs = uses.get_mut(&id1).unwrap();
@@ -1741,7 +1741,7 @@ impl ASMCodeGen {
            uses.insert(id1, vec![loc1.clone()]);
        }

-        let asm = format!("{} {},{}", inst, mem, reg);
+        let asm = format!("{} {},{}", inst, reg, mem);

        self.add_asm_inst(asm, linked_hashmap!{}, uses, true)
    }
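
The swap matters because prepare_reg/prepare_mem record the exact column at which each operand will be printed (the ASMLocation entries, presumably relied on when temporaries are later rewritten), so the preparation order must match the format string. A quick check of the arithmetic with illustrative strings (not the crate's API):

    fn main() {
        let inst = "cmpq";
        let reg = "%rax";
        let mem = "8(%rbx)";
        // reg is printed first: it starts right after the mnemonic + space;
        // mem starts after reg and the comma, as in the fixed code above.
        let reg_start = inst.len() + 1;
        let mem_start = inst.len() + 1 + reg.len() + 1;
        let asm = format!("{} {},{}", inst, reg, mem);
        assert_eq!(&asm[reg_start..reg_start + reg.len()], "%rax");
        assert_eq!(&asm[mem_start..mem_start + mem.len()], "8(%rbx)");
    }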
@@ -1832,10 +1832,7 @@ impl ASMCodeGen {
    /// emits an instruction (use 1 reg 1 mem, define the reg)
    fn internal_binop_def_r_mem(&mut self, inst: &str, dest: &P<Value>, src: &P<Value>) {
-        let len = match dest.ty.get_int_length() {
-            Some(n) if n == 64 | 32 | 16 | 8 => n,
-            _ => panic!("unimplemented int types: {}", dest.ty)
-        };
+        let len = check_op_len(dest);

        let inst = inst.to_string() + &op_postfix(len);
        trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
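
Worth spelling out why the deleted guard had to go (my reading; the diff itself does not say): in Rust `|` binds tighter than `==`, so `n == 64 | 32 | 16 | 8` compares n against 120, and the arm could never match a real operand length. A short demonstration:

    fn main() {
        // `|` binds tighter than `==`: the old guard compared n against 120.
        assert_eq!(64 | 32 | 16 | 8, 120);
        let n = 64u64;
        assert!(!(n == 64 | 32 | 16 | 8));      // 64 != 120: the arm never fired
        assert!(matches!(n, 64 | 32 | 16 | 8)); // an or-pattern is the usual intent
    }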
@@ -2256,10 +2253,30 @@ impl ASMCodeGen {
        )
    }

-    /// emits an instruction (use 2 fpregs, define 1st fpreg)
-    fn internal_fp_binop_def_r_mem(&mut self, inst: &str, dest: Reg, src: Reg) {
+    /// emits an instruction (use 1 fpreg 1 memory operand, define 1st fpreg)
+    fn internal_fp_binop_def_r_mem(&mut self, inst: &str, dest: Reg, src: Mem) {
        trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);

-        unimplemented!()
+        let (mem, mut uses) = self.prepare_mem(src, inst.len() + 1);
+        let (reg, id1, loc1) = self.prepare_fpreg(dest, inst.len() + 1 + mem.len() + 1);
+
+        if uses.contains_key(&id1) {
+            let mut locs = uses.get_mut(&id1).unwrap();
+            vec_utils::add_unique(locs, loc1.clone());
+        } else {
+            uses.insert(id1, vec![loc1.clone()]);
+        }
+
+        let asm = format!("{} {},{}", inst, mem, reg);
+
+        self.add_asm_inst(
+            asm,
+            linked_hashmap!{
+                id1 => vec![loc1]
+            },
+            uses,
+            true
+        )
+    }

    /// emits a move instruction (reg -> fpreg)
......
@@ -33,7 +33,75 @@ impl TreeGen {
}

fn is_movable(inst: &Instruction) -> bool {
-    !inst.has_side_effect()
+    is_suitable_child(inst)
}

+/// is this instruction suitable to be a tree child (we may find a pattern for it)?
+fn is_suitable_child(inst: &Instruction) -> bool {
+    use ast::inst::Instruction_::*;
+
+    match inst.v {
+        Return(_) |
+        ThreadExit |
+        Throw(_) |
+        TailCall(_) |
+        Branch1(_) |
+        Branch2 { .. } |
+        Watchpoint { .. } |
+        WPBranch { .. } |
+        Call { .. } |
+        CCall { .. } |
+        SwapStackExc { .. } |
+        SwapStackKill { .. } |
+        Switch { .. } |
+        ExnInstruction { .. } |
+        PrintHex(_) |
+        SetRetval(_) |
+        KillStack(_) |
+        CurrentStack |
+        SwapStackExpr { .. } |
+        CommonInst_Tr64IsFp(_) |
+        CommonInst_Tr64IsInt(_) |
+        CommonInst_Tr64IsRef(_) |
+        CommonInst_Tr64FromFp(_) |
+        CommonInst_Tr64FromInt(_) |
+        CommonInst_Tr64FromRef(_, _) |
+        CommonInst_Tr64ToFp(_) |
+        CommonInst_Tr64ToInt(_) |
+        CommonInst_Tr64ToRef(_) |
+        CommonInst_Tr64ToTag(_) |
+        ExprCall { .. } |
+        ExprCCall { .. } |
+        New(_) |
+        AllocA(_) |
+        NewHybrid(_, _) |
+        AllocAHybrid(_, _) |
+        NewStack(_) |
+        NewThread { .. } |
+        NewFrameCursor(_) |
+        Select { .. } |
+        Fence(_) |
+        CommonInst_SetThreadLocal(_) |
+        CommonInst_Pin(_) |
+        CommonInst_Unpin(_) |
+        CommonInst_GetAddr(_) |
+        CmpXchg { .. } |
+        AtomicRMW { .. } |
+        Store { .. } => false,
+
+        BinOp(_, _, _) |
+        BinOpWithStatus(_, _, _, _) |
+        CmpOp(_, _, _) |
+        ConvOp { .. } |
+        Load { .. } |
+        GetIRef(_) |
+        GetFieldIRef { .. } |
+        GetElementIRef { .. } |
+        ShiftIRef { .. } |
+        GetVarPartIRef { .. } |
+        CommonInst_GetThreadLocal |
+        Move(_) => true
+    }
+}

impl CompilerPass for TreeGen {
@@ -51,8 +119,8 @@ impl CompilerPass for TreeGen {
        // then we replace the use of the SSA with the actual variable
        // we are doing it in two steps
-        // 1. if we see an expression that generates an SSA which is used only once, we take out
-        //    the expression node
+        // 1. if we see an expression that generates an SSA which is used only once and used
+        //    in its next instruction, we take out the expression node
        // 2. if we see an SSA that is used only once (and it is this place for sure), we replace it
        //    with the expression node
        // because of SSA form, it is guaranteed to see 1 before 2 for SSA variables.
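
A toy model of the rule this comment describes (illustrative types, not the pass's real ones): a defined value may become a tree child only if its use count is 1 and that single use is in the immediately following instruction, which is exactly the `use_count() == 1 && next_inst_uses_lhs` check in the loop below.

    struct Inst {
        defines: Option<&'static str>,   // SSA name this instruction defines
        uses: Vec<&'static str>,         // SSA names this instruction reads
        use_count_of_def: usize,         // total uses of the defined name
    }

    fn can_fold_into_next(body: &[Inst], i: usize) -> bool {
        let inst = &body[i];
        match inst.defines {
            Some(name) if inst.use_count_of_def == 1 => {
                // mirrors next_inst_uses_lhs: the last instruction never folds
                i + 1 < body.len() && body[i + 1].uses.contains(&name)
            }
            _ => false,
        }
    }

    fn main() {
        let body = [
            // y = LOAD ptr     (used once, by the next instruction)
            Inst { defines: Some("y"), uses: vec!["ptr"], use_count_of_def: 1 },
            // cond = SGT x y   (LOAD folds into SGT's subtree)
            Inst { defines: Some("cond"), uses: vec!["x", "y"], use_count_of_def: 1 },
            // BRANCH2 cond ... (terminator defines nothing)
            Inst { defines: None, uses: vec!["cond"], use_count_of_def: 0 },
        ];
        assert!(can_fold_into_next(&body, 0));  // LOAD -> SGT: fold
        assert!(can_fold_into_next(&body, 1));  // SGT -> BRANCH2: fold
        assert!(!can_fold_into_next(&body, 2)); // terminator: nothing to fold
    }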
@@ -71,7 +139,8 @@ impl CompilerPass for TreeGen {
            trace!("check block {}", label);
            trace!("");

-            for mut node in body.into_iter() {
+            for i in 0..body.len() {
+                let mut node = body[i].clone();
                trace!("check inst: {}", node);
                match &mut node.v {
                    &mut TreeNode_::Instruction(ref mut inst) => {
@@ -105,6 +174,7 @@ impl CompilerPass for TreeGen {
                        // * it generates only one value
                        // * the value is used only once
                        // * the instruction is movable
+                        // * the value is used in the next instruction
                        trace!("check if we should fold the inst");
                        if inst.value.is_some() {
                            let left = inst.value.as_ref().unwrap();
@@ -112,11 +182,24 @@ impl CompilerPass for TreeGen {
                            // if left is _one_ variable that is used once
                            // we can put the expression as a child node to its use
                            if left.len() == 1 {
+                                let ref val_lhs = left[0];
                                let lhs = context
                                    .get_value_mut(left[0].extract_ssa_id().unwrap())
                                    .unwrap();
                                if lhs.use_count() == 1 {
-                                    if is_movable(&inst) {
+                                    let next_inst_uses_lhs = {
+                                        if i != body.len() - 1 {
+                                            let ref next_inst = body[i + 1].as_inst_ref();
+                                            next_inst
+                                                .ops
+                                                .iter()
+                                                .any(|x| x.as_value() == val_lhs)
+                                        } else {
+                                            // this is the last instruction
+                                            false
+                                        }
+                                    };
+
+                                    if is_movable(&inst) && next_inst_uses_lhs {
                                        // FIXME: should be able to move the inst here
                                        lhs.assign_expr(inst.clone());
@@ -124,7 +207,7 @@ impl CompilerPass for TreeGen {
                                        trace!("");
                                        continue;
                                    } else {
-                                        trace!("no, not movable");
+                                        trace!("no, not movable or not used by next inst");
                                    }
                                } else {
                                    trace!("no, use count more than 1");
......
@@ -1401,8 +1401,10 @@ def test_float():
    points = [None] * n
    for i in xrange(n):
        points[i] = Point(i)
+        print points[i]

    for p in points:
        p.normalize()
+        print p

    return maximize(points)

POINTS = 100
......
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util import execute, compile_bundle, load_bundle, get_function;
import pytest;
import ctypes;
def test_gt_mem_r():
    lib = load_bundle(
        """
        .funcsig sig = (int<64> uptr<int<64>>) -> (int<64>)
        .funcdef test_gt_mem_r <sig>
        {
            entry(<int<64>> x <uptr<int<64>>> ptr):
                y = LOAD PTR <int<64>> ptr
                cond = SGT <int<64>> x y
                BRANCH2 cond ret1() ret0()

            ret1():
                RET <int<64>> 1

            ret0():
                RET <int<64>> 0
        }
        """, "test_gt_mem_r"
    )

    zero = ctypes.c_int64(0);
    ptr = ctypes.addressof(zero);

    cmp_gt_zero = get_function(lib.test_gt_mem_r, [ctypes.c_int64, ctypes.c_int64], ctypes.c_int64)
    assert(cmp_gt_zero(ctypes.c_int64(1), ptr) == 1)
    assert(cmp_gt_zero(ctypes.c_int64(0), ptr) == 0)
    assert(cmp_gt_zero(ctypes.c_int64(-1), ptr) == 0)
def test_gt_val_mem_r():
    lib = load_bundle(
        """
        .funcsig sig = (int<64> uptr<int<64>>) -> (int<8>)
        .funcdef test_gt_val_mem_r <sig>
        {
            entry(<int<64>> x <uptr<int<64>>> ptr):
                y = LOAD PTR <int<64>> ptr
                cond = SGT <int<64>> x y
                res = ZEXT <int<1> int<8>> cond
                RET res
        }
        """, "test_gt_val_mem_r"
    )

    zero = ctypes.c_int64(0);
    ptr = ctypes.addressof(zero);

    # ctypes has no c_voidp; the pointer argument type is c_void_p
    cmp_gt_zero = get_function(lib.test_gt_val_mem_r, [ctypes.c_int64, ctypes.c_void_p], ctypes.c_int8)
    assert(cmp_gt_zero(ctypes.c_int64(1), ptr) == 1)
    assert(cmp_gt_zero(ctypes.c_int64(0), ptr) == 0)
    assert(cmp_gt_zero(ctypes.c_int64(-1), ptr) == 0)
def test_gt_r_mem():
    lib = load_bundle(
        """
        .funcsig sig = (uptr<int<64>> int<64>) -> (int<64>)
        .funcdef test_gt_r_mem <sig>
        {
            entry(<uptr<int<64>>> ptr <int<64>> y):
                x = LOAD PTR <int<64>> ptr
                cond = SGT <int<64>> x y
                BRANCH2 cond ret1() ret0()

            ret1():
                RET <int<64>> 1

            ret0():
                RET <int<64>> 0
        }
        """, "test_gt_r_mem"
    )

    zero = ctypes.c_int64(0);
    ptr = ctypes.addressof(zero);

    cmp_gt_zero = get_function(lib.test_gt_r_mem, [ctypes.c_int64, ctypes.c_int64], ctypes.c_int64)
    assert(cmp_gt_zero(ptr, ctypes.c_int64(1)) == 0)
    assert(cmp_gt_zero(ptr, ctypes.c_int64(0)) == 0)
    assert(cmp_gt_zero(ptr, ctypes.c_int64(-1)) == 1)
def test_gt_mem_f():
    lib = load_bundle(
        """
        .funcsig sig = (double uptr<double>) -> (int<64>)
        .funcdef test_gt_mem_f <sig>
        {
            entry(<double> x <uptr<double>> ptr):
                y = LOAD PTR <double> ptr
                cond = FOGT <double> x y
                BRANCH2 cond ret1() ret0()

            ret1():
                RET <int<64>> 1

            ret0():
                RET <int<64>> 0
        }
        """, "test_gt_mem_f"
    )

    zero = ctypes.c_double(0);
    ptr = ctypes.addressof(zero);

    # c_void_p, not the nonexistent c_voidp
    cmp_gt_zero = get_function(lib.test_gt_mem_f, [ctypes.c_double, ctypes.c_void_p], ctypes.c_int64)
    assert(cmp_gt_zero(ctypes.c_double(1), ptr) == 1)
    assert(cmp_gt_zero(ctypes.c_double(0), ptr) == 0)
    assert(cmp_gt_zero(ctypes.c_double(-1), ptr) == 0)
def test_gt_f_mem():
    lib = load_bundle(
        """
        .funcsig sig = (uptr<double> double) -> (int<64>)
        .funcdef test_gt_f_mem <sig>
        {
            entry(<uptr<double>> ptr <double> y):
                x = LOAD PTR <double> ptr
                cond = FOGT <double> x y
                BRANCH2 cond ret1() ret0()

            ret1():
                RET <int<64>> 1

            ret0():
                RET <int<64>> 0
        }
        """, "test_gt_f_mem"
    )

    zero = ctypes.c_double(0);
    ptr = ctypes.addressof(zero);

    # c_void_p, not the nonexistent c_voidp
    cmp_gt_zero = get_function(lib.test_gt_f_mem, [ctypes.c_void_p, ctypes.c_double], ctypes.c_int64)
    assert(cmp_gt_zero(ptr, ctypes.c_double(1)) == 0)
    assert(cmp_gt_zero(ptr, ctypes.c_double(0)) == 0)
    assert(cmp_gt_zero(ptr, ctypes.c_double(-1)) == 1)
def test_eq_f_zero():
    lib = load_bundle(
        """
        .funcsig sig = (double) -> (int<8>)
        .funcdef test_eq_f_zero <sig>
        {
            entry(<double> x):
                cond = FOEQ <double> x <double> 0.00 d
                res = ZEXT <int<1> int<8>> cond
                RET res
        }
        """, "test_eq_f_zero"
    )

    eq_zero = get_function(lib.test_eq_f_zero, [ctypes.c_double], ctypes.c_int8)
    assert(eq_zero(ctypes.c_double(0)) == 1)
    assert(eq_zero(ctypes.c_double(1)) == 0)
    assert(eq_zero(ctypes.c_double(-1)) == 0)
\ No newline at end of file
@@ -239,5 +239,4 @@ def test_exc_pass_values():
        }
        """, "test_exc_pass_values");

    assert(execute("test_exc_pass_values") == 4);
-    assert(execute("test_exc_pass_values") == 4);
\ No newline at end of file