
fixed formatting issues

parent 612f3aa0
@@ -46,15 +46,19 @@ macro_rules! impl_mu_entity {
     ($entity: ty) => {
         impl MuEntity for $entity {
             #[inline(always)]
-            fn id(&self) -> MuID {self.hdr.id()}
+            fn id(&self) -> MuID {
+                self.hdr.id()
+            }
             #[inline(always)]
-            fn name(&self) -> MuName {self.hdr.name()}
+            fn name(&self) -> MuName {
+                self.hdr.name()
+            }
             fn as_entity(&self) -> &MuEntity {
-                let ref_ty : &$entity = self;
+                let ref_ty: &$entity = self;
                 ref_ty as &MuEntity
             }
         }
-    }
+    };
 }

 /// select between two values based on condition
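For readers skimming the hunk, a usage sketch of impl_mu_entity! (the struct here is hypothetical; real call sites live elsewhere in the AST crate):

    // Hypothetical call site: any type with a MuEntityHeader field named `hdr`
    // gets forwarding id()/name()/as_entity() impls from the macro.
    struct DummyEntity {
        hdr: MuEntityHeader,
    }
    impl_mu_entity!(DummyEntity);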
@@ -65,12 +69,12 @@ macro_rules! select_value {
         } else {
             $res2
         }
-    }
+    };
 }

 #[macro_use]
 pub mod ir;
 pub mod inst;
-pub mod types;
-pub mod ptr;
 pub mod op;
+pub mod ptr;
+pub mod types;
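Only the tail of select_value! appears above; a minimal sketch of the full macro, assuming the obvious shape:

    // Assumed full definition (only the `} else { $res2 }` tail is shown in the hunk):
    macro_rules! select_value {
        ($cond: expr, $res1: expr, $res2: expr) => {
            if $cond {
                $res1
            } else {
                $res2
            }
        };
    }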
@@ -36,7 +36,7 @@ pub enum BinOp {
     FSub,
     FMul,
     FDiv,
-    FRem
+    FRem,
 }

 impl fmt::Display for BinOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -49,7 +49,7 @@ impl BinOp {
         use op::BinOp::*;
         match self {
             FAdd | FSub | FMul | FDiv | FRem => true,
-            _ => false
+            _ => false,
         }
     }
 }
@@ -83,7 +83,7 @@ pub enum CmpOp {
     FULT,
     FULE,
     FUNE,
-    FUNO
+    FUNO,
 }

 impl fmt::Display for CmpOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -116,7 +116,7 @@ impl CmpOp {
             FUGT => FULT,
             FULT => FUGT,
-            _ => self // all other comparisons are symmetric
+            _ => self, // all other comparisons are symmetric
         }
     }
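The arm list above belongs to an operand-swapping helper (its name, assumed here to be swap_operands, sits outside the hunk); asymmetric comparisons flip, symmetric ones map to themselves:

    // Illustrative expectations (method name assumed, not from the diff):
    assert_eq!(CmpOp::FUGT.swap_operands(), CmpOp::FULT);
    assert_eq!(CmpOp::EQ.swap_operands(), CmpOp::EQ); // symmetric, unchanged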
@@ -161,7 +161,7 @@ impl CmpOp {
             FONE => FUEQ,
             FFALSE => FTRUE,
-            FTRUE => FFALSE
+            FTRUE => FFALSE,
         }
     }
@@ -173,7 +173,7 @@ impl CmpOp {
             SLT => ULT,
             SGT => UGT,
             SLE => ULE,
-            _ => self
+            _ => self,
         }
     }
@@ -181,7 +181,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             SGE | SLT | SGT | SLE => true,
-            _ => false
+            _ => false,
         }
     }
@@ -189,7 +189,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE | SGE | SGT | SLE | SLT | UGE | UGT | ULE | ULT => true,
-            _ => false
+            _ => false,
         }
     }
@@ -200,14 +200,14 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE => true,
-            _ => false
+            _ => false,
         }
     }

     pub fn is_ult_cmp(self) -> bool {
         use op::CmpOp::*;
         match self {
             UGE | UGT | ULE | ULT => true,
-            _ => false
+            _ => false,
         }
     }
@@ -215,7 +215,7 @@ impl CmpOp {
         use op::CmpOp::*;
         match self {
             EQ | NE | FORD | FUNO | FUNE | FUEQ | FONE | FOEQ => true,
-            _ => false
+            _ => false,
         }
     }
 }
@@ -233,7 +233,7 @@ pub enum ConvOp {
     SITOFP,
     BITCAST,
     REFCAST,
-    PTRCAST
+    PTRCAST,
 }

 impl fmt::Display for ConvOp {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -253,7 +253,7 @@ pub enum AtomicRMWOp {
     MAX,
     MIN,
     UMAX,
-    UMIN
+    UMIN,
 }

 impl fmt::Display for AtomicRMWOp {
......
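The Display impl is cut off above; the usual shape of these impls in this file, sketched for AtomicRMWOp (arm strings assumed, remaining variants elided as in the hunk):

    // Sketch of the truncated impl (string forms assumed, not from the diff):
    impl fmt::Display for AtomicRMWOp {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            use op::AtomicRMWOp::*;
            let s = match *self {
                MAX => "MAX",
                MIN => "MIN",
                UMAX => "UMAX",
                UMIN => "UMIN",
                _ => unimplemented!(), // variants not shown in the hunk
            };
            write!(f, "{}", s)
        }
    }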
@@ -16,8 +16,8 @@ use ast::ir::*;
 use ast::ptr::P;
 use runtime::ValueLocation;
+use compiler::backend::{Mem, Reg};
 use compiler::machine_code::MachineCode;
-use compiler::backend::{Reg, Mem};

 pub trait CodeGenerator {
     fn start_code(&mut self, func_name: MuName, entry: MuName) -> ValueLocation;
@@ -76,7 +76,6 @@ pub trait CodeGenerator {
     take the ZR (and vice versa)
     */

     // loads
-    // supports the full full range of addressing modes
     fn emit_ldr(&mut self, dest: Reg /*GPR or FPR*/, src: Mem, signed: bool);
@@ -94,7 +93,7 @@ pub trait CodeGenerator {
         &mut self,
         dest1: Reg, /*GPR or FPR*/
         dest2: Reg, /*GPR or FPR*/
-        src: Mem
+        src: Mem,
     ); // [base, #simm7]

     // Stores
@@ -114,7 +113,7 @@ pub trait CodeGenerator {
         &mut self,
         dest: Mem,
         src1: Reg, /*GPR or FPR*/
-        src2: Reg /*GPR or FPR*/
+        src2: Reg, /*GPR or FPR*/
     ); // [base, #simm7]

     // Calls
@@ -125,7 +124,7 @@ pub trait CodeGenerator {
         pe: Option<MuName>,
         args: Vec<P<Value>>,
         ret: Vec<P<Value>>,
-        is_native: bool
+        is_native: bool,
     ) -> Option<ValueLocation>;
     fn emit_blr(
         &mut self,
@@ -133,7 +132,7 @@ pub trait CodeGenerator {
         func: Reg,
         pe: Option<MuName>,
         args: Vec<P<Value>>,
-        ret: Vec<P<Value>>
+        ret: Vec<P<Value>>,
     ) -> Option<ValueLocation>;

     // Branches
     fn emit_b(&mut self, dest_name: MuName);
@@ -147,7 +146,7 @@ pub trait CodeGenerator {
         args: Vec<P<Value>>,
         ret: Vec<P<Value>>,
         is_native: bool,
-        may_return: bool
+        may_return: bool,
     ) -> Option<ValueLocation>;
     fn emit_br_call(
         &mut self,
@@ -156,10 +155,9 @@ pub trait CodeGenerator {
         pe: Option<MuName>,
         args: Vec<P<Value>>,
         ret: Vec<P<Value>>,
-        may_return: bool
+        may_return: bool,
     ) -> Option<ValueLocation>;

     fn emit_ret(&mut self, src: Reg);
-
     fn emit_cbnz(&mut self, src: Reg, dest_name: MuName);
     fn emit_cbz(&mut self, src: Reg, dest_name: MuName);
@@ -241,7 +239,7 @@ pub trait CodeGenerator {
         src1: Reg, /*GPR or SP*/
         src2: Reg,
         signed: bool,
-        shift: u8
+        shift: u8,
     );
     fn emit_adds_ext(
         &mut self,
@@ -249,7 +247,7 @@ pub trait CodeGenerator {
         src1: Reg, /*GPR or SP*/
         src2: Reg,
         signed: bool,
-        shift: u8
+        shift: u8,
     );
     fn emit_sub_ext(
         &mut self,
@@ -257,7 +255,7 @@ pub trait CodeGenerator {
         src1: Reg, /*GPR or SP*/
         src2: Reg,
         signed: bool,
-        shift: u8
+        shift: u8,
     );
     fn emit_subs_ext(
         &mut self,
@@ -265,7 +263,7 @@ pub trait CodeGenerator {
         src1: Reg, /*GPR or SP*/
         src2: Reg,
         signed: bool,
-        shift: u8
+        shift: u8,
     );

     // Multiplication
@@ -334,7 +332,7 @@ pub trait CodeGenerator {
         dest: Reg, /*GPR or SP*/
         src1: Reg, /*GPR or SP*/
         src2: u16,
-        shift: bool
+        shift: bool,
     );
     fn emit_adds_imm(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);
     fn emit_sub_imm(
@@ -342,7 +340,7 @@ pub trait CodeGenerator {
         dest: Reg, /*GPR or SP*/
         src1: Reg, /*GPR or SP*/
         src2: u16,
-        shift: bool
+        shift: bool,
     );
     fn emit_subs_imm(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);
@@ -365,28 +363,28 @@ pub trait CodeGenerator {
         dest: Reg, /*64*/
         src1: Reg, /*32*/
         src2: Reg, /*32*/
-        src3: Reg /*64*/
+        src3: Reg, /*64*/
     );
     fn emit_smsubl(
         &mut self,
         dest: Reg, /*64*/
         src1: Reg, /*32*/
         src2: Reg, /*32*/
-        src3: Reg /*64*/
+        src3: Reg, /*64*/
     );
     fn emit_umaddl(
         &mut self,
         dest: Reg, /*64*/
         src1: Reg, /*32*/
         src2: Reg, /*32*/
-        src3: Reg /*64*/
+        src3: Reg, /*64*/
     );
     fn emit_umsubl(
         &mut self,
         dest: Reg, /*64*/
         src1: Reg, /*32*/
         src2: Reg, /*32*/
-        src3: Reg /*64*/
+        src3: Reg, /*64*/
     );
     fn emit_fmadd(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
     fn emit_fmsub(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
......
@@ -1430,11 +1430,11 @@ impl ASMCodeGen {
         match op.v {
             // offset(base,index,scale)
             Value_::Memory(MemoryLocation::Address {
-                               ref base,
-                               ref offset,
-                               ref index,
-                               scale,
-                           }) => {
+                ref base,
+                ref offset,
+                ref index,
+                scale,
+            }) => {
                 // deal with offset
                 if offset.is_some() {
                     let offset = offset.as_ref().unwrap();
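For orientation, the operand shape being destructured here, as a construction sketch (field types are assumed from the pattern; `base_reg` and `off` are placeholders):

    // Hypothetical construction of the matched operand:
    let mem = Value_::Memory(MemoryLocation::Address {
        base: base_reg,    // P<Value>: the base register
        offset: Some(off), // Option<P<Value>>: immediate or register offset
        index: None,       // Option<P<Value>>: optional index register
        scale: None,       // Option<u8>: 1, 2, 4 or 8 when indexed (assumed)
    });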
@@ -1511,11 +1511,11 @@ impl ASMCodeGen {
                 loc_cursor += 1;
             }
             Value_::Memory(MemoryLocation::Symbolic {
-                               ref base,
-                               ref label,
-                               is_global,
-                               is_native,
-                           }) => {
+                ref base,
+                ref label,
+                is_global,
+                is_native,
+            }) => {
                 let label = if is_native {
                     "/*C*/".to_string() + label.as_str()
                 } else {
@@ -4094,7 +4094,7 @@ pub fn emit_context_with_reloc(
             "\t{}",
             directive_equiv(demangled_name, global_cell_name.clone())
         )
-            .unwrap();
+        .unwrap();
     }
 }
@@ -4143,7 +4143,7 @@ pub fn emit_context_with_reloc(
                 "\t.quad {}\n",
                 symbol(&mangle_name(label.clone()))
             ))
-                .unwrap();
+            .unwrap();
         } else {
             // otherwise this offset is plain data
......
 use ast::ir::*;
 use ast::ptr::*;
 use ast::types::*;
-use compiler::backend::RegGroup;
 use compiler::backend::x86_64;
 use compiler::backend::BackendType;
+use compiler::backend::RegGroup;
 use utils::ByteSize;
 use vm::VM;
@@ -12,7 +12,7 @@ pub enum CallConvResult {
     GPR(P<Value>),
     GPREX(P<Value>, P<Value>),
     FPR(P<Value>),
-    STACK
+    STACK,
 }

 pub mod mu {
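A sketch of how a caller might consume this classification (the variable `res` and the surrounding call-convention code are assumed):

    // Dispatch on a classified argument location:
    match res {
        CallConvResult::GPR(ref val) => { /* passed in one general-purpose register */ }
        CallConvResult::GPREX(ref lo, ref hi) => { /* split across a register pair */ }
        CallConvResult::FPR(ref val) => { /* passed in a floating-point register */ }
        CallConvResult::STACK => { /* passed on the stack */ }
    }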
@@ -21,8 +21,8 @@ pub mod mu {
 pub mod swapstack {
     pub use super::c::compute_arguments;
-    pub use super::c::compute_stack_args;
     pub use super::c::compute_arguments as compute_return_values;
+    pub use super::c::compute_stack_args;
     pub use super::c::compute_stack_args as compute_stack_retvals;
 }
@@ -168,7 +168,7 @@ pub mod c {
     /// returns a tuple of (size, offset for each values on stack)
     pub fn compute_stack_locations(
         stack_val_tys: &Vec<P<MuType>>,
-        vm: &VM
+        vm: &VM,
     ) -> (ByteSize, Vec<ByteSize>) {
         let (stack_arg_size, _, stack_arg_offsets) =
             BackendType::sequential_layout(stack_val_tys, vm);
......
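A worked expectation for compute_stack_locations, assuming sequential_layout packs 8-byte values with no padding (a `vm` handle is assumed in scope):

    let tys = vec![UINT64_TYPE.clone(), UINT64_TYPE.clone()];
    let (size, offsets) = compute_stack_locations(&tys, &vm);
    assert_eq!(size, 16);            // two 8-byte slots
    assert_eq!(offsets, vec![0, 8]); // one offset per stack value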
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use ast::ptr::P;
 use ast::ir::*;
+use ast::ptr::P;
 use runtime::ValueLocation;
+use compiler::backend::{Mem, Reg};
 use compiler::machine_code::MachineCode;
-use compiler::backend::{Reg, Mem};

 /// CodeGenerator provides an interface to emit x86_64 code for instruction selection.
 /// This allows us to implement the other parts of the compiler (mostly instruction selection)
@@ -81,8 +81,8 @@ pub trait CodeGenerator {
     fn emit_mov_r_mem(&mut self, dest: Reg, src: Mem); // load
     fn emit_mov_r_r(&mut self, dest: Reg, src: Reg);
     fn emit_mov_mem_r(&mut self, dest: Mem, src: Reg); // store
-                                                       // we can infer imm length from Reg, but cannot from Mem
-                                                       // because mem may only have type as ADDRESS_TYPE
+    // we can infer imm length from Reg, but cannot from Mem
+    // because mem may only have type as ADDRESS_TYPE
     fn emit_mov_mem_imm(&mut self, dest: Mem, src: i32, oplen: usize); // store
     fn emit_mov_mem_r_callee_saved(&mut self, dest: Mem, src: Reg); // store callee saved register
@@ -238,7 +238,7 @@ pub trait CodeGenerator {
         pe: Option<MuName>,
         uses: Vec<P<Value>>,
         defs: Vec<P<Value>>,
-        is_native: bool
+        is_native: bool,
     ) -> ValueLocation;
     fn emit_call_near_r64(
         &mut self,
@@ -246,7 +246,7 @@ pub trait CodeGenerator {
         func: &P<Value>,
         pe: Option<MuName>,
         uses: Vec<P<Value>>,
-        defs: Vec<P<Value>>
+        defs: Vec<P<Value>>,
     ) -> ValueLocation;
     fn emit_call_near_mem64(
         &mut self,
@@ -254,7 +254,7 @@ pub trait CodeGenerator {
         func: &P<Value>,
         pe: Option<MuName>,
         uses: Vec<P<Value>>,
-        defs: Vec<P<Value>>
+        defs: Vec<P<Value>>,
     ) -> ValueLocation;

     // sometimes we use jmp as a call (but without pushing return address)
@@ -265,7 +265,7 @@ pub trait CodeGenerator {
         pe: Option<MuName>,
         uses: Vec<P<Value>>,
         defs: Vec<P<Value>>,
-        is_native: bool
+        is_native: bool,
     ) -> ValueLocation;
     fn emit_call_jmp_indirect(
         &mut self,
@@ -273,7 +273,7 @@ pub trait CodeGenerator {
         func: &P<Value>,
         pe: Option<MuName>,
         uses: Vec<P<Value>>,
-        defs: Vec<P<Value>>
+        defs: Vec<P<Value>>,
     ) -> ValueLocation;

     fn emit_ret(&mut self);
......
@@ -4165,9 +4165,9 @@ impl<'a> InstructionSelection {
         let cur_node = cur_node.unwrap();
         match cur_node.v {
             TreeNode_::Instruction(Instruction {
-                                       v: Instruction_::CCall { .. },
-                                       ..
-                                   }) => unimplemented!(),
+                v: Instruction_::CCall { .. },
+                ..
+            }) => unimplemented!(),
             _ => {
                 // wont have an exception branch, ignore
             }
......
@@ -39,17 +39,17 @@ pub use compiler::backend::x86_64::asm_backend::emit_context_with_reloc;
 #[cfg(feature = "aot")]
 pub use compiler::backend::x86_64::asm_backend::spill_rewrite;

-use utils::Address;
-use utils::ByteSize;
-use ast::ptr::P;
 use ast::ir::*;
+use ast::ptr::P;
 use ast::types::*;
 use compiler::backend::RegGroup;
-use vm::VM;
 use std::sync::Arc;
+use utils::Address;
+use utils::ByteSize;
+use vm::VM;

-use utils::LinkedHashMap;
 use std::collections::HashMap;
+use utils::LinkedHashMap;

 // number of normal callee saved registers (excluding RSP and RBP)
 pub const CALLEE_SAVED_COUNT: usize = 5;
@@ -58,64 +58,62 @@ pub const CALLEE_SAVED_COUNT: usize = 5;
 macro_rules! GPR_ALIAS {
     ($alias: ident: ($id64: expr, $r64: ident) ->
      $r32: ident, $r16: ident, $r8l: ident, $r8h: ident) => {
-        lazy_static!{
-            pub static ref $r64 : P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
-            pub static ref $r32 : P<Value> = GPR!($id64 +1, stringify!($r32), UINT32_TYPE);
-            pub static ref $r16 : P<Value> = GPR!($id64 +2, stringify!($r16), UINT16_TYPE);
-            pub static ref $r8l : P<Value> = GPR!($id64 +3, stringify!($r8l), UINT8_TYPE);
-            pub static ref $r8h : P<Value> = GPR!($id64 +4, stringify!($r8h), UINT8_TYPE);
-            pub static ref $alias : [P<Value>; 5] = [$r64.clone(), $r32.clone(), $r16.clone(),
-                $r8l.clone(), $r8h.clone()];
+        lazy_static! {
+            pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
+            pub static ref $r32: P<Value> = GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
+            pub static ref $r16: P<Value> = GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
+            pub static ref $r8l: P<Value> = GPR!($id64 + 3, stringify!($r8l), UINT8_TYPE);
+            pub static ref $r8h: P<Value> = GPR!($id64 + 4, stringify!($r8h), UINT8_TYPE);
+            pub static ref $alias: [P<Value>; 5] = [
+                $r64.clone(),
+                $r32.clone(),
+                $r16.clone(),
+                $r8l.clone(),
+                $r8h.clone()
+            ];
         }
     };

     ($alias: ident: ($id64: expr, $r64: ident) -> $r32: ident, $r16: ident, $r8: ident) => {
-        lazy_static!{
-            pub static ref $r64 : P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
-            pub static ref $r32 : P<Value> = GPR!($id64 +1, stringify!($r32), UINT32_TYPE);
-            pub static ref $r16 : P<Value> = GPR!($id64 +2, stringify!($r16), UINT16_TYPE);
-            pub static ref $r8 : P<Value> = GPR!($id64 +3, stringify!($r8) , UINT8_TYPE );
-            pub static ref $alias : [P<Value>; 4] = [$r64.clone(), $r32.clone(),
-                $r16.clone(), $r8.clone()];
+        lazy_static! {
+            pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
+            pub static ref $r32: P<Value> = GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
+            pub static ref $r16: P<Value> = GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
+            pub static ref $r8: P<Value> = GPR!($id64 + 3, stringify!($r8), UINT8_TYPE);
+            pub static ref $alias: [P<Value>; 4] =
+                [$r64.clone(), $r32.clone(), $r16.clone(), $r8.clone()];
         }
     };

     ($alias: ident: ($id64: expr, $r64: ident)) => {
-        lazy_static!{
-            pub static ref $r64 : P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
-            pub static ref $alias : [P<Value>; 4] = [$r64.clone(), $r64.clone(),
-                $r64.clone(), $r64.clone()];
+        lazy_static! {
+            pub static ref $r64: P<Value> = GPR!($id64, stringify!($r64), UINT64_TYPE);
+            pub static ref $alias: [P<Value>; 4] =
+                [$r64.clone(), $r64.clone(), $r64.clone(), $r64.clone()];
         }
     };
 }

 /// a macro to declare a general purpose register
 macro_rules! GPR {
-    ($id:expr, $name: expr, $ty: ident) => {
-        {
-            P(Value {
-                hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
-                ty: $ty.clone(),
-                v: Value_::SSAVar($id)
-            })
-        }
-    };
+    ($id:expr, $name: expr, $ty: ident) => {{
+        P(Value {
+            hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
+            ty: $ty.clone(),
+            v: Value_::SSAVar($id),
+        })
+    }};
 }

 /// a macro to declare a floating point register
 macro_rules! FPR {
-    ($id:expr, $name: expr) => {
-        {
-            P(Value {
-                hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
-                ty: DOUBLE_TYPE.clone(),
-                v: Value_::SSAVar($id)
-            })
-        }
-    };
+    ($id:expr, $name: expr) => {{
+        P(Value {
+            hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
+            ty: DOUBLE_TYPE.clone(),
+            v: Value_::SSAVar($id),
+        })
+    }};
 }

 // declare all general purpose registers for x86_64
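To make the macro shapes concrete, a hypothetical invocation of the first GPR_ALIAS! arm (the ID and alias name are illustrative, not taken from the real register table):

    // Declares RAX plus its 32/16/8-bit views and a grouping alias array:
    GPR_ALIAS!(RAX_ALIAS: (0, RAX) -> EAX, AX, AL, AH);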
@@ -188,7 +186,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
     if id < FPR_ID_START {
         let vec = match GPR_ALIAS_TABLE.get(&id) {
             Some(vec) => vec,
-            None => panic!("didnt find {} as GPR", id)
+            None => panic!("didnt find {} as GPR", id),
         };
@@ -197,7 +195,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
             16 => vec[2].clone(),
             8 => vec[3].clone(),
             1 => vec[3].clone(),
-            _ => panic!("unexpected length {} for {}", length, vec[0])
+            _ => panic!("unexpected length {} for {}", length, vec[0]),
         }
     } else {
         for r in ALL_FPRS.iter() {
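Behaviour sketch for get_alias_for_length, assuming RAX and its views were declared through GPR_ALIAS! as above:

    // The 32-bit alias of RAX's ID is EAX; per the match above, 1-bit values
    // share the 8-bit view.
    assert_eq!(get_alias_for_length(RAX.id(), 32).id(), EAX.id());
    assert_eq!(get_alias_for_length(RAX.id(), 1).id(), AL.id());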
@@ -218,7 +216,7 @@ pub fn is_aliased(id1: MuID, id2: MuID) -> bool {
     macro_rules! is_match {
         ($a1: expr, $a2: expr; $b: expr) => {
             $a1 == $b.id() || $a2 == $b.id()
-        }
+        };
     };

     if is_match!(id1, id2; AH) {
@@ -244,7 +242,7 @@ pub fn get_color_for_precolored(id: MuID) -> MuID {
     if id < FPR_ID_START {
         match GPR_ALIAS_LOOKUP.get(&id) {
             Some(val) => val.id(),
-            None => panic!("cannot find GPR {}", id)
+            None => panic!("cannot find GPR {}", id),
         }
     } else {
         // we do not have alias for FPRs
@@ -261,7 +259,7 @@ pub fn check_op_len(op: &P<Value>) -> usize {
         Some(16) => 16,
         Some(8) => 8,
         Some(1) => 8,
-        _ => panic!("unsupported register length for x64: {}", op.ty)
+        _ => panic!("unsupported register length for x64: {}", op.ty),
     }
 }
@@ -331,7 +329,7 @@ lazy_static! {
 pub const FPR_ID_START: usize = 100;

-lazy_static!{
+lazy_static! {
     // floating point registers, we use SSE registers