Commit 81a87444 authored by Isaac Oscar Gariano

Implemented 128-bit arithmetic.

parent fe287e80
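For context, 128-bit arithmetic on a 64-bit target is lowered to pairs of 64-bit operations linked by the carry flag (aarch64's ADDS/ADC). A minimal standalone sketch of the idea, illustrative only and not code from this commit:

// Illustrative: a 128-bit add built from two 64-bit halves, mirroring
// what ADDS (add, setting carry) followed by ADC (add with carry) do.
fn add128(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
    let (lo, carry) = a.0.overflowing_add(b.0);                 // ADDS
    let hi = a.1.wrapping_add(b.1).wrapping_add(carry as u64);  // ADC
    (lo, hi)
}

fn main() {
    // the carry out of the low word propagates into the high word
    assert_eq!(add128((u64::MAX, 0), (1, 0)), (0, 1));
}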
@@ -21,6 +21,7 @@ ast = {path = "src/ast"}
utils = {path = "src/utils"}
gc = {path = "src/gc"}
field-offset = "0.1.1"
libloading = "0.3"
lazy_static = "0.1.15"
@@ -34,4 +35,5 @@ time = "0.1.34"
maplit = "0.1.4"
docopt = "0.6"
petgraph = "0.4.1"
extprim = "*"
\ No newline at end of file
extprim = "*"
num-traits = "*"
@@ -735,6 +735,14 @@ impl Value {
})
}
pub fn is_int_ex_const(&self) -> bool {
match self.v {
Value_::Constant(Constant::IntEx(_)) => true,
_ => false
}
}
pub fn is_int_const(&self) -> bool {
match self.v {
Value_::Constant(Constant::Int(_)) => true,
@@ -742,7 +750,13 @@ impl Value {
_ => false
}
}
pub fn is_fp_const(&self) -> bool {
match self.v {
Value_::Constant(Constant::Float(_)) => true,
Value_::Constant(Constant::Double(_)) => true,
_ => false
}
}
pub fn extract_int_const(&self) -> u64 {
match self.v {
Value_::Constant(Constant::Int(val)) => val,
@@ -751,6 +765,13 @@ impl Value {
}
}
pub fn extract_int_ex_const(&self) -> Vec<u64> {
match self.v {
Value_::Constant(Constant::IntEx(ref val)) => val.clone(),
_ => panic!("expect int ex const")
}
}
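// Illustrative note (the word order is an assumption, it is not stated in
// this diff): IntEx holds a multi-word integer as a Vec<u64>, so a 128-bit
// value v would be stored as vec![v as u64, (v >> 64) as u64].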
pub fn extract_ssa_id(&self) -> Option<MuID> {
match self.v {
Value_::SSAVar(id) => Some(id),
......
@@ -210,7 +210,7 @@ impl CmpOp {
FUGT => FULT,
FULT => FUGT,
_ => self, // all other comparisons are reflexive
_ => self, // all other comparisons are symmetric
}
}
pub fn invert(self) -> CmpOp {
@@ -256,6 +256,18 @@ impl CmpOp {
FTRUE => FFALSE,
}
}
// gets the unsigned version of the comparison
pub fn get_unsigned(self) -> CmpOp {
use op::CmpOp::*;
match self {
SGE => UGE,
SLT => ULT,
SGT => UGT,
SLE => ULE,
_ => self,
}
}
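// e.g. SLT.get_unsigned() == ULT; comparisons with no signedness
// distinction (EQ, NE and the FP comparisons) fall through unchanged.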
pub fn is_signed(self) -> bool {
use op::CmpOp::*;
match self {
@@ -263,6 +275,14 @@ impl CmpOp {
_ => false
}
}
pub fn is_symmetric(self) -> bool {
use op::CmpOp::*;
match self {
EQ | NE | FORD | FUNO | FUNE | FUEQ | FONE | FOEQ => true,
_ => false
}
}
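// A comparison is symmetric when swapping its operands leaves the result
// unchanged (a == b iff b == a). SLT, for example, is absent here because
// a < b iff b > a: swapping its operands yields SGT, not SLT.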
}
#[derive(Copy, Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
......
#![allow(dead_code)]
use compiler::backend::AOT_EMIT_CONTEXT_FILE;
use compiler::backend::RegGroup;
use utils::ByteSize;
@@ -16,7 +18,6 @@ use utils::LinkedHashMap;
use ast::ptr::P;
use ast::ir::*;
use ast::types::*;
use std::str;
use std::usize;
@@ -272,17 +273,6 @@ impl ASMCode {
let ref blocks = self.blocks;
let ref mut asm = self.code;
let block_start = {
let mut ret = vec![];
for block in blocks.values() {
if TRACE_CFA {
trace!("Block starts at {}", block.start_inst);
}
ret.push(block.start_inst);
}
ret
};
for i in 0..n_insts {
if TRACE_CFA {
trace!("---inst {}---", i);
@@ -994,13 +984,11 @@ impl ASMCodeGen {
}
fn add_asm_label(&mut self, code: String) {
let l = self.line();
trace!("emit: {}", code);
self.cur_mut().code.push(ASMInst::symbolic(code));
}
fn add_asm_block_label(&mut self, code: String, block_name: MuName) {
let l = self.line();
trace!("emit: [{}]{}", block_name, code);
self.cur_mut().code.push(ASMInst::symbolic(code));
}
@@ -1819,8 +1807,8 @@ impl ASMCodeGen {
let inst = inst.to_string();
trace!("emit: \t{} {} -> {},{}", inst, src, dest1, dest2);
let (reg1, id1, loc1) = self.prepare_reg(dest1, 3 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest2, 3 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest1, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest2, inst.len() + 1 + reg1.len() + 1);
let (mem, uses) = self.prepare_mem(src, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let asm = format!("{} {},{},{}", inst, reg1, reg2, mem);
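// The offset passed to prepare_reg/prepare_mem is the operand's byte
// position in the final "{inst} {reg1},{reg2},{mem}" string: inst.len() + 1
// for the first operand, then each preceding operand's length plus its
// comma. The hard-coded 3 + 1 above was only correct for 3-letter mnemonics.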
@@ -1931,8 +1919,8 @@ impl ASMCodeGen {
let inst = inst.to_string();
trace!("emit: \t{} {},{} -> {}", inst, src1, src2, dest);
let (reg1, id1, loc1) = self.prepare_reg(src2, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(src1, inst.len() + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(src1, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(src2, inst.len() + 1 + reg1.len() + 1);
let (mem, mut uses) = self.prepare_mem(dest, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
if is_zero_register_id(id1) {
@@ -1968,8 +1956,8 @@ impl ASMCodeGen {
trace!("emit: \t{} {},{} -> {},{}", inst, src1, src2, dest, status);
let (reg1, id1, loc1) = self.prepare_reg(status, inst.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(src2, inst.len() + 1 + reg1.len() + 1);
let (reg3, id3, loc3) = self.prepare_reg(src1, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let (reg2, id2, loc2) = self.prepare_reg(src1, inst.len() + 1 + reg1.len() + 1);
let (reg3, id3, loc3) = self.prepare_reg(src2, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1);
let (mem, mut uses) = self.prepare_mem(dest, inst.len() + 1 + reg1.len() + 1 + reg2.len() + 1 + reg3.len() + 1);
if is_zero_register_id(id2) {
@@ -2172,23 +2160,28 @@ impl CodeGenerator for ASMCodeGen {
}
}
fn add_cfi_sections(&mut self, arg: &str) { self.add_asm_symbolic(format!(".cfi_sections {}", arg)); }
fn add_cfi_startproc(&mut self) {
self.add_asm_symbolic("\t.cfi_startproc".to_string());
self.add_asm_symbolic(".cfi_startproc".to_string());
}
fn add_cfi_endproc(&mut self) {
self.add_asm_symbolic("\t.cfi_endproc".to_string());
self.add_asm_symbolic(".cfi_endproc".to_string());
}
fn add_cfi_def_cfa_register(&mut self, reg: Reg) {
let reg = self.asm_reg_op(reg);
self.add_asm_symbolic(format!("\t.cfi_def_cfa_register {}", reg));
self.add_asm_symbolic(format!(".cfi_def_cfa_register {}", reg));
}
fn add_cfi_def_cfa_offset(&mut self, offset: i32) {
self.add_asm_symbolic(format!("\t.cfi_def_cfa_offset {}", offset));
self.add_asm_symbolic(format!(".cfi_def_cfa_offset {}", offset));
}
fn add_cfi_def_cfa(&mut self, reg: Reg, offset: i32) {
let reg = self.asm_reg_op(reg);
self.add_asm_symbolic(format!(".cfi_def_cfa {}, {}", reg, offset));
}
fn add_cfi_offset(&mut self, reg: Reg, offset: i32) {
let reg = self.asm_reg_op(reg);
self.add_asm_symbolic(format!("\t.cfi_offset {}, {}", reg, offset));
self.add_asm_symbolic(format!(".cfi_offset {}, {}", reg, offset));
}
fn emit_frame_grow(&mut self) {
@@ -2745,10 +2738,10 @@ pub fn emit_code(fv: &mut MuFunctionVersion, vm: &VM) {
Ok(file) => file
};
file.write("\t.arch armv8-a\n".as_bytes()).unwrap();
file.write(".arch armv8-a\n".as_bytes()).unwrap();
// constants in text section
file.write("\t.text\n".as_bytes()).unwrap();
file.write(".text\n".as_bytes()).unwrap();
write_const_min_align(&mut file);
@@ -2784,7 +2777,7 @@ fn write_const_min_align(f: &mut File) {
#[cfg(target_os = "linux")]
fn write_align(f: &mut File, align: ByteSize) {
use std::io::Write;
f.write_fmt(format_args!("\t.balign {}\n", check_min_align(align))).unwrap();
f.write_fmt(format_args!(".balign {}\n", check_min_align(align))).unwrap();
}
fn write_const(f: &mut File, constant: P<Value>, loc: P<Value>) {
@@ -2815,30 +2808,30 @@ fn write_const_value(f: &mut File, constant: P<Value>) {
&Constant::Int(val) => {
let len = ty.get_int_length().unwrap();
match len {
8 => f.write_fmt(format_args!("\t.byte {}\n", val as u8 )).unwrap(),
16 => f.write_fmt(format_args!("\t.word {}\n", val as u16)).unwrap(),
32 => f.write_fmt(format_args!("\t.long {}\n", val as u32)).unwrap(),
64 => f.write_fmt(format_args!("\t.quad {}\n", val as u64)).unwrap(),
8 => f.write_fmt(format_args!(".byte {}\n", val as u8 )).unwrap(),
16 => f.write_fmt(format_args!(".word {}\n", val as u16)).unwrap(),
32 => f.write_fmt(format_args!(".long {}\n", val as u32)).unwrap(),
64 => f.write_fmt(format_args!(".quad {}\n", val as u64)).unwrap(),
_ => panic!("unimplemented int length: {}", len)
}
}
&Constant::Float(val) => {
let bytes: [u8; 4] = unsafe {mem::transmute(val)};
f.write("\t.long ".as_bytes()).unwrap();
f.write(".long ".as_bytes()).unwrap();
f.write(&bytes).unwrap();
f.write("\n".as_bytes()).unwrap();
}
&Constant::Double(val) => {
let bytes: [u8; 8] = unsafe {mem::transmute(val)};
f.write("\t.quad ".as_bytes()).unwrap();
f.write(".quad ".as_bytes()).unwrap();
f.write(&bytes).unwrap();
f.write("\n".as_bytes()).unwrap();
}
&Constant::NullRef => {
f.write_fmt(format_args!("\t.quad 0\n")).unwrap()
f.write_fmt(format_args!(".quad 0\n")).unwrap()
}
&Constant::ExternSym(ref name) => {
f.write_fmt(format_args!("\t.quad {}\n", name)).unwrap()
f.write_fmt(format_args!(".quad {}\n", name)).unwrap()
}
&Constant::List(ref vals) => {
for val in vals {
@@ -2871,10 +2864,10 @@ pub fn emit_context_with_reloc(vm: &VM,
};
// bss
file.write_fmt(format_args!("\t.bss\n")).unwrap();
file.write_fmt(format_args!(".bss\n")).unwrap();
// data
file.write("\t.data\n".as_bytes()).unwrap();
file.write(".data\n".as_bytes()).unwrap();
{
use runtime::mm;
@@ -2943,20 +2936,20 @@ pub fn emit_context_with_reloc(vm: &VM,
let load_ref = unsafe {cur_addr.load::<Address>()};
if load_ref.is_zero() {
// write 0
file.write("\t.quad 0\n".as_bytes()).unwrap();
file.write(".quad 0\n".as_bytes()).unwrap();
} else {
let label = match relocatable_refs.get(&load_ref) {
Some(label) => label,
None => panic!("cannot find label for address {}, it is not dumped by GC (why GC didn't trace to it)", load_ref)
};
file.write_fmt(format_args!("\t.quad {}\n", label.clone())).unwrap();
file.write_fmt(format_args!(".quad {}\n", label.clone())).unwrap();
}
} else if fields.contains_key(&cur_addr) {
// write uptr (or other relocatable value) with label
let label = fields.get(&cur_addr).unwrap();
file.write_fmt(format_args!("\t.quad {}\n", label.clone())).unwrap();
file.write_fmt(format_args!(".quad {}\n", label.clone())).unwrap();
} else {
// write plain word (as bytes)
let next_word_addr = cur_addr.plus(POINTER_SIZE);
@@ -3002,7 +2995,7 @@ fn write_data_bytes(f: &mut File, from: Address, to: Address) {
use std::io::Write;
if from < to {
f.write("\t.byte ".as_bytes()).unwrap();
f.write(".byte ".as_bytes()).unwrap();
let mut cursor = from;
while cursor < to {
@@ -3025,7 +3018,7 @@ fn directive_globl(name: String) -> String {
}
fn directive_comm(name: String, size: ByteSize, align: ByteSize) -> String {
format!("\t.comm {},{},{}", name, size, align)
format!(".comm {},{},{}", name, size, align)
}
use compiler::machine_code::CompiledFunction;
......
@@ -23,8 +23,10 @@ pub trait CodeGenerator {
fn end_block(&mut self, block_name: MuName);
// add CFI info
fn add_cfi_sections(&mut self, arg: &str);
fn add_cfi_startproc(&mut self);
fn add_cfi_endproc(&mut self);
fn add_cfi_def_cfa(&mut self, reg: Reg, offset: i32);
fn add_cfi_def_cfa_register(&mut self, reg: Reg);
fn add_cfi_def_cfa_offset(&mut self, offset: i32);
fn add_cfi_offset(&mut self, reg: Reg, offset: i32);
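// For reference, the DWARF CFI semantics behind these hooks: .cfi_def_cfa
// reg, off defines the canonical frame address (CFA) as reg + off;
// .cfi_offset reg, off records that reg is saved at CFA + off; the
// _register and _offset variants change one component of the CFA rule.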
......
#![allow(unused_variables)]
//#![allow(unused_variables)]
#![warn(unused_imports)]
#![warn(unreachable_code)]
#![warn(dead_code)]
#![allow(dead_code)]
use ast::ir::*;
use ast::ptr::*;
use ast::inst::*;
use ast::op;
use ast::op::*;
use ast::types;
use ast::types::*;
use vm::VM;
use runtime::mm;
@@ -40,6 +40,7 @@ pub struct InstructionSelection {
current_callsite_id: usize,
current_frame: Option<Frame>,
current_block: Option<MuName>,
current_block_in_ir: Option<MuName>,
current_func_start: Option<ValueLocation>,
// key: block id, val: callsite that names the block as exception block
current_exn_callsites: HashMap<MuID, Vec<ValueLocation>>,
@@ -62,6 +63,12 @@ impl <'a> InstructionSelection {
current_callsite_id: 0,
current_frame: None,
current_block: None,
current_block_in_ir: None, // it is possible the block is newly created in instruction selection
// but sometimes we want to know its control flow
// so we need to track which IR block it came from
// FIXME: ideally we should not create new blocks in instruction selection
// see Issue #6
current_func_start: None,
// key: block id, val: callsite that names the block as exception block
current_exn_callsites: HashMap::new(),
@@ -81,6 +88,7 @@ impl <'a> InstructionSelection {
// in this pass, we assume that
// * we do not need to backup/restore caller-saved registers
// if any of these assumption breaks, we will need to re-emit the code
//#[allow(unused_variables)]
fn instruction_select(&mut self, node: &'a TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
trace!("instsel on node#{} {}", node.id(), node);
@@ -91,14 +99,17 @@ impl <'a> InstructionSelection {
Instruction_::Branch2 { cond, ref true_dest, ref false_dest, true_prob } => {
trace!("instsel on BRANCH2");
let (fallthrough_dest, branch_dest, branch_if_true) = {
if true_prob > 0.5f32 {
let cur_block = f_content.get_block_by_name(self.current_block_in_ir.as_ref().unwrap().clone());
let next_block_in_trace = cur_block.control_flow.get_hottest_succ().unwrap();
if next_block_in_trace == true_dest.target {
(true_dest, false_dest, false)
} else {
(false_dest, true_dest, true)
}
};
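// Fallthrough goes to the true destination when it is the hottest
// successor in the trace; otherwise the false destination falls through
// and the true destination gets the explicit branch.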
let ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
self.process_dest(&ops, fallthrough_dest, f_content, f_context, vm);
self.process_dest(&ops, branch_dest, f_content, f_context, vm);
@@ -109,22 +120,41 @@ impl <'a> InstructionSelection {
if self.match_cmp_res(cond) {
trace!("emit cmp_res-branch2");
let mut cmpop = self.emit_cmp_res(cond, f_content, f_context, vm);
if !branch_if_true {
cmpop = cmpop.invert();
}
let cond = get_condition_codes(cmpop);
// Emit a CBNZ for 128-bit comparisons that are not symmetric
let use_cbnz = self.is_int128_asym_cmp(cond);
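// (These comparisons are materialised into a one-bit temporary rather
// than the condition flags, so the branch below tests that register
// with CBZ/CBNZ instead of using b.cond.)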
let tmp_cond =
if use_cbnz { Some(make_temporary(f_context, UINT1_TYPE.clone(), vm)) }
else { None };
let cond_box =
if use_cbnz { Some(Box::new(tmp_cond.as_ref().unwrap().clone())) }
else { None };
let mut cmpop = self.emit_cmp_res(cond, cond_box, f_content, f_context, vm);
if use_cbnz {
if !branch_if_true {
self.backend.emit_cbz(tmp_cond.as_ref().unwrap(), branch_target);
} else {
self.backend.emit_cbnz(tmp_cond.as_ref().unwrap(), branch_target);
}
if cmpop == op::CmpOp::FFALSE {
; // Do nothing
} else if cmpop == op::CmpOp::FTRUE {
self.backend.emit_b(branch_target);
} else {
self.backend.emit_b_cond(cond[0], branch_target.clone());
if !branch_if_true {
cmpop = cmpop.invert();
}
if cond.len() == 2 {
self.backend.emit_b_cond(cond[1], branch_target);
let cond = get_condition_codes(cmpop);
if cmpop == op::CmpOp::FFALSE {
; // Do nothing
} else if cmpop == op::CmpOp::FTRUE {
self.backend.emit_b(branch_target);
} else {
self.backend.emit_b_cond(cond[0], branch_target.clone());
if cond.len() == 2 {
self.backend.emit_b_cond(cond[1], branch_target);
}
}
}
} else {
@@ -136,13 +166,22 @@ impl <'a> InstructionSelection {
self.backend.emit_tbz(&cond_reg, 0, branch_target.clone());
}
};
// it is possible that the fallthrough block is scheduled somewhere else
// we need to explicitly jump to it
self.finish_block(&vec![]);
let fallthrough_temp_block = format!("{}_{}_branch_fallthrough", self.current_fv_id, node.id());
self.start_block(fallthrough_temp_block, &vec![]);
let fallthrough_target = f_content.get_block(fallthrough_dest.target).name().unwrap();
self.backend.emit_b(fallthrough_target);
},
Instruction_::Select { cond, true_val, false_val } => {
use ast::op::CmpOp::*;
trace!("instsel on SELECT");
let ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
let ref cond = ops[cond];
let ref true_val = ops[true_val];
@@ -153,7 +192,7 @@ impl <'a> InstructionSelection {
// moving integers/pointers
// generate compare
let cmpop = if self.match_cmp_res(cond) {
self.emit_cmp_res(cond, f_content, f_context, vm)
self.emit_cmp_res(cond, None, f_content, f_context, vm)
} else if self.match_ireg(cond) {
let tmp_cond = self.emit_ireg(cond, f_content, f_context, vm);
self.backend.emit_cmp_imm(&tmp_cond, 0, false);
@@ -192,7 +231,7 @@ impl <'a> InstructionSelection {
}
}
} else {
// moving vectors, floatingpoints
// moving vectors
unimplemented!()
}
},
@@ -201,7 +240,7 @@ impl <'a> InstructionSelection {
use ast::op::CmpOp::*;
trace!("instsel on CMPOP");
let ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
let ref op1 = ops[op1];
let ref op2 = ops[op2];
@@ -210,27 +249,30 @@ impl <'a> InstructionSelection {
debug_assert!(tmp_res.ty.get_int_length().is_some());
debug_assert!(tmp_res.ty.get_int_length().unwrap() == 1);
let cmpop = self.emit_cmp_res_op(op, &op1, &op2, f_content, f_context, vm);
let cmpop = self.emit_cmp_res_op(op, Some(Box::new(tmp_res.clone())), &op1, &op2, f_content, f_context, vm);
let cond = get_condition_codes(cmpop);
if cmpop == FFALSE {
emit_mov_u64(self.backend.as_mut(), &tmp_res, 0);
} else if cmpop == FTRUE {
emit_mov_u64(self.backend.as_mut(), &tmp_res, 1);
} else {
self.backend.emit_cset(&tmp_res, cond[0]);
// emit_cmp_res_op will set tmp_res for 128-bit asymmetric comparisons
if !self.is_int128_asym_cmp(node) {
if cmpop == FFALSE {
emit_mov_u64(self.backend.as_mut(), &tmp_res, 0);
} else if cmpop == FTRUE {
emit_mov_u64(self.backend.as_mut(), &tmp_res, 1);
} else {
self.backend.emit_cset(&tmp_res, cond[0]);
// Note: some comparisons can't be computed from a single aarch64 condition
// flag; instead they are computed as one condition OR NOT another condition.
if cond.len() == 2 {
self.backend.emit_csinc(&tmp_res, &tmp_res, &WZR, invert_condition_code(cond[1]));
// Note: some comparisons can't be computed from a single aarch64 condition
// flag; instead they are computed as one condition OR NOT another condition.
if cond.len() == 2 {
self.backend.emit_csinc(&tmp_res, &tmp_res, &WZR, invert_condition_code(cond[1]));
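// CSINC computes Rd = cc ? Rn : Rm + 1, so with Rn = tmp_res,
// Rm = WZR (zero) and cc = the inverse of cond[1] this yields
// tmp_res = cond[1] ? 1 : tmp_res, i.e. tmp_res OR cond[1].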
}
}
}
}
Instruction_::Branch1(ref dest) => {
trace!("instsel on BRANCH1");
let ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
self.process_dest(&ops, dest, f_content, f_context, vm);
@@ -243,13 +285,13 @@ impl <'a> InstructionSelection {
Instruction_::Switch { cond, ref default, ref branches } => {
trace!("instsel on SWITCH");
let ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
let ref cond = ops[cond];
if self.match_ireg(cond) {
let tmp_cond = self.emit_ireg(cond, f_content, f_context, vm);
self.emit_zext(&tmp_cond);
emit_zext(self.backend.as_mut(), &tmp_cond);
// emit each branch
for &(case_op_index, ref case_dest) in branches {
@@ -275,7 +317,7 @@ impl <'a> InstructionSelection {
self.backend.emit_cmp_imm(&tmp_cond, imm_op2 as u16, imm_shift);
} else {
let tmp_case_op = self.emit_ireg(case_op, f_content, f_context, vm);
self.emit_zext(&tmp_case_op);
emit_zext(self.backend.as_mut(), &tmp_case_op);
self.backend.emit_cmp(&tmp_cond, &tmp_case_op);
}
@@ -341,7 +383,7 @@ impl <'a> InstructionSelection {
trace!("instsel on RETURN");
// prepare return regs
let ref ops = inst.ops.read().unwrap();
let ref ops = inst.ops;
// TODO: Are vals in the same order as the return types in the function's signature?
let ret_tys = vals.iter().map(|i| node_type(&ops[*i])).collect();
@@ -352,15 +394,25 @@ impl <'a> InstructionSelection {
if self.compute_return_allocation(&ret_type, &vm) > 0 {
// Load the saved value of XR into temp_xr
self.emit_load_base_offset(&temp_xr, &FP, -8, f_context, vm);
emit_load_base_offset(self.backend.as_mut(), &temp_xr, &FP, -8, f_context, vm);
}
let n = ret_tys.len(); // number of return values
if n == 0 {