
Commit 7e244053 authored by qinsoon

branch2 test works fine. milsum runs.

parent 5c3c66af
**/target/*
**/__pycache__/*
**/.cache/*
emit/*
**/emit/*
**/temp/*
Cargo.lock
*.log
......
@@ -195,8 +195,10 @@ pub struct FunctionContent {
impl fmt::Debug for FunctionContent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let entry = self.get_entry_block();
        write!(f, "Entry block: ").unwrap();
        write!(f, "{:?}\n", entry).unwrap();
        write!(f, "Body:").unwrap();
        for blk_id in self.blocks.keys() {
            let block = self.get_block(*blk_id);
            write!(f, "{:?}\n", block).unwrap();
......
@@ -173,6 +173,26 @@ pub enum CmpOp {
    FUNO
}
+impl CmpOp {
+    pub fn swap_operands(self) -> CmpOp {
+        use op::CmpOp::*;
+        match self {
+            EQ => EQ,
+            NE => NE,
+            SGE => SLT,
+            SGT => SLE,
+            SLE => SGT,
+            SLT => SGE,
+            UGE => ULT,
+            UGT => ULE,
+            ULE => UGT,
+            ULT => UGE,
+            _ => unimplemented!()
+        }
+    }
+}
#[derive(Copy, Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub enum ConvOp {
    TRUNC,
......
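Note: the new `swap_operands` is the rewrite of a comparison when its two operands are exchanged; `EQ` and `NE` are symmetric and map to themselves. A pure operand swap preserves strictness (`a < b` holds exactly when `b > a`), so one would expect `SLT => SGT` and `SGE => SLE`; the table above flips strictness (`SLT => SGE`), which is the mapping for negating the predicate instead. A minimal, standalone sketch of the swap identity, with illustrative stand-in types rather than the project's `CmpOp`:

```rust
// Illustrative stand-in for CmpOp, with the identity a pure operand swap
// must satisfy: p.eval(a, b) == p.swap_operands().eval(b, a).
#[derive(Copy, Clone, Debug, PartialEq)]
enum Pred { Eq, Ne, Sge, Sgt, Sle, Slt }

impl Pred {
    fn eval(self, a: i64, b: i64) -> bool {
        match self {
            Pred::Eq  => a == b,
            Pred::Ne  => a != b,
            Pred::Sge => a >= b,
            Pred::Sgt => a > b,
            Pred::Sle => a <= b,
            Pred::Slt => a < b,
        }
    }
    // Direction mirrors, strictness is preserved (SGE <-> SLE, SGT <-> SLT).
    fn swap_operands(self) -> Pred {
        match self {
            Pred::Eq  => Pred::Eq,
            Pred::Ne  => Pred::Ne,
            Pred::Sge => Pred::Sle,
            Pred::Sgt => Pred::Slt,
            Pred::Sle => Pred::Sge,
            Pred::Slt => Pred::Sgt,
        }
    }
}

fn main() {
    let preds = [Pred::Eq, Pred::Ne, Pred::Sge, Pred::Sgt, Pred::Sle, Pred::Slt];
    for a in [-2i64, 0, 3] {
        for b in [-2i64, 0, 3] {
            for p in preds {
                assert_eq!(p.eval(a, b), p.swap_operands().eval(b, a));
            }
        }
    }
}
```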
@@ -1099,21 +1099,21 @@ macro_rules! binop_no_def_r_r {
    }
}
-macro_rules! binop_no_def_r_imm {
+macro_rules! binop_no_def_imm_r {
    ($func_name: ident, $inst: expr, $op_len: expr, $imm_ty: ty) => {
-        fn $func_name (&mut self, op1: &P<Value>, op2: $imm_ty) {
+        fn $func_name (&mut self, op1: $imm_ty, op2: &P<Value>) {
            let inst = $inst.to_string() + &op_postfix($op_len);
            trace!("emit: {} {} {}", inst, op1, op2);
-            let (reg1, id1, loc1) = self.prepare_reg(op1, inst.len() + 1 + 1 + op2.to_string().len() + 1);
+            let (reg2, id2, loc2) = self.prepare_reg(op2, inst.len() + 1 + 1 + op1.to_string().len() + 1);
-            let asm = format!("{} ${},{}", inst, op2, reg1);
+            let asm = format!("{} ${},{}", inst, op1, reg2);
            self.add_asm_inst(
                asm,
                hashmap!{},
                hashmap!{
-                    id1 => vec![loc1]
+                    id2 => vec![loc2]
                },
                false
            )
@@ -1122,16 +1122,16 @@ macro_rules! binop_no_def_r_imm {
    }
}
-macro_rules! binop_no_def_r_mem {
+macro_rules! binop_no_def_mem_r {
    ($func_name: ident, $inst:expr, $op_len: expr) => {
        fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
            let inst = $inst.to_string() + &op_postfix($op_len);
            trace!("emit: {} {} {}", inst, op1, op2);
-            let (reg, id1, loc1) = self.prepare_reg(op1, inst.len() + 1);
-            let (mem, mut uses) = self.prepare_mem(op2, inst.len() + 1 + reg.len() + 1);
+            let (mem, mut uses) = self.prepare_mem(op1, inst.len() + 1);
+            let (reg, id1, loc1) = self.prepare_reg(op2, inst.len() + 1 + mem.len() + 1);
-            let asm = format!("{} {},{}", inst, reg, mem);
+            let asm = format!("{} {},{}", inst, mem, reg);
            // merge use vec
            if uses.contains_key(&id1) {
@@ -1539,15 +1539,15 @@ impl CodeGenerator for ASMCodeGen {
    binop_no_def_r_r!(emit_cmp_r16_r16, "cmp", 16);
    binop_no_def_r_r!(emit_cmp_r8_r8 , "cmp", 8 );
-    binop_no_def_r_imm!(emit_cmp_r64_imm32, "cmp", 64, i32);
-    binop_no_def_r_imm!(emit_cmp_r32_imm32, "cmp", 32, i32);
-    binop_no_def_r_imm!(emit_cmp_r16_imm16, "cmp", 16, i16);
-    binop_no_def_r_imm!(emit_cmp_r8_imm8 , "cmp", 8 , i8 );
+    binop_no_def_imm_r!(emit_cmp_imm32_r64, "cmp", 64, i32);
+    binop_no_def_imm_r!(emit_cmp_imm32_r32, "cmp", 32, i32);
+    binop_no_def_imm_r!(emit_cmp_imm16_r16, "cmp", 16, i16);
+    binop_no_def_imm_r!(emit_cmp_imm8_r8 , "cmp", 8 , i8 );
-    binop_no_def_r_mem!(emit_cmp_r64_mem64, "cmp", 64);
-    binop_no_def_r_mem!(emit_cmp_r32_mem32, "cmp", 32);
-    binop_no_def_r_mem!(emit_cmp_r16_mem16, "cmp", 16);
-    binop_no_def_r_mem!(emit_cmp_r8_mem8 , "cmp", 8 );
+    binop_no_def_mem_r!(emit_cmp_mem64_r64, "cmp", 64);
+    binop_no_def_mem_r!(emit_cmp_mem32_r32, "cmp", 32);
+    binop_no_def_mem_r!(emit_cmp_mem16_r16, "cmp", 16);
+    binop_no_def_mem_r!(emit_cmp_mem8_r8 , "cmp", 8 );
    // mov
......
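Note: the renames (`binop_no_def_r_imm` → `binop_no_def_imm_r`, `binop_no_def_r_mem` → `binop_no_def_mem_r`) make the Rust-level argument order match AT&T source order, where the source operand comes first and `cmp SRC, DST` sets flags from `DST - SRC`. A simplified sketch of what the immediate form expands to; the helper below is hypothetical, and `q` is assumed as the 64-bit operand-size suffix produced by `op_postfix(64)`:

```rust
// Hypothetical, simplified expansion of
// binop_no_def_imm_r!(emit_cmp_imm32_r64, "cmp", 64, i32):
// the immediate (AT&T source) comes first, the register (destination) second.
fn emit_cmp_imm32_r64(op1: i32, op2: &str) -> String {
    // AT&T: `cmpq $imm, %reg` computes reg - imm and sets EFLAGS.
    format!("cmpq ${},{}", op1, op2)
}

fn main() {
    assert_eq!(emit_cmp_imm32_r64(1, "%rax"), "cmpq $1,%rax");
}
```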
@@ -27,20 +27,20 @@ pub trait CodeGenerator {
    // comparison
    fn emit_cmp_r64_r64 (&mut self, op1: Reg, op2: Reg);
-    fn emit_cmp_r64_imm32(&mut self, op1: Reg, op2: i32);
-    fn emit_cmp_r64_mem64(&mut self, op1: Reg, op2: Mem);
+    fn emit_cmp_imm32_r64(&mut self, op1: i32, op2: Reg);
+    fn emit_cmp_mem64_r64(&mut self, op1: Mem, op2: Reg);
    fn emit_cmp_r32_r32 (&mut self, op1: Reg, op2: Reg);
-    fn emit_cmp_r32_imm32(&mut self, op1: Reg, op2: i32);
-    fn emit_cmp_r32_mem32(&mut self, op1: Reg, op2: Mem);
+    fn emit_cmp_imm32_r32(&mut self, op1: i32, op2: Reg);
+    fn emit_cmp_mem32_r32(&mut self, op1: Mem, op2: Reg);
    fn emit_cmp_r16_r16 (&mut self, op1: Reg, op2: Reg);
-    fn emit_cmp_r16_imm16(&mut self, op1: Reg, op2: i16);
-    fn emit_cmp_r16_mem16(&mut self, op1: Reg, op2: Mem);
+    fn emit_cmp_imm16_r16(&mut self, op1: i16, op2: Reg);
+    fn emit_cmp_mem16_r16(&mut self, op1: Mem, op2: Reg);
    fn emit_cmp_r8_r8 (&mut self, op1: Reg, op2: Reg);
-    fn emit_cmp_r8_imm8 (&mut self, op1: Reg, op2: i8);
-    fn emit_cmp_r8_mem8 (&mut self, op1: Reg, op2: Mem);
+    fn emit_cmp_imm8_r8(&mut self, op1: i8, op2: Reg);
+    fn emit_cmp_mem8_r8(&mut self, op1: Mem, op2: Reg);
    // gpr move
......
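Note: for implementors of the trait only the method names and argument order change; the emitted instruction is the same. A minimal sketch of the convention, abbreviating the project's `Reg`/`Mem` aliases to `&str` for illustration:

```rust
// emit_cmp_<src>_<dst> takes its operands in AT&T order, source first.
trait CmpEmitter {
    fn emit_cmp_imm32_r64(&mut self, src: i32, dst: &str);
    fn emit_cmp_mem64_r64(&mut self, src: &str, dst: &str);
}

struct Printer;
impl CmpEmitter for Printer {
    fn emit_cmp_imm32_r64(&mut self, src: i32, dst: &str) {
        println!("cmpq ${},{}", src, dst);
    }
    fn emit_cmp_mem64_r64(&mut self, src: &str, dst: &str) {
        println!("cmpq {},{}", src, dst);
    }
}

fn main() {
    let mut p = Printer;
    p.emit_cmp_imm32_r64(1, "%rax"); // prints: cmpq $1,%rax
}
```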
@@ -73,8 +73,8 @@ impl <'a> InstructionSelection {
    TreeNode_::Instruction(ref inst) => {
        match inst.v {
            Instruction_::Branch2{cond, ref true_dest, ref false_dest, true_prob} => {
-                // move this to trace generation
-                // assert here
+                // 'branch_if_true' == true, we emit cjmp the same as CmpOp (je for EQ, jne for NE)
+                // 'branch_if_true' == false, we emit opposite cjmp as CmpOp (jne for EQ, je for NE)
                let (fallthrough_dest, branch_dest, branch_if_true) = {
                    if true_prob > 0.5f32 {
                        (true_dest, false_dest, false)
@@ -95,16 +95,76 @@ impl <'a> InstructionSelection {
    if self.match_cmp_res(cond) {
        trace!("emit cmp_eq-branch2");
        match self.emit_cmp_res(cond, f_content, f_context, vm) {
-            op::CmpOp::EQ => self.backend.emit_je(branch_target),
-            op::CmpOp::NE => self.backend.emit_jne(branch_target),
-            op::CmpOp::UGE => self.backend.emit_jae(branch_target),
-            op::CmpOp::UGT => self.backend.emit_ja(branch_target),
-            op::CmpOp::ULE => self.backend.emit_jbe(branch_target),
-            op::CmpOp::ULT => self.backend.emit_jb(branch_target),
-            op::CmpOp::SGE => self.backend.emit_jge(branch_target),
-            op::CmpOp::SGT => self.backend.emit_jg(branch_target),
-            op::CmpOp::SLE => self.backend.emit_jle(branch_target),
-            op::CmpOp::SLT => self.backend.emit_jl(branch_target),
+            op::CmpOp::EQ => {
+                if branch_if_true {
+                    self.backend.emit_je(branch_target);
+                } else {
+                    self.backend.emit_jne(branch_target);
+                }
+            },
+            op::CmpOp::NE => {
+                if branch_if_true {
+                    self.backend.emit_jne(branch_target);
+                } else {
+                    self.backend.emit_je(branch_target);
+                }
+            },
+            op::CmpOp::UGE => {
+                if branch_if_true {
+                    self.backend.emit_jae(branch_target);
+                } else {
+                    self.backend.emit_jb(branch_target);
+                }
+            },
+            op::CmpOp::UGT => {
+                if branch_if_true {
+                    self.backend.emit_ja(branch_target);
+                } else {
+                    self.backend.emit_jbe(branch_target);
+                }
+            },
+            op::CmpOp::ULE => {
+                if branch_if_true {
+                    self.backend.emit_jbe(branch_target);
+                } else {
+                    self.backend.emit_ja(branch_target);
+                }
+            },
+            op::CmpOp::ULT => {
+                if branch_if_true {
+                    self.backend.emit_jb(branch_target);
+                } else {
+                    self.backend.emit_jae(branch_target);
+                }
+            },
+            op::CmpOp::SGE => {
+                if branch_if_true {
+                    self.backend.emit_jge(branch_target);
+                } else {
+                    self.backend.emit_jl(branch_target);
+                }
+            },
+            op::CmpOp::SGT => {
+                if branch_if_true {
+                    self.backend.emit_jg(branch_target);
+                } else {
+                    self.backend.emit_jle(branch_target);
+                }
+            },
+            op::CmpOp::SLE => {
+                if branch_if_true {
+                    self.backend.emit_jle(branch_target);
+                } else {
+                    self.backend.emit_jg(branch_target);
+                }
+            },
+            op::CmpOp::SLT => {
+                if branch_if_true {
+                    self.backend.emit_jl(branch_target);
+                } else {
+                    self.backend.emit_jge(branch_target);
+                }
+            },
            _ => unimplemented!()
        }
    } else if self.match_ireg(cond) {
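Note: the rewritten arms thread `branch_if_true` through the jump selection. When the conditional branch targets the block where the condition is true, the condition code matches the `CmpOp` directly; when the likely successor is laid out as the fallthrough, the branch targets the false side and the condition code is negated (je/jne, jae/jb, ja/jbe, jbe/ja, jb/jae, jge/jl, jg/jle, jle/jg, jl/jge). A table-driven sketch of the same mapping; the helper is hypothetical, not the project's API:

```rust
// Hypothetical helper capturing the mapping used above: the x86 condition
// code for `op`, negated when the branch targets the false side.
fn cjmp_for(op: &str, branch_if_true: bool) -> &'static str {
    let (if_true, if_false) = match op {
        "EQ"  => ("je",  "jne"),
        "NE"  => ("jne", "je"),
        "UGE" => ("jae", "jb"),
        "UGT" => ("ja",  "jbe"),
        "ULE" => ("jbe", "ja"),
        "ULT" => ("jb",  "jae"),
        "SGE" => ("jge", "jl"),
        "SGT" => ("jg",  "jle"),
        "SLE" => ("jle", "jg"),
        "SLT" => ("jl",  "jge"),
        _     => unimplemented!(),
    };
    if branch_if_true { if_true } else { if_false }
}

fn main() {
    // Negation flips strictness: !(a < b) is a >= b, hence jl <-> jge.
    assert_eq!(cjmp_for("SLT", true), "jl");
    assert_eq!(cjmp_for("SLT", false), "jge");
}
```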
@@ -113,7 +113,7 @@ impl <'a> InstructionSelection {
    let cond_reg = self.emit_ireg(cond, f_content, f_context, vm);
    // emit: cmp cond_reg 1
-    self.backend.emit_cmp_r64_imm32(&cond_reg, 1);
+    self.backend.emit_cmp_imm32_r64(1, &cond_reg);
    // emit: je #branch_dest
    self.backend.emit_je(branch_target);
} else {
@@ -745,12 +805,12 @@ impl <'a> InstructionSelection {
    // ASM: cmp %end, [%tl + allocator_offset + limit_offset]
    let limit_offset = *thread::ALLOCATOR_OFFSET + *mm::ALLOCATOR_LIMIT_OFFSET;
    let mem_limit = self.make_memory_op_base_offset(&tmp_tl, limit_offset as i32, ADDRESS_TYPE.clone(), vm);
-    self.backend.emit_cmp_r64_mem64(&tmp_end, &mem_limit);
+    self.backend.emit_cmp_mem64_r64(&mem_limit, &tmp_end);
-    // branch to slow path if end > limit
-    // ASM: jl alloc_slow
+    // branch to slow path if end > limit (end - limit > 0)
+    // ASM: jg alloc_slow
    let slowpath = format!("{}_allocslow", node.id());
-    self.backend.emit_jl(slowpath.clone());
+    self.backend.emit_jg(slowpath.clone());
    // update cursor
    // ASM: mov %end -> [%tl + allocator_offset + cursor_offset]
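Note: both sequences guard the bump-allocation fast path; what changes is the operand order. Under the new `mem, reg` convention, `cmp mem_limit, %end` sets flags from `end - limit`, so the slow path is taken on `jg`, i.e. when the new cursor would run past the limit. Restated in plain Rust with illustrative values:

```rust
// Bump-pointer fast-path check (illustrative only):
// `cmp mem_limit, %end` computes end - limit; `jg alloc_slow` fires iff end > limit.
fn needs_slow_path(cursor: usize, size: usize, limit: usize) -> bool {
    let end = cursor + size; // %end = cursor after the allocation
    end > limit              // jg: end - limit > 0
}

fn main() {
    assert!(!needs_slow_path(0x1000, 16, 0x2000)); // fits: stay on fast path
    assert!(needs_slow_path(0x1ff8, 16, 0x2000));  // end = 0x2008 > limit
}
```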
@@ -1503,24 +1563,29 @@ impl <'a> InstructionSelection {
    let op2 = &ops[op2];
    if op::is_int_cmp(op) {
-        if self.match_ireg(op1) && self.match_ireg(op2) {
+        if self.match_ireg(op1) && self.match_iimm(op2) {
            let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-            let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+            let iimm_op2 = self.node_iimm_to_i32(op2);
+            // we adopt at&t syntax
+            // so CMP op1 op2
+            // is actually CMP op2 op1 (in machine code)
+            self.backend.emit_cmp_imm32_r64(iimm_op2, &reg_op1);
-            self.backend.emit_cmp_r64_r64(&reg_op1, &reg_op2);
-        } else if self.match_ireg(op1) && self.match_iimm(op2) {
+            return op;
+        } else if self.match_ireg(op1) && self.match_ireg(op2) {
            let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-            let iimm_op2 = self.node_iimm_to_i32(op2);
+            let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
+            self.backend.emit_cmp_r64_r64(&reg_op2, &reg_op1);
-            self.backend.emit_cmp_r64_imm32(&reg_op1, iimm_op2);
+            return op;
        } else {
            unimplemented!()
        }
    } else {
        unimplemented!()
    }
    op
}
_ => panic!("expect cmp res to emit")
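Note: the reordering in `emit_cmp_res` follows from the same AT&T rule. To test the IR predicate on `op1` relative to `op2`, the flags must come from `op1 - op2`, so `op1` becomes the machine-level destination and `op2` the source (`cmp op2, op1`); an immediate can only appear as the source on x86, which also forces `emit_cmp_imm32_r64(iimm_op2, &reg_op1)`. The flag algebra, restated:

```rust
// The sign of (op1 - op2) is what the signed condition codes read, so after
// `cmp op2, op1` (flags = op1 - op2), `jl` is taken exactly when op1 < op2.
fn main() {
    for (op1, op2) in [(3i64, 7i64), (7, 3), (5, 5)] {
        assert_eq!((op1 - op2) < 0, op1 < op2);   // jl  <=> SLT
        assert_eq!((op1 - op2) >= 0, op1 >= op2); // jge <=> SGE
    }
}
```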
@@ -1798,12 +1863,12 @@ impl <'a> InstructionSelection {
    let ref dst_ty = dest.ty;
    if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
-        if self.match_ireg(src) {
-            let src_reg = self.emit_ireg(src, f_content, f_context, vm);
-            self.backend.emit_mov_r64_r64(dest, &src_reg);
-        } else if self.match_iimm(src) {
+        if self.match_iimm(src) {
            let src_imm = self.node_iimm_to_i32(src);
            self.backend.emit_mov_r64_imm32(dest, src_imm);
+        } else if self.match_ireg(src) {
+            let src_reg = self.emit_ireg(src, f_content, f_context, vm);
+            self.backend.emit_mov_r64_r64(dest, &src_reg);
        } else {
            panic!("expected an int type op");
        }
......
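Note: moving the `match_iimm` test ahead of `match_ireg` matters if an immediate node can also satisfy the more general register match; checking the more specific pattern first keeps a constant as `mov $imm, %dst` instead of first materializing it into a register. That reading is an inference from the diff, not stated in the commit. The shape of the pattern, with illustrative stand-ins for the project's matcher:

```rust
// Try the most specific pattern first so constants use the immediate form.
enum Operand { Imm(i32), Reg(&'static str) }

fn emit_mov(dst: &str, src: &Operand) -> String {
    match src {
        Operand::Imm(i) => format!("movq ${},{}", i, dst), // no scratch register
        Operand::Reg(r) => format!("movq {},{}", r, dst),
    }
}

fn main() {
    assert_eq!(emit_mov("%rax", &Operand::Imm(5)), "movq $5,%rax");
    assert_eq!(emit_mov("%rax", &Operand::Reg("%rbx")), "movq %rbx,%rax");
}
```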
@@ -20,7 +20,9 @@ fn test_sum() {
    let lib = testutil::compile_fnc("sum", &sum);
    unsafe {
        let sumptr: ll::Symbol<unsafe extern fn (u64) -> u64> = lib.get(b"sum").unwrap();
-        assert!(sumptr(5) == 10);
-        assert!(sumptr(10) == 45);
+        println!("sum(5) = {}", sumptr(5));
+        assert!(sumptr(5) == 15);
+        println!("sum(10) = {}", sumptr(10));
+        assert!(sumptr(10) == 55);
    }
}
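Note: the new expectations match the inclusive sum 1 + 2 + … + n = n(n+1)/2, so sum(5) = 15 and sum(10) = 55; the old values 10 and 45 are the exclusive sums 0 + … + (n-1). For reference:

```rust
// Closed form behind the updated assertions: 1 + 2 + ... + n = n(n+1)/2.
fn sum(n: u64) -> u64 {
    n * (n + 1) / 2
}

fn main() {
    assert_eq!(sum(5), 15);
    assert_eq!(sum(10), 55);
    // The old expectations were the exclusive sums 0 + 1 + ... + (n-1):
    assert_eq!((0..5u64).sum::<u64>(), 10);
    assert_eq!((0..10u64).sum::<u64>(), 45);
}
```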