
Commit 1b8bd2ad authored by qinsoon

[wip] simple_spill fails nondeterministic

parent 1c55b5dd
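The diff below collapses the per-instruction emit macros (emit_cmp_*, emit_and_*, emit_xor_*) into generic binop macros that take the mnemonic as a parameter. A minimal, self-contained sketch of that layering, assuming a hypothetical ToyCodeGen and plain string operands in place of the real ASMCodeGen and P<Value>:

// Stand-in for ASMCodeGen: it only records the emitted strings (hypothetical).
struct ToyCodeGen {
    lines: Vec<String>,
}

impl ToyCodeGen {
    fn add_asm_inst(&mut self, asm: String) {
        self.lines.push(asm);
    }
}

// Operand-width postfix, mirroring op_postfix() in the diff.
fn op_postfix(op_len: usize) -> &'static str {
    match op_len {
        8 => "b",
        16 => "w",
        32 => "l",
        64 => "q",
        _ => "",
    }
}

// Generic reg-reg emission: the mnemonic is a parameter, as in binop_no_def_r_r!.
macro_rules! binop_r_r {
    ($cg: expr, $inst: expr, $op1: expr, $op2: expr, $op_len: expr) => {{
        let inst = $inst.to_string() + op_postfix($op_len);
        $cg.add_asm_inst(format!("{} {},{}", inst, $op1, $op2));
    }};
}

// Thin per-instruction wrapper, playing the role of emit_cmp_r_r! after the refactor.
macro_rules! cmp_r_r {
    ($cg: expr, $op1: expr, $op2: expr, $op_len: expr) => {
        binop_r_r!($cg, "cmp", $op1, $op2, $op_len)
    };
}

fn main() {
    let mut cg = ToyCodeGen { lines: vec![] };
    cmp_r_r!(cg, "%rax", "%rbx", 64);
    cmp_r_r!(cg, "%ax", "%bx", 16);
    assert_eq!(cg.lines, vec!["cmpq %rax,%rbx", "cmpw %ax,%bx"]);
}

The real macros additionally thread register IDs and operand locations into the define/use maps passed to add_asm_inst; that bookkeeping is omitted here.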
@@ -1073,19 +1073,20 @@ fn op_postfix(op_len: usize) -> &'static str {
}
}
// cmp implementation
macro_rules! emit_cmp_r_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
// general instruction emission
macro_rules! binop_no_def_r_r {
($cg: expr, $inst: expr, $op1: expr, $op2: expr, $op_len: expr) => {
{
// with postfix
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {} {}", inst, $op1, $op2);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(op2, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = $cg.prepare_reg($op1, inst.len() + 1);
let (reg2, id2, loc2) = $cg.prepare_reg($op2, inst.len() + 1 + reg1.len() + 1);
let asm = format!("cmp{} {},{}", op_postfix($op_len), reg1, reg2);
let asm = format!("{} {},{}", inst, reg1, reg2);
self.add_asm_inst(
$cg.add_asm_inst(
asm,
hashmap!{},
hashmap!{
@@ -1093,21 +1094,22 @@ macro_rules! emit_cmp_r_r {
id2 => vec![loc2]
},
false
);
)
}
}
}
macro_rules! emit_cmp_r_imm {
($func_name: ident, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, op1: &P<Value>, op2: $imm_ty) {
trace!("emit: cmp {} {}", op1, op2);
macro_rules! binop_no_def_r_imm {
($cg: expr, $inst: expr, $op1: expr, $op2_imm: expr, $op_len: expr) => {
{
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {} {}", inst, $op1, $op2_imm);
let (reg1, id1, loc1) = self.prepare_reg(op1, 4 + 1 + 1 + op2.to_string().len() + 1);
let (reg1, id1, loc1) = $cg.prepare_reg($op1, inst.len() + 1 + 1 + $op2_imm.to_string().len() + 1);
let asm = format!("cmp{} ${},{}", op_postfix($op_len), op2, reg1);
let asm = format!("cmp{} ${},{}", op_postfix($op_len), $op2_imm, reg1);
self.add_asm_inst(
$cg.add_asm_inst(
asm,
hashmap!{},
hashmap!{
@@ -1115,19 +1117,21 @@ macro_rules! emit_cmp_r_imm {
},
false
)
}
}
}
macro_rules! emit_cmp_r_mem {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
trace!("emit: cmp {} {}", op1, op2);
macro_rules! binop_no_def_r_mem {
($cg: expr, $inst:expr, $op1: expr, $op2_mem: expr, $op_len: expr) => {
{
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {} {}", inst, $op1, $op2_mem);
let (reg, id1, loc1) = self.prepare_reg(op1, 4 + 1);
let (mem, mut uses) = self.prepare_mem(op2, 4 + 1 + reg.len() + 1);
let (reg, id1, loc1) = $cg.prepare_reg($op1, inst.len() + 1);
let (mem, mut uses) = $cg.prepare_mem($op2_mem, inst.len() + 1 + reg.len() + 1);
let asm = format!("cmp{} {},{}", op_postfix($op_len), reg, mem);
let asm = format!("{} {},{}", inst, reg, mem);
// merge use vec
if uses.contains_key(&id1) {
@@ -1136,7 +1140,7 @@ macro_rules! emit_cmp_r_mem {
uses.insert(id1, vec![loc1]);
}
self.add_asm_inst(
$cg.add_asm_inst(
asm,
hashmap!{},
uses,
@@ -1146,6 +1150,58 @@ macro_rules! emit_cmp_r_mem {
}
}
macro_rules! binop_def_r_r {
($cg: expr, $inst: expr, $dest: expr, $src: expr, $op_len: expr) => {
{
let inst = $inst.to_string() + &op_postfix($op_len);
trace!("emit: {} {}, {} -> {}", inst, $src, $dest, $dest);
let (reg1, id1, loc1) = $cg.prepare_reg($src, inst.len() + 1);
let (reg2, id2, loc2) = $cg.prepare_reg($dest, inst.len() + 1 + reg1.len() + 1);
let asm = format!("{} {},{}", inst, reg1, reg2);
$cg.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
},
false
)
}
}
}
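The inst.len() + 1 and inst.len() + 1 + reg1.len() + 1 arguments passed to prepare_reg in the macros above appear to be byte offsets of each operand inside the emitted line (mnemonic, a space, the first operand, a comma). A small self-contained check of that arithmetic, with the prepare_reg call itself left out:

fn main() {
    // For a 64-bit cmp the mnemonic is "cmp" + op_postfix(64) = "cmpq".
    let inst = "cmpq";
    let (reg1, reg2) = ("%rax", "%rbx");
    let asm = format!("{} {},{}", inst, reg1, reg2); // "cmpq %rax,%rbx"

    // Offsets as computed in binop_no_def_r_r! / binop_def_r_r!.
    let loc1 = inst.len() + 1;                  // 5: after "cmpq "
    let loc2 = inst.len() + 1 + reg1.len() + 1; // 10: after "cmpq %rax,"

    // Each offset lands on the start of the corresponding operand.
    assert_eq!(&asm[loc1..loc1 + reg1.len()], reg1);
    assert_eq!(&asm[loc2..loc2 + reg2.len()], reg2);
}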
// cmp implementation
macro_rules! emit_cmp_r_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
binop_no_def_r_r!(self, "cmp", op1, op2, $op_len)
}
}
}
macro_rules! emit_cmp_r_imm {
($func_name: ident, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, op1: &P<Value>, op2: $imm_ty) {
binop_no_def_r_imm!(self, "cmp", op1, op2, $op_len)
}
}
}
macro_rules! emit_cmp_r_mem {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, op1: &P<Value>, op2: &P<Value>) {
binop_no_def_r_mem!(self, "cmp", op1, op2, $op_len)
}
}
}
macro_rules! emit_mov_r_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
@@ -1287,21 +1343,27 @@ macro_rules! emit_lea_r {
macro_rules! emit_and_r_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
binop_def_r_r!(self, "and", dest, src, $op_len)
}
}
}
macro_rules! emit_and_r_imm {
($func_name: ident, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, dest: &P<Value>, src: $imm_ty) {
trace!("emit: and {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("and{} {},{}", op_postfix($op_len), reg1, reg2);
let asm = format!("and{} ${},{}", op_postfix($op_len), src, reg1);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1],
id2 => vec![loc2]
id1 => vec![loc1]
},
false
)
@@ -1309,22 +1371,52 @@ macro_rules! emit_and_r_r {
}
}
macro_rules! emit_and_r_imm {
($func_name: ident, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, dest: &P<Value>, src: $imm_ty) {
trace!("emit: and {}, {} -> {}", src, dest, dest);
macro_rules! emit_and_r_mem {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit and {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let (mem, mut uses) = self.prepare_mem(src, 4 + 1);
let (reg, id1, loc1) = self.prepare_reg(dest, 4 + 1 + mem.len() + 1);
let asm = format!("and{} ${},{}", op_postfix($op_len), src, reg1);
if uses.contains_key(&id1) {
uses.get_mut(&id1).unwrap().push(loc1.clone());
} else {
uses.insert(id1, vec![loc1.clone()]);
}
let asm = format!("and{} {},{}", op_postfix($op_len), mem, reg);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
id1 => vec![loc1]
},
uses,
true
)
}
}
}
macro_rules! emit_xor_r_r {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("xor{} {},{}", op_postfix($op_len), reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1]
id1 => vec![loc1.clone()],
id2 => vec![loc2.clone()]
},
false
)
@@ -1332,10 +1424,10 @@ macro_rules! emit_and_r_imm {
}
}
macro_rules! emit_and_r_mem {
macro_rules! emit_xor_r_mem {
($func_name: ident, $op_len: expr) => {
fn $func_name (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit and {}, {} -> {}", src, dest, dest);
trace!("emit: xor {}, {} -> {}", src, dest, dest);
let (mem, mut uses) = self.prepare_mem(src, 4 + 1);
let (reg, id1, loc1) = self.prepare_reg(dest, 4 + 1 + mem.len() + 1);
@@ -1346,7 +1438,7 @@ macro_rules! emit_and_r_mem {
uses.insert(id1, vec![loc1.clone()]);
}
let asm = format!("and{} {},{}", op_postfix($op_len), mem, reg);
let asm = format!("xor{} {},{}", op_postfix($op_len), mem, reg);
self.add_asm_inst(
asm,
@@ -1360,6 +1452,29 @@ macro_rules! emit_and_r_mem {
}
}
macro_rules! emit_xor_r_imm {
($func_name: ident, $op_len: expr, $imm_ty: ty) => {
fn $func_name (&mut self, dest: &P<Value>, src: $imm_ty) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("xor{} ${},{}", op_postfix($op_len), src, reg1);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1]
},
false
)
}
}
}
impl CodeGenerator for ASMCodeGen {
fn start_code(&mut self, func_name: MuName) -> ValueLocation {
self.cur = Some(Box::new(ASMCode {
@@ -1581,50 +1696,21 @@ impl CodeGenerator for ASMCodeGen {
emit_and_r_mem!(emit_and_r16_mem16, 16);
emit_and_r_mem!(emit_and_r8_mem8 , 8 );
fn emit_xor_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("xorq {},{}", reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1.clone()],
id2 => vec![loc2.clone()]
},
false
)
}
fn emit_xor_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
unimplemented!()
}
fn emit_xor_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: xor {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("xorq ${},{}", src, reg1);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1]
},
false
)
}
// xor
emit_xor_r_r!(emit_xor_r64_r64, 64);
emit_xor_r_r!(emit_xor_r32_r32, 32);
emit_xor_r_r!(emit_xor_r16_r16, 16);
emit_xor_r_r!(emit_xor_r8_r8 , 8 );
emit_xor_r_mem!(emit_xor_r64_mem64, 64);
emit_xor_r_mem!(emit_xor_r32_mem32, 32);
emit_xor_r_mem!(emit_xor_r16_mem16, 16);
emit_xor_r_mem!(emit_xor_r8_mem8 , 8 );
emit_xor_r_imm!(emit_xor_r64_imm32, 64, i32);
emit_xor_r_imm!(emit_xor_r32_imm32, 32, i32);
emit_xor_r_imm!(emit_xor_r16_imm16, 16, i16);
emit_xor_r_imm!(emit_xor_r8_imm8 , 8 , i8 );
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
......
@@ -4,6 +4,9 @@ use runtime::ValueLocation;
use compiler::machine_code::MachineCode;
pub type Reg<'a> = &'a P<Value>;
pub type Mem<'a> = &'a P<Value>;
pub trait CodeGenerator {
fn start_code(&mut self, func_name: MuName) -> ValueLocation;
fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode + Sync + Send>, ValueLocation);
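Reg and Mem, added in the hunk above, both expand to &P<Value>, so the signature changes below keep the same types and only shorten the spelling. A toy sketch of that equivalence (Value, P, and the trait name are stand-ins, not the project's real definitions):

use std::rc::Rc;

struct Value;
struct P<T>(Rc<T>);

type Reg<'a> = &'a P<Value>;
type Mem<'a> = &'a P<Value>;

trait CodeGenSketch {
    // The old and new spellings are the same type after alias expansion.
    fn emit_cmp_r64_r64_old(&mut self, op1: &P<Value>, op2: &P<Value>);
    fn emit_cmp_r64_r64_new(&mut self, op1: Reg, op2: Reg);
}

fn main() {}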
@@ -23,75 +26,101 @@ pub trait CodeGenerator {
fn emit_nop(&mut self, bytes: usize);
// comparison
fn emit_cmp_r64_r64 (&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r64_r64 (&mut self, op1: Reg, op2: Reg);
fn emit_cmp_r64_imm32(&mut self, op1: Reg, op2: i32);
fn emit_cmp_r64_mem64(&mut self, op1: Reg, op2: Mem);
fn emit_cmp_r32_r32 (&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r32_imm32(&mut self, op1: &P<Value>, op2: i32);
fn emit_cmp_r32_mem32(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r32_r32 (&mut self, op1: Reg, op2: Reg);
fn emit_cmp_r32_imm32(&mut self, op1: Reg, op2: i32);
fn emit_cmp_r32_mem32(&mut self, op1: Reg, op2: Mem);
fn emit_cmp_r16_r16 (&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r16_imm16(&mut self, op1: &P<Value>, op2: i16);
fn emit_cmp_r16_mem16(&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r16_r16 (&mut self, op1: Reg, op2: Reg);
fn emit_cmp_r16_imm16(&mut self, op1: Reg, op2: i16);
fn emit_cmp_r16_mem16(&mut self, op1: Reg, op2: Mem);
fn emit_cmp_r8_r8 (&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r8_imm8 (&mut self, op1: &P<Value>, op2: i8);
fn emit_cmp_r8_mem8 (&mut self, op1: &P<Value>, op2: &P<Value>);
fn emit_cmp_r8_r8 (&mut self, op1: Reg, op2: Reg);
fn emit_cmp_r8_imm8 (&mut self, op1: Reg, op2: i8);
fn emit_cmp_r8_mem8 (&mut self, op1: Reg, op2: Mem);
// gpr move
fn emit_mov_r64_imm32 (&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r64_mem64 (&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem64_r64 (&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r32_imm32 (&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r32_mem32 (&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r32_r32 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem32_r32 (&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem32_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_mov_r64_imm32 (&mut self, dest: Reg, src: i32);
fn emit_mov_r64_mem64 (&mut self, dest: Reg, src: Mem); // load
fn emit_mov_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_mov_mem64_r64 (&mut self, dest: Mem, src: Reg); // store
fn emit_mov_mem64_imm32(&mut self, dest: Mem, src: i32);
fn emit_mov_r32_imm32 (&mut self, dest: Reg, src: i32);
fn emit_mov_r32_mem32 (&mut self, dest: Reg, src: Mem); // load
fn emit_mov_r32_r32 (&mut self, dest: Reg, src: Reg);
fn emit_mov_mem32_r32 (&mut self, dest: Mem, src: Reg); // store
fn emit_mov_mem32_imm32(&mut self, dest: Mem, src: i32);
fn emit_mov_r16_imm16 (&mut self, dest: &P<Value>, src: i16);
fn emit_mov_r16_mem16 (&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r16_r16 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem16_r16 (&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem16_imm16(&mut self, dest: &P<Value>, src: i16);
fn emit_mov_r8_imm8 (&mut self, dest: &P<Value>, src: i8);
fn emit_mov_r8_mem8 (&mut self, dest: &P<Value>, src: &P<Value>); // load
fn emit_mov_r8_r8 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_mov_mem8_r8 (&mut self, dest: &P<Value>, src: &P<Value>); // store
fn emit_mov_mem8_imm8 (&mut self, dest: &P<Value>, src: i8);
fn emit_mov_r16_imm16 (&mut self, dest: Reg, src: i16);
fn emit_mov_r16_mem16 (&mut self, dest: Reg, src: Mem); // load
fn emit_mov_r16_r16 (&mut self, dest: Reg, src: Reg);
fn emit_mov_mem16_r16 (&mut self, dest: Mem, src: Reg); // store
fn emit_mov_mem16_imm16(&mut self, dest: Mem, src: i16);
fn emit_mov_r8_imm8 (&mut self, dest: Reg, src: i8);
fn emit_mov_r8_mem8 (&mut self, dest: Reg, src: Mem); // load
fn emit_mov_r8_r8 (&mut self, dest: Reg, src: Reg);
fn emit_mov_mem8_r8 (&mut self, dest: Mem, src: Reg); // store
fn emit_mov_mem8_imm8 (&mut self, dest: Mem, src: i8);
// lea
fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_lea_r64(&mut self, dest: Reg, src: Reg);
// and
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_and_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_and_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_and_r64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_and_r32_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_and_r32_r32 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r32_mem32(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r32_imm32(&mut self, dest: Reg, src: i32);
fn emit_and_r32_r32 (&mut self, dest: Reg, src: Reg);
fn emit_and_r32_mem32(&mut self, dest: Reg, src: Mem);
fn emit_and_r16_imm16(&mut self, dest: &P<Value>, src: i16);
fn emit_and_r16_r16 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r16_mem16(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r16_imm16(&mut self, dest: Reg, src: i16);
fn emit_and_r16_r16 (&mut self, dest: Reg, src: Reg);
fn emit_and_r16_mem16(&mut self, dest: Reg, src: Mem);
fn emit_and_r8_imm8 (&mut self, dest: &P<Value>, src: i8);
fn emit_and_r8_r8 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r8_mem8 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_and_r8_imm8 (&mut self, dest: Reg, src: i8);
fn emit_and_r8_r8 (&mut self, dest: Reg, src: Reg);
fn emit_and_r8_mem8 (&mut self, dest: Reg, src: Mem);
fn emit_xor_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_xor_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_xor_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_add_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);
// xor
fn emit_xor_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_xor_r64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_xor_r64_imm32(&mut self, dest: Reg, src: i32);
fn emit_xor_r32_r32 (&mut self, dest: Reg, src: Reg);
fn emit_xor_r32_mem32(&mut self, dest: Reg, src: Mem);
fn emit_xor_r32_imm32(&mut self, dest: Reg, src: i32);
fn emit_xor_r16_r16 (&mut self, dest: Reg, src: Reg);
fn emit_xor_r16_mem16(&mut self, dest: Reg, src: Mem);
fn emit_xor_r16_imm16(&mut self, dest: Reg, src: i16);
fn emit_xor_r8_r8 (&mut self, dest: Reg, src: Reg);
fn emit_xor_r8_mem8 (&mut self, dest: Reg, src: Mem);
fn emit_xor_r8_imm8 (&mut self, dest: Reg, src: i8);
// add
fn emit_add_r64_r64 (&mut self, dest: Reg, src: Reg);
fn emit_add_r64_mem64(&mut self, dest: Reg, src: Mem);
fn emit_add_r64_imm32(&mut self, dest: Reg, src: i32);
//
// fn emit_add_r32_r32 (&mut self, dest: Reg, src: Reg);
// fn emit_add_r32_mem32(&mut self, dest: Reg, src: Mem);
// fn emit_add_r32_imm32(&mut self, dest: Reg, src: i32);
//
// fn emit_add_r16_r16 (&mut self, dest: Reg, src: Reg);
// fn emit_add_r16_mem16(&mut self, dest: Reg, src: Mem);
// fn emit_add_r16_imm16(&mut self, dest: Reg, src: i16);
//
// fn emit_add_r8_r8 (&mut self, dest: Reg, src: Reg);
// fn emit_add_r8_mem8(&mut self, dest: Reg, src: Mem);
// fn emit_add_r8_imm8(&mut self, dest: Reg, src: i8);
fn emit_addsd_f64_f64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
......