WARNING! Access to this system is limited to authorised users only.
Unauthorised users may be subject to prosecution.
Unauthorised access to this system is a criminal offence under Australian law (Crimes Act 1914 (Cth), Part VIA)
It is a criminal offence to:
(1) Obtain access to data without authority. -Penalty 2 years imprisonment.
(2) Damage, delete, alter or insert data without authority. -Penalty 10 years imprisonment.
User activity is monitored and recorded. Anyone using this system expressly consents to such monitoring and recording.

To protect your data, the CISO has advised all users to enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

Commit b12737c0 authored by qinsoon's avatar qinsoon
Browse files

use setbyte to implement comparison as value, added a few tests

parent 44a861c4
......@@ -1175,6 +1175,23 @@ impl ASMCodeGen {
self.cur.take().unwrap()
}
/// Emits a unary instruction of the form `<inst> <reg>` whose single
/// register operand is written by the instruction (e.g. the `setcc` family).
/// Logs the emission via `trace!` before assembling the line.
fn internal_uniop_def_r(&mut self, inst: &str, op: &P<Value>) {
    trace!("emit: {} {}", inst, op);

    // The operand string begins right after the mnemonic plus one space,
    // hence the `inst.len() + 1` column offset.
    let (reg_name, reg_id, reg_loc) = self.prepare_reg(op, inst.len() + 1);

    self.add_asm_inst(
        format!("{} {}", inst, reg_name),
        // NOTE(review): first map appears to be the def set — consistent with
        // the `_def_r` naming; confirm against add_asm_inst's signature.
        linked_hashmap!{
            reg_id => vec![reg_loc]
        },
        // no register uses
        linked_hashmap!{},
        false
    )
}
fn internal_binop_no_def_r_r(&mut self, inst: &str, op1: &P<Value>, op2: &P<Value>) {
let len = check_op_len(op1);
......@@ -1892,71 +1909,46 @@ impl CodeGenerator for ASMCodeGen {
// set byte
/// Emits `sets <dest>`: sets the 8-bit register `dest` to 1 if SF=1, else 0.
/// Delegates to `internal_uniop_def_r`, which also performs the trace logging
/// and records `dest` as a def.
fn emit_sets_r8(&mut self, dest: Reg) {
    self.internal_uniop_def_r("sets", dest)
}
/// Emits `setz <dest>`: sets the 8-bit register `dest` to 1 if ZF=1, else 0.
/// Delegates to `internal_uniop_def_r`, which also performs the trace logging
/// and records `dest` as a def.
fn emit_setz_r8(&mut self, dest: Reg) {
    self.internal_uniop_def_r("setz", dest)
}
/// Emits `seto <dest>`: sets the 8-bit register `dest` to 1 if OF=1, else 0.
/// Delegates to `internal_uniop_def_r`, which also performs the trace logging
/// and records `dest` as a def.
fn emit_seto_r8(&mut self, dest: Reg) {
    self.internal_uniop_def_r("seto", dest)
}
/// Emits `setb <dest>`: sets the 8-bit register `dest` to 1 if CF=1, else 0.
/// Delegates to `internal_uniop_def_r`, which also performs the trace logging
/// and records `dest` as a def.
fn emit_setb_r8(&mut self, dest: Reg) {
    self.internal_uniop_def_r("setb", dest)
}
/// Emits `seta` — set byte if above (unsigned >, CF=0 and ZF=0) into `dest`.
fn emit_seta_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("seta", dest)
}
/// Emits `setae` — set byte if above or equal (unsigned >=, CF=0) into `dest`.
fn emit_setae_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setae", dest)
}
/// Emits `setb` — set byte if below (unsigned <, CF=1) into `dest`.
fn emit_setb_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setb", dest)
}
/// Emits `setbe` — set byte if below or equal (unsigned <=, CF=1 or ZF=1) into `dest`.
fn emit_setbe_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setbe", dest)
}
/// Emits `sete` — set byte if equal (ZF=1) into `dest`.
fn emit_sete_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("sete", dest)
}
/// Emits `setg` — set byte if greater (signed >, ZF=0 and SF=OF) into `dest`.
fn emit_setg_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setg", dest)
}
/// Emits `setge` — set byte if greater or equal (signed >=, SF=OF) into `dest`.
fn emit_setge_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setge", dest)
}
/// Emits `setl` — set byte if less (signed <, SF!=OF) into `dest`.
fn emit_setl_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setl", dest)
}
/// Emits `setle` — set byte if less or equal (signed <=, ZF=1 or SF!=OF) into `dest`.
fn emit_setle_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setle", dest)
}
/// Emits `setne` — set byte if not equal (ZF=0) into `dest`.
fn emit_setne_r (&mut self, dest: Reg) {
    self.internal_uniop_def_r("setne", dest)
}
// cmov src -> dest
......
......@@ -54,6 +54,17 @@ pub trait CodeGenerator {
fn emit_seto_r8 (&mut self, dest: Reg);
fn emit_setb_r8 (&mut self, dest: Reg);
fn emit_seta_r (&mut self, dest: Reg);
fn emit_setae_r (&mut self, dest: Reg);
fn emit_setb_r (&mut self, dest: Reg);
fn emit_setbe_r (&mut self, dest: Reg);
fn emit_sete_r (&mut self, dest: Reg);
fn emit_setg_r (&mut self, dest: Reg);
fn emit_setge_r (&mut self, dest: Reg);
fn emit_setl_r (&mut self, dest: Reg);
fn emit_setle_r (&mut self, dest: Reg);
fn emit_setne_r (&mut self, dest: Reg);
// gpr conditional move
fn emit_cmova_r_r (&mut self, dest: Reg, src: Reg);
......
......@@ -431,43 +431,32 @@ impl <'a> InstructionSelection {
let ref op2 = ops[op2];
let tmp_res = self.get_result_value(node);
debug_assert!(tmp_res.ty.get_int_length().is_some());
debug_assert!(tmp_res.ty.get_int_length().unwrap() == 1);
// cmov only take (16/32/64bits registers)
// so we use 64bits registers here and truncate later
// make res64, and set to zero
let tmp_res64 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_xor_r_r(&tmp_res64, &tmp_res64);
// set tmp1 as 1 (cmov doesnt allow immediate or reg8 as operand)
let tmp_1 = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
self.backend.emit_mov_r_imm(&tmp_1, 1);
// cmov 1 to result
// set byte to result
match self.emit_cmp_res(node, f_content, f_context, vm) {
EQ => self.backend.emit_cmove_r_r (&tmp_res64, &tmp_1),
NE => self.backend.emit_cmovne_r_r(&tmp_res64, &tmp_1),
SGE => self.backend.emit_cmovge_r_r(&tmp_res64, &tmp_1),
SGT => self.backend.emit_cmovg_r_r (&tmp_res64, &tmp_1),
SLE => self.backend.emit_cmovle_r_r(&tmp_res64, &tmp_1),
SLT => self.backend.emit_cmovl_r_r (&tmp_res64, &tmp_1),
UGE => self.backend.emit_cmovae_r_r(&tmp_res64, &tmp_1),
UGT => self.backend.emit_cmova_r_r (&tmp_res64, &tmp_1),
ULE => self.backend.emit_cmovbe_r_r(&tmp_res64, &tmp_1),
ULT => self.backend.emit_cmovb_r_r (&tmp_res64, &tmp_1),
FOEQ | FUEQ => self.backend.emit_cmove_r_r (&tmp_res64, &tmp_1),
FONE | FUNE => self.backend.emit_cmovne_r_r(&tmp_res64, &tmp_1),
FOGT | FUGT => self.backend.emit_cmova_r_r (&tmp_res64, &tmp_1),
FOGE | FUGE => self.backend.emit_cmovae_r_r(&tmp_res64, &tmp_1),
FOLT | FULT => self.backend.emit_cmovb_r_r (&tmp_res64, &tmp_1),
FOLE | FULE => self.backend.emit_cmovbe_r_r(&tmp_res64, &tmp_1),
EQ => self.backend.emit_sete_r (&tmp_res),
NE => self.backend.emit_setne_r(&tmp_res),
SGE => self.backend.emit_setge_r(&tmp_res),
SGT => self.backend.emit_setg_r (&tmp_res),
SLE => self.backend.emit_setle_r(&tmp_res),
SLT => self.backend.emit_setl_r (&tmp_res),
UGE => self.backend.emit_setae_r(&tmp_res),
UGT => self.backend.emit_seta_r (&tmp_res),
ULE => self.backend.emit_setbe_r(&tmp_res),
ULT => self.backend.emit_setb_r (&tmp_res),
FOEQ | FUEQ => self.backend.emit_sete_r (&tmp_res),
FONE | FUNE => self.backend.emit_setne_r(&tmp_res),
FOGT | FUGT => self.backend.emit_seta_r (&tmp_res),
FOGE | FUGE => self.backend.emit_setae_r(&tmp_res),
FOLT | FULT => self.backend.emit_setb_r (&tmp_res),
FOLE | FULE => self.backend.emit_setbe_r(&tmp_res),
_ => unimplemented!()
}
// truncate tmp_res64 to tmp_res (probably u8)
self.backend.emit_mov_r_r(&tmp_res, unsafe {&tmp_res64.as_type(UINT8_TYPE.clone())});
}
Instruction_::Branch1(ref dest) => {
......
......@@ -49,6 +49,8 @@ pub use self::sidemap::MINIMAL_ALIGNMENT;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJECT_HEADER_SIZE;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJECT_HEADER_OFFSET;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::REF_BITS_LEN;
#[cfg(feature = "use-sidemap")]
pub use self::sidemap::OBJ_START_BIT;
......
extern crate libloading;
use mu::ast::types::*;
use mu::ast::ir::*;
use mu::ast::inst::*;
use mu::ast::op::*;
use mu::vm::*;
use mu::compiler::*;
use mu::runtime::mm::objectmodel::OBJECT_HEADER_OFFSET;
use mu::utils::LinkedHashMap;
use std::sync::Arc;
use std::sync::RwLock;
use mu::testutil::aot;
use mu::testutil;
use test_compiler::test_call::gen_ccall_exit;
#[test]
fn test_get_field_iref1() {
    // GETFIELDIREF of field 1 in mystruct(int64, int64, ref<int64>) should
    // land 8 bytes past the struct base — see `get_field_iref1` below.
    let lib = testutil::compile_fnc("get_field_iref1", &get_field_iref1);

    unsafe {
        let get_field_iref1: libloading::Symbol<unsafe extern fn(u64) -> u64> =
            lib.get(b"get_field_iref1").unwrap();

        let base = 0x10000000;
        let field_addr = get_field_iref1(base);
        println!("get_field_iref1({}) = {}", base, field_addr);

        assert!(field_addr == base + 8);
    }
}
/// Builds a Mu function `get_field_iref1: (ref<mystruct>) -> iref<int64>`
/// that returns an iref to field 1 (the second member) of the struct its
/// argument points to. `test_get_field_iref1` above expects the result to be
/// the input address + 8.
fn get_field_iref1() -> VM {
    let vm = VM::new();

    // types: int64, ref/iref to it, and a three-field struct with ref/iref
    typedef! ((vm) int64 = mu_int(64));
    typedef! ((vm) ref_int64 = mu_ref(int64));
    typedef! ((vm) iref_int64 = mu_iref(int64));
    typedef! ((vm) mystruct = mu_struct(int64, int64, ref_int64));
    typedef! ((vm) ref_mystruct = mu_ref(mystruct));
    typedef! ((vm) iref_mystruct = mu_iref(mystruct));

    // function signature and single-version function declaration
    funcsig! ((vm) sig = (ref_mystruct) -> (iref_int64));
    funcdecl! ((vm) <sig> get_field_iref1);
    funcdef! ((vm) <sig> get_field_iref1 VERSION get_field_iref1_v1);

    // single entry block taking the struct ref `x`
    block! ((vm, get_field_iref1_v1) blk_entry);
    ssa! ((vm, get_field_iref1_v1) <ref_mystruct> x);

    // x_ = GETIREF x  — convert the ref into an iref
    ssa! ((vm, get_field_iref1_v1) <iref_mystruct> x_);
    inst! ((vm, get_field_iref1_v1) blk_entry_get_iref:
    x_ = GETIREF x
    );

    // ret = GETFIELDIREF x_, field index 1 (the second int64)
    ssa! ((vm, get_field_iref1_v1) <iref_int64> ret);
    inst! ((vm, get_field_iref1_v1) blk_entry_get_field_iref1:
    ret = GETFIELDIREF x_ (is_ptr: false, index: 1)
    );

    inst! ((vm, get_field_iref1_v1) blk_entry_ret:
    RET (ret)
    );

    // wire the three instructions into the entry block and finish the version
    define_block! ((vm, get_field_iref1_v1) blk_entry(x) {
    blk_entry_get_iref, blk_entry_get_field_iref1, blk_entry_ret
    });
    define_func_ver!((vm) get_field_iref1_v1 (entry: blk_entry) {blk_entry});

    vm
}
#[test]
fn test_get_iref() {
    // GETIREF on a ref is an identity at the address level, so the returned
    // iref must equal the address passed in.
    let lib = testutil::compile_fnc("get_iref", &get_iref);

    unsafe {
        let get_iref: libloading::Symbol<unsafe extern fn(u64) -> u64> =
            lib.get(b"get_iref").unwrap();

        let obj_addr = 0x10000000;
        let iref = get_iref(obj_addr);
        println!("get_iref({}) = {}", obj_addr, iref);

        assert!(iref == obj_addr);
    }
}
/// Builds a Mu function `get_iref: (ref<int64>) -> iref<int64>` that simply
/// applies GETIREF to its argument and returns the result. `test_get_iref`
/// above expects the output address to equal the input address.
fn get_iref() -> VM {
    let vm = VM::new();

    // types: int64 plus ref/iref to it
    typedef! ((vm) int64 = mu_int(64));
    typedef! ((vm) ref_int64 = mu_ref(int64));
    typedef! ((vm) iref_int64 = mu_iref(int64));

    // function signature and single-version declaration
    funcsig! ((vm) sig = (ref_int64) -> (iref_int64));
    funcdecl! ((vm) <sig> get_iref);
    funcdef! ((vm) <sig> get_iref VERSION get_iref_v1);

    // entry block: ret = GETIREF x; RET ret
    block! ((vm, get_iref_v1) blk_entry);
    ssa! ((vm, get_iref_v1) <ref_int64> x);
    ssa! ((vm, get_iref_v1) <iref_int64> ret);
    inst! ((vm, get_iref_v1) blk_entry_get_iref:
    ret = GETIREF x
    );

    inst! ((vm, get_iref_v1) blk_entry_ret:
    RET (ret)
    );

    // assemble the block and finish the function version
    define_block! ((vm, get_iref_v1) blk_entry(x) {
    blk_entry_get_iref, blk_entry_ret
    });
    define_func_ver!((vm) get_iref_v1 (entry: blk_entry) {blk_entry});

    vm
}
#[test]
fn test_struct() {
VM::start_logging_trace();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment