
Commit 8f2ff707 authored by John Zhang

Merge branch 'master' into jit-test

parents 4bfc9b22 d9758e2c
 #!/bin/sh
-RUST_BACKTRACE=1 RUST_TEST_THREADS=1 cargo test "$@"
+RUSTFLAGS=-Zincremental=target/incr-cache RUST_BACKTRACE=1 RUST_TEST_THREADS=1 cargo test "$@"
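Note: the added RUSTFLAGS setting opts the test build into rustc's incremental compilation (at the time a nightly-only -Z flag), caching intermediate artifacts under target/incr-cache so repeated test runs rebuild less.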
@@ -1376,27 +1376,70 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
 
-    fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
-        trace!("emit: add {}, {} -> {}", dest, src, dest);
+    fn emit_movsd_f64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: movsd {} -> {}", src, dest);
 
-        let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
-        let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
+        let (reg1, id1, loc1) = self.prepare_reg(src, 5 + 1);
+        let (reg2, id2, loc2) = self.prepare_reg(dest, 5 + 1 + reg1.len() + 1);
 
-        let asm = format!("addq {},{}", reg1, reg2);
+        let asm = format!("movsd {},{}", reg1, reg2);
 
         self.add_asm_inst(
             asm,
             hashmap!{
-                id2 => vec![loc2.clone()]
+                id2 => vec![loc2]
             },
             hashmap!{
-                id1 => vec![loc1],
-                id2 => vec![loc2]
+                id1 => vec![loc1]
             },
             false
         )
     }
 
+    // load
+    fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: movsd {} -> {}", src, dest);
+
+        let (mem, uses) = self.prepare_mem(src, 5 + 1);
+        let (reg, id2, loc2) = self.prepare_reg(dest, 5 + 1 + mem.len() + 1);
+
+        let asm = format!("movsd {},{}", mem, reg);
+
+        self.add_asm_inst(
+            asm,
+            hashmap!{
+                id2 => vec![loc2]
+            },
+            uses,
+            true
+        )
+    }
+
+    // store
+    fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: movsd {} -> {}", src, dest);
+
+        let (reg, id1, loc1) = self.prepare_reg(src, 5 + 1);
+        let (mem, mut uses) = self.prepare_mem(dest, 5 + 1 + reg.len() + 1);
+
+        // registers appearing in the memory operand already count as 'use';
+        // fold the source register's use into the same map
+        if uses.contains_key(&id1) {
+            uses.get_mut(&id1).unwrap().push(loc1);
+        } else {
+            uses.insert(id1, vec![loc1]);
+        }
+
+        let asm = format!("movsd {},{}", reg, mem);
+
+        self.add_asm_inst(
+            asm,
+            hashmap!{},
+            uses,
+            true
+        )
+    }
+
     fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
         trace!("emit: lea {} -> {}", src, dest);
@@ -1455,6 +1498,27 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
 
+    fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: add {}, {} -> {}", dest, src, dest);
+
+        let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
+        let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
+
+        let asm = format!("addq {},{}", reg1, reg2);
+
+        self.add_asm_inst(
+            asm,
+            hashmap!{
+                id2 => vec![loc2.clone()]
+            },
+            hashmap!{
+                id1 => vec![loc1],
+                id2 => vec![loc2]
+            },
+            false
+        )
+    }
+
     fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
         trace!("emit: add {}, {} -> {}", dest, src, dest);
         unimplemented!()
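Worth noting the def/use asymmetry between this emitter and the movsd one above: two-operand addq reads and writes its destination, so the destination appears in the uses map as well as the defines map, while a register-to-register movsd fully overwrites its destination and lists it only as a define. A standalone model of the two tables (ids and byte offsets are made up):

use std::collections::HashMap;

fn main() {
    let (id_src, loc_src) = (1u64, 6usize);   // src operand id and offset
    let (id_dst, loc_dst) = (2u64, 12usize);  // dest operand id and offset

    // movsd %a,%b : %b is fully overwritten -> define only
    let movsd_defines = HashMap::from([(id_dst, vec![loc_dst])]);
    let movsd_uses    = HashMap::from([(id_src, vec![loc_src])]);

    // addq %a,%b : %b = %b + %a -> %b is a use *and* a define
    let addq_defines = HashMap::from([(id_dst, vec![loc_dst])]);
    let addq_uses    = HashMap::from([(id_src, vec![loc_src]), (id_dst, vec![loc_dst])]);

    // a register allocator relies on this distinction: a pure define starts
    // a fresh live range, while a use-and-define keeps the old value live
    // into the instruction
    assert!(!movsd_uses.contains_key(&id_dst));
    assert!(addq_uses.contains_key(&id_dst));
    let _ = (movsd_defines, addq_defines);
}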
@@ -1479,6 +1543,32 @@ impl CodeGenerator for ASMCodeGen {
         )
     }
 
+    fn emit_addsd_f64_f64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: addsd {}, {} -> {}", dest, src, dest);
+
+        let (reg1, id1, loc1) = self.prepare_reg(src, 5 + 1);
+        let (reg2, id2, loc2) = self.prepare_reg(dest, 5 + 1 + reg1.len() + 1);
+
+        let asm = format!("addsd {},{}", reg1, reg2);
+
+        self.add_asm_inst(
+            asm,
+            hashmap!{
+                id2 => vec![loc2.clone()]
+            },
+            hashmap!{
+                id1 => vec![loc1],
+                id2 => vec![loc2]
+            },
+            false
+        )
+    }
+
+    fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
+        trace!("emit: addsd {}, {} -> {}", dest, src, dest);
+        unimplemented!()
+    }
+
     fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
         trace!("emit: sub {}, {} -> {}", dest, src, dest);
...
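The recurring 4 + 1 and 5 + 1 terms in the prepare_reg calls are byte offsets into the instruction string being built: mnemonic length ("addq" is 4 bytes, "movsd" and "addsd" are 5) plus one space, and the second operand additionally skips the first operand and a comma. A standalone check of that arithmetic (operand spellings are illustrative):

fn main() {
    let mnemonic = "addsd";
    let (reg1, reg2) = ("%xmm1", "%xmm2");

    let loc1 = mnemonic.len() + 1;    // the backend's 5 + 1
    let loc2 = loc1 + reg1.len() + 1; // the backend's 5 + 1 + reg1.len() + 1

    let asm = format!("{} {},{}", mnemonic, reg1, reg2);
    assert_eq!(&asm[loc1..loc1 + reg1.len()], reg1);
    assert_eq!(&asm[loc2..loc2 + reg2.len()], reg2);
    println!("{}: loc1={}, loc2={}", asm, loc1, loc2);
}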
@@ -26,26 +26,33 @@ pub trait CodeGenerator {
     fn emit_cmp_r64_imm32(&mut self, op1: &P<Value>, op2: i32);
     fn emit_cmp_r64_mem64(&mut self, op1: &P<Value>, op2: &P<Value>);
 
-    fn emit_mov_r64_imm32(&mut self, dest: &P<Value>, src: i32);
-    fn emit_mov_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
-    fn emit_mov_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
-    fn emit_mov_mem64_r64(&mut self, dest: &P<Value>, src: &P<Value>); // store
+    fn emit_mov_r64_imm32 (&mut self, dest: &P<Value>, src: i32);
+    fn emit_mov_r64_mem64 (&mut self, dest: &P<Value>, src: &P<Value>); // load
+    fn emit_mov_r64_r64   (&mut self, dest: &P<Value>, src: &P<Value>);
+    fn emit_mov_mem64_r64 (&mut self, dest: &P<Value>, src: &P<Value>); // store
     fn emit_mov_mem64_imm32(&mut self, dest: &P<Value>, src: i32);
 
+    fn emit_movsd_f64_f64  (&mut self, dest: &P<Value>, src: &P<Value>);
+    fn emit_movsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>); // load
+    fn emit_movsd_mem64_f64(&mut self, dest: &P<Value>, src: &P<Value>); // store
+
     fn emit_lea_r64(&mut self, dest: &P<Value>, src: &P<Value>);
 
     fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
-    fn emit_and_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
+    fn emit_and_r64_r64  (&mut self, dest: &P<Value>, src: &P<Value>);
 
-    fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
+    fn emit_add_r64_r64  (&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_add_r64_imm32(&mut self, dest: &P<Value>, src: i32);
 
+    fn emit_addsd_f64_f64  (&mut self, dest: &P<Value>, src: &P<Value>);
+    fn emit_addsd_f64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
+
     fn emit_sub_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_sub_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
     fn emit_sub_r64_imm32(&mut self, dest: &P<Value>, src: i32);
 
-    fn emit_mul_r64(&mut self, src: &P<Value>);
+    fn emit_mul_r64  (&mut self, src: &P<Value>);
     fn emit_mul_mem64(&mut self, src: &P<Value>);
 
     fn emit_jmp(&mut self, dest: MuName);
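A note on the naming scheme these declarations follow: emit_<op>_<dest>_<src>, so the operand kinds pick the emitter during instruction selection. A standalone sketch of that dispatch (the Operand enum here is hypothetical; the real backend inspects P<Value>):

// Hypothetical operand kinds; the real code inspects P<Value>.
enum Operand {
    FpReg,
    Mem64,
}

// Pick the emitter the naming scheme implies: emit_movsd_<dest>_<src>.
fn select_movsd(dest: &Operand, src: &Operand) -> &'static str {
    match (dest, src) {
        (Operand::FpReg, Operand::FpReg) => "emit_movsd_f64_f64",
        (Operand::FpReg, Operand::Mem64) => "emit_movsd_f64_mem64", // load
        (Operand::Mem64, Operand::FpReg) => "emit_movsd_mem64_f64", // store
        // x86-64 has no memory-to-memory movsd
        (Operand::Mem64, Operand::Mem64) => unreachable!(),
    }
}

fn main() {
    assert_eq!(select_movsd(&Operand::FpReg, &Operand::Mem64), "emit_movsd_f64_mem64");
    assert_eq!(select_movsd(&Operand::Mem64, &Operand::FpReg), "emit_movsd_mem64_f64");
}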
@@ -143,7 +143,9 @@ lazy_static!{
         XMM1.clone()
     ];
 
-    pub static ref ARGUMENT_FPRs : [P<Value>; 6] = [
+    pub static ref ARGUMENT_FPRs : [P<Value>; 8] = [
         XMM0.clone(),
         XMM1.clone(),
         XMM2.clone(),
         XMM3.clone(),
         XMM4.clone(),
...
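Growing ARGUMENT_FPRs from 6 to 8 entries lines up with the System V AMD64 calling convention: the first eight floating-point arguments are passed in XMM0 through XMM7, while only six GPRs carry integer arguments. A standalone sketch of the assignment rule:

// System V AMD64: first 8 FP args in XMM registers, the rest on the stack.
const ARGUMENT_FPRS: [&str; 8] = [
    "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
];

fn assign_fp_arg(index: usize) -> String {
    match ARGUMENT_FPRS.get(index) {
        Some(reg) => format!("register {}", reg),
        None => format!("stack slot {}", index - ARGUMENT_FPRS.len()),
    }
}

fn main() {
    // fp_add(f64, f64) below: both arguments arrive in registers
    assert_eq!(assign_fp_arg(0), "register xmm0");
    assert_eq!(assign_fp_arg(1), "register xmm1");
    assert_eq!(assign_fp_arg(8), "stack slot 0");
}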
@@ -2,6 +2,8 @@ extern crate mu;
 #[macro_use]
 extern crate log;
 extern crate simple_logger;
+#[macro_use]
+extern crate maplit;
 
 mod test_ir;
 mod test_compiler;
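Both the backend hunks above and the test below lean on maplit's hashmap! macro for the defines/uses tables, hence the new dependency. A minimal standalone usage, assuming maplit is listed in Cargo.toml:

#[macro_use]
extern crate maplit;

fn main() {
    // same shape as the defines/uses tables in the emitters above:
    // register id -> byte offsets of its occurrences in the asm string
    let uses = hashmap!{
        1u64 => vec![6usize],
        2u64 => vec![12usize]
    };
    assert_eq!(uses[&1], vec![6]);
    println!("{:?}", uses);
}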
@@ -6,3 +6,4 @@ mod test_compiler;
 mod test_alloc;
 mod test_exception;
 mod test_thread;
+mod test_floatingpoint;
\ No newline at end of file
extern crate mu;
extern crate log;
extern crate simple_logger;
extern crate libloading;

use self::mu::ast::types::*;
use self::mu::ast::ir::*;
use self::mu::ast::inst::*;
use self::mu::ast::op::*;
use self::mu::vm::*;
use self::mu::compiler::*;

use std::sync::RwLock;
use std::sync::Arc;

use aot;

#[test]
fn test_fp_add() {
    simple_logger::init_with_level(log::LogLevel::Trace).ok();

    let vm = Arc::new(fp_add());

    let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());

    let func_id = vm.id_of("fp_add");
    {
        let funcs = vm.funcs().read().unwrap();
        let func = funcs.get(&func_id).unwrap().read().unwrap();
        let func_vers = vm.func_vers().read().unwrap();
        let mut func_ver = func_vers.get(&func.cur_ver.unwrap()).unwrap().write().unwrap();

        compiler.compile(&mut func_ver);
    }

    backend::emit_context(&vm);

    let dylib = aot::link_dylib(vec![Mu("fp_add")], "libfp_add.dylib");

    let lib = libloading::Library::new(dylib.as_os_str()).unwrap();
    unsafe {
        let fp_add: libloading::Symbol<unsafe extern fn(f64, f64) -> f64> = lib.get(b"fp_add").unwrap();

        let fp_add_1_1 = fp_add(1f64, 1f64);
        println!("fp_add(1, 1) = {}", fp_add_1_1);
        assert!(fp_add_1_1 == 2f64);
    }
}

fn fp_add() -> VM {
    let vm = VM::new();

    // .typedef @double = double
    let type_def_double = vm.declare_type(vm.next_id(), MuType_::double());
    vm.set_name(type_def_double.as_entity(), Mu("double"));

    // .funcsig @fp_add_sig = (@double @double) -> (@double)
    let fp_add_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_double.clone()], vec![type_def_double.clone(), type_def_double.clone()]);
    vm.set_name(fp_add_sig.as_entity(), Mu("fp_add_sig"));

    // .funcdecl @fp_add <@fp_add_sig>
    let func_id = vm.next_id();
    let func = MuFunction::new(func_id, fp_add_sig.clone());
    vm.set_name(func.as_entity(), Mu("fp_add"));
    vm.declare_func(func);

    // .funcdef @fp_add VERSION @fp_add_v1 <@fp_add_sig>
    let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, fp_add_sig.clone());
    vm.set_name(func_ver.as_entity(), Mu("fp_add_v1"));

    // %entry(<@double> %a, <@double> %b):
    let mut blk_entry = Block::new(vm.next_id());
    vm.set_name(blk_entry.as_entity(), Mu("entry"));

    let blk_entry_a = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
    vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
    let blk_entry_b = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
    vm.set_name(blk_entry_b.as_entity(), Mu("blk_entry_b"));

    // %r = FADD %a %b
    let blk_entry_r = func_ver.new_ssa(vm.next_id(), type_def_double.clone());
    vm.set_name(blk_entry_r.as_entity(), Mu("blk_entry_r"));
    let blk_entry_add = func_ver.new_inst(Instruction{
        hdr: MuEntityHeader::unnamed(vm.next_id()),
        value: Some(vec![blk_entry_r.clone_value()]),
        ops: RwLock::new(vec![blk_entry_a.clone(), blk_entry_b.clone()]),
        v: Instruction_::BinOp(BinOp::FAdd, 0, 1)
    });

    // RET %r
    let blk_entry_term = func_ver.new_inst(Instruction{
        hdr: MuEntityHeader::unnamed(vm.next_id()),
        value: None,
        ops: RwLock::new(vec![blk_entry_r.clone()]),
        v: Instruction_::Return(vec![0])
    });

    blk_entry.content = Some(BlockContent{
        args: vec![blk_entry_a.clone_value(), blk_entry_b.clone_value()],
        exn_arg: None,
        body: vec![blk_entry_add, blk_entry_term],
        keepalives: None
    });

    func_ver.define(FunctionContent{
        entry: blk_entry.id(),
        blocks: hashmap!{
            blk_entry.id() => blk_entry
        }
    });

    vm.define_func_version(func_ver);

    vm
}
\ No newline at end of file
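For reference, the bundle that fp_add() builds corresponds to roughly the following Mu IR, reassembled here from the builder's own comments (syntax approximate). Note that declare_func_sig takes the return types first and the argument types second, which is why vec![double] precedes vec![double, double] even though the .funcsig comment reads (@double @double) -> (@double).

.typedef @double = double
.funcsig @fp_add_sig = (@double @double) -> (@double)
.funcdecl @fp_add <@fp_add_sig>
.funcdef @fp_add VERSION @fp_add_v1 <@fp_add_sig> {
    %entry(<@double> %a, <@double> %b):
        %r = FADD %a %b
        RET %r
}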