GitLab will be upgraded on June 2nd 2020 from 2.00 pm (AEDT) to 3.00 pm (AEDT) due to critical security patch availability. During the update, GitLab and Mattermost services will not be available. If you have any concerns about this, please talk to the local GitLab admin team.

Commit 4b0515a0 authored by qinsoon's avatar qinsoon

sdiv

parent a592b1d9
......@@ -1737,6 +1737,78 @@ impl CodeGenerator for ASMCodeGen {
true
)
}
/// Emits `idivq src` for a register operand: signed divide of rdx:rax by `src`,
/// leaving the quotient in rax and the remainder in rdx.
fn emit_idiv_r64 (&mut self, src: &P<Value>) {
    trace!("emit: idiv rdx:rax, {} -> quotient: rax + remainder: rdx", src);

    // idiv implicitly reads rdx:rax and writes rax (quotient) and rdx (remainder)
    let rdx = self.prepare_machine_reg(&x86_64::RDX);
    let rax = self.prepare_machine_reg(&x86_64::RAX);
    // Operand starts after "idivq " — 5 mnemonic chars + 1 space.
    // (Was 4 + 1, one byte short of the operand in the emitted string.)
    let (reg, id, loc) = self.prepare_reg(src, 5 + 1);

    let asm = format!("idivq {}", reg);

    self.add_asm_inst(
        asm,
        hashmap!{
            // defines: implicit result registers
            rdx => vec![],
            rax => vec![]
        },
        hashmap!{
            // uses: the explicit operand plus the implicit dividend in rdx:rax
            id => vec![loc],
            rdx => vec![],
            rax => vec![]
        },
        false
    )
}
/// Emits `idivq src` for a memory operand: signed divide of rdx:rax by the
/// 64-bit value at `src`, quotient in rax, remainder in rdx.
fn emit_idiv_mem64(&mut self, src: &P<Value>) {
    trace!("emit: idiv rdx:rax, {} -> quotient: rax + remainder: rdx", src);

    // idiv implicitly reads rdx:rax and writes rax (quotient) and rdx (remainder)
    let rdx = self.prepare_machine_reg(&x86_64::RDX);
    let rax = self.prepare_machine_reg(&x86_64::RAX);
    // Operand starts after "idivq " — 5 mnemonic chars + 1 space.
    // (Was 4 + 1, one byte short of the operand in the emitted string.)
    let (mem, mut uses) = self.prepare_mem(src, 5 + 1);

    // merge the implicit rdx:rax uses into the memory operand's use set,
    // without clobbering any location info prepare_mem already recorded
    if !uses.contains_key(&rdx) {
        uses.insert(rdx, vec![]);
    }
    if !uses.contains_key(&rax) {
        uses.insert(rax, vec![]);
    }

    let asm = format!("idivq {}", mem);

    self.add_asm_inst(
        asm,
        hashmap!{
            rdx => vec![],
            rax => vec![]
        },
        uses,
        true
    )
}
/// Emits `cqto` (AT&T spelling of CQO): sign-extends rax into rdx:rax,
/// preparing the 128-bit dividend for a following signed divide.
fn emit_cqo(&mut self) {
    trace!("emit: cqo rax -> rdx:rax");

    let rax = self.prepare_machine_reg(&x86_64::RAX);
    let rdx = self.prepare_machine_reg(&x86_64::RDX);

    // fixed mnemonic, no operands — no need for format! here (clippy: useless_format)
    let asm = "cqto".to_string();

    self.add_asm_inst(
        asm,
        hashmap!{
            // cqo writes only rdx (rax is read, not modified)
            rdx => vec![]
        },
        hashmap!{
            rax => vec![]
        },
        false
    )
}
fn emit_jmp(&mut self, dest_name: MuName) {
trace!("emit: jmp {}", dest_name);
......
......@@ -59,8 +59,12 @@ pub trait CodeGenerator {
fn emit_mul_r64 (&mut self, src: &P<Value>);
fn emit_mul_mem64(&mut self, src: &P<Value>);
fn emit_div_r64 (&mut self, src: &P<Value>);
fn emit_div_mem64(&mut self, src: &P<Value>);
fn emit_div_r64 (&mut self, src: &P<Value>);
fn emit_div_mem64 (&mut self, src: &P<Value>);
fn emit_idiv_r64 (&mut self, src: &P<Value>);
fn emit_idiv_mem64(&mut self, src: &P<Value>);
fn emit_cqo(&mut self);
fn emit_jmp(&mut self, dest: MuName);
fn emit_je(&mut self, dest: MuName);
......
......@@ -304,34 +304,22 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
// mov op1 -> rax
let rax = x86_64::RAX.clone();
self.emit_move_value_to_value(&rax, &op1.clone_value(), f_content, f_context, vm);
// xorq rdx, rdx -> rdx
let rdx = x86_64::RDX.clone();
self.backend.emit_xor_r64_r64(&rdx, &rdx);
// div op2
if self.match_ireg(op2) {
let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
self.emit_udiv(op1, op2, f_content, f_context, vm);
self.backend.emit_div_r64(&op2.clone_value());
} else if self.match_mem(op2) {
let mem_op2 = self.emit_mem(op2);
// mov rax -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &x86_64::RAX);
},
op::BinOp::Sdiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
self.backend.emit_div_mem64(&mem_op2);
} else if self.match_iimm(op2) {
// moving to a temp
unimplemented!()
} else {
unimplemented!();
}
self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rax -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &rax);
}
self.backend.emit_mov_r64_r64(&res_tmp, &x86_64::RAX);
},
// floating point
op::BinOp::FAdd => {
......@@ -618,6 +606,79 @@ impl <'a> InstructionSelection {
self.backend.emit_lea_r64(dest, &mem);
}
/// Emits the instruction sequence for an unsigned divide:
/// moves `op1` into rax, zeroes rdx, then issues `div` on `op2`.
/// Afterwards the quotient is in rax and the remainder in rdx.
fn emit_udiv (
    &mut self,
    op1: &P<TreeNode>, op2: &P<TreeNode>,
    f_content: &FunctionContent,
    f_context: &mut FunctionContext,
    vm: &VM)
{
    // mov op1 -> rax (low half of the 128-bit dividend)
    let rax = x86_64::RAX.clone();
    self.emit_move_value_to_value(&rax, &op1.clone_value(), f_content, f_context, vm);

    // xorq rdx, rdx — zero-extend the dividend into rdx:rax (unsigned divide)
    let rdx = x86_64::RDX.clone();
    self.backend.emit_xor_r64_r64(&rdx, &rdx);

    // div op2
    if self.match_ireg(op2) {
        // use the value produced by emit_ireg (it was previously computed
        // but unused, with op2.clone_value() passed instead)
        let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
        self.backend.emit_div_r64(&reg_op2);
    } else if self.match_mem(op2) {
        let mem_op2 = self.emit_mem(op2);
        self.backend.emit_div_mem64(&mem_op2);
    } else if self.match_iimm(op2) {
        let imm = self.node_iimm_to_i32(op2);
        // div cannot take an immediate operand; stage it in a temporary
        let temp = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
        self.backend.emit_mov_r64_imm32(&temp, imm);
        // div temp
        self.backend.emit_div_r64(&temp);
    } else {
        unimplemented!();
    }
}
/// Emits the instruction sequence for a signed divide:
/// moves `op1` into rax, sign-extends it into rdx:rax with cqo, then
/// issues `idiv` on `op2`. Quotient ends up in rax, remainder in rdx.
fn emit_idiv (
    &mut self,
    op1: &P<TreeNode>, op2: &P<TreeNode>,
    f_content: &FunctionContent,
    f_context: &mut FunctionContext,
    vm: &VM)
{
    // mov op1 -> rax (low half of the 128-bit dividend)
    let rax = x86_64::RAX.clone();
    self.emit_move_value_to_value(&rax, &op1.clone_value(), f_content, f_context, vm);

    // cqo: sign-extend rax into rdx:rax (signed divide needs this, not xor)
    self.backend.emit_cqo();

    // idiv op2
    if self.match_ireg(op2) {
        // use the value produced by emit_ireg (it was previously computed
        // but unused, with op2.clone_value() passed instead)
        let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
        self.backend.emit_idiv_r64(&reg_op2);
    } else if self.match_mem(op2) {
        let mem_op2 = self.emit_mem(op2);
        self.backend.emit_idiv_mem64(&mem_op2);
    } else if self.match_iimm(op2) {
        let imm = self.node_iimm_to_i32(op2);
        // idiv cannot take an immediate operand; stage it in a temporary.
        // NOTE(review): the temp is typed UINT64_TYPE even though this is a
        // signed divide — only the 64-bit width matters here, but confirm.
        let temp = self.make_temporary(f_context, UINT64_TYPE.clone(), vm);
        self.backend.emit_mov_r64_imm32(&temp, imm);
        // idiv temp
        self.backend.emit_idiv_r64(&temp);
    } else {
        unimplemented!();
    }
}
fn emit_get_threadlocal (
&mut self,
......
......@@ -11,8 +11,6 @@ use self::mu::vm::*;
use self::mu::testutil;
use std::sync::RwLock;
use std::sync::Arc;
use mu::testutil::aot;
#[test]
fn test_udiv() {
......@@ -91,5 +89,89 @@ fn udiv() -> VM {
vm.define_func_version(func_ver);
vm
}
#[test]
fn test_sdiv() {
    // Compile the sdiv function bundle and load the resulting shared library.
    let lib = testutil::compile_fnc("sdiv", &sdiv);

    unsafe {
        // Fetch the compiled symbol; signature: (i64, i64) -> i64.
        let sdiv : libloading::Symbol<unsafe extern fn(i64, i64) -> i64> = lib.get(b"sdiv").unwrap();

        let sdiv_8_2 = sdiv(8, 2);
        println!("sdiv(8, 2) = {}", sdiv_8_2);
        // assert_eq! prints both sides on failure, unlike assert!(a == b)
        assert_eq!(sdiv_8_2, 4);

        // negative divisor exercises the sign-extension (cqo) path
        let sdiv_8_m2 = sdiv(8, -2i64);
        println!("sdiv(8, -2) = {}", sdiv_8_m2);
        assert_eq!(sdiv_8_m2, -4i64);
    }
}
/// Builds a VM containing a single function `sdiv(a: int64, b: int64) -> int64`
/// whose body is one SDIV followed by a RET.
///
/// NOTE: `vm.next_id()` calls are order-sensitive (IDs are sequential), so the
/// declaration order below is kept stable deliberately.
fn sdiv() -> VM {
    let vm = VM::new();

    // .typedef @int64 = int<64>
    let ty_i64 = vm.declare_type(vm.next_id(), MuType_::int(64));
    vm.set_name(ty_i64.as_entity(), Mu("int64"));

    // .funcsig @sdiv_sig = (@int64 @int64) -> (@int64)
    let sig = vm.declare_func_sig(vm.next_id(), vec![ty_i64.clone()], vec![ty_i64.clone(), ty_i64.clone()]);
    vm.set_name(sig.as_entity(), Mu("sdiv_sig"));

    // .funcdecl @sdiv <@sdiv_sig>
    let id_func = vm.next_id();
    let func = MuFunction::new(id_func, sig.clone());
    vm.set_name(func.as_entity(), Mu("sdiv"));
    vm.declare_func(func);

    // .funcdef @sdiv VERSION @sdiv_v1 <@sdiv_sig>
    let mut fv = MuFunctionVersion::new(vm.next_id(), id_func, sig.clone());
    vm.set_name(fv.as_entity(), Mu("sdiv_v1"));

    // %entry(<@int64> %a, <@int64> %b):
    let mut entry = Block::new(vm.next_id());
    vm.set_name(entry.as_entity(), Mu("entry"));

    let ssa_a = fv.new_ssa(vm.next_id(), ty_i64.clone());
    vm.set_name(ssa_a.as_entity(), Mu("blk_entry_a"));
    let ssa_b = fv.new_ssa(vm.next_id(), ty_i64.clone());
    vm.set_name(ssa_b.as_entity(), Mu("blk_entry_b"));

    // %r = SDIV %a %b
    let ssa_r = fv.new_ssa(vm.next_id(), ty_i64.clone());
    vm.set_name(ssa_r.as_entity(), Mu("blk_entry_r"));

    let inst_div = fv.new_inst(Instruction{
        hdr: MuEntityHeader::unnamed(vm.next_id()),
        value: Some(vec![ssa_r.clone_value()]),
        ops: RwLock::new(vec![ssa_a.clone(), ssa_b.clone()]),
        v: Instruction_::BinOp(BinOp::Sdiv, 0, 1)
    });

    // RET %r
    let inst_ret = fv.new_inst(Instruction{
        hdr: MuEntityHeader::unnamed(vm.next_id()),
        value: None,
        ops: RwLock::new(vec![ssa_r.clone()]),
        v: Instruction_::Return(vec![0])
    });

    // assemble the entry block and install it as the function body
    entry.content = Some(BlockContent{
        args: vec![ssa_a.clone_value(), ssa_b.clone_value()],
        exn_arg: None,
        body: vec![inst_div, inst_ret],
        keepalives: None
    });

    fv.define(FunctionContent{
        entry: entry.id(),
        blocks: hashmap!{
            entry.id() => entry
        }
    });

    vm.define_func_version(fv);

    vm
}
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment