
Commit 8b156b16 authored by qinsoon

udiv (divide by zero not considered)

parent e606ea2e
@@ -1497,6 +1497,51 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_xor_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(src, 4 + 1);
let (reg2, id2, loc2) = self.prepare_reg(dest, 4 + 1 + reg1.len() + 1);
let asm = format!("xorq {},{}", reg1, reg2);
self.add_asm_inst(
asm,
hashmap!{
id2 => vec![loc2.clone()]
},
hashmap!{
id1 => vec![loc1.clone()],
id2 => vec![loc2.clone()]
},
false
)
}
fn emit_xor_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: xor {}, {} -> {}", src, dest, dest);
unimplemented!()
}
fn emit_xor_r64_imm32(&mut self, dest: &P<Value>, src: i32) {
trace!("emit: xor {}, {} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("xorq ${},{}", src, reg1);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1]
},
false
)
}
fn emit_add_r64_r64(&mut self, dest: &P<Value>, src: &P<Value>) {
trace!("emit: add {}, {} -> {}", dest, src, dest);
@@ -1640,6 +1685,58 @@ impl CodeGenerator for ASMCodeGen {
trace!("emit: mul rax, {} -> rax", src);
unimplemented!()
}
fn emit_div_r64 (&mut self, src: &P<Value>) {
trace!("emit: div rdx:rax, {} -> quotient: rax + remainder: rdx", src);
let rdx = self.prepare_machine_reg(&x86_64::RDX);
let rax = self.prepare_machine_reg(&x86_64::RAX);
let (reg, id, loc) = self.prepare_reg(src, 4 + 1);
let asm = format!("divq {}", reg);
self.add_asm_inst(
asm,
hashmap!{
rdx => vec![],
rax => vec![],
},
hashmap!{
id => vec![loc],
rdx => vec![],
rax => vec![]
},
false
)
}
fn emit_div_mem64(&mut self, src: &P<Value>) {
trace!("emit: div rdx:rax, {} -> quotient: rax + remainder: rdx", src);
let rdx = self.prepare_machine_reg(&x86_64::RDX);
let rax = self.prepare_machine_reg(&x86_64::RAX);
let (mem, mut uses) = self.prepare_mem(src, 4 + 1);
// merge the implicit rdx/rax uses into the memory operand's use map
if !uses.contains_key(&rdx) {
uses.insert(rdx, vec![]);
}
if !uses.contains_key(&rax) {
uses.insert(rax, vec![]);
}
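// unlike the register variant above, add_asm_inst is called with true as
// its final argument here; presumably this flags that the instruction uses
// a memory operand.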
let asm = format!("divq {}", mem);
self.add_asm_inst(
asm,
hashmap!{
rdx => vec![],
rax => vec![]
},
uses,
true
)
}
fn emit_jmp(&mut self, dest_name: MuName) {
trace!("emit: jmp {}", dest_name);
@@ -40,6 +40,10 @@ pub trait CodeGenerator {
fn emit_and_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_and_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_xor_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_xor_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_xor_r64_imm32(&mut self, dest: &P<Value>, src: i32);
fn emit_add_r64_r64 (&mut self, dest: &P<Value>, src: &P<Value>);
fn emit_add_r64_mem64(&mut self, dest: &P<Value>, src: &P<Value>);
@@ -54,6 +58,9 @@ pub trait CodeGenerator {
fn emit_mul_r64 (&mut self, src: &P<Value>);
fn emit_mul_mem64(&mut self, src: &P<Value>);
fn emit_div_r64 (&mut self, src: &P<Value>);
fn emit_div_mem64(&mut self, src: &P<Value>);
fn emit_jmp(&mut self, dest: MuName);
fn emit_je(&mut self, dest: MuName);
@@ -300,6 +300,38 @@ impl <'a> InstructionSelection {
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &rax);
},
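// Lowering sketch for `%r = UDIV %a %b` (unsigned 64-bit divide): x86-64
// divq divides the 128-bit value rdx:rax by its operand, leaving the
// quotient in rax and the remainder in rdx, so rdx must be zeroed first:
//   mov op1 -> rax       (low half of the dividend)
//   xor rdx, rdx         (clear the high half)
//   div op2              (rax = quotient, rdx = remainder)
//   mov rax -> result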
op::BinOp::Udiv => {
let op1 = &ops[op1];
let op2 = &ops[op2];
// mov op1 -> rax
let rax = x86_64::RAX.clone();
self.emit_move_value_to_value(&rax, &op1.clone_value(), f_content, f_context, vm);
// xorq rdx, rdx -> rdx
let rdx = x86_64::RDX.clone();
self.backend.emit_xor_r64_r64(&rdx, &rdx);
// div op2
if self.match_ireg(op2) {
    let reg_op2 = self.emit_ireg(op2, f_content, f_context, vm);
    self.backend.emit_div_r64(&reg_op2);
} else if self.match_mem(op2) {
let mem_op2 = self.emit_mem(op2);
self.backend.emit_div_mem64(&mem_op2);
} else if self.match_iimm(op2) {
    // divq cannot take an immediate operand; the divisor would first
    // have to be moved into a temporary register
    unimplemented!()
} else {
unimplemented!();
}
// mov rax -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &rax);
}
// floating point
op::BinOp::FAdd => {
@@ -407,7 +439,7 @@ impl <'a> InstructionSelection {
let hdr_size = mm::objectmodel::OBJECT_HEADER_SIZE;
if hdr_size == 0 {
- self.emit_general_move(&op, &res_tmp, f_content, f_context, vm);
+ self.emit_move_node_to_value(&res_tmp, &op, f_content, f_context, vm);
} else {
self.emit_lea_base_offset(&res_tmp, &op.clone_value(), hdr_size as i32, vm);
}
@@ -929,7 +961,7 @@ impl <'a> InstructionSelection {
let ref target_args = f_content.get_block(dest.target).content.as_ref().unwrap().args;
let ref target_arg = target_args[i];
- self.emit_general_move(&arg, target_arg, f_content, f_context, vm);
+ self.emit_move_node_to_value(target_arg, &arg, f_content, f_context, vm);
},
&DestArg::Freshbound(_) => unimplemented!()
}
@@ -1197,14 +1229,7 @@ impl <'a> InstructionSelection {
fn node_iimm_to_i32(&mut self, op: &P<TreeNode>) -> i32 {
match op.v {
- TreeNode_::Value(ref pv) => {
-     match pv.v {
-         Value_::Constant(Constant::Int(val)) => {
-             val as i32
-         },
-         _ => panic!("expected iimm")
-     }
- },
+ TreeNode_::Value(ref pv) => self.value_iimm_to_i32(pv),
_ => panic!("expected iimm")
}
}
@@ -1217,6 +1242,17 @@ impl <'a> InstructionSelection {
_ => panic!("expected iimm")
}
}
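// Extracts a constant integer as an i32 immediate; the debug_assert below
// guards that the constant is actually encodable as an x86 imm32 before
// the truncating cast.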
fn value_iimm_to_i32(&mut self, op: &P<Value>) -> i32 {
match op.v {
Value_::Constant(Constant::Int(val)) => {
debug_assert!(x86_64::is_valid_x86_imm(op));
val as i32
},
_ => panic!("expected iimm")
}
}
fn node_mem_to_value(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
match op.v {
@@ -1332,7 +1368,7 @@ impl <'a> InstructionSelection {
}
}
- fn emit_general_move(&mut self, src: &P<TreeNode>, dest: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
+ fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &P<TreeNode>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) {
@@ -1351,6 +1387,43 @@ impl <'a> InstructionSelection {
panic!("unexpected type for move");
}
}
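// Moves between two already-materialized values, dispatching on the
// (dest, src) kinds: register, memory, or immediate for scalar integers,
// and movsd between registers and memory for scalar floating point. Any
// other combination panics.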
fn emit_move_value_to_value(&mut self, dest: &P<Value>, src: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dest_ty = dest.ty;
let ref src_ty = src.ty;
if types::is_scalar(src_ty) && !types::is_fp(src_ty) {
// gpr mov
if dest.is_int_reg() && src.is_int_reg() {
self.backend.emit_mov_r64_r64(dest, src);
} else if dest.is_int_reg() && src.is_mem() {
self.backend.emit_mov_r64_mem64(dest, src);
} else if dest.is_int_reg() && src.is_int_const() {
let imm = self.value_iimm_to_i32(src);
self.backend.emit_mov_r64_imm32(dest, imm);
} else if dest.is_mem() && src.is_int_reg() {
self.backend.emit_mov_mem64_r64(dest, src);
} else if dest.is_mem() && src.is_int_const() {
let imm = self.value_iimm_to_i32(src);
self.backend.emit_mov_mem64_imm32(dest, imm);
} else {
panic!("unexpected gpr mov between {} -> {}", src, dest);
}
} else if types::is_scalar(src_ty) && types::is_fp(src_ty) {
// fpr mov
if dest.is_fp_reg() && src.is_fp_reg() {
self.backend.emit_movsd_f64_f64(dest, src);
} else if dest.is_fp_reg() && src.is_mem() {
self.backend.emit_movsd_f64_mem64(dest, src);
} else if dest.is_mem() && src.is_fp_reg() {
self.backend.emit_movsd_mem64_f64(dest, src);
} else {
panic!("unexpected fpr mov between {} -> {}", src, dest);
}
} else {
panic!("unexpected mov of type {}", src_ty)
}
}
fn emit_landingpad(&mut self, exception_arg: &P<Value>, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
// get thread local and add offset to get exception_obj
@@ -7,4 +7,5 @@ mod test_alloc;
mod test_exception;
mod test_thread;
mod test_floatingpoint;
-mod test_int;
\ No newline at end of file
+mod test_int;
+mod test_binop;
\ No newline at end of file
extern crate mu;
extern crate log;
extern crate simple_logger;
extern crate libloading;
use self::mu::ast::types::*;
use self::mu::ast::ir::*;
use self::mu::ast::inst::*;
use self::mu::ast::op::*;
use self::mu::vm::*;
use self::mu::testutil;
use std::sync::RwLock;
use std::sync::Arc;
use mu::testutil::aot;
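// The test compiles the Mu function built by udiv() below into a shared
// library via testutil::compile_fnc, loads the "udiv" symbol with
// libloading, and calls it directly.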
#[test]
fn test_udiv() {
let lib = testutil::compile_fnc("udiv", &udiv);
unsafe {
let udiv : libloading::Symbol<unsafe extern fn(u64, u64) -> u64> = lib.get(b"udiv").unwrap();
let udiv_8_2 = udiv(8, 2);
println!("udiv(8, 2) = {}", udiv_8_2);
assert_eq!(udiv_8_2, 4);
}
}
fn udiv() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(type_def_int64.as_entity(), Mu("int64"));
// .funcsig @udiv_sig = (@int64 @int64) -> (@int64)
let udiv_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_int64.clone()], vec![type_def_int64.clone(), type_def_int64.clone()]);
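// note: declare_func_sig takes the return types first and the argument
// types second, which is how the one-return, two-argument signature in the
// comment above maps onto the two vectors.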
vm.set_name(udiv_sig.as_entity(), Mu("udiv_sig"));
// .funcdecl @udiv <@udiv_sig>
let func_id = vm.next_id();
let func = MuFunction::new(func_id, udiv_sig.clone());
vm.set_name(func.as_entity(), Mu("udiv"));
vm.declare_func(func);
// .funcdef @udiv VERSION @udiv_v1 <@udiv_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, udiv_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("udiv_v1"));
// %entry(<@int64> %a, <@int64> %b):
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
let blk_entry_a = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let blk_entry_b = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_entry_b.as_entity(), Mu("blk_entry_b"));
// %r = UDIV %a %b
let blk_entry_r = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_entry_r.as_entity(), Mu("blk_entry_r"));
let blk_entry_udiv = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_r.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone(), blk_entry_b.clone()]),
v: Instruction_::BinOp(BinOp::Udiv, 0, 1) // 0 and 1 index %a and %b in the ops vec
});
// RET %r
let blk_entry_term = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_r.clone()]),
v: Instruction_::Return(vec![0])
});
blk_entry.content = Some(BlockContent{
args: vec![blk_entry_a.clone_value(), blk_entry_b.clone_value()],
exn_arg: None,
body: vec![blk_entry_udiv, blk_entry_term],
keepalives: None
});
func_ver.define(FunctionContent{
entry: blk_entry.id(),
blocks: hashmap!{
blk_entry.id() => blk_entry
}
});
vm.define_func_version(func_ver);
vm
}
\ No newline at end of file