Commit d1741cad authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents cea68f04 cf8d1126
Pipeline #51 failed in 9 minutes and 9 seconds
@@ -1790,6 +1790,81 @@ impl CodeGenerator for ASMCodeGen {
)
}
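// Variable-count shift on a register destination. x86-64 accepts a
// variable shift count only in CL, so RCX is listed as an extra use
// (presumably for liveness tracking) even though it never appears as a
// {} operand. The offset 4 + 1 + 3 + 1 is where the register operand
// starts in the asm string: "shlq" + ' ' + "%cl" + ','.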
fn emit_shl_r64_cl (&mut self, dest: &P<Value>) {
trace!("emit shl {}, CL -> {}", dest, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 3 + 1);
let rcx = self.prepare_machine_reg(&x86_64::RCX);
let asm = format!("shlq %cl,{}", reg1);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1],
rcx => vec![]
},
false
)
}
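// Memory-destination variant: RCX is merged into the use map returned
// by prepare_mem, and the trailing `true` (vs `false` above) appears to
// flag instructions that touch memory.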
fn emit_shl_mem64_cl (&mut self, dest: &P<Value>) {
trace!("emit shl {}, CL -> {}", dest, dest);
let (mem, mut uses) = self.prepare_mem(dest, 4 + 1 + 3 + 1);
let rcx = self.prepare_machine_reg(&x86_64::RCX);
if !uses.contains_key(&rcx) {
uses.insert(rcx, vec![]);
}
let asm = format!("shlq %cl,{}", mem);
self.add_asm_inst(
asm,
hashmap!{},
uses,
true
)
}
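// Immediate-count variant. The count is i8 because SHL encodes an 8-bit
// immediate; for 64-bit operands the hardware masks the count to the
// low 6 bits anyway.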
fn emit_shl_r64_imm8 (&mut self, dest: &P<Value>, src: i8) {
trace!("emit shl {},{} -> {}", dest, src, dest);
let (reg1, id1, loc1) = self.prepare_reg(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("shlq ${},{}", src, reg1);
self.add_asm_inst(
asm,
hashmap!{
id1 => vec![loc1.clone()]
},
hashmap!{
id1 => vec![loc1]
},
false
)
}
fn emit_shl_mem64_imm8(&mut self, dest: &P<Value>, src: i8) {
trace!("emit shl {},{} -> {}", dest, src, dest);
let (mem, mut uses) = self.prepare_mem(dest, 4 + 1 + 1 + src.to_string().len() + 1);
let asm = format!("shlq ${},{}", src, mem);
self.add_asm_inst(
asm,
hashmap!{},
uses,
true
)
}
fn emit_cqo(&mut self) {
trace!("emit: cqo rax -> rdx:rax");
@@ -1944,7 +2019,7 @@ impl CodeGenerator for ASMCodeGen {
let rsp = self.prepare_machine_reg(&x86_64::RSP);
let asm = format!("pushq {}", src);
let asm = format!("pushq ${}", src);
self.add_asm_inst(
asm,
......
@@ -64,6 +64,11 @@ pub trait CodeGenerator {
fn emit_idiv_r64 (&mut self, src: &P<Value>);
fn emit_idiv_mem64(&mut self, src: &P<Value>);
fn emit_shl_r64_cl (&mut self, dest: &P<Value>);
fn emit_shl_mem64_cl (&mut self, dest: &P<Value>);
fn emit_shl_r64_imm8 (&mut self, dest: &P<Value>, src: i8);
fn emit_shl_mem64_imm8(&mut self, dest: &P<Value>, src: i8);
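// shift-left emitters: {register, memory} destination x {CL, imm8} count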
fn emit_cqo(&mut self);
fn emit_jmp(&mut self, dest: MuName);
......
@@ -195,7 +195,7 @@ impl <'a> InstructionSelection {
trace!("emit add-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
-let reg_op2 = self.emit_mem(&ops[op2]);
+let reg_op2 = self.emit_mem(&ops[op2], vm);
let res_tmp = self.get_result_value(node);
// mov op1, res
@@ -239,7 +239,7 @@ impl <'a> InstructionSelection {
trace!("emit sub-ireg-mem");
let reg_op1 = self.emit_ireg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2]);
let mem_op2 = self.emit_mem(&ops[op2], vm);
let res_tmp = self.get_result_value(node);
// mov op1, res
@@ -266,7 +266,7 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r64_imm32(&rax, imm_op1);
} else if self.match_mem(op1) {
-let mem_op1 = self.emit_mem(op1);
+let mem_op1 = self.emit_mem(op1, vm);
self.backend.emit_mov_r64_mem64(&rax, &mem_op1);
} else {
@@ -289,7 +289,7 @@ impl <'a> InstructionSelection {
self.backend.emit_mul_r64(&res_tmp);
} else if self.match_mem(op2) {
-let mem_op2 = self.emit_mem(op2);
+let mem_op2 = self.emit_mem(op2, vm);
self.backend.emit_mul_mem64(&mem_op2);
} else {
@@ -341,6 +341,42 @@ impl <'a> InstructionSelection {
self.backend.emit_mov_r64_r64(&res_tmp, &x86_64::RDX);
},
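// Shl lowering: a register count is first moved into RCX (x86-64 only
// takes variable shift counts in CL); an immediate count is encoded
// directly. Either way the operand is shifted in place and then moved
// into the result temporary.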
op::BinOp::Shl => {
let op1 = &ops[op1];
let op2 = &ops[op2];
if self.match_ireg(op1) {
let tmp_op1 = self.emit_ireg(op1, f_content, f_context, vm);
if self.match_ireg(op2) {
let tmp_op2 = self.emit_ireg(op2, f_content, f_context, vm);
// mov op2 -> rcx
self.backend.emit_mov_r64_r64(&x86_64::RCX, &tmp_op2);
// shl op1, cl -> op1
self.backend.emit_shl_r64_cl(&tmp_op1);
// mov op1 -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &tmp_op1);
} else if self.match_iimm(op2) {
let imm_op2 = self.node_iimm_to_i32(op2) as i8;
// shl op1, op2 -> op1
self.backend.emit_shl_r64_imm8(&tmp_op1, imm_op2);
// mov op1 -> result
let res_tmp = self.get_result_value(node);
self.backend.emit_mov_r64_r64(&res_tmp, &tmp_op1);
} else {
panic!("unexpected op2 (not ireg not iimm): {}", op2);
}
} else if self.match_mem(op1) {
unimplemented!()
} else {
panic!("unexpected op1 (not ireg not mem): {}", op1);
}
}
// floating point
op::BinOp::FAdd => {
if self.match_fpreg(&ops[op1]) && self.match_fpreg(&ops[op2]) {
@@ -358,7 +394,7 @@ impl <'a> InstructionSelection {
trace!("emit add-fpreg-mem");
let reg_op1 = self.emit_fpreg(&ops[op1], f_content, f_context, vm);
let mem_op2 = self.emit_mem(&ops[op2]);
let mem_op2 = self.emit_mem(&ops[op2], vm);
let res_tmp = self.get_result_value(node);
// mov op1, res
@@ -392,7 +428,7 @@ impl <'a> InstructionSelection {
_ => panic!("didnt expect order {:?} with store inst", order)
}
-let resolved_loc = self.node_mem_to_value(loc_op, vm);
+let resolved_loc = self.node_addr_to_value(loc_op, vm);
let res_temp = self.get_result_value(node);
if self.match_ireg(node) {
@@ -417,7 +453,7 @@ impl <'a> InstructionSelection {
}
};
-let resolved_loc = self.node_mem_to_value(loc_op, vm);
+let resolved_loc = self.node_addr_to_value(loc_op, vm);
if self.match_ireg(val_op) {
let val = self.emit_ireg(val_op, f_content, f_context, vm);
@@ -648,7 +684,7 @@ impl <'a> InstructionSelection {
self.backend.emit_div_r64(&reg_op2);
} else if self.match_mem(op2) {
-let mem_op2 = self.emit_mem(op2);
+let mem_op2 = self.emit_mem(op2, vm);
self.backend.emit_div_mem64(&mem_op2);
} else if self.match_iimm(op2) {
@@ -684,7 +720,7 @@ impl <'a> InstructionSelection {
self.backend.emit_idiv_r64(&reg_op2);
} else if self.match_mem(op2) {
-let mem_op2 = self.emit_mem(op2);
+let mem_op2 = self.emit_mem(op2, vm);
self.backend.emit_idiv_mem64(&mem_op2);
} else if self.match_iimm(op2) {
@@ -986,7 +1022,7 @@ impl <'a> InstructionSelection {
let callsite = self.new_callsite_label(Some(cur_node));
self.backend.emit_call_near_r64(callsite, &target)
} else if self.match_mem(func) {
-let target = self.emit_mem(func);
+let target = self.emit_mem(func, vm);
let callsite = self.new_callsite_label(Some(cur_node));
self.backend.emit_call_near_mem64(callsite, &target)
@@ -1335,7 +1371,7 @@ impl <'a> InstructionSelection {
}
}
-fn node_mem_to_value(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
+fn node_addr_to_value(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
match op.v {
TreeNode_::Value(ref pv) => {
match pv.v {
@@ -1423,7 +1459,7 @@ impl <'a> InstructionSelection {
}
#[allow(unused_variables)]
fn emit_mem(&mut self, op: &P<TreeNode>) -> P<Value> {
fn emit_mem(&mut self, op: &P<TreeNode>, vm: &VM) -> P<Value> {
unimplemented!()
}
......
@@ -173,5 +173,92 @@ fn sdiv() -> VM {
vm.define_func_version(func_ver);
vm
}
#[test]
fn test_shl() {
let lib = testutil::compile_fnc("shl", &shl);
unsafe {
let shl : libloading::Symbol<unsafe extern fn(u64, u8) -> u64> = lib.get(b"shl").unwrap();
let shl_1_2 = shl(1, 2);
println!("shl(1, 2) = {}", shl_1_2);
assert!(shl_1_2 == 4);
let shl_2_2 = shl(2, 2);
println!("shl(2, 2) = {}", shl_2_2);
assert!(shl_2_2 == 8);
}
}
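// Builds the Mu IR equivalent of `fn shl(a: u64, b: u8) -> u64 { a << b }`.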
fn shl() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let type_def_int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(type_def_int64.as_entity(), Mu("int64"));
// .typedef @int8 = int<8>
let type_def_int8 = vm.declare_type(vm.next_id(), MuType_::int(8));
vm.set_name(type_def_int8.as_entity(), Mu("int8"));
// .funcsig @shl_sig = (@int64 @int8) -> (@int64)
let shl_sig = vm.declare_func_sig(vm.next_id(), vec![type_def_int64.clone()], vec![type_def_int64.clone(), type_def_int8.clone()]);
vm.set_name(shl_sig.as_entity(), Mu("shl_sig"));
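// (judging by the signature comment above, declare_func_sig takes the
// return types first, then the parameter types)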
// .funcdecl @shl <@shl_sig>
let func_id = vm.next_id();
let func = MuFunction::new(func_id, shl_sig.clone());
vm.set_name(func.as_entity(), Mu("shl"));
vm.declare_func(func);
// .funcdef @shl VERSION @shl_v1 <@shl_sig>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, shl_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("shl_v1"));
// %entry(<@int64> %a, <@int8> %b):
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
let blk_entry_a = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let blk_entry_b = func_ver.new_ssa(vm.next_id(), type_def_int8.clone());
vm.set_name(blk_entry_b.as_entity(), Mu("blk_entry_b"));
// %r = SHL %a %b
let blk_entry_r = func_ver.new_ssa(vm.next_id(), type_def_int64.clone());
vm.set_name(blk_entry_r.as_entity(), Mu("blk_entry_r"));
let blk_entry_shl = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_r.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone(), blk_entry_b.clone()]),
v: Instruction_::BinOp(BinOp::Shl, 0, 1)
});
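// BinOp(Shl, 0, 1): the indices refer to positions in the `ops` vector
// above, i.e. %a and %b.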
// RET %r
let blk_entry_term = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_r.clone()]),
v: Instruction_::Return(vec![0])
});
blk_entry.content = Some(BlockContent{
args: vec![blk_entry_a.clone_value(), blk_entry_b.clone_value()],
exn_arg: None,
body: vec![blk_entry_shl, blk_entry_term],
keepalives: None
});
func_ver.define(FunctionContent{
entry: blk_entry.id(),
blocks: hashmap!{
blk_entry.id() => blk_entry
}
});
vm.define_func_version(func_ver);
vm
}
\ No newline at end of file