To protect your data, the CISO has suggested that users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

Commit dba7a91a authored by qinsoon's avatar qinsoon
Browse files

[wip] refactor: remove is_int_reg()/is_fp_reg() from Value, always check

with RegGroup::get_from_value
parent 884a5109
...@@ -703,15 +703,16 @@ impl Value { ...@@ -703,15 +703,16 @@ impl Value {
} }
} }
pub fn is_int_reg(&self) -> bool { pub fn is_reg(&self) -> bool {
match self.v { match self.v {
Value_::SSAVar(_) => { Value_::SSAVar(_) => true,
if is_scalar(&self.ty) && !is_fp(&self.ty) { _ => false
true
} else {
false
} }
} }
pub fn is_const(&self) -> bool {
match self.v {
Value_::Constant(_) => true,
_ => false _ => false
} }
} }
...@@ -724,21 +725,6 @@ impl Value { ...@@ -724,21 +725,6 @@ impl Value {
}) })
} }
pub fn is_fp_reg(&self) -> bool {
match self.v {
Value_::SSAVar(_) => {
if is_scalar(&self.ty) && is_fp(&self.ty) {
true
} else {
false
}
},
Value_::Constant(Constant::Double(_)) => true,
Value_::Constant(Constant::Float(_)) => true,
_ => false
}
}
pub fn is_int_const(&self) -> bool { pub fn is_int_const(&self) -> bool {
match self.v { match self.v {
Value_::Constant(Constant::Int(_)) => true, Value_::Constant(Constant::Int(_)) => true,
......
...@@ -16,6 +16,7 @@ use runtime::entrypoints::RuntimeEntrypoint; ...@@ -16,6 +16,7 @@ use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass; use compiler::CompilerPass;
use compiler::backend; use compiler::backend;
use compiler::backend::RegGroup;
use compiler::backend::PROLOGUE_BLOCK_NAME; use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64; use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator; use compiler::backend::x86_64::CodeGenerator;
...@@ -601,7 +602,7 @@ impl <'a> InstructionSelection { ...@@ -601,7 +602,7 @@ impl <'a> InstructionSelection {
let mut status_value_index = 1; let mut status_value_index = 1;
// status flags only works with int operations // status flags only works with int operations
if values[0].is_int_reg() { if RegGroup::get_from_value(&values[0]) == RegGroup::GPR {
// negative flag // negative flag
if status.flag_n { if status.flag_n {
let tmp_status = values[status_value_index].clone(); let tmp_status = values[status_value_index].clone();
...@@ -2206,7 +2207,7 @@ impl <'a> InstructionSelection { ...@@ -2206,7 +2207,7 @@ impl <'a> InstructionSelection {
// new block (no livein) // new block (no livein)
self.current_block = Some(slowpath.clone()); self.current_block = Some(slowpath.clone());
self.backend.start_block(slowpath.clone()); self.backend.start_block(slowpath.clone());
if size.is_int_reg() { if RegGroup::get_from_value(&size) == RegGroup::GPR {
self.backend.set_block_livein(slowpath.clone(), &vec![size.clone()]); self.backend.set_block_livein(slowpath.clone(), &vec![size.clone()]);
} }
...@@ -2466,7 +2467,9 @@ impl <'a> InstructionSelection { ...@@ -2466,7 +2467,9 @@ impl <'a> InstructionSelection {
let mut fpr_arg_count = 0; let mut fpr_arg_count = 0;
for arg in args.iter() { for arg in args.iter() {
if arg.is_int_reg() { let arg_reg_group = RegGroup::get_from_value(&arg);
if arg_reg_group == RegGroup::GPR && arg.is_reg() {
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() { if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
let arg_gpr = { let arg_gpr = {
let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count]; let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count];
...@@ -2480,7 +2483,7 @@ impl <'a> InstructionSelection { ...@@ -2480,7 +2483,7 @@ impl <'a> InstructionSelection {
// use stack to pass argument // use stack to pass argument
stack_args.push(arg.clone()); stack_args.push(arg.clone());
} }
} else if arg.is_int_const() { } else if arg_reg_group == RegGroup::GPR && arg.is_const() {
let int_const = arg.extract_int_const(); let int_const = arg.extract_int_const();
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() { if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
...@@ -2501,9 +2504,7 @@ impl <'a> InstructionSelection { ...@@ -2501,9 +2504,7 @@ impl <'a> InstructionSelection {
// use stack to pass argument // use stack to pass argument
stack_args.push(arg.clone()); stack_args.push(arg.clone());
} }
} else if arg.is_mem() { } else if arg_reg_group == RegGroup::FPR && arg.is_reg() {
unimplemented!()
} else if arg.is_fp_reg() {
if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() { if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone(); let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone();
...@@ -2513,7 +2514,7 @@ impl <'a> InstructionSelection { ...@@ -2513,7 +2514,7 @@ impl <'a> InstructionSelection {
stack_args.push(arg.clone()); stack_args.push(arg.clone());
} }
} else { } else {
// struct, etc // fp const, struct, etc
unimplemented!() unimplemented!()
} }
} }
...@@ -2601,7 +2602,7 @@ impl <'a> InstructionSelection { ...@@ -2601,7 +2602,7 @@ impl <'a> InstructionSelection {
} }
}; };
if ret_val.is_int_reg() { if RegGroup::get_from_value(&ret_val) == RegGroup::GPR && ret_val.is_reg() {
if gpr_ret_count < x86_64::RETURN_GPRs.len() { if gpr_ret_count < x86_64::RETURN_GPRs.len() {
let ret_gpr = { let ret_gpr = {
let ref reg64 = x86_64::RETURN_GPRs[gpr_ret_count]; let ref reg64 = x86_64::RETURN_GPRs[gpr_ret_count];
...@@ -2615,7 +2616,7 @@ impl <'a> InstructionSelection { ...@@ -2615,7 +2616,7 @@ impl <'a> InstructionSelection {
// get return value by stack // get return value by stack
unimplemented!() unimplemented!()
} }
} else if ret_val.is_fp_reg() { } else if RegGroup::get_from_value(&ret_val) == RegGroup::FPR && ret_val.is_reg() {
// floating point register // floating point register
if fpr_ret_count < x86_64::RETURN_FPRs.len() { if fpr_ret_count < x86_64::RETURN_FPRs.len() {
let ref ret_fpr = x86_64::RETURN_FPRs[fpr_ret_count]; let ref ret_fpr = x86_64::RETURN_FPRs[fpr_ret_count];
...@@ -2959,7 +2960,7 @@ impl <'a> InstructionSelection { ...@@ -2959,7 +2960,7 @@ impl <'a> InstructionSelection {
let mut arg_by_stack = vec![]; let mut arg_by_stack = vec![];
for arg in args { for arg in args {
if arg.is_int_reg() { if RegGroup::get_from_value(&arg) == RegGroup::GPR && arg.is_reg() {
if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() { if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
let arg_gpr = { let arg_gpr = {
let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count]; let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count];
...@@ -2982,7 +2983,7 @@ impl <'a> InstructionSelection { ...@@ -2982,7 +2983,7 @@ impl <'a> InstructionSelection {
// let arg_size = vm.get_backend_type_info(arg.ty.id()).size; // let arg_size = vm.get_backend_type_info(arg.ty.id()).size;
// stack_arg_offset += arg_size as i32; // stack_arg_offset += arg_size as i32;
} }
} else if arg.is_fp_reg() { } else if RegGroup::get_from_value(&arg) == RegGroup::FPR && arg.is_reg() {
if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() { if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone(); let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone();
...@@ -3210,7 +3211,7 @@ impl <'a> InstructionSelection { ...@@ -3210,7 +3211,7 @@ impl <'a> InstructionSelection {
let ref value = inst.value.as_ref().unwrap()[0]; let ref value = inst.value.as_ref().unwrap()[0];
if value.is_int_reg() { if RegGroup::get_from_value(&value) == RegGroup::GPR && value.is_reg() {
true true
} else { } else {
false false
...@@ -3221,7 +3222,7 @@ impl <'a> InstructionSelection { ...@@ -3221,7 +3222,7 @@ impl <'a> InstructionSelection {
} }
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
pv.is_int_reg() || pv.is_int_const() RegGroup::get_from_value(&pv) == RegGroup::GPR
} }
} }
} }
...@@ -3236,7 +3237,7 @@ impl <'a> InstructionSelection { ...@@ -3236,7 +3237,7 @@ impl <'a> InstructionSelection {
let ref value = inst.value.as_ref().unwrap()[0]; let ref value = inst.value.as_ref().unwrap()[0];
if value.is_fp_reg() { if RegGroup::get_from_value(&value) == RegGroup::FPR {
true true
} else { } else {
false false
...@@ -3247,7 +3248,7 @@ impl <'a> InstructionSelection { ...@@ -3247,7 +3248,7 @@ impl <'a> InstructionSelection {
} }
TreeNode_::Value(ref pv) => { TreeNode_::Value(ref pv) => {
pv.is_fp_reg() RegGroup::get_from_value(pv) == RegGroup::FPR
} }
} }
} }
...@@ -3875,10 +3876,10 @@ impl <'a> InstructionSelection { ...@@ -3875,10 +3876,10 @@ impl <'a> InstructionSelection {
fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) { fn emit_move_node_to_value(&mut self, dest: &P<Value>, src: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) {
let ref dst_ty = dest.ty; let ref dst_ty = dest.ty;
if !types::is_fp(dst_ty) && types::is_scalar(dst_ty) { if RegGroup::get_from_ty(&dst_ty) == RegGroup::GPR {
if self.match_iimm(src) { if self.match_iimm(src) {
let (src_imm, src_len) = self.node_iimm_to_i32_with_len(src); let (src_imm, src_len) = self.node_iimm_to_i32_with_len(src);
if dest.is_int_reg() { if RegGroup::get_from_value(&dest) == RegGroup::GPR && dest.is_reg() {
self.backend.emit_mov_r_imm(dest, src_imm); self.backend.emit_mov_r_imm(dest, src_imm);
} else if dest.is_mem() { } else if dest.is_mem() {
self.backend.emit_mov_mem_imm(dest, src_imm, src_len); self.backend.emit_mov_mem_imm(dest, src_imm, src_len);
...@@ -3891,7 +3892,7 @@ impl <'a> InstructionSelection { ...@@ -3891,7 +3892,7 @@ impl <'a> InstructionSelection {
} else { } else {
panic!("expected src: {}", src); panic!("expected src: {}", src);
} }
} else if types::is_fp(dst_ty) && types::is_scalar(dst_ty) { } else if RegGroup::get_from_ty(&dst_ty) == RegGroup::FPR {
if self.match_fpreg(src) { if self.match_fpreg(src) {
let src_reg = self.emit_fpreg(src, f_content, f_context, vm); let src_reg = self.emit_fpreg(src, f_content, f_context, vm);
self.emit_move_value_to_value(dest, &src_reg) self.emit_move_value_to_value(dest, &src_reg)
...@@ -3911,43 +3912,45 @@ impl <'a> InstructionSelection { ...@@ -3911,43 +3912,45 @@ impl <'a> InstructionSelection {
debug!("source type: {}", src_ty); debug!("source type: {}", src_ty);
debug!("dest type: {}", dest.ty); debug!("dest type: {}", dest.ty);
if types::is_scalar(src_ty) && !types::is_fp(src_ty) { if RegGroup::get_from_ty(&src_ty) == RegGroup::GPR {
// gpr mov // gpr mov
if dest.is_int_reg() && src.is_int_reg() { if dest.is_reg() && src.is_reg() {
self.backend.emit_mov_r_r(dest, src); self.backend.emit_mov_r_r(dest, src);
} else if dest.is_int_reg() && src.is_mem() { } else if dest.is_reg() && src.is_mem() {
self.backend.emit_mov_r_mem(dest, src); self.backend.emit_mov_r_mem(dest, src);
} else if dest.is_int_reg() && src.is_int_const() { } else if dest.is_reg() && src.is_const() {
let imm = self.value_iimm_to_i32(src); let imm = self.value_iimm_to_i32(src);
self.backend.emit_mov_r_imm(dest, imm); self.backend.emit_mov_r_imm(dest, imm);
} else if dest.is_mem() && src.is_int_reg() { } else if dest.is_mem() && src.is_reg() {
self.backend.emit_mov_mem_r(dest, src); self.backend.emit_mov_mem_r(dest, src);
} else if dest.is_mem() && src.is_int_const() { } else if dest.is_mem() && src.is_const() {
let (imm, len) = self.value_iimm_to_i32_with_len(src); let (imm, len) = self.value_iimm_to_i32_with_len(src);
self.backend.emit_mov_mem_imm(dest, imm, len); self.backend.emit_mov_mem_imm(dest, imm, len);
} else { } else {
panic!("unexpected gpr mov between {} -> {}", src, dest); panic!("unexpected gpr mov between {} -> {}", src, dest);
} }
} else if types::is_scalar(src_ty) && types::is_fp(src_ty) { } else if RegGroup::get_from_ty(&src_ty) == RegGroup::GPREX {
unimplemented!()
} else if RegGroup::get_from_ty(&src_ty) == RegGroup::FPR {
// fpr mov // fpr mov
match src_ty.v { match src_ty.v {
MuType_::Double => { MuType_::Double => {
if dest.is_fp_reg() && src.is_fp_reg() { if dest.is_reg() && src.is_reg() {
self.backend.emit_movsd_f64_f64(dest, src); self.backend.emit_movsd_f64_f64(dest, src);
} else if dest.is_fp_reg() && src.is_mem() { } else if dest.is_reg() && src.is_mem() {
self.backend.emit_movsd_f64_mem64(dest, src); self.backend.emit_movsd_f64_mem64(dest, src);
} else if dest.is_mem() && src.is_fp_reg() { } else if dest.is_mem() && src.is_reg() {
self.backend.emit_movsd_mem64_f64(dest, src); self.backend.emit_movsd_mem64_f64(dest, src);
} else { } else {
panic!("unexpected fpr mov between {} -> {}", src, dest); panic!("unexpected fpr mov between {} -> {}", src, dest);
} }
} }
MuType_::Float => { MuType_::Float => {
if dest.is_fp_reg() && src.is_fp_reg() { if dest.is_reg() && src.is_reg() {
self.backend.emit_movss_f32_f32(dest, src); self.backend.emit_movss_f32_f32(dest, src);
} else if dest.is_fp_reg() && src.is_mem() { } else if dest.is_reg() && src.is_mem() {
self.backend.emit_movss_f32_mem32(dest, src); self.backend.emit_movss_f32_mem32(dest, src);
} else if dest.is_mem() && src.is_fp_reg() { } else if dest.is_mem() && src.is_reg() {
self.backend.emit_movss_mem32_f32(dest, src); self.backend.emit_movss_mem32_f32(dest, src);
} else { } else {
panic!("unexpected fpr mov between {} -> {}", src, dest); panic!("unexpected fpr mov between {} -> {}", src, dest);
......
...@@ -426,6 +426,7 @@ pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) { ...@@ -426,6 +426,7 @@ pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
pub fn number_of_regs_in_group(group: RegGroup) -> usize { pub fn number_of_regs_in_group(group: RegGroup) -> usize {
match group { match group {
RegGroup::GPR => ALL_GPRs.len(), RegGroup::GPR => ALL_GPRs.len(),
RegGroup::GPREX => ALL_GPRs.len(),
RegGroup::FPR => ALL_FPRs.len() RegGroup::FPR => ALL_FPRs.len()
} }
} }
...@@ -444,13 +445,7 @@ pub fn all_usable_regs() -> &'static Vec<P<Value>> { ...@@ -444,13 +445,7 @@ pub fn all_usable_regs() -> &'static Vec<P<Value>> {
pub fn pick_group_for_reg(reg_id: MuID) -> RegGroup { pub fn pick_group_for_reg(reg_id: MuID) -> RegGroup {
let reg = all_regs().get(&reg_id).unwrap(); let reg = all_regs().get(&reg_id).unwrap();
if reg.is_int_reg() { RegGroup::get_from_value(reg)
RegGroup::GPR
} else if reg.is_fp_reg() {
RegGroup::FPR
} else {
panic!("expect a machine reg to be either a GPR or a FPR: {}", reg)
}
} }
pub fn is_callee_saved(reg_id: MuID) -> bool { pub fn is_callee_saved(reg_id: MuID) -> bool {
......
...@@ -315,10 +315,10 @@ impl fmt::Display for BackendTypeInfo { ...@@ -315,10 +315,10 @@ impl fmt::Display for BackendTypeInfo {
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum RegGroup {GPR, FPR} pub enum RegGroup {GPR, GPREX, FPR}
impl RegGroup { impl RegGroup {
pub fn get(ty: &P<MuType>) -> RegGroup { pub fn get_from_ty(ty: &P<MuType>) -> RegGroup {
match ty.v { match ty.v {
// for now, only use 64bits registers // for now, only use 64bits registers
MuType_::Int(len) if len == 1 => RegGroup::GPR, MuType_::Int(len) if len == 1 => RegGroup::GPR,
...@@ -326,6 +326,7 @@ impl RegGroup { ...@@ -326,6 +326,7 @@ impl RegGroup {
MuType_::Int(len) if len == 16 => RegGroup::GPR, MuType_::Int(len) if len == 16 => RegGroup::GPR,
MuType_::Int(len) if len == 32 => RegGroup::GPR, MuType_::Int(len) if len == 32 => RegGroup::GPR,
MuType_::Int(len) if len == 64 => RegGroup::GPR, MuType_::Int(len) if len == 64 => RegGroup::GPR,
MuType_::Int(len) if len == 128=> RegGroup::GPREX,
MuType_::Ref(_) MuType_::Ref(_)
| MuType_::IRef(_) | MuType_::IRef(_)
...@@ -343,4 +344,8 @@ impl RegGroup { ...@@ -343,4 +344,8 @@ impl RegGroup {
_ => unimplemented!() _ => unimplemented!()
} }
} }
pub fn get_from_value(val: &P<Value>) -> RegGroup {
RegGroup::get_from_ty(&val.ty)
}
} }
...@@ -43,7 +43,7 @@ impl InterferenceGraph { ...@@ -43,7 +43,7 @@ impl InterferenceGraph {
let node = GraphNode { let node = GraphNode {
temp: reg_id, temp: reg_id,
color: None, color: None,
group: backend::RegGroup::get(entry.ty()), group: backend::RegGroup::get_from_ty(entry.ty()),
spill_cost: 0.0f32 spill_cost: 0.0f32
}; };
......
...@@ -120,6 +120,7 @@ impl MuStack { ...@@ -120,6 +120,7 @@ impl MuStack {
match reg_group { match reg_group {
RegGroup::GPR => gpr_used.push(word), RegGroup::GPR => gpr_used.push(word),
RegGroup::GPREX => unimplemented!(),
RegGroup::FPR => fpr_used.push(word), RegGroup::FPR => fpr_used.push(word),
} }
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment