
Commit 615a1344 authored by qinsoon

[wip] clean up x64 backend

parent b7d7851a
Pipeline #536 passed with stages
in 55 minutes and 3 seconds
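The hunks below apply one mechanical cleanup across the aarch64 and x86_64 backends: register-set statics spelled with a trailing lowercase s (RETURN_GPRs, ALL_FPRs, ...) become fully upper-case (RETURN_GPRS, ALL_FPRS, ...), and mod.rs drops the lint allowance the old names required. A minimal sketch of the motivation, using illustrative names and plain strings rather than the project's P<Value> arrays:

    // Rust's non_upper_case_globals lint fires on mixed-case statics,
    // so the old spelling only compiles cleanly under an allow attribute.
    #[allow(non_upper_case_globals)]
    static RETURN_GPRs: [&str; 2] = ["x0", "x1"]; // old style: needs the allow

    static RETURN_GPRS: [&str; 2] = ["x0", "x1"]; // new style: lint-clean

    fn main() {
        assert_eq!(RETURN_GPRs, RETURN_GPRS);
    }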
@@ -1022,27 +1022,27 @@ impl ASMCodeGen {
 let (id, loc) = target.unwrap();
 uses.insert(id, vec![loc]);
 }
-// for reg in ARGUMENT_GPRs.iter() {
+// for reg in ARGUMENT_GPRS.iter() {
 //     uses.insert(reg.id(), vec![]);
 // }
-// for reg in ARGUMENT_FPRs.iter() {
+// for reg in ARGUMENT_FPRS.iter() {
 //     uses.insert(reg.id(), vec![]);
 // }
 // defines: return registers
 let mut defines: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
-for reg in RETURN_GPRs.iter() {
+for reg in RETURN_GPRS.iter() {
 defines.insert(reg.id(), vec![]);
 }
-for reg in RETURN_FPRs.iter() {
+for reg in RETURN_FPRS.iter() {
 defines.insert(reg.id(), vec![]);
 }
-for reg in CALLER_SAVED_GPRs.iter() {
+for reg in CALLER_SAVED_GPRS.iter() {
 if !defines.contains_key(&reg.id()) {
 defines.insert(reg.id(), vec![]);
 }
 }
-for reg in CALLER_SAVED_FPRs.iter() {
+for reg in CALLER_SAVED_FPRS.iter() {
 if !defines.contains_key(&reg.id()) {
 defines.insert(reg.id(), vec![]);
 }
......
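For context on the hunk above: when the assembler backend records a call instruction's effects, every return register and every caller-saved register is treated as defined (clobbered), so the register allocator will not keep a live value in any of them across the call. A toy sketch of the same bookkeeping, assuming u32 register IDs and std's HashMap in place of the project's MuID, ASMLocation and LinkedHashMap types:

    use std::collections::HashMap;

    // Hypothetical IDs standing in for RETURN_GPRS / CALLER_SAVED_GPRS.
    const RETURN_GPRS: [u32; 2] = [0, 2];
    const CALLER_SAVED_GPRS: [u32; 4] = [0, 1, 2, 3];

    // Each clobbered register becomes a "define" with no concrete
    // assembly location (the vec![] in the hunk above).
    fn call_defines() -> HashMap<u32, Vec<usize>> {
        let mut defines = HashMap::new();
        for reg in RETURN_GPRS.iter().chain(CALLER_SAVED_GPRS.iter()) {
            defines.entry(*reg).or_insert_with(Vec::new);
        }
        defines
    }

    fn main() {
        // ID 0 appears only once even though it is both a return register
        // and a caller-saved register, matching the contains_key guard.
        assert_eq!(call_defines().len(), 4);
    }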
@@ -2045,23 +2045,23 @@ impl <'a> InstructionSelection {
 match t.v {
 Vector(_, _) | Tagref64 => unimplemented!(),
 Float | Double =>
-vec![get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(&t, vm))],
+vec![get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(&t, vm))],
 Hybrid(_) => panic!("cant return a hybrid"),
 Struct(_) | Array(_, _) => {
 let hfa_n = hfa_length(t.clone());
 if hfa_n > 0 {
-let mut res = vec![get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(&t, vm)/hfa_n)];
+let mut res = vec![get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(&t, vm)/hfa_n)];
 for i in 1..hfa_n {
-res.push(get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(&t, vm)/hfa_n));
+res.push(get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(&t, vm)/hfa_n));
 }
 res
 } else if size <= 8 {
 // Return in a single GPR
-vec![get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(&t, vm))]
+vec![get_alias_for_length(RETURN_GPRS[0].id(), get_bit_size(&t, vm))]
 } else if size <= 16 {
 // Return in 2 GPRs
-vec![RETURN_GPRs[0].clone(), RETURN_GPRs[0].clone()]
+vec![RETURN_GPRS[0].clone(), RETURN_GPRS[0].clone()]
 } else {
 // Returned on the stack
@@ -2073,7 +2073,7 @@ impl <'a> InstructionSelection {
 // Integral or pointer type
 Int(_) | Ref(_) | IRef(_) | WeakRef(_) | UPtr(_) | ThreadRef | StackRef | FuncRef(_) | UFuncPtr(_) =>
 // can return in GPR
-vec![get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(&t, vm))]
+vec![get_alias_for_length(RETURN_GPRS[0].id(), get_bit_size(&t, vm))]
 }
 }
@@ -2083,19 +2083,19 @@ impl <'a> InstructionSelection {
 let size = round_up(vm.get_type_size(t.id()), 8);
 match t.v {
 Vector(_, _) | Tagref64 => unimplemented!(),
-Float | Double => get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(t, vm)),
+Float | Double => get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(t, vm)),
 Hybrid(_) => panic!("cant return a hybrid"),
 Struct(_) | Array(_, _) => {
 let hfa_n = hfa_length(t.clone());
 if hfa_n > 0 {
 // Return in a sequence of FPRs
-get_alias_for_length(RETURN_FPRs[0].id(), get_bit_size(t, vm)/hfa_n)
+get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(t, vm)/hfa_n)
 } else if size <= 8 {
 // Return in a single GPR
-get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(t, vm))
+get_alias_for_length(RETURN_GPRS[0].id(), get_bit_size(t, vm))
 } else if size <= 16 {
 // Return in 2 GPRs
-RETURN_GPRs[0].clone()
+RETURN_GPRS[0].clone()
 } else {
 // Return at the location pointed to by loc
 self.make_value_base_offset(&loc, 0, &t, vm)
@@ -2106,7 +2106,7 @@ impl <'a> InstructionSelection {
 // Integral or pointer type
 Int(_) | Ref(_) | IRef(_) | WeakRef(_) | UPtr(_) | ThreadRef | StackRef | FuncRef(_) | UFuncPtr(_) =>
 // can return in GPR
-get_alias_for_length(RETURN_GPRs[0].id(), get_bit_size(t, vm))
+get_alias_for_length(RETURN_GPRS[0].id(), get_bit_size(t, vm))
 }
 }
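The two hunks above compute AArch64 (AAPCS64) return-value locations. A condensed sketch of the classification they encode, with a plain enum standing in for the project's P<Value> machinery (size in bytes after rounding; hfa_n is the element count when the type is a homogeneous floating-point aggregate, 0 otherwise):

    #[derive(Debug, PartialEq)]
    enum RetLoc {
        Fprs,    // float/double or HFA: one FPR per element, starting at v0
        OneGpr,  // <= 8 bytes: x0
        TwoGprs, // <= 16 bytes: x0 and x1
        Stack,   // larger aggregates: returned through a memory location
    }

    fn classify_return(size: usize, hfa_n: usize) -> RetLoc {
        if hfa_n > 0 {
            RetLoc::Fprs
        } else if size <= 8 {
            RetLoc::OneGpr
        } else if size <= 16 {
            RetLoc::TwoGprs
        } else {
            RetLoc::Stack
        }
    }

    fn main() {
        assert_eq!(classify_return(24, 3), RetLoc::Fprs); // e.g. a struct of three f64s
        assert_eq!(classify_return(12, 0), RetLoc::TwoGprs);
    }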
// TODO: Thoroughly test this (compare with code generated by GCC with various different types???)
@@ -2151,7 +2151,7 @@ impl <'a> InstructionSelection {
 Vector(_, _) | Tagref64 => unimplemented!(),
 Float | Double => {
 if nsrn < 8 {
-locations.push(get_alias_for_length(ARGUMENT_FPRs[nsrn].id(), get_bit_size(&t, vm)));
+locations.push(get_alias_for_length(ARGUMENT_FPRS[nsrn].id(), get_bit_size(&t, vm)));
 nsrn += 1;
 } else {
 nsrn = 8;
@@ -2164,7 +2164,7 @@ impl <'a> InstructionSelection {
 if hfa_n > 0 {
 if nsrn + hfa_n <= 8 {
 // Note: the argument will occupy successive registers (one for each element)
-locations.push(get_alias_for_length(ARGUMENT_FPRs[nsrn].id(), get_bit_size(&t, vm)/hfa_n));
+locations.push(get_alias_for_length(ARGUMENT_FPRS[nsrn].id(), get_bit_size(&t, vm)/hfa_n));
 nsrn += hfa_n;
 } else {
 nsrn = 8;
@@ -2180,7 +2180,7 @@ impl <'a> InstructionSelection {
 // The struct should be packed, starting here
 // (note: this may result in multiple struct fields in the same register
 // or even floating points in a GPR)
-locations.push(ARGUMENT_GPRs[ngrn].clone());
+locations.push(ARGUMENT_GPRS[ngrn].clone());
 // How many GPRs are taken up by t
 ngrn += if size % 8 != 0 { size/8 + 1 } else { size/8 };
 } else {
@@ -2199,7 +2199,7 @@ impl <'a> InstructionSelection {
 ThreadRef | StackRef | FuncRef(_) | UFuncPtr(_) => {
 if size <= 8 {
 if ngrn < 8 {
-locations.push(get_alias_for_length(ARGUMENT_GPRs[ngrn].id(), get_bit_size(&t, vm)));
+locations.push(get_alias_for_length(ARGUMENT_GPRS[ngrn].id(), get_bit_size(&t, vm)));
 ngrn += 1;
 } else {
 nsaa = round_up(nsaa, round_up(align, 8));
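The hunk above follows the AAPCS64 argument-passing algorithm, which tracks three cursors: ngrn (next general-purpose register, x0-x7), nsrn (next SIMD/FP register, v0-v7), and nsaa (next stacked argument address). Each class of argument consumes its register file first, then falls back to the stack. A stripped-down sketch of the integer path, with a small enum in place of the project's location values:

    #[derive(Debug, PartialEq)]
    enum Slot {
        Gpr(usize),   // register index into x0..x7
        Stack(usize), // byte offset from the stacked-argument base
    }

    struct ArgAlloc {
        ngrn: usize, // next general-purpose register number
        nsaa: usize, // next stacked argument address (offset)
    }

    impl ArgAlloc {
        // Place one <= 8-byte integer argument, as the GPR branch above does.
        fn alloc_int(&mut self) -> Slot {
            if self.ngrn < 8 {
                self.ngrn += 1;
                Slot::Gpr(self.ngrn - 1)
            } else {
                let offset = self.nsaa;
                self.nsaa += 8; // stack slots stay 8-byte aligned
                Slot::Stack(offset)
            }
        }
    }

    fn main() {
        let mut a = ArgAlloc { ngrn: 7, nsaa: 0 };
        assert_eq!(a.alloc_int(), Slot::Gpr(7));   // last register argument
        assert_eq!(a.alloc_int(), Slot::Stack(0)); // ninth integer arg spills
    }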
@@ -2703,16 +2703,16 @@ impl <'a> InstructionSelection {
 }
 // push all callee-saved registers
-for i in 0..CALLEE_SAVED_FPRs.len() {
-let ref reg = CALLEE_SAVED_FPRs[i];
+for i in 0..CALLEE_SAVED_FPRS.len() {
+let ref reg = CALLEE_SAVED_FPRS[i];
 trace!("allocate frame slot for reg {}", reg);
 let loc = self.current_frame.as_mut().unwrap().alloc_slot_for_callee_saved_reg(reg.clone(), vm);
 let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
 self.backend.emit_str_callee_saved(&loc, &reg);
 }
-for i in 0..CALLEE_SAVED_GPRs.len() {
-let ref reg = CALLEE_SAVED_GPRs[i];
+for i in 0..CALLEE_SAVED_GPRS.len() {
+let ref reg = CALLEE_SAVED_GPRS[i];
 trace!("allocate frame slot for regs {}", reg);
 let loc = self.current_frame.as_mut().unwrap().alloc_slot_for_callee_saved_reg(reg.clone(), vm);
@@ -2779,15 +2779,15 @@ impl <'a> InstructionSelection {
 self.start_block(EPILOGUE_BLOCK_NAME.to_string(), &livein);
 // pop all callee-saved registers
-for i in (0..CALLEE_SAVED_GPRs.len()).rev() {
-let ref reg = CALLEE_SAVED_GPRs[i];
+for i in (0..CALLEE_SAVED_GPRS.len()).rev() {
+let ref reg = CALLEE_SAVED_GPRS[i];
 let reg_id = reg.extract_ssa_id().unwrap();
 let loc = self.current_frame.as_mut().unwrap().allocated.get(&reg_id).unwrap().make_memory_op(reg.ty.clone(), vm);
 let loc = emit_mem(self.backend.as_mut(), &loc, f_context, vm);
 self.backend.emit_ldr_callee_saved(reg, &loc);
 }
-for i in (0..CALLEE_SAVED_FPRs.len()).rev() {
-let ref reg = CALLEE_SAVED_FPRs[i];
+for i in (0..CALLEE_SAVED_FPRS.len()).rev() {
+let ref reg = CALLEE_SAVED_FPRS[i];
 let reg_id = reg.extract_ssa_id().unwrap();
 let loc = self.current_frame.as_mut().unwrap().allocated.get(&reg_id).unwrap().make_memory_op(reg.ty.clone(), vm);
......
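The prologue hunk above stores each callee-saved register to its own frame slot, and the epilogue hunk walks the same arrays with .rev() to reload them. With frame-allocated slots the order is not load-bearing, but the reverse walk mirrors the classic push/pop discipline, where it is. A toy illustration of that symmetry:

    fn main() {
        let callee_saved = ["x19", "x20", "x21"];
        let mut stack = Vec::new();
        // prologue: push in ascending order
        for i in 0..callee_saved.len() {
            stack.push(callee_saved[i]);
        }
        // epilogue: popping in reverse order restores each register's value
        for i in (0..callee_saved.len()).rev() {
            assert_eq!(stack.pop(), Some(callee_saved[i]));
        }
    }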
@@ -301,8 +301,8 @@ pub fn primitive_byte_size(ty : &P<MuType>) -> usize
 }
 lazy_static! {
-// Note: these are the same as the ARGUMENT_GPRs
-pub static ref RETURN_GPRs : [P<Value>; 8] = [
+// Note: these are the same as the ARGUMENT_GPRS
+pub static ref RETURN_GPRS : [P<Value>; 8] = [
 X0.clone(),
 X1.clone(),
 X2.clone(),
@@ -313,7 +313,7 @@ lazy_static! {
 X7.clone()
 ];
-pub static ref ARGUMENT_GPRs : [P<Value>; 8] = [
+pub static ref ARGUMENT_GPRS : [P<Value>; 8] = [
 X0.clone(),
 X1.clone(),
 X2.clone(),
@@ -324,7 +324,7 @@ lazy_static! {
 X7.clone()
 ];
-pub static ref CALLEE_SAVED_GPRs : [P<Value>; 10] = [
+pub static ref CALLEE_SAVED_GPRS : [P<Value>; 10] = [
 X19.clone(),
 X20.clone(),
 X21.clone(),
@@ -341,7 +341,7 @@ lazy_static! {
 //X30.clone() // Link Register
 ];
-pub static ref CALLER_SAVED_GPRs : [P<Value>; 18] = [
+pub static ref CALLER_SAVED_GPRS : [P<Value>; 18] = [
 X0.clone(),
 X1.clone(),
 X2.clone(),
@@ -363,7 +363,7 @@ lazy_static! {
 //X18.clone(), // Platform Register
 ];
-static ref ALL_GPRs : [P<Value>; 30] = [
+static ref ALL_GPRS : [P<Value>; 30] = [
 X0.clone(),
 X1.clone(),
 X2.clone(),
@@ -490,8 +490,8 @@ lazy_static! {
 }
 lazy_static!{
-// Same as ARGUMENT_FPRs
-pub static ref RETURN_FPRs : [P<Value>; 8] = [
+// Same as ARGUMENT_FPRS
+pub static ref RETURN_FPRS : [P<Value>; 8] = [
 D0.clone(),
 D1.clone(),
 D2.clone(),
@@ -502,7 +502,7 @@ lazy_static!{
 D7.clone()
 ];
-pub static ref ARGUMENT_FPRs : [P<Value>; 8] = [
+pub static ref ARGUMENT_FPRS : [P<Value>; 8] = [
 D0.clone(),
 D1.clone(),
 D2.clone(),
@@ -513,7 +513,7 @@ lazy_static!{
 D7.clone(),
 ];
-pub static ref CALLEE_SAVED_FPRs : [P<Value>; 8] = [
+pub static ref CALLEE_SAVED_FPRS : [P<Value>; 8] = [
 D8.clone(),
 D9.clone(),
 D10.clone(),
@@ -524,7 +524,7 @@ lazy_static!{
 D15.clone()
 ];
-pub static ref CALLER_SAVED_FPRs : [P<Value>; 24] = [
+pub static ref CALLER_SAVED_FPRS : [P<Value>; 24] = [
 D0.clone(),
 D1.clone(),
 D2.clone(),
@@ -552,7 +552,7 @@ lazy_static!{
 D31.clone()
 ];
-static ref ALL_FPRs : [P<Value>; 32] = [
+static ref ALL_FPRS : [P<Value>; 32] = [
 D0.clone(),
 D1.clone(),
 D2.clone(),
@@ -591,7 +591,7 @@ lazy_static!{
 }
 lazy_static! {
-pub static ref ALL_MACHINE_REGs : LinkedHashMap<MuID, P<Value>> = {
+pub static ref ALL_MACHINE_REGS : LinkedHashMap<MuID, P<Value>> = {
 let mut map = LinkedHashMap::new();
 for vec in GPR_ALIAS_TABLE.values() {
@@ -609,7 +609,7 @@ lazy_static! {
 map
 };
-pub static ref CALLEE_SAVED_REGs : [P<Value>; 18] = [
+pub static ref CALLEE_SAVED_REGS : [P<Value>; 18] = [
 X19.clone(),
 X20.clone(),
 X21.clone(),
@@ -637,7 +637,7 @@ lazy_static! {
 // put caller saved regs first (they impose no overhead if there is no call instruction)
-pub static ref ALL_USABLE_MACHINE_REGs : Vec<P<Value>> = vec![
+pub static ref ALL_USABLE_MACHINE_REGS : Vec<P<Value>> = vec![
 X19.clone(),
 X20.clone(),
 X21.clone(),
@@ -709,7 +709,7 @@ lazy_static! {
 }
 pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
-for reg in ALL_MACHINE_REGs.values() {
+for reg in ALL_MACHINE_REGS.values() {
 let reg_id = reg.extract_ssa_id().unwrap();
 let entry = SSAVarEntry::new(reg.clone());
@@ -719,21 +719,21 @@ pub fn init_machine_regs_for_func (func_context: &mut FunctionContext) {
 pub fn number_of_regs_in_group(group: RegGroup) -> usize {
 match group {
-RegGroup::GPR => ALL_GPRs.len(),
-RegGroup::FPR => ALL_FPRs.len()
+RegGroup::GPR => ALL_GPRS.len(),
+RegGroup::FPR => ALL_FPRS.len()
 }
 }
 pub fn number_of_all_regs() -> usize {
-ALL_MACHINE_REGs.len()
+ALL_MACHINE_REGS.len()
 }
 pub fn all_regs() -> &'static LinkedHashMap<MuID, P<Value>> {
-&ALL_MACHINE_REGs
+&ALL_MACHINE_REGS
 }
 pub fn all_usable_regs() -> &'static Vec<P<Value>> {
-&ALL_USABLE_MACHINE_REGs
+&ALL_USABLE_MACHINE_REGS
 }
 pub fn pick_group_for_reg(reg_id: MuID) -> RegGroup {
@@ -749,13 +749,13 @@ pub fn pick_group_for_reg(reg_id: MuID) -> RegGroup {
 pub fn is_callee_saved(reg_id: MuID) -> bool {
-for reg in CALLEE_SAVED_GPRs.iter() {
+for reg in CALLEE_SAVED_GPRS.iter() {
 if reg_id == reg.extract_ssa_id().unwrap() {
 return true;
 }
 }
-for reg in CALLEE_SAVED_FPRs.iter() {
+for reg in CALLEE_SAVED_FPRS.iter() {
 if reg_id == reg.extract_ssa_id().unwrap() {
 return true;
 }
......
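The statics renamed above live inside lazy_static! blocks because P<Value> (a reference-counted pointer) cannot be built in a const context; lazy_static constructs each array on first access, and the X0.clone()/D0.clone() elements only bump reference counts. A minimal sketch of the same pattern with Arc<String> standing in for P<Value>:

    // assumes lazy_static = "1" in Cargo.toml
    #[macro_use]
    extern crate lazy_static;

    use std::sync::Arc;

    lazy_static! {
        static ref X0: Arc<String> = Arc::new("x0".to_string());
        static ref X1: Arc<String> = Arc::new("x1".to_string());
        // built on first use, from cheap refcount clones of the registers
        static ref RETURN_GPRS: [Arc<String>; 2] = [X0.clone(), X1.clone()];
    }

    fn main() {
        assert_eq!(*RETURN_GPRS[0], "x0");
        assert_eq!(Arc::strong_count(&X0), 2); // X0 itself plus the array entry
    }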
@@ -1013,19 +1013,19 @@ impl ASMCodeGen {
 // defines
 let mut defines : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
 // return registers get defined
-for reg in x86_64::RETURN_GPRs.iter() {
+for reg in x86_64::RETURN_GPRS.iter() {
 defines.insert(reg.id(), vec![]);
 }
-for reg in x86_64::RETURN_FPRs.iter() {
+for reg in x86_64::RETURN_FPRS.iter() {
 defines.insert(reg.id(), vec![]);
 }
 // caller saved registers will be destroyed
-for reg in x86_64::CALLER_SAVED_GPRs.iter() {
+for reg in x86_64::CALLER_SAVED_GPRS.iter() {
 if !defines.contains_key(&reg.id()) {
 defines.insert(reg.id(), vec![]);
 }
 }
-for reg in x86_64::CALLER_SAVED_FPRs.iter() {
+for reg in x86_64::CALLER_SAVED_FPRS.iter() {
 if !defines.contains_key(&reg.id()) {
 defines.insert(reg.id(), vec![]);
 }
......
@@ -14,7 +14,6 @@ use runtime::entrypoints;
 use runtime::entrypoints::RuntimeEntrypoint;
 use compiler::CompilerPass;
 use compiler::backend;
-use compiler::backend::BackendType;
 use compiler::backend::RegGroup;
 use compiler::backend::PROLOGUE_BLOCK_NAME;
@@ -2907,9 +2906,9 @@ impl <'a> InstructionSelection {
 let arg_reg_group = RegGroup::get_from_value(&arg);
 if arg_reg_group == RegGroup::GPR && arg.is_reg() {
-if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
+if gpr_arg_count < x86_64::ARGUMENT_GPRS.len() {
 let arg_gpr = {
-let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count];
+let ref reg64 = x86_64::ARGUMENT_GPRS[gpr_arg_count];
 let expected_len = arg.ty.get_int_length().unwrap();
 x86_64::get_alias_for_length(reg64.id(), expected_len)
 };
@@ -2923,9 +2922,9 @@ impl <'a> InstructionSelection {
 } else if arg_reg_group == RegGroup::GPR && arg.is_const() {
 let int_const = arg.extract_int_const().unwrap();
-if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
+if gpr_arg_count < x86_64::ARGUMENT_GPRS.len() {
 let arg_gpr = {
-let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count];
+let ref reg64 = x86_64::ARGUMENT_GPRS[gpr_arg_count];
 let expected_len = arg.ty.get_int_length().unwrap();
 x86_64::get_alias_for_length(reg64.id(), expected_len)
 };
@@ -2942,8 +2941,8 @@ impl <'a> InstructionSelection {
 stack_args.push(arg.clone());
 }
 } else if arg_reg_group == RegGroup::FPR && arg.is_reg() {
-if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
-let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone();
+if fpr_arg_count < x86_64::ARGUMENT_FPRS.len() {
+let arg_fpr = x86_64::ARGUMENT_FPRS[fpr_arg_count].clone();
 self.emit_move_value_to_value(&arg_fpr, &arg);
 fpr_arg_count += 1;
@@ -3040,9 +3039,9 @@ impl <'a> InstructionSelection {
 };
 if RegGroup::get_from_value(&ret_val) == RegGroup::GPR && ret_val.is_reg() {
-if gpr_ret_count < x86_64::RETURN_GPRs.len() {
+if gpr_ret_count < x86_64::RETURN_GPRS.len() {
 let ret_gpr = {
-let ref reg64 = x86_64::RETURN_GPRs[gpr_ret_count];
+let ref reg64 = x86_64::RETURN_GPRS[gpr_ret_count];
 let expected_len = ret_val.ty.get_int_length().unwrap();
 x86_64::get_alias_for_length(reg64.id(), expected_len)
 };
@@ -3055,8 +3054,8 @@ impl <'a> InstructionSelection {
 }
 } else if RegGroup::get_from_value(&ret_val) == RegGroup::FPR && ret_val.is_reg() {
 // floating point register
-if fpr_ret_count < x86_64::RETURN_FPRs.len() {
-let ref ret_fpr = x86_64::RETURN_FPRs[fpr_ret_count];
+if fpr_ret_count < x86_64::RETURN_FPRS.len() {
+let ref ret_fpr = x86_64::RETURN_FPRS[fpr_ret_count];
 match ret_val.ty.v {
 MuType_::Double => self.backend.emit_movsd_f64_f64(&ret_val, &ret_fpr),
@@ -3384,8 +3383,8 @@ impl <'a> InstructionSelection {
 let frame = self.current_frame.as_mut().unwrap();
 let rbp = x86_64::RBP.extract_ssa_id().unwrap();
-for i in 0..x86_64::CALLEE_SAVED_GPRs.len() {
-let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
+for i in 0..x86_64::CALLEE_SAVED_GPRS.len() {
+let ref reg = x86_64::CALLEE_SAVED_GPRS[i];
 // not pushing rbp (as we have done that)
 if reg.extract_ssa_id().unwrap() != rbp {
 trace!("allocate frame slot for reg {}", reg);
@@ -3404,9 +3403,9 @@ impl <'a> InstructionSelection {
 for arg in args {
 if RegGroup::get_from_value(&arg) == RegGroup::GPR && arg.is_reg() {
-if gpr_arg_count < x86_64::ARGUMENT_GPRs.len() {
+if gpr_arg_count < x86_64::ARGUMENT_GPRS.len() {
 let arg_gpr = {
-let ref reg64 = x86_64::ARGUMENT_GPRs[gpr_arg_count];
+let ref reg64 = x86_64::ARGUMENT_GPRS[gpr_arg_count];
 let expected_len = arg.ty.get_int_length().unwrap();
 x86_64::get_alias_for_length(reg64.id(), expected_len)
 };
@@ -3419,10 +3418,10 @@ impl <'a> InstructionSelection {
 arg_by_stack.push(arg.clone());
 }
 } else if RegGroup::get_from_value(&arg) == RegGroup::GPREX && arg.is_reg() {
-if gpr_arg_count + 1 < x86_64::ARGUMENT_GPRs.len() {
+if gpr_arg_count + 1 < x86_64::ARGUMENT_GPRS.len() {
 // we need two registers
-let gpr1 = x86_64::ARGUMENT_GPRs[gpr_arg_count].clone();
-let gpr2 = x86_64::ARGUMENT_GPRs[gpr_arg_count + 1].clone();
+let gpr1 = x86_64::ARGUMENT_GPRS[gpr_arg_count].clone();
+let gpr2 = x86_64::ARGUMENT_GPRS[gpr_arg_count + 1].clone();
 let (arg_l, arg_h) = self.split_int128(&arg, f_context, vm);
@@ -3436,8 +3435,8 @@ impl <'a> InstructionSelection {
 arg_by_stack.push(arg.clone())
 }
 } else if RegGroup::get_from_value(&arg) == RegGroup::FPR && arg.is_reg() {
-if fpr_arg_count < x86_64::ARGUMENT_FPRs.len() {
-let arg_fpr = x86_64::ARGUMENT_FPRs[fpr_arg_count].clone();
+if fpr_arg_count < x86_64::ARGUMENT_FPRS.len() {
+let arg_fpr = x86_64::ARGUMENT_FPRS[fpr_arg_count].clone();
 match arg.ty.v {
 MuType_::Double => self.backend.emit_movsd_f64_f64(&arg, &arg_fpr),
@@ -3497,8 +3496,8 @@ impl <'a> InstructionSelection {
 if self.match_iimm(ret_val) {
 let imm_ret_val = self.node_iimm_to_i32(ret_val);
-if gpr_ret_count < x86_64::RETURN_GPRs.len() {
-self.backend.emit_mov_r_imm(&x86_64::RETURN_GPRs[gpr_ret_count], imm_ret_val);
+if gpr_ret_count < x86_64::RETURN_GPRS.len() {
+self.backend.emit_mov_r_imm(&x86_64::RETURN_GPRS[gpr_ret_count], imm_ret_val);
 gpr_ret_count += 1;
 } else {
 // pass by stack
@@ -3507,9 +3506,9 @@ impl <'a> InstructionSelection {
 } else if self.match_ireg(ret_val) {
 let reg_ret_val = self.emit_ireg(ret_val, f_content, f_context, vm);
-if gpr_ret_count < x86_64::RETURN_GPRs.len() {
+if gpr_ret_count < x86_64::RETURN_GPRS.len() {
 let ret_gpr = {
-let ref reg64 = x86_64::RETURN_GPRs[gpr_ret_count];
+let ref reg64 = x86_64::RETURN_GPRS[gpr_ret_count];
 let expected_len = reg_ret_val.ty.get_int_length().unwrap();
 x86_64::get_alias_for_length(reg64.id(), expected_len)
 };
@@ -3523,9 +3522,9 @@ impl <'a> InstructionSelection {
 } else if self.match_ireg_ex(ret_val) {
 let (ret_val1, ret_val2) = self.emit_ireg_ex(ret_val, f_content, f_context, vm);
-if gpr_ret_count + 1 < x86_64::RETURN_GPRs.len() {
-let ret_gpr1 = x86_64::RETURN_GPRs[gpr_ret_count].clone();
-let ret_gpr2 = x86_64::RETURN_GPRs[gpr_ret_count + 1].clone();
+if gpr_ret_count + 1 < x86_64::RETURN_GPRS.len() {
+let ret_gpr1 = x86_64::RETURN_GPRS[gpr_ret_count].clone();
+let ret_gpr2 = x86_64::RETURN_GPRS[gpr_ret_count + 1].clone();
 self.backend.emit_mov_r_r(&ret_gpr1, &ret_val1);
 self.backend.emit_mov_r_r(&ret_gpr2, &ret_val2);
@@ -3538,10 +3537,10 @@ impl <'a> InstructionSelection {
 } else if self.match_fpreg(ret_val) {
 let reg_ret_val = self.emit_fpreg(ret_val, f_content, f_context, vm);
-if fpr_ret_count < x86_64::RETURN_FPRs.len() {
+if fpr_ret_count < x86_64::RETURN_FPRS.len() {
 match reg_ret_val.ty.v {
-MuType_::Double => self.backend.emit_movsd_f64_f64(&x86_64::RETURN_FPRs[fpr_ret_count], &reg_ret_val),
-MuType_::Float => self.backend.emit_movss_f32_f32(&x86_64::RETURN_FPRs[fpr_ret_count], &reg_ret_val),
+MuType_::Double => self.backend.emit_movsd_f64_f64(&x86_64::RETURN_FPRS[fpr_ret_count], &reg_ret_val),
+MuType_::Float => self.backend.emit_movss_f32_f32(&x86_64::RETURN_FPRS[fpr_ret_count], &reg_ret_val),
 _ => panic!("expect double or float")
 }
@@ -3558,8 +3557,8 @@ impl <'a> InstructionSelection {
 // pop all callee-saved registers - reverse order
 {
 let frame = self.current_frame.as_mut().unwrap();
-for i in (0..x86_64::CALLEE_SAVED_GPRs.len()).rev() {
-let ref reg = x86_64::CALLEE_SAVED_GPRs[i];
+for i in (0..x86_64::CALLEE_SAVED_GPRS.len()).rev() {
+let ref reg = x86_64::CALLEE_SAVED_GPRS[i];
 let reg_id = reg.extract_ssa_id().unwrap();
 if reg_id != x86_64::RBP.extract_ssa_id().unwrap() {
 let loc = frame.allocated.get(&reg_id).unwrap().make_memory_op(reg.ty.clone(), vm);
......
 #![allow(dead_code)]
-#![allow(non_upper_case_globals)]
+/// Tree pattern matching instruction selection.
 pub mod inst_sel;
 mod codegen;
-pub use compiler::backend::x86_64::codegen::CodeGenerator;
+/// CodeGenerator trait serves as an interface to the backend code generator, which
+/// may generate assembly code or binary (not implemented yet)
+use compiler::backend::x86_64::codegen::CodeGenerator;
-pub mod asm_backend;
-pub use compiler::backend::x86_64::asm_backend::ASMCodeGen;
+/// assembly backend as AOT compiler
+mod asm_backend;
+use compiler::backend::x86_64::asm_backend::ASMCodeGen;
+// re-export a few functions for AOT compilation
 #[cfg(feature = "aot")]
 pub use compiler::backend::x86_64::asm_backend::emit_code;
 #[cfg(feature = "aot")]
 pub use compiler::backend::x86_64::asm_backend::emit_context;
 #[cfg(feature = "aot")]
 pub use compiler::backend::x86_64::asm_backend::emit_context_with_reloc;
 #[cfg(feature = "aot")]
 pub use compiler::backend::x86_64::asm_backend::spill_rewrite;
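The mod.rs hunk above narrows visibility (the codegen and asm_backend modules and their internal imports drop pub) while the AOT entry points stay re-exported behind the crate's aot Cargo feature. A small sketch of that feature-gating pattern, with hypothetical module and function names:

    // assumes Cargo.toml declares:  [features]  aot = []
    #[allow(dead_code)]
    mod asm_backend {
        pub fn emit_code() { /* write assembly out for AOT compilation */ }
        pub fn helper() { /* internal detail: never re-exported */ }
    }

    // Only part of the public API when built with `--features aot`.
    #[cfg(feature = "aot")]
    pub use asm_backend::emit_code;

    fn main() {
        asm_backend::helper();
        #[cfg(feature = "aot")]
        emit_code();
    }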
@@ -22,6 +30,7 @@ use compiler::backend::RegGroup;
 use utils::LinkedHashMap;
 use std::collections::HashMap;
+/// a macro to declare a set of general purpose registers that are aliased to the first one
 macro_rules! GPR_ALIAS {
 ($alias: ident: ($id64: expr, $r64: ident) -> $r32: ident, $r16: ident, $r8l: ident, $r8h: ident) => {
 lazy_static!{
@@ -55,6 +64,7 @@ macro_rules! GPR_ALIAS {
 };
 }
+/// a macro to declare a general purpose register
 macro_rules! GPR {
 ($id:expr, $name: expr, $ty: ident) => {
 {
@@ -67,6 +77,7 @@ macro_rules! GPR {
 };
 }
+/// a macro to declare a floating point register
 macro_rules! FPR {
 ($id:expr, $name: expr) => {
 {
@@ -79,6 +90,9 @@ macro_rules! FPR {
 };
 }
+// declare all general purpose registers for x86_64
+// non 64-bit registers are alias of its 64-bit one
 GPR_ALIAS!(RAX_ALIAS: (0, RAX) -> EAX, AX , AL, AH);
 GPR_ALIAS!(RCX_ALIAS: (5, RCX) -> ECX, CX , CL, CH);
 GPR_ALIAS!(RDX_ALIAS: (10,RDX) -> EDX, DX , DL, DH);
@@ -98,6 +112,8 @@ GPR_ALIAS!(R15_ALIAS: (64,R15) -> R15D,R15W,R15B);
 GPR_ALIAS!(RIP_ALIAS: (68,RIP));
 lazy_static! {
+/// a map from 64-bit register IDs to a vector of its aliased register (Values),
+/// including the 64-bit register
 pub static ref GPR_ALIAS_TABLE : LinkedHashMap<MuID, Vec<P<Value>>> = {
 let mut ret = LinkedHashMap::new();
@@ -122,7 +138,7 @@ lazy_static! {
 ret
 };
-// e.g. given eax, return rax
+/// a map from any register to its 64-bit alias
 pub static ref GPR_ALIAS_LOOKUP : HashMap<MuID, P<Value>> = {
 let mut ret = HashMap::new();
@@ -138,6 +154,8 @@ lazy_static! {
 };
 }
+/// returns P<Value> for a register ID of its alias of the given length
+/// panics if the ID is not a machine register ID
 pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
 if id < FPR_ID_START {
 let vec = match GPR_ALIAS_TABLE.get(&id) {
@@ -154,7 +172,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
 _ => panic!("unexpected length {} for {}", length, vec[0])
 }
 } else {
-for r in ALL_FPRs.iter() {
+for r in ALL_FPRS.iter() {
 if r.id() == id {
 return r.clone();
 }
@@ -164,6 +182,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
 }
 }
+/// are two registers aliased? (both must be machine register IDs, otherwise this function panics)
 pub fn is_aliased(id1: MuID, id2: MuID) -> bool {
 if get_color_for_precolored(id1) == get_color_for_precolored(id2) {
 macro_rules! is_match {
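get_alias_for_length and the alias tables above model x86-64's overlapping register names: rax, eax, ax and al are one physical register viewed at 64, 32, 16 and 8 bits, and instruction selection asks for the alias matching an operand's width. A self-contained sketch of that lookup, with a hand-rolled table and string names instead of the GPR_ALIAS! machinery:

    use std::collections::HashMap;

    // One 64-bit register and its narrower aliases, widest first,
    // mimicking a single GPR_ALIAS_TABLE entry.
    fn alias_table() -> HashMap<u32, Vec<(&'static str, usize)>> {
        let mut table = HashMap::new();
        table.insert(0, vec![("rax", 64), ("eax", 32), ("ax", 16), ("al", 8)]);
        table
    }

    // Find the alias of register `id` with the requested bit width,
    // panicking on unknown IDs or widths as the real function does.
    fn get_alias_for_length(id: u32, length: usize) -> &'static str {
        let table = alias_table();
        let aliases = table.get(&id).expect("not a machine register id");
        aliases.iter()
            .find(|&&(_, bits)| bits == length)
            .map(|&(name, _)| name)
            .expect("unexpected length")
    }

    fn main() {
        assert_eq!(get_alias_for_length(0, 32), "eax");
        assert_eq!(get_alias_for_length(0, 8), "al");
    }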
@@ -215,12 +234,12 @@ pub fn check_op_len(op: &P<Value>) -> usize {
 }
 lazy_static! {
-pub static ref RETURN_GPRs : [P<Value>; 2] = [
+pub static ref RETURN_GPRS : [P<Value>; 2] = [
 RAX.clone(),
 RDX.clone(),
 ];
-pub static ref ARGUMENT_GPRs : [P<Value>; 6] = [
+pub static ref ARGUMENT_GPRS : [P<Value>; 6] = [
 RDI.clone(),
 RSI.clone(),
 RDX.clone(),
@@ -229,7 +248,7 @@ lazy_static! {
 R9.clone()
 ];
-pub static ref CALLEE_SAVED_GPRs : [P<Value>; 6] = [
+pub static ref CALLEE_SAVED_GPRS : [P<Value>; 6] = [
 RBX.clone(),
 RBP.clone(),
 R12.clone(),
@@ -238,7 +257,7 @@ lazy_static! {
 R15.clone()
 ];
-pub static ref CALLER_SAVED_GPRs : [P<Value>; 9] = [
+pub static ref CALLER_SAVED_GPRS : [P<Value>; 9] = [
 RAX.clone(),
 RCX.clone(),
 RDX.clone(),
@@ -250,7 +269,7 @@ lazy_static! {
 R11.clone()
 ];
-static ref ALL_GPRs : [P<Value>; 15] = [
+static ref ALL_GPRS : [P<Value>; 15] = [
 RAX.clone(),
 RCX.clone(),
 RDX.clone(),
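The x86_64 arrays above encode the System V AMD64 calling convention: integer arguments travel in rdi, rsi, rdx, rcx, r8, r9 (in that order), integer results come back in rax (rdx for the high half), and rbx, rbp, r12-r15 must survive calls. A tiny sketch pinning down the argument order:

    // System V AMD64 integer-argument registers, in ABI order.
    const ARGUMENT_GPRS: [&str; 6] = ["rdi", "rsi", "rdx", "rcx", "r8", "r9"];

    fn main() {
        // gpr_arg_count indexes this array, so the third
        // integer argument of a call lands in rdx.
        assert_eq!(ARGUMENT_GPRS[2], "rdx");
    }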
@@ -290,12 +309,12 @@ lazy_static!{
 pub static ref XMM14 : P<Value> = FPR!(FPR_ID_START + 14,"xmm14");