
Commit 93c4cc59 authored by Isaac Oscar Gariano

Merge branch 'master' of https://gitlab.anu.edu.au/mu/mu-impl-fast.git

# Conflicts:
#	tests/ir_macros.rs
#	tests/lib.rs
#	tests/test_compiler/test_alloc.rs
#	tests/test_compiler/test_binop.rs
#	tests/test_compiler/test_call.rs
#	tests/test_compiler/test_controlflow.rs
#	tests/test_compiler/test_exception.rs
#	tests/test_compiler/test_floatingpoint.rs
#	tests/test_compiler/test_global.rs
#	tests/test_compiler/test_int.rs
#	tests/test_compiler/test_mem_inst.rs
#	tests/test_compiler/test_pre_instsel.rs
#	tests/test_compiler/test_regalloc.rs
#	tests/test_compiler/test_thread.rs
#	tests/test_ir/test_ir.rs
#	tests/test_jit/test_rpython.py
#	tests/test_runtime/mod.rs
parents beb9d46a 1a9e6c2e
@@ -9,7 +9,6 @@ use utils::LinkedHashSet;
 use std::fmt;
 use std::default;
 use std::sync::RwLock;
 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 pub type WPID = usize;
@@ -74,9 +73,9 @@ pub struct MuFunction {
 }
 impl MuFunction {
-    pub fn new(id: MuID, sig: P<MuFuncSig>) -> MuFunction {
+    pub fn new(entity: MuEntityHeader, sig: P<MuFuncSig>) -> MuFunction {
         MuFunction {
-            hdr: MuEntityHeader::unnamed(id),
+            hdr: entity,
             sig: sig,
             cur_ver: None,
             all_vers: vec![]
@@ -143,9 +142,9 @@ impl fmt::Debug for MuFunctionVersion {
 }
 impl MuFunctionVersion {
-    pub fn new(id: MuID, func: MuID, sig: P<MuFuncSig>) -> MuFunctionVersion {
+    pub fn new(entity: MuEntityHeader, func: MuID, sig: P<MuFuncSig>) -> MuFunctionVersion {
         MuFunctionVersion{
-            hdr: MuEntityHeader::unnamed(id),
+            hdr: entity,
             func_id: func,
             sig: sig,
            orig_content: None,
@@ -194,9 +193,10 @@ impl MuFunctionVersion {
         self.is_compiled = true;
     }
-    pub fn new_ssa(&mut self, id: MuID, ty: P<MuType>) -> P<TreeNode> {
+    pub fn new_ssa(&mut self, entity: MuEntityHeader, ty: P<MuType>) -> P<TreeNode> {
+        let id = entity.id();
         let val = P(Value{
-            hdr: MuEntityHeader::unnamed(id),
+            hdr: entity,
             ty: ty,
             v: Value_::SSAVar(id)
         });
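Note on the API change above: new_ssa (like the other constructors in this commit) now takes a pre-built MuEntityHeader instead of a bare MuID, so the caller decides whether the SSA variable gets a name; the ID is recovered via entity.id(). A minimal before/after sketch of a call site (the vm/func/ty bindings are stand-ins, mirroring the spill_rewrite changes later in this diff):

    // before: new_ssa minted an unnamed header internally
    let tmp = func.new_ssa(vm.next_id(), ty.clone());

    // after: the caller constructs the header (unnamed here) explicitly
    let tmp = func.new_ssa(MuEntityHeader::unnamed(vm.next_id()), ty.clone());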
@@ -425,8 +425,8 @@ impl fmt::Debug for Block {
 }
 impl Block {
-    pub fn new(id: MuID) -> Block {
-        Block{hdr: MuEntityHeader::unnamed(id), content: None, control_flow: ControlFlow::default()}
+    pub fn new(entity: MuEntityHeader) -> Block {
+        Block{hdr: entity, content: None, control_flow: ControlFlow::default()}
     }
     pub fn is_receiving_exception_arg(&self) -> bool {
@@ -588,7 +588,7 @@ impl BlockContent {
                         vec_utils::append_unique(&mut ret, &mut live_outs);
                     }
-                    _ => panic!("didn't expect last inst as {:?}", inst)
+                    _ => panic!("didn't expect last inst as {}", inst)
                 }
             },
             _ => panic!("expect last treenode of block is a inst")
@@ -763,7 +763,7 @@ impl Value {
     }
 }
-const DISPLAY_TYPE : bool = false;
+const DISPLAY_TYPE : bool = true;
 const PRINT_ABBREVIATE_NAME: bool = true;
 impl fmt::Debug for Value {
@@ -1065,14 +1065,14 @@ impl fmt::Display for MemoryLocation {
 #[derive(Debug)] // Display, PartialEq, Clone
 pub struct MuEntityHeader {
     id: MuID,
-    name: RwLock<Option<MuName>>
+    name: Option<MuName>
 }
 impl Clone for MuEntityHeader {
     fn clone(&self) -> Self {
         MuEntityHeader {
             id: self.id,
-            name: RwLock::new(self.name.read().unwrap().clone())
+            name: self.name.clone()
         }
     }
 }
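Since set_name is removed later in this diff, an entity's name can no longer change after construction, which is what allows the RwLock wrapper to be dropped here and the Clone/Encodable/Decodable impls to simplify. Construction is now the only place a name is attached; a small sketch (assuming MuName is a String alias and vm.next_id() as the usual ID source):

    let anon  = MuEntityHeader::unnamed(vm.next_id());
    let named = MuEntityHeader::named(vm.next_id(), "@my.func".to_string());
    // named.name() == Some("my$func".to_string()) after name_check (see below)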
@@ -1083,7 +1083,7 @@ impl Encodable for MuEntityHeader {
         s.emit_struct("MuEntityHeader", 2, |s| {
             try!(s.emit_struct_field("id", 0, |s| self.id.encode(s)));
-            let name = &self.name.read().unwrap();
+            let name = &self.name;
             try!(s.emit_struct_field("name", 1, |s| name.encode(s)));
             Ok(())
@@ -1099,24 +1099,36 @@ impl Decodable for MuEntityHeader {
             Ok(MuEntityHeader{
                 id: id,
-                name: RwLock::new(name)
+                name: name
             })
         })
     }
 }
+pub fn name_check(name: MuName) -> MuName {
+    let name = name.replace('.', "$");
+    if name.starts_with("@") || name.starts_with("%") {
+        let (_, name) = name.split_at(1);
+        return name.to_string();
+    }
+    name
+}
 impl MuEntityHeader {
     pub fn unnamed(id: MuID) -> MuEntityHeader {
         MuEntityHeader {
             id: id,
-            name: RwLock::new(None)
+            name: None
         }
     }
     pub fn named(id: MuID, name: MuName) -> MuEntityHeader {
         MuEntityHeader {
             id: id,
-            name: RwLock::new(Some(MuEntityHeader::name_check(name)))
+            name: Some(name_check(name))
         }
     }
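name_check has been hoisted out of the impl into a free function, so named (and other modules) can call it without the type prefix. From its body above, the normalization is: dots become $, and a leading @ or % (Mu's global/local sigils) is stripped, presumably to yield assembler-safe symbols. Worked examples:

    assert_eq!(name_check("@foo.bar".to_string()), "foo$bar"); // '.' -> '$', sigil stripped
    assert_eq!(name_check("%tmp.1".to_string()), "tmp$1");
    assert_eq!(name_check("plain".to_string()), "plain");      // already assembler-safe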
@@ -1125,24 +1137,7 @@ impl MuEntityHeader {
     }
     pub fn name(&self) -> Option<MuName> {
-        self.name.read().unwrap().clone()
+        self.name.clone()
     }
-    pub fn set_name(&self, name: MuName) {
-        let mut name_guard = self.name.write().unwrap();
-        *name_guard = Some(MuEntityHeader::name_check(name));
-    }
-    pub fn name_check(name: MuName) -> MuName {
-        let name = name.replace('.', "$");
-        if name.starts_with("@") || name.starts_with("%") {
-            let (_, name) = name.split_at(1);
-            return name.to_string();
-        }
-        name
-    }
     fn abbreviate_name(&self) -> Option<MuName> {
@@ -1192,7 +1187,6 @@ impl fmt::Display for MuEntityHeader {
 pub trait MuEntity {
     fn id(&self) -> MuID;
     fn name(&self) -> Option<MuName>;
-    fn set_name(&self, name: MuName);
     fn as_entity(&self) -> &MuEntity;
 }
@@ -1218,13 +1212,6 @@ impl MuEntity for TreeNode {
         }
     }
-    fn set_name(&self, name: MuName) {
-        match self.v {
-            TreeNode_::Instruction(ref inst) => inst.set_name(name),
-            TreeNode_::Value(ref pv) => pv.set_name(name)
-        }
-    }
     fn as_entity(&self) -> &MuEntity {
         match self.v {
             TreeNode_::Instruction(ref inst) => inst.as_entity(),
......
#[macro_use]
extern crate log;
extern crate simple_logger;
#[macro_use]
@@ -14,9 +13,6 @@ macro_rules! impl_mu_entity {
             fn id(&self) -> MuID {self.hdr.id()}
             #[inline(always)]
             fn name(&self) -> Option<MuName> {self.hdr.name()}
-            fn set_name(&self, name: MuName) {
-                self.hdr.set_name(name);
-            }
             fn as_entity(&self) -> &MuEntity {
                 let ref_ty : &$entity = self;
                 ref_ty as &MuEntity
......
@@ -1013,26 +1013,23 @@ impl ASMCodeGen {
     fn prepare_machine_regs(&self, regs: Iter<P<Value>>) -> Vec<MuID> {
         regs.map(|x| self.prepare_machine_reg(x)).collect()
     }
-    fn add_asm_call(&mut self, code: String, potentially_excepting: Option<MuName>) {
-        // a call instruction will use all the argument registers
-        // do not need
-        let uses : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
-        // for reg in x86_64::ARGUMENT_GPRs.iter() {
-        //     uses.insert(reg.id(), vec![]);
-        // }
-        // for reg in x86_64::ARGUMENT_FPRs.iter() {
-        //     uses.insert(reg.id(), vec![]);
-        // }
-        // defines: return registers
+    fn add_asm_call_with_extra_uses(&mut self,
+                                    code: String,
+                                    extra_uses: LinkedHashMap<MuID, Vec<ASMLocation>>,
+                                    potentially_excepting: Option<MuName>) {
+        let uses = extra_uses;
+        // defines
         let mut defines : LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
+        // return registers get defined
         for reg in x86_64::RETURN_GPRs.iter() {
             defines.insert(reg.id(), vec![]);
         }
         for reg in x86_64::RETURN_FPRs.iter() {
             defines.insert(reg.id(), vec![]);
         }
+        // caller saved register will be destroyed
         for reg in x86_64::CALLER_SAVED_GPRs.iter() {
             if !defines.contains_key(&reg.id()) {
                 defines.insert(reg.id(), vec![]);
@@ -1043,7 +1040,7 @@ impl ASMCodeGen {
                 defines.insert(reg.id(), vec![]);
             }
         }
         self.add_asm_inst_internal(code, defines, uses, false, {
             if potentially_excepting.is_some() {
                 ASMBranchTarget::PotentiallyExcepting(potentially_excepting.unwrap())
@@ -1053,6 +1050,10 @@ impl ASMCodeGen {
         }, None)
     }
+    fn add_asm_call(&mut self, code: String, potentially_excepting: Option<MuName>) {
+        self.add_asm_call_with_extra_uses(code, LinkedHashMap::new(), potentially_excepting);
+    }
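The refactor above splits call emission in two: add_asm_call_with_extra_uses models what a call clobbers (return registers and caller-saved registers become defines), while extra_uses lets a caller record operands the call instruction itself reads, and the old add_asm_call becomes a thin wrapper passing an empty map. The point of extra_uses is visible in emit_call_near_r64 further down: an indirect call must mark the register holding the target address as a use so the register allocator keeps it alive across the call setup. Sketch (identifiers are stand-ins):

    // direct call: nothing beyond the calling convention is read
    self.add_asm_call(format!("call {}", func_symbol), pe);

    // indirect call: the target register is an explicit use
    self.add_asm_call_with_extra_uses(
        format!("call *{}", reg),
        linked_hashmap!{ reg_id => vec![reg_loc] },
        pe);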
     fn add_asm_ret(&mut self, code: String) {
         // return instruction does not use anything (not RETURN REGS)
         // otherwise it will keep RETURN REGS alive
@@ -1697,9 +1698,7 @@ impl ASMCodeGen {
         }
     }
-    fn internal_mov_mem_imm(&mut self, inst: &str, dest: &P<Value>, src: i32) {
-        let len = check_op_len(dest);
+    fn internal_mov_mem_imm(&mut self, inst: &str, dest: &P<Value>, src: i32, len: usize) {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} -> {}", inst, src, dest);
@@ -2136,8 +2135,8 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_mov_mem_r (&mut self, dest: &P<Value>, src: &P<Value>) {
         self.internal_mov_mem_r("mov", dest, src, false)
     }
-    fn emit_mov_mem_imm(&mut self, dest: &P<Value>, src: i32) {
-        self.internal_mov_mem_imm("mov", dest, src)
+    fn emit_mov_mem_imm(&mut self, dest: &P<Value>, src: i32, oplen: usize) {
+        self.internal_mov_mem_imm("mov", dest, src, oplen)
     }
     // zero/sign extend mov
@@ -2844,7 +2843,18 @@ impl CodeGenerator for ASMCodeGen {
     fn emit_call_near_r64(&mut self, callsite: String, func: &P<Value>, pe: Option<MuName>) -> ValueLocation {
         trace!("emit: call {}", func);
-        unimplemented!()
+        let (reg, id, loc) = self.prepare_reg(func, 6);
+        let asm = format!("call *{}", reg);
+        self.add_asm_call_with_extra_uses(asm, linked_hashmap!{id => vec![loc]}, pe);
+        let callsite_symbol = symbol(callsite.clone());
+        self.add_asm_symbolic(directive_globl(callsite_symbol.clone()));
+        self.add_asm_symbolic(format!("{}:", callsite_symbol.clone()));
+        ValueLocation::Relocatable(RegGroup::GPR, callsite)
     }
     fn emit_call_near_mem64(&mut self, callsite: String, func: &P<Value>, pe: Option<MuName>) -> ValueLocation {
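emit_call_near_r64 (register-indirect near call) was previously unimplemented!(). Besides emitting the call, it plants a global label immediately after it, so the callsite's return address becomes a relocatable symbol, presumably for building exception/unwind tables when pe (potentially excepting) is set. For a target held in %rax and a callsite named my_callsite (both illustrative), the emitted assembly would look roughly like:

    call *%rax
    .globl my_callsite
    my_callsite: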
@@ -3308,6 +3318,8 @@ fn write_const_value(f: &mut File, constant: P<Value>) {
 }
 use std::collections::HashMap;
+use compiler::backend::code_emission::emit_mu_types;
 pub fn emit_context_with_reloc(vm: &VM,
                                symbols: HashMap<Address, String>,
                                fields : HashMap<Address, String>) {
@@ -3315,6 +3327,8 @@ pub fn emit_context_with_reloc(vm: &VM,
     use std::io::prelude::*;
     use rustc_serialize::json;
+    emit_mu_types(vm);
     debug!("---Emit VM Context---");
     create_emit_directory(vm);
@@ -3546,7 +3560,7 @@ pub fn spill_rewrite(
                 // generate a random new temporary
                 let temp_ty = val_reg.ty.clone();
-                let temp = func.new_ssa(vm.next_id(), temp_ty.clone()).clone_value();
+                let temp = func.new_ssa(MuEntityHeader::unnamed(vm.next_id()), temp_ty.clone()).clone_value();
                 // maintain mapping
                 trace!("reg {} used in Inst{} is replaced as {}", val_reg, i, temp);
@@ -3594,7 +3608,7 @@ pub fn spill_rewrite(
                     temp_for_cur_inst.get(&reg).unwrap().clone()
                 } else {
                     let temp_ty = val_reg.ty.clone();
-                    let temp = func.new_ssa(vm.next_id(), temp_ty.clone()).clone_value();
+                    let temp = func.new_ssa(MuEntityHeader::unnamed(vm.next_id()), temp_ty.clone()).clone_value();
                     spilled_scratch_temps.insert(temp.id(), reg);
......
@@ -50,7 +50,9 @@ pub trait CodeGenerator {
     fn emit_mov_r_mem  (&mut self, dest: Reg, src: Mem); // load
     fn emit_mov_r_r    (&mut self, dest: Reg, src: Reg);
     fn emit_mov_mem_r  (&mut self, dest: Mem, src: Reg); // store
-    fn emit_mov_mem_imm(&mut self, dest: Mem, src: i32); // store
+    // we can infer imm length from Reg, but cannot from Mem
+    // because mem may only have type as ADDRESS_TYPE
+    fn emit_mov_mem_imm(&mut self, dest: Mem, src: i32, oplen: usize); // store
     // zero/sign extend mov
     fn emit_movs_r_r   (&mut self, dest: Reg, src: Reg);
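The comment above captures the rationale for the new parameter: a register operand implies its own width, but a Mu memory operand may carry only ADDRESS_TYPE, so a store of an immediate cannot infer the operand size needed for the AT&T suffix (op_postfix(len) in internal_mov_mem_imm). A hedged usage sketch (operands are stand-ins):

    // store a 32-bit zero through a pointer-typed location; without
    // oplen the backend could not choose between movb/movw/movl/movq
    self.emit_mov_mem_imm(&mem_loc, 0, 32); // emits something like "movl $0, (%rax)"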
......
@@ -11,8 +11,8 @@ use std::io::prelude::*;
 use std::fs::File;
 use std::collections::HashMap;
-const EMIT_MUIR : bool = true;
-const EMIT_MC_DOT : bool = true;
+pub const EMIT_MUIR : bool = true;
+pub const EMIT_MC_DOT : bool = true;
 pub fn create_emit_directory(vm: &VM) {
     use std::fs;
@@ -45,6 +45,48 @@ impl CodeEmission {
     }
 }
+#[allow(dead_code)]
+pub fn emit_mu_types(vm: &VM) {
+    if EMIT_MUIR {
+        create_emit_directory(vm);
+        let mut file_path = path::PathBuf::new();
+        file_path.push(&vm.vm_options.flag_aot_emit_dir);
+        file_path.push("___types.muty");
+        let mut file = match File::create(file_path.as_path()) {
+            Err(why) => panic!("couldn't create mu types file {}: {}", file_path.to_str().unwrap(), why),
+            Ok(file) => file
+        };
+        {
+            use ast::types::*;
+            let ty_guard = vm.types().read().unwrap();
+            let struct_map = STRUCT_TAG_MAP.read().unwrap();
+            let hybrid_map = HYBRID_TAG_MAP.read().unwrap();
+            for ty in ty_guard.values() {
+                if ty.is_struct() {
+                    file.write_fmt(format_args!("{}", ty)).unwrap();
+                    let struct_ty = struct_map.get(&ty.get_struct_hybrid_tag().unwrap()).unwrap();
+                    file.write_fmt(format_args!(" -> {}\n", struct_ty)).unwrap();
+                    file.write_fmt(format_args!("  {}\n", vm.get_backend_type_info(ty.id()))).unwrap();
+                } else if ty.is_hybrid() {
+                    file.write_fmt(format_args!("{}", ty)).unwrap();
+                    let hybrid_ty = hybrid_map.get(&ty.get_struct_hybrid_tag().unwrap()).unwrap();
+                    file.write_fmt(format_args!(" -> {}\n", hybrid_ty)).unwrap();
+                    file.write_fmt(format_args!("  {}\n", vm.get_backend_type_info(ty.id()))).unwrap();
+                } else {
+                    // we only care about struct
+                }
+            }
+        }
+    }
+}
 #[allow(dead_code)]
 fn emit_muir(func: &MuFunctionVersion, vm: &VM) {
     let func_name = match func.name() {
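The new emit_mu_types dumps every struct and hybrid type, together with its resolved backend layout, into ___types.muty under the AOT emit directory (and emit_context_with_reloc now calls it, per the earlier hunk). From the write_fmt calls, each entry is schematically:

    <type> -> <its definition looked up in STRUCT_TAG_MAP / HYBRID_TAG_MAP>
      <BackendTypeInfo Display line, e.g. "16 bytes (8 bytes aligned), field offsets: (0, 8)">

The sample layout line follows the Display for BackendTypeInfo added near the end of this diff; the exact type syntax depends on the Display impls for MuType.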
......
@@ -5,6 +5,7 @@ pub mod code_emission;
 use ast::types;
 use utils::ByteSize;
+use utils::math::align_up;
 use runtime::mm;
 use runtime::mm::common::gctype::{GCType, GCTYPE_INIT_ID, RefPattern};
@@ -102,23 +103,23 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
         MuType_::Int(size_in_bit) => {
             match size_in_bit {
                 1  => BackendTypeInfo{
-                    size: 1, alignment: 1, struct_layout: None,
+                    size: 1, alignment: 1, struct_layout: None, elem_padded_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                 },
                 8  => BackendTypeInfo{
-                    size: 1, alignment: 1, struct_layout: None,
+                    size: 1, alignment: 1, struct_layout: None, elem_padded_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(1, 1))
                 },
                 16 => BackendTypeInfo{
-                    size: 2, alignment: 2, struct_layout: None,
+                    size: 2, alignment: 2, struct_layout: None, elem_padded_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(2, 2))
                 },
                 32 => BackendTypeInfo{
-                    size: 4, alignment: 4, struct_layout: None,
+                    size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
                 },
                 64 => BackendTypeInfo{
-                    size: 8, alignment: 8, struct_layout: None,
+                    size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
                     gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
                 },
                 _ => unimplemented!()
@@ -128,7 +129,7 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
         MuType_::Ref(_)
         | MuType_::IRef(_)
         | MuType_::WeakRef(_) => BackendTypeInfo{
-            size: 8, alignment: 8, struct_layout: None,
+            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
             gc_type: mm::add_gc_type(GCType::new_reftype())
         },
         // pointer
@@ -137,30 +138,32 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
         | MuType_::FuncRef(_)
         | MuType_::ThreadRef
         | MuType_::StackRef => BackendTypeInfo{
-            size: 8, alignment: 8, struct_layout: None,
+            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
         },
         // tagref
         MuType_::Tagref64 => unimplemented!(),
         // floating point
         MuType_::Float => BackendTypeInfo{
-            size: 4, alignment: 4, struct_layout: None,
+            size: 4, alignment: 4, struct_layout: None, elem_padded_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(4, 4))
         },
         MuType_::Double => BackendTypeInfo {
-            size: 8, alignment: 8, struct_layout: None,
+            size: 8, alignment: 8, struct_layout: None, elem_padded_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(8, 8))
         },
         // array
         MuType_::Array(ref ty, len) => {
             let ele_ty = vm.get_backend_type_info(ty.id());
+            let ele_padded_size = align_up(ele_ty.size, ele_ty.alignment);
             BackendTypeInfo{
-                size         : ele_ty.size * len,
+                size         : ele_padded_size * len,
                 alignment    : ele_ty.alignment,
                 struct_layout: None,
+                elem_padded_size : Some(ele_padded_size),
                 gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
-                    ele_ty.size * len,
+                    ele_padded_size * len,
                     ele_ty.alignment,
                     Some(RefPattern::Repeat{
                         pattern: Box::new(RefPattern::NestedType(vec![ele_ty.gc_type])),
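The array change above is the substantive fix in this file: element slots must be padded out to the element alignment, so the array footprint is align_up(elem_size, elem_align) * len rather than elem_size * len. A worked example (assuming utils::math::align_up has the usual round-up-to-multiple semantics):

    // hypothetical element type: 12 bytes of data, 8-byte alignment
    let (size, align, len) = (12usize, 8usize, 4usize);
    let padded = (size + align - 1) / align * align; // align_up(12, 8) == 16
    assert_eq!(padded * len, 64); // new, correct array size
    assert_eq!(size * len, 48);   // old computation: elements 1..3 would be misaligned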
@@ -195,6 +198,8 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
             // treat var_ty as array (getting its alignment)
             let var_ele_ty = vm.get_backend_type_info(var_ty.id());
             let var_align = var_ele_ty.alignment;
+            let var_padded_size = align_up(var_ele_ty.size, var_ele_ty.alignment);
+            ret.elem_padded_size = Some(var_padded_size);
             // fix type info as hybrid
             // 1. check alignment
@@ -204,14 +209,14 @@ pub fn resolve_backend_type_info (ty: &MuType, vm: &VM) -> BackendTypeInfo {
             // 2. fix gc type
             let mut gctype = ret.gc_type.as_ref().clone();
             gctype.var_refs = Some(RefPattern::NestedType(vec![var_ele_ty.gc_type.clone()]));
-            gctype.var_size = Some(var_ele_ty.size);
+            gctype.var_size = Some(var_padded_size);
             ret.gc_type = mm::add_gc_type(gctype);
             ret
         }
         // void
         MuType_::Void => BackendTypeInfo{
-            size: 0, alignment: 8, struct_layout: None,
+            size: 0, alignment: 8, struct_layout: None, elem_padded_size: None,
             gc_type: mm::add_gc_type(GCType::new_noreftype(0, 8))
         },
         // vector
@@ -274,6 +279,7 @@ fn layout_struct(tys: &Vec<P<MuType>>, vm: &VM) -> BackendTypeInfo {
         size         : size,
         alignment    : struct_align,
         struct_layout: Some(offsets),
+        elem_padded_size: None,
         gc_type      : mm::add_gc_type(GCType::new_fix(GCTYPE_INIT_ID,
                                                        size,
                                                        struct_align,
@@ -298,7 +304,11 @@ pub fn sequetial_layout(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, ByteSize, Vec<ByteSize>) {
 pub struct BackendTypeInfo {
     pub size: ByteSize,
     pub alignment: ByteSize,
     pub struct_layout: Option<Vec<ByteSize>>,
+    // for hybrid/array, every element needs to be properly aligned
+    // thus it may take more space than it actually needs
+    pub elem_padded_size: Option<ByteSize>,
     pub gc_type: P<GCType>
 }
@@ -318,6 +328,21 @@ impl BackendTypeInfo {
     }
 }
+use std::fmt;
+impl fmt::Display for BackendTypeInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{} bytes ({} bytes aligned), ", self.size, self.alignment).unwrap();
+        if self.struct_layout.is_some() {
+            use utils::vec_utils;
+            let layout = self.struct_layout.as_ref().unwrap();
+            write!(f, "field offsets: ({})", vec_utils::as_str(layout)).unwrap();
+        }
+        Ok(())
+    }
+}
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub enum RegGroup {GPR, FPR}
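This Display impl is what the ___types.muty dump above leans on. For a struct of two 8-byte fields at offsets 0 and 8 it would print roughly "16 bytes (8 bytes aligned), field offsets: (0, 8)" (assuming vec_utils::as_str joins with commas). Note the impl calls write!(...).unwrap() and then returns Ok(()), so a formatting error panics rather than propagating; returning the write! results directly would be the more idiomatic shape.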
......
@@ -11,6 +11,8 @@ use compiler::backend::reg_alloc::validate::alive_entry::*;
 mod exact_liveness;
 use compiler::backend::reg_alloc::validate::exact_liveness::*;
+const VERIFY_SPILLING : bool = false;
 pub fn validate_regalloc(cf: &CompiledFunction,
                          reg_assigned: LinkedHashMap<MuID, MuID>,
                          spill_scratch_regs: LinkedHashMap<MuID, MuID>)
@@ -62,42 +64,46 @@ pub fn validate_regalloc(cf: &CompiledFunction,
             Some(range) => range,
             None => panic!("cannot find range for block {}", block)
         };
-        let last_inst = range.end - 1;
+        let last_inst = mc.get_last_inst(range.end - 1).unwrap();
         for i in range {
             mc.trace_inst(i);
-            // validate spill
-            if let Some(spill_loc) = mc.is_spill_load(i) {
-                // spill load is a move from spill location (mem) to temp
-                // its define is the scratch temp
-                let scratch_temp = mc.get_inst_reg_defines(i)[0];
-                let source_temp = get_source_temp_for_scratch(scratch_temp, &spill_scratch_regs);
-                // we check if source_temp are alive, and if it is alive in the designated location
-                validate_spill_load(scratch_temp, source_temp, spill_loc, &mut alive);
-            } else if let Some(spill_loc) = mc.is_spill_store(i) {
-                // spill store is a move from scratch temp to mem
-                // it uses scratch temp as well as stack pointer (to refer to mem)
-                // we try to find the scratch temp
-                let scratch_temp = {
-                    let uses = mc.get_inst_reg_uses(i);
-                    let mut use_temps = vec![];
-                    for reg in uses {
-                        if reg >= MACHINE_ID_END {
-                            use_temps.push(reg)
-                        }
-                    };
+            if VERIFY_SPILLING {
+                panic!("the code doesnt work");
+                if let Some(spill_loc) = mc.is_spill_load(i) {
+                    // spill load is a move from spill location (mem) to temp
+                    // its define is the scratch temp
+                    let scratch_temp = mc.get_inst_reg_defines(i)[0];
+                    let source_temp = get_source_temp_for_scratch(scratch_temp, &spill_scratch_regs);
+                    // we check if source_temp are alive, and if it is alive in the designated location
+                    validate_spill_load(scratch_temp, source_temp, spill_loc, &mut alive);
+                } else if let Some(spill_loc) = mc.is_spill_store(i)