testing rodal alternatives

parent a7336ae4
@@ -57,3 +57,4 @@ extprim = "*"
num-traits = "*"
built = "*"
mu_gc = { path = "src/gc" }
cfg-if = "*"
@@ -53,5 +53,6 @@ fn main() {
}

fn built() {
    built::write_built_file()
        .expect("Failed to acquire build-time information");
}
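For context, built::write_built_file() emits a built.rs file into Cargo's OUT_DIR at build time. The crate's documented consumption pattern (not part of this diff; the module name is illustrative) is roughly:

// Hypothetical consumer module following the built crate's documented pattern.
pub mod built_info {
    // OUT_DIR is set by Cargo; built.rs is the file written by build.rs above.
    include!(concat!(env!("OUT_DIR"), "/built.rs"));
}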
max_width = 80
wrap_comments = true
trailing_comma = "Never"
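These rustfmt settings drive most of the mechanical churn below: an 80-column limit, comment rewrapping, and no trailing commas. For example, the width limit turns a one-line signature into the wrapped form seen in later hunks:

// before (over 80 columns):
pub fn compute_stack_args(tys: &Vec<P<MuType>>, vm: &VM) -> (ByteSize, Vec<ByteSize>) {
// after rustfmt with max_width = 80:
pub fn compute_stack_args(
    tys: &Vec<P<MuType>>,
    vm: &VM
) -> (ByteSize, Vec<ByteSize>) {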
@@ -85,6 +85,17 @@ impl Instruction {
| NewStack(_)
| NewThread { .. }
| NewRTThread { .. }
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NotifyThread(_) // TODO remove - not needed anymore
| SetPriority(_, _)
| GetPriority(_)
@@ -150,6 +161,17 @@ impl Instruction {
| AllocA(_)
| NewHybrid(_, _)
| AllocAHybrid(_, _)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NewStack(_)
| NewThread { .. }
| NewRTThread { .. }
@@ -239,6 +261,17 @@ impl Instruction {
| NewHybrid(_, _)
| AllocAHybrid(_, _)
| NewStack(_)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| NewThread { .. }
| NewRTThread { .. }
| NotifyThread(_)
@@ -320,6 +353,17 @@ impl Instruction {
| New(_)
| AllocA(_)
| NewHybrid(_, _)
| AllocAU(_)
| NewReg(_)
| DeleteReg(_)
| rAlloc(_, _)
| rAllocHybrid(_, _, _)
// | rAllocT(_)
| eAlloc(_)
| eAllocHybrid(_, _)
// | eAllocT(_)
| eDelete(_)
// | eDeleteT(_)
| AllocAHybrid(_, _)
| NewStack(_)
| NewThread { .. }
@@ -572,6 +616,55 @@ impl Instruction {
ops[func].ty().get_sig().unwrap(),
ops[func]
),
&Instruction_::NewReg(size) => format!(
"COMMINST @uvm.new_region({})",
ops[size]
),
&Instruction_::DeleteReg(regref) => format!(
"COMMINST @uvm.delete_region({})",
ops[regref]
),
&Instruction_::AllocAU(ref ty) => format!(
"COMMINST @uvm.AllocAU({})",
ty.id()
),
&Instruction_::rAlloc(regref, ref ty) => format!(
"COMMINST @uvm.rAlloc({}, {})",
ops[regref],
ty.id()
),
&Instruction_::rAllocHybrid(regref, ref ty, var_len) => format!(
"COMMINST @uvm.rAllocHybrid({}, {}, {})",
ops[regref],
ty.id(),
ops[var_len]
),
// &Instruction_::rAllocT(regref, ty) => format!(
// "COMMINST @uvm.rAllocT({}, {})",
// ops[regref],
// ops[ty]
// ),
&Instruction_::eAlloc(ref ty) => format!(
"COMMINST @uvm.eAlloc({})",
ty.id()
),
&Instruction_::eAllocHybrid(ref ty, var_len) => format!(
"COMMINST @uvm.eAllocHybrid({}, {})",
ty.id(),
ops[var_len]
),
// &Instruction_::eAllocT(ty) => format!(
// "COMMINST @uvm.eAllocT({})",
// ops[ty]
// ),
&Instruction_::eDelete(obj) => format!(
"COMMINST @uvm.eDelete({})",
ops[obj]
),
// &Instruction_::eDeleteT(obj) => format!(
// "COMMINST @uvm.eDeleteT({})",
// ops[obj]
// ),
&Instruction_::NewThread {
stack,
thread_local,
@@ -995,14 +1088,62 @@ pub enum Instruction_ {
/// allocate an object (non hybrid type) on the stack, yields an iref of the type
AllocA(P<MuType>),
/// allocate an object (non hybrid type) on the stack, yields an uptr of the type
AllocAU(P<MuType>),
/// allocate a hybrid type object in the heap, yields ref
/// args: the type of the hybrid, hybrid part length
NewHybrid(P<MuType>, OpIndex),
/// allocate an emm region
/// args: region size
NewReg(OpIndex),
/// deallocate a whole emm region
/// args: regionref to the target region
DeleteReg(OpIndex),
/// allocates an untraced object on an emm region
/// args: 1. a regionref to the target emm region
/// args: 2. the object type
/// returns: a uptr to the allocated object
rAlloc(OpIndex, P<MuType>),
/// as rAlloc, but for hybrid types
/// args: regionref to the target region, the hybrid type, hybrid part length
rAllocHybrid(OpIndex, P<MuType>, OpIndex),
// /// allocates a traced object on an emm region
// /// args: 1. a regionref to the target emm region
// /// args: 2. the object type
// /// returns: an iref to the allocated object
// rAllocT(OpIndex, P<MuType>),
/// allocate an untraced object on the emm
/// args: type of the object
/// returns: a uptr to the object
eAlloc(P<MuType>),
/// as eAlloc, but for hybrid types
/// args: the hybrid type, hybrid part length
eAllocHybrid(P<MuType>, OpIndex),
/// deallocate an untraced emm object
/// args: an uptr to the object
eDelete(OpIndex),
// /// allocate a traced object on the emm
// /// args: type of the object
// /// returns: an iref to the object
// eAllocT(P<MuType>),
// /// deallocate a traced emm object
// /// args: an iref to the object
// eDeleteT(OpIndex),
/// allocate a hybrid type object on the stack, yields iref
/// args: the type of the hybrid, hybrid part length
AllocAHybrid(P<MuType>, OpIndex),
// TODO add hybrid versions
// /// allocate a hybrid type object on the stack, yields uptr
// /// args: the type of the hybrid, hybrid part length
// AllocAUHybrid(P<MuType>, OpIndex),
/// create a new Mu stack, yields stack ref
/// args: functionref of the entry function
NewStack(OpIndex),
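Putting the new variants together, a client would drive the emm roughly as follows. This is a hedged sketch written in the COMMINST text form that the format arms above emit; operand names like %size, %r and %obj are invented for illustration:

%r   = COMMINST @uvm.new_region(%size)   // allocate an emm region of %size bytes
%obj = COMMINST @uvm.rAlloc(%r, @T)      // uptr to an untraced @T inside %r
// ... use %obj through its uptr ...
COMMINST @uvm.delete_region(%r)          // frees the region and every object in it

eAlloc/eDelete follow the same shape but allocate and free individual emm objects rather than whole regions.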
@@ -745,6 +745,10 @@ impl fmt::Debug for BlockContent {
}

impl BlockContent {
    /// returns the block's own argument at the given index
    pub fn get_own_args(&self, index: usize) -> &P<Value> {
        &self.args[index]
    }
    /// returns all the arguments passed to its successors
    pub fn get_out_arguments(&self) -> Vec<P<Value>> {
        let n_insts = self.body.len();
This diff is collapsed.
This diff is collapsed.
@@ -12,7 +12,7 @@ pub enum CallConvResult {
    GPR(P<Value>),
    GPREX(P<Value>, P<Value>),
    FPR(P<Value>),
    STACK
}

pub mod mu {
@@ -55,10 +55,12 @@ pub mod c {
        ret.push(CallConvResult::STACK);
    }
} else if arg_reg_group == RegGroup::GPREX {
    // need two registers for this; otherwise, we need to pass on
    // stack
    if gpr_arg_count + 1 < x86_64::ARGUMENT_GPRS.len() {
        let arg_gpr1 = x86_64::ARGUMENT_GPRS[gpr_arg_count].clone();
        let arg_gpr2 =
            x86_64::ARGUMENT_GPRS[gpr_arg_count + 1].clone();
        ret.push(CallConvResult::GPREX(arg_gpr1, arg_gpr2));
        gpr_arg_count += 2;
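GPREX covers values that occupy two general-purpose registers at once (for example a 128-bit integer). A minimal standalone sketch of the pairing rule above, with plain register names standing in for the P<Value> entries of x86_64::ARGUMENT_GPRS:

// Sketch only: ARG_GPRS mirrors the System V argument-register order;
// None models the fall-through to stack passing.
const ARG_GPRS: [&str; 6] = ["rdi", "rsi", "rdx", "rcx", "r8", "r9"];

fn assign_gprex(gpr_arg_count: usize) -> Option<(&'static str, &'static str)> {
    if gpr_arg_count + 1 < ARG_GPRS.len() {
        // two consecutive argument registers are still free
        Some((ARG_GPRS[gpr_arg_count], ARG_GPRS[gpr_arg_count + 1]))
    } else {
        // not enough registers left: the value goes on the stack
        None
    }
}

For instance, assign_gprex(4) yields Some(("r8", "r9")), while assign_gprex(5) falls through to stack passing.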
@@ -83,7 +85,10 @@ pub mod c {
    ret
}

pub fn compute_stack_args(
    tys: &Vec<P<MuType>>,
    vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
    let callconv = compute_arguments(tys);
    let mut stack_arg_tys = vec![];
@@ -124,7 +129,8 @@ pub mod c {
} else if RegGroup::get_from_ty(ty) == RegGroup::GPREX {
    if gpr_ret_count + 1 < x86_64::RETURN_GPRS.len() {
        let ret_gpr1 = x86_64::RETURN_GPRS[gpr_ret_count].clone();
        let ret_gpr2 =
            x86_64::RETURN_GPRS[gpr_ret_count + 1].clone();
        ret.push(CallConvResult::GPREX(ret_gpr1, ret_gpr2));
    } else {
@@ -149,14 +155,19 @@ pub mod c {
    ret
}

pub fn compute_stack_retvals(
    tys: &Vec<P<MuType>>,
    vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
    let callconv = compute_return_values(tys);
    let mut stack_ret_val_tys = vec![];
    for i in 0..callconv.len() {
        let ref cc = callconv[i];
        match cc {
            &CallConvResult::STACK => {
                stack_ret_val_tys.push(tys[i].clone())
            }
            _ => {}
        }
    }
@@ -164,11 +175,11 @@ pub mod c {
    compute_stack_locations(&stack_ret_val_tys, vm)
}

/// computes the area on the stack for a list of types that need to be put
/// on the stack, returns a tuple of (size, offsets for each value on stack)
pub fn compute_stack_locations(
    stack_val_tys: &Vec<P<MuType>>,
    vm: &VM
) -> (ByteSize, Vec<ByteSize>) {
    let (stack_arg_size, _, stack_arg_offsets) =
        BackendType::sequential_layout(stack_val_tys, vm);
@@ -176,7 +187,8 @@ pub mod c {
    // "The end of the input argument area shall be aligned on a 16
    // (32, if __m256 is passed on stack) byte boundary." - x86 ABI
    // if we need to special align the args, we do it now
    // (then the args will be put on the stack following their regular
    // alignment)
    let mut stack_arg_size_with_padding = stack_arg_size;
    if stack_arg_size % 16 == 0 {
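The ABI quote above requires the argument area to end on a 16-byte boundary (32 for __m256, which this hunk ignores). A minimal sketch of the padding computation, assuming the common 16-byte case:

// Round a stack-argument area up to the next 16-byte boundary.
fn pad_stack_args_to_16(stack_arg_size: usize) -> usize {
    if stack_arg_size % 16 == 0 {
        stack_arg_size // already aligned; no padding needed
    } else {
        (stack_arg_size / 16 + 1) * 16 // e.g. 24 bytes becomes 32
    }
}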
@@ -19,17 +19,21 @@ use runtime::ValueLocation;
use compiler::backend::{Mem, Reg};
use compiler::machine_code::MachineCode;
/// CodeGenerator provides an interface to emit x86_64 code for instruction
/// selection. This allows us to implement the other parts of the compiler
/// (mostly instruction selection) without assuming a particular code
/// generator. Currently there is only an assembly backend that implements
/// this interface for ahead-of-time compilation. We plan to add a binary
/// backend for just-in-time compilation.
pub trait CodeGenerator {
    /// starts code for a function
    fn start_code(&mut self, func_name: MuName, entry: MuName)
        -> ValueLocation;
    /// finishes code for a function
    fn finish_code(
        &mut self,
        func_name: MuName
    ) -> (Box<MachineCode + Sync + Send>, ValueLocation);

    /// starts a sequence of linear code (no branch)
    fn start_code_sequence(&mut self);
@@ -43,7 +47,8 @@ pub trait CodeGenerator {
    fn start_block(&mut self, block_name: MuName);
    /// starts an exceptional block, and returns its code address
    fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
    /// finishes a block (must have called start_block() or
    /// start_exception_block() first)
    fn end_block(&mut self, block_name: MuName);

    // adds CFI info
@@ -238,7 +243,7 @@ pub trait CodeGenerator {
        pe: Option<MuName>,
        uses: Vec<P<Value>>,
        defs: Vec<P<Value>>,
        is_native: bool
    ) -> ValueLocation;
    fn emit_call_near_r64(
        &mut self,
@@ -246,7 +251,7 @@ pub trait CodeGenerator {
        func: &P<Value>,
        pe: Option<MuName>,
        uses: Vec<P<Value>>,
        defs: Vec<P<Value>>
    ) -> ValueLocation;
    fn emit_call_near_mem64(
        &mut self,
@@ -254,7 +259,7 @@ pub trait CodeGenerator {
        func: &P<Value>,
        pe: Option<MuName>,
        uses: Vec<P<Value>>,
        defs: Vec<P<Value>>
    ) -> ValueLocation;

    // sometimes we use jmp as a call (but without pushing return address)
@@ -265,7 +270,7 @@ pub trait CodeGenerator {
        pe: Option<MuName>,
        uses: Vec<P<Value>>,
        defs: Vec<P<Value>>,
        is_native: bool
    ) -> ValueLocation;
    fn emit_call_jmp_indirect(
        &mut self,
@@ -273,7 +278,7 @@ pub trait CodeGenerator {
        func: &P<Value>,
        pe: Option<MuName>,
        uses: Vec<P<Value>>,
        defs: Vec<P<Value>>
    ) -> ValueLocation;

    fn emit_ret(&mut self);
@@ -18,8 +18,9 @@
pub mod inst_sel;

mod codegen;
/// CodeGenerator trait serves as an interface to the backend code
/// generator, which may generate assembly code or binary (not implemented
/// yet)
use compiler::backend::x86_64::codegen::CodeGenerator;

/// assembly backend as AOT compiler
@@ -54,16 +55,22 @@ use utils::LinkedHashMap;
// number of normal callee saved registers (excluding RSP and RBP)
pub const CALLEE_SAVED_COUNT: usize = 5;
/// a macro to declare a set of general purpose registers that are aliased to
/// the first one
macro_rules! GPR_ALIAS {
    ($alias: ident: ($id64: expr, $r64: ident) ->
     $r32: ident, $r16: ident, $r8l: ident, $r8h: ident) => {
        lazy_static! {
            pub static ref $r64: P<Value> =
                GPR!($id64, stringify!($r64), UINT64_TYPE);
            pub static ref $r32: P<Value> =
                GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
            pub static ref $r16: P<Value> =
                GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
            pub static ref $r8l: P<Value> =
                GPR!($id64 + 3, stringify!($r8l), UINT8_TYPE);
            pub static ref $r8h: P<Value> =
                GPR!($id64 + 4, stringify!($r8h), UINT8_TYPE);
            pub static ref $alias: [P<Value>; 5] = [
                $r64.clone(),
                $r32.clone(),
@@ -76,10 +83,14 @@ macro_rules! GPR_ALIAS {
    ($alias: ident: ($id64: expr, $r64: ident) -> $r32: ident, $r16: ident, $r8: ident) => {
        lazy_static! {
            pub static ref $r64: P<Value> =
                GPR!($id64, stringify!($r64), UINT64_TYPE);
            pub static ref $r32: P<Value> =
                GPR!($id64 + 1, stringify!($r32), UINT32_TYPE);
            pub static ref $r16: P<Value> =
                GPR!($id64 + 2, stringify!($r16), UINT16_TYPE);
            pub static ref $r8: P<Value> =
                GPR!($id64 + 3, stringify!($r8), UINT8_TYPE);
            pub static ref $alias: [P<Value>; 4] =
                [$r64.clone(), $r32.clone(), $r16.clone(), $r8.clone()];
        }
@@ -87,7 +98,8 @@ macro_rules! GPR_ALIAS {
    ($alias: ident: ($id64: expr, $r64: ident)) => {
        lazy_static! {
            pub static ref $r64: P<Value> =
                GPR!($id64, stringify!($r64), UINT64_TYPE);
            pub static ref $alias: [P<Value>; 4] =
                [$r64.clone(), $r64.clone(), $r64.clone(), $r64.clone()];
        }
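For reference, the first arm of GPR_ALIAS! would be invoked along these lines (the numeric base id is illustrative; real ids are assigned at the actual call sites, which are not shown in this diff):

// Declares RAX and its EAX/AX/AL/AH views, plus an RAX_ALIAS array grouping
// all five; the sub-register ids are derived from the base id (0 through 4).
GPR_ALIAS!(RAX_ALIAS: (0, RAX) -> EAX, AX, AL, AH);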
@@ -100,7 +112,7 @@ macro_rules! GPR {
        P(Value {
            hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
            ty: $ty.clone(),
            v: Value_::SSAVar($id)
        })
    }};
}
@@ -111,7 +123,7 @@ macro_rules! FPR {
        P(Value {
            hdr: MuEntityHeader::named($id, Arc::new($name.to_string())),
            ty: DOUBLE_TYPE.clone(),
            v: Value_::SSAVar($id)
        })
    }};
}
@@ -186,7 +198,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
    if id < FPR_ID_START {
        let vec = match GPR_ALIAS_TABLE.get(&id) {
            Some(vec) => vec,
            None => panic!("didn't find {} as GPR", id)
        };

        match length {
@@ -195,7 +207,7 @@ pub fn get_alias_for_length(id: MuID, length: usize) -> P<Value> {
            16 => vec[2].clone(),
            8 => vec[3].clone(),
            1 => vec[3].clone(),
            _ => panic!("unexpected length {} for {}", length, vec[0])
        }
    } else {