Commit d9196658 authored by Isaac Oscar Gariano

Updated ALLOCA implementation

Adds the IREF_VOID_TYPE internal type and a MEM_ZERO runtime entrypoint (muentry_mem_zero) for zero-initialising ALLOCA'd memory; adds emit_frame_shrink, emit_fake_ret and the muentry_return assembly stub; removes set_block_livein/set_block_liveout from the code generator; simplifies the emit_*_u64 helpers to reuse dest as scratch (dropping their f_context/vm parameters); and adds a test_alloca regression test.
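The recurring change: helpers such as emit_mul_u64 and emit_add_u64 no longer allocate a scratch register via make_temporary (so f_context and vm drop out of their signatures); instead they materialise the constant into dest and use dest itself as the second operand. A minimal standalone model of why this is sound when dest and src are distinct registers (plain Rust, not part of the patch):

fn main() {
    let src: u64 = 6;
    let val: u64 = 12345;
    let mut dest: u64;
    dest = val;                    // emit_mov_u64(backend, &dest, val)
    dest = src.wrapping_mul(dest); // backend.emit_mul(&dest, &src, &dest)
    assert_eq!(dest, 6 * 12345);   // same result, no temporary register
}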

parent c0089a9f
@@ -69,6 +69,10 @@ lazy_static! {
MuType::new(new_internal_id(), MuType_::muref(VOID_TYPE.clone()))
);
pub static ref IREF_VOID_TYPE : P<MuType> = P(
MuType::new(new_internal_id(), MuType_::iref(VOID_TYPE.clone()))
);
pub static ref INTERNAL_TYPES : Vec<P<MuType>> = vec![
ADDRESS_TYPE.clone(),
UINT1_TYPE.clone(),
@@ -82,6 +86,7 @@ lazy_static! {
FLOAT_TYPE.clone(),
VOID_TYPE.clone(),
REF_VOID_TYPE.clone(),
IREF_VOID_TYPE.clone(),
];
}
@@ -2035,59 +2035,6 @@ impl CodeGenerator for ASMCodeGen {
self.cur().blocks.contains_key(&block_name)
}
fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>) {
let cur = self.cur_mut();
match cur.blocks.get_mut(&block_name) {
Some(ref mut block) => {
if block.livein.is_empty() {
let mut live_in = {
let mut ret = vec![];
for p in live_in {
match p.extract_ssa_id() {
Some(id) => ret.push(id),
// this should not happen
None => error!("{} as live-in of block {} is not SSA", p, block_name)
}
}
ret
};
block.livein.append(&mut live_in);
} else {
panic!("seems we are inserting livein to block {} twice", block_name);
}
}
None => panic!("haven't created ASMBlock for {}", block_name)
}
}
fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>) {
let cur = self.cur_mut();
match cur.blocks.get_mut(&block_name) {
Some(ref mut block) => {
if block.liveout.is_empty() {
let mut live_out = {
let mut ret = vec![];
for p in live_out {
match p.extract_ssa_id() {
Some(id) => ret.push(id),
// the liveout are actually args out of this block
// (they can be constants)
None => trace!("{} as live-out of block {} is not SSA", p, block_name)
}
}
ret
};
block.liveout.append(&mut live_out);
} else {
panic!("seems we are inserting liveout to block {} twice", block_name);
}
}
None => panic!("haven't created ASMBlock for {}", block_name)
}
}
fn add_cfi_sections(&mut self, arg: &str) { self.add_asm_symbolic(format!(".cfi_sections {}", arg)); }
fn add_cfi_startproc(&mut self) {
self.add_asm_symbolic(".cfi_startproc".to_string());
@@ -2127,6 +2074,22 @@ impl CodeGenerator for ASMCodeGen {
)
}
fn emit_frame_shrink(&mut self) {
trace!("emit: \tframe shrink");
let asm = format!("ADD SP,SP,#{}", FRAME_SIZE_PLACEHOLDER.clone());
let line = self.line();
self.cur_mut().add_frame_size_patchpoint(ASMLocation::new(line, 11, FRAME_SIZE_PLACEHOLDER_LEN, 0));
self.add_asm_inst(
asm,
linked_hashmap!{},
linked_hashmap!{},
false
)
}
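A standalone sanity check of the hard-coded column offset above (plain Rust, not part of the patch): the frame-size placeholder starts right after "ADD SP,SP,#", i.e. at byte offset 11 of the emitted line, matching the ASMLocation.

fn main() {
    // "ADD SP,SP,#" occupies columns 0..11, so the placeholder that the
    // frame-size patchpoint rewrites begins at column 11.
    assert_eq!("ADD SP,SP,#".len(), 11);
}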
fn emit_add_str(&mut self, dest: Reg, src1: Reg, src2: &str) {self.internal_binop_str("ADD", dest, src1, src2)}
// Pushes a pair of registers onto the given stack (uses the STP instruction)
@@ -2174,7 +2137,12 @@ impl CodeGenerator for ASMCodeGen {
self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{id1 => vec![loc1]}, false, ASMBranchTarget::Return, None);
}
#[cfg(target_os = "linux")]
fn emit_fake_ret(&mut self) {
trace!("emit: \tFAKE RET");
let asm = format!("B muentry_return");
self.add_asm_inst_internal(asm, linked_hashmap!{}, linked_hashmap!{}, false, ASMBranchTarget::Return, None);
}
fn emit_bl(&mut self, callsite: String, func: MuName, pe: Option<MuName>, is_native: bool) -> ValueLocation {
if is_native {
trace!("emit: \tBL /*C*/ {}", func);
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use ast::ptr::P;
use ast::ir::*;
use runtime::ValueLocation;
@@ -20,6 +19,8 @@ use compiler::machine_code::MachineCode;
use compiler::backend::{Reg, Mem};
pub trait CodeGenerator {
fn emit_fake_ret(&mut self);
fn start_code(&mut self, func_name: MuName, entry: MuName) -> ValueLocation;
fn finish_code(&mut self, func_name: MuName) -> (Box<MachineCode + Sync + Send>, ValueLocation);
@@ -32,8 +33,6 @@ pub trait CodeGenerator {
fn start_block(&mut self, block_name: MuName);
fn block_exists(&self, block_name: MuName) -> bool;
fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
fn set_block_livein(&mut self, block_name: MuName, live_in: &Vec<P<Value>>);
fn set_block_liveout(&mut self, block_name: MuName, live_out: &Vec<P<Value>>);
fn end_block(&mut self, block_name: MuName);
// add CFI info
@@ -49,6 +48,7 @@ pub trait CodeGenerator {
// emit code to adjust frame
fn emit_frame_grow(&mut self); // Emits a SUB
fn emit_frame_shrink(&mut self); // Emits an ADD
// Used to pass a string that the assembler will interpret as an immediate argument
// (This is necessary to support the use of ELF relocations like ':tprel_hi12:foo')
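For illustration, a self-contained sketch of the intended call shape, using a stub trait in place of the real backend (':tprel_hi12:foo' is a hypothetical symbol; the assembler, not Zebu, resolves the relocation):

trait CodeGen { fn emit_add_str(&mut self, dest: &str, src1: &str, src2: &str); }
struct Printer;
impl CodeGen for Printer {
    fn emit_add_str(&mut self, dest: &str, src1: &str, src2: &str) {
        // Prints the instruction a backend along these lines would emit.
        println!("ADD {},{},#{}", dest, src1, src2);
    }
}
fn main() {
    let mut cg = Printer;
    cg.emit_add_str("x0", "x1", ":tprel_hi12:foo");
}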
@@ -91,7 +91,6 @@ pub trait CodeGenerator {
fn emit_ldaxp(&mut self, dest1: Reg, dest2: Reg, src: Mem); // [base]
fn emit_ldnp(&mut self, dest1: Reg/*GPR or FPR*/, dest2: Reg/*GPR or FPR*/, src: Mem); // [base, #simm7]
// Stores
fn emit_str(&mut self, dest: Mem, src: Reg/*GPR or FPR*/); // supports the full range of addressing modes
fn emit_sttr(&mut self, dest: Mem, src: Reg); // [base, #simm9]
@@ -105,9 +104,7 @@ pub trait CodeGenerator {
fn emit_stlxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg); // [base]
fn emit_stnp(&mut self, dest: Mem, src1: Reg/*GPR or FPR*/, src2: Reg/*GPR or FPR*/); // [base, #simm7]
// branching
// calls
// Calls
fn emit_bl(&mut self, callsite: String, func: MuName, pe: Option<MuName>, is_native: bool) -> ValueLocation;
fn emit_blr(&mut self, callsite: String, func: Reg, pe: Option<MuName>) -> ValueLocation;
@@ -130,7 +127,7 @@ pub trait CodeGenerator {
fn emit_adrp(&mut self, dest: Reg, src: Reg);
// Unary ops
fn emit_mov(&mut self, dest: Reg, src: Reg);
fn emit_mov(&mut self, dest: Reg/*GPR or SP or ZR*/, src: Reg/*GPR or SP or ZR*/); // The SP and ZR cannot both be used
fn emit_mvn(&mut self, dest: Reg, src: Reg);
fn emit_neg(&mut self, dest: Reg, src: Reg);
fn emit_negs(&mut self, dest: Reg, src: Reg);
@@ -148,7 +145,7 @@ pub trait CodeGenerator {
fn emit_rev16(&mut self, dest: Reg, src: Reg);
fn emit_rev32(&mut self, dest: Reg/*64*/, src: Reg);
fn emit_rev64(&mut self, dest: Reg/*64*/, src: Reg); // alias of REV
fn emit_fabs(&mut self, dest: Reg, src: Reg/*Must have different size*/);
fn emit_fabs(&mut self, dest: Reg, src: Reg);
fn emit_fcvt(&mut self, dest: Reg, src: Reg/*Must have different size*/);
fn emit_fcvtas(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
fn emit_fcvtau(&mut self, dest: Reg/*GPR, may have different size*/, src: Reg);
@@ -1662,7 +1662,7 @@ pub fn emit_mov_u64(backend: &mut CodeGenerator, dest: &P<Value>, val: u64)
}
// TODO: Will this be correct if src is treated as signed? (I think so; see the check after this function.)
pub fn emit_mul_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64)
pub fn emit_mul_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, val: u64)
{
if val == 0 {
// dest = 0
@@ -1677,17 +1676,16 @@ pub fn emit_mul_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>
backend.emit_lsl_imm(&dest, &src, log2(val as u64) as u8);
} else {
// dest = src * val
let temp_mul = make_temporary(f_context, src.ty.clone(), vm);
emit_mov_u64(backend, &temp_mul, val as u64);
backend.emit_mul(&dest, &src, &temp_mul);
emit_mov_u64(backend, &dest, val as u64);
backend.emit_mul(&dest, &src, &dest);
}
}
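A standalone check for the TODO above (plain Rust, not part of the patch): in two's complement the low 64 bits of a product are the same whether the operands are read as signed or unsigned, so the unsigned strength-reduction is also correct for signed sources.

fn main() {
    let src: i64 = -7;
    let val: u64 = 13;
    let as_unsigned = (src as u64).wrapping_mul(val);
    let as_signed = src.wrapping_mul(val as i64) as u64;
    assert_eq!(as_unsigned, as_signed); // identical bit patterns
}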
// Decrement the register by an immediate value
fn emit_sub_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64)
fn emit_sub_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, val: u64)
{
if (val as i64) < 0 {
emit_add_u64(backend, &dest, &src, f_context, vm, (-(val as i64) as u64));
emit_add_u64(backend, &dest, &src, (-(val as i64) as u64));
} else if val == 0 {
if dest.id() != src.id() {
backend.emit_mov(&dest, &src);
@@ -1697,17 +1696,16 @@ fn emit_sub_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_
let imm_val = if imm_shift { val >> 12 } else { val };
backend.emit_sub_imm(&dest, &src, imm_val as u16, imm_shift);
} else {
let tmp = make_temporary(f_context, UINT64_TYPE.clone(), vm);
emit_mov_u64(backend, &tmp, val);
backend.emit_sub(&dest, &src, &tmp);
emit_mov_u64(backend, &dest, val);
backend.emit_sub(&dest, &src, &dest);
}
}
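A standalone check of the sign-delegation above (plain Rust, not part of the patch): when val's i64 reading is negative, subtracting val is the same as adding its negation, modulo 2^64.

fn main() {
    let src: u64 = 100;
    let val: u64 = (-3i64) as u64; // "negative" when read as i64
    assert!((val as i64) < 0);
    assert_eq!(src.wrapping_sub(val),
               src.wrapping_add((-(val as i64)) as u64)); // both give 103
}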
// Increment the register by an immediate value
fn emit_add_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64)
fn emit_add_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, val: u64)
{
if (val as i64) < 0 {
emit_sub_u64(backend, &dest, &src, f_context, vm, (-(val as i64) as u64));
emit_sub_u64(backend, &dest, &src, (-(val as i64) as u64));
} else if val == 0 {
if dest.id() != src.id() {
backend.emit_mov(&dest, &src);
@@ -1717,14 +1715,13 @@ fn emit_add_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_
let imm_val = if imm_shift { val >> 12 } else { val };
backend.emit_add_imm(&dest, &src, imm_val as u16, imm_shift);
} else {
let tmp = make_temporary(f_context, UINT64_TYPE.clone(), vm);
emit_mov_u64(backend, &tmp, val);
backend.emit_add(&dest, &src, &tmp);
emit_mov_u64(backend, &dest, val);
backend.emit_add(&dest, &src, &dest);
}
}
// dest = src1*val + src2
fn emit_madd_u64(backend: &mut CodeGenerator, dest: &P<Value>, src1: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64, src2: &P<Value>)
fn emit_madd_u64(backend: &mut CodeGenerator, dest: &P<Value>, src1: &P<Value>, val: u64, src2: &P<Value>)
{
if val == 0 {
// dest = src2
@@ -1732,17 +1729,49 @@ fn emit_madd_u64(backend: &mut CodeGenerator, dest: &P<Value>, src1: &P<Value>,
} else if val == 1 {
// dest = src1 + src2
backend.emit_add(&dest, &src1, &src2);
} else if val == !0 {
// dest = src2 - src1 (!0 reads as -1 in wrapping arithmetic; see the check after this function)
backend.emit_sub(&dest, &src2, &src1);
} else if val.is_power_of_two() {
// dest = src1 << log2(val) + src2
backend.emit_lsl_imm(&dest, &src1, log2(val as u64) as u8);
backend.emit_add(&dest, &dest, &src2);
} else {
// dest = src1 * val + src2
let temp_mul = make_temporary(f_context, src1.ty.clone(), vm);
emit_mov_u64(backend, &temp_mul, val as u64);
backend.emit_madd(&dest, &src1, &temp_mul, &src2);
emit_mov_u64(backend, &dest, val as u64);
backend.emit_madd(&dest, &src1, &dest, &src2);
}
}
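A standalone check of the val == !0 case above (plain Rust, not part of the patch): !0 acts as -1 under wrapping arithmetic, so multiply-accumulate degenerates to a subtraction.

fn main() {
    let (src1, src2): (u64, u64) = (42, 1000);
    assert_eq!(src1.wrapping_mul(!0u64).wrapping_add(src2),
               src2.wrapping_sub(src1)); // src1*(-1) + src2 == src2 - src1
}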
// dest = src*val1 + val2
fn emit_madd_u64_u64(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val1: u64, val2: u64)
{
if val2 == 0 {
// dest = src*val1
emit_mul_u64(backend, &dest, &src, val1);
} else if val1 == 0 {
// dest = val2
emit_mov_u64(backend, &dest, val2);
} else if val1 == 1 {
// dest = src + val2
emit_add_u64(backend, &dest, &src, val2);
} else if val1 == !0 {
// dest = val2 - src
emit_mov_u64(backend, &dest, val2);
backend.emit_sub(&dest, &dest, &src);
} else if val1.is_power_of_two() {
// dest = (src << log2(val1)) + val2 (see the check after this function)
backend.emit_lsl_imm(&dest, &src, log2(val1 as u64) as u8);
// Add val2 into dest, not src, so the shift just computed isn't lost
emit_add_u64(backend, &dest, &dest, val2);
} else {
// dest = src * val1 + val2
let tmp = make_temporary(f_context, src.ty.clone(), vm);
emit_mov_u64(backend, &dest, val1 as u64);
emit_mov_u64(backend, &tmp, val2 as u64);
backend.emit_madd(&dest, &src, &dest, &tmp);
}
}
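And a standalone check of the power-of-two branch (plain Rust, not part of the patch): a left shift plus an add agrees with multiply-accumulate when the multiplier is a power of two.

fn main() {
    let (src, val1, val2): (u64, u64, u64) = (37, 8, 5);
    assert_eq!((src << val1.trailing_zeros()).wrapping_add(val2),
               src.wrapping_mul(val1).wrapping_add(val2)); // both 301
}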
// Compare register with value
fn emit_cmp_u64(backend: &mut CodeGenerator, src1: &P<Value>, f_context: &mut FunctionContext, vm: &VM, val: u64)
{
@@ -1863,7 +1892,7 @@ fn emit_reg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fu
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
emit_calculate_address(backend, &tmp, &mem, vm);
tmp
},
&Constant::NullRef => {
@@ -1923,7 +1952,7 @@ pub fn emit_ireg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &m
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
emit_calculate_address(backend, &tmp, &mem, vm);
tmp
},
&Constant::NullRef => {
@@ -2021,7 +2050,7 @@ pub fn emit_mem(backend: &mut CodeGenerator, pv: &P<Value>, alignment: usize, f_
if !is_valid_immediate_scale(scale, alignment) {
let temp = make_temporary(f_context, offset.ty.clone(), vm);
emit_mul_u64(backend, &temp, &offset, f_context, vm, scale);
emit_mul_u64(backend, &temp, &offset, scale);
Some(temp)
} else {
shift = log2(scale) as u8;
@@ -2089,7 +2118,7 @@ fn emit_mem_base(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fun
base.clone() // trivial
} else {
let temp = make_temporary(f_context, pv.ty.clone(), vm);
emit_add_u64(backend, &temp, &base, f_context, vm, (offset_val * scale as i64) as u64);
emit_add_u64(backend, &temp, &base, (offset_val * scale as i64) as u64);
temp
}
} else {
@@ -2106,7 +2135,7 @@ fn emit_mem_base(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fun
let temp_offset = make_temporary(f_context, offset.ty.clone(), vm);
// temp_offset = offset * scale
emit_mul_u64(backend, &temp_offset, &offset, f_context, vm, scale);
emit_mul_u64(backend, &temp_offset, &offset, scale);
// Don't need to create a new register, just overwrite temp_offset
let temp = cast_value(&temp_offset, &pv.ty);
@@ -2131,7 +2160,7 @@ fn emit_mem_base(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fun
base.clone()
} else {
let temp = make_temporary(f_context, pv.ty.clone(), vm);
emit_add_u64(backend, &temp, &base, f_context, vm, offset as u64);
emit_add_u64(backend, &temp, &base, offset as u64);
temp
}
} else if RegGroup::get_from_value(&offset) == RegGroup::GPR && offset.is_reg() {
@@ -2224,16 +2253,16 @@ pub fn emit_addr_sym(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value
}
}
fn emit_calculate_address(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, f_context: &mut FunctionContext, vm: &VM) {
fn emit_calculate_address(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<Value>, vm: &VM) {
match src.v {
Value_::Memory(MemoryLocation::VirtualAddress{ref base, ref offset, scale, signed}) => {
if offset.is_some() {
let offset = offset.as_ref().unwrap();
if match_value_int_imm(offset) {
emit_add_u64(backend, &dest, &base, f_context, vm, ((value_imm_to_i64(offset, signed) as i64)*(scale as i64)) as u64);
emit_add_u64(backend, &dest, &base, ((value_imm_to_i64(offset, signed) as i64)*(scale as i64)) as u64);
} else {
// dest = offset * scale + base
emit_madd_u64(backend, &dest, &offset, f_context, vm, scale as u64, &base);
emit_madd_u64(backend, &dest, &offset, scale as u64, &base);
}
} else {
backend.emit_mov(&dest, &base)
......@@ -2250,7 +2279,7 @@ fn emit_calculate_address(backend: &mut CodeGenerator, dest: &P<Value>, src: &P<
// Offset is 0, address calculation is trivial
backend.emit_mov(&dest, &base);
} else {
emit_add_u64(backend, &dest, &base, f_context, vm, offset as u64);
emit_add_u64(backend, &dest, &base, offset as u64);
}
} else if is_int_reg(&offset) {
backend.emit_add_ext(&dest, &base, &offset, signed, shift);
@@ -2330,12 +2359,12 @@ fn memory_location_shift(backend: &mut CodeGenerator, mem: MemoryLocation, more_
if more_offset % (scale as i64) == 0 {
// temp = offset + more_offset/scale
emit_add_u64(backend, &temp, &offset, f_context, vm, (more_offset/(scale as i64)) as u64);
emit_add_u64(backend, &temp, &offset, (more_offset/(scale as i64)) as u64);
new_scale = scale;
} else {
// temp = offset*scale + more_offset
emit_mul_u64(backend, &temp, &offset, f_context, vm, scale);
emit_add_u64(backend, &temp, &temp, f_context, vm, more_offset as u64);
emit_mul_u64(backend, &temp, &offset, scale);
emit_add_u64(backend, &temp, &temp, more_offset as u64);
}
temp
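A standalone check of the divisible case above (plain Rust, not part of the patch): folding more_offset into the scaled offset preserves the computed address whenever scale divides more_offset.

fn main() {
    let (offset, scale, more_offset): (u64, u64, u64) = (10, 8, 24);
    // (offset + more_offset/scale) * scale == offset*scale + more_offset
    assert_eq!((offset + more_offset / scale) * scale,
               offset * scale + more_offset);
}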
@@ -2379,13 +2408,13 @@ fn memory_location_shift_scale(backend: &mut CodeGenerator, mem: MemoryLocation,
let temp = make_temporary(f_context, offset.ty.clone(), vm);
let offset_scaled = (offset.extract_int_const() as i64)*(scale as i64);
if offset_scaled % (new_scale as i64) == 0 {
emit_add_u64(backend, &temp, &more_offset, f_context, vm, (offset_scaled / (new_scale as i64)) as u64);
emit_add_u64(backend, &temp, &more_offset, (offset_scaled / (new_scale as i64)) as u64);
// new_scale*temp = (more_offset + (offset*scale)/new_scale)
// = more_offset*new_scale + offset*scale
} else {
// temp = more_offset*new_scale + offset*scale
emit_mul_u64(backend, &temp, &more_offset, f_context, vm, new_scale);
emit_add_u64(backend, &temp, &temp, f_context, vm, offset_scaled as u64);
emit_mul_u64(backend, &temp, &more_offset, new_scale);
emit_add_u64(backend, &temp, &temp, offset_scaled as u64);
new_scale = 1;
}
temp
@@ -2398,7 +2427,7 @@ fn memory_location_shift_scale(backend: &mut CodeGenerator, mem: MemoryLocation,
backend.emit_add_ext(&temp, &more_offset, &temp, signed, 0);
} else {
// temp = offset * scale
emit_mul_u64(backend, &temp, &offset, f_context, vm, scale);
emit_mul_u64(backend, &temp, &offset, scale);
if new_scale.is_power_of_two() && is_valid_immediate_extension(log2(new_scale)) {
// temp = (offset * scale) + more_offset << log2(new_scale)
@@ -2406,7 +2435,7 @@ fn memory_location_shift_scale(backend: &mut CodeGenerator, mem: MemoryLocation,
} else {
// temp_more = more_offset * new_scale
let temp_more = make_temporary(f_context, offset.ty.clone(), vm);
emit_mul_u64(backend, &temp_more, &more_offset, f_context, vm, new_scale);
emit_mul_u64(backend, &temp_more, &more_offset, new_scale);
// temp = (offset * scale) + (more_offset * new_scale);
backend.emit_add_ext(&temp, &temp_more, &temp, signed, 0);
@@ -284,4 +284,16 @@ lazy_static! {
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_print_hex")),
jit: RwLock::new(None)
};
// impl/decl: mod.rs
pub static ref MEM_ZERO : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![],
arg_tys: vec![IREF_VOID_TYPE.clone(), UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_mem_zero")),
jit: RwLock::new(None)
};
}
@@ -198,3 +198,8 @@ pub extern fn mu_main(edata: *const(), dumped_vm : *mut Arc<VM>, argc: c_int, ar
pub extern fn muentry_print_hex(x: u64) {
println!("PRINTHEX: 0x{:x}", x);
}
#[no_mangle]
pub unsafe extern fn muentry_mem_zero(dest: *mut u8, size: usize) {
// Zero-fill `size` bytes starting at `dest` (used to zero ALLOCA'd memory)
std::ptr::write_bytes(dest, 0, size);
}
\ No newline at end of file
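For reference, a standalone demonstration of the primitive muentry_mem_zero wraps (plain Rust, not part of the patch):

fn main() {
    let mut buf = [0xAAu8; 16];
    // write_bytes fills `count` bytes with the given value, here zero,
    // exactly as the entrypoint does for freshly allocated memory.
    unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) };
    assert!(buf.iter().all(|&b| b == 0));
}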
@@ -137,3 +137,9 @@ begin_func fake_swap_mu_thread
# return to caller, but preserve those pushed values (since THREADEXIT will pick them up)
RET
end_func fake_swap_mu_thread
begin_func muentry_return
# Unwind the current frame: restore SP from the frame pointer, pop the
# saved FP/LR pair, then return to the restored link register.
MOV SP, FP
pop_pair FP, LR
RET LR
end_func muentry_return
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util import execute, compile_bundle, load_bundle, get_function;
import pytest;
import ctypes;
# Tests that zebu correctly implements the ALLOCA instruction
def test_alloca():
lib = load_bundle(
"""
.typedef type = struct<int<64> double ref<void>>
.funcdef alloca <(int<64>)->(int<64>)>
{
entry(<int<64>>arg):
a = ALLOCA <type>
// Load the int field to ai_int
ai_ref = GETFIELDIREF <type 0> a
ai_int = LOAD <int<64>> ai_ref
// Load the double field to ad_int (converting it to an int<64>)
ad_ref = GETFIELDIREF <type 1> a
ad = LOAD <double> ad_ref
ad_int = BITCAST <double int<64>> ad
// Load the ref field to ar_int (which will be '0' for a null ref, and '1' otherwise)
ar_ref = GETFIELDIREF <type 2> a
ar = LOAD <ref<void>> ar_ref
ar_null = NE <ref<void>> ar <ref<void>>NULL
ar_int = ZEXT <int<1> int<64>> ar_null
// Store arg into the ALLOCA'd area
STORE <int<64>> ai_ref arg
argc_int = LOAD <int<64>> ai_ref
// Sum all the *_int values together
res_0 = ADD <int<64>> ai_int ad_int
res_1 = ADD <int<64>> res_0 ar_int
res_2 = ADD <int<64>> res_1 argc_int
RET res_2
}
""", "test_alloca");
alloca = get_function(lib.alloca, [ctypes.c_int64], ctypes.c_int64);
# Freshly ALLOCA'd memory is zero-initialised, so ai_int, ad_int and ar_int
# are all 0; the result is just argc_int, i.e. the argument itself.
assert(alloca(-56) == -56);
\ No newline at end of file