Commit 69d2807d authored by qinsoon

remove warnings; types (struct/hybrid) get reset in init_vm()

parent 06c91e7f
Pipeline #251 failed in 24 minutes and 46 seconds
@@ -51,6 +51,18 @@ lazy_static! {
];
}
+pub fn init_types() {
+    {
+        let mut map_lock = STRUCT_TAG_MAP.write().unwrap();
+        map_lock.clear();
+    }
+    {
+        let mut map_lock = HYBRID_TAG_MAP.write().unwrap();
+        map_lock.clear();
+    }
+}
#[derive(PartialEq, Debug, RustcEncodable, RustcDecodable)]
pub struct MuType {
pub hdr: MuEntityHeader,
......
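The new `init_types()` wipes the two tag maps so that struct and hybrid type definitions registered by a previous VM instance in the same process do not leak into the next one. The inner braces do real work: each `RwLock` write guard is dropped at its closing brace, so the two locks are never held at the same time. A minimal sketch of the pattern, assuming the maps are `RwLock<HashMap<..>>`s behind `lazy_static!` (the real value type is the type definition; `u64` stands in here):

```rust
#[macro_use]
extern crate lazy_static;

use std::collections::HashMap;
use std::sync::RwLock;

lazy_static! {
    // stand-ins for STRUCT_TAG_MAP / HYBRID_TAG_MAP
    static ref STRUCT_TAG_MAP: RwLock<HashMap<String, u64>> = RwLock::new(HashMap::new());
    static ref HYBRID_TAG_MAP: RwLock<HashMap<String, u64>> = RwLock::new(HashMap::new());
}

pub fn init_types() {
    {
        // write() blocks until no other reader or writer holds the lock
        let mut map_lock = STRUCT_TAG_MAP.write().unwrap();
        map_lock.clear();
    } // guard dropped here: STRUCT_TAG_MAP is unlocked again
    {
        let mut map_lock = HYBRID_TAG_MAP.write().unwrap();
        map_lock.clear();
    } // same for HYBRID_TAG_MAP
}
```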
@@ -2739,7 +2739,6 @@ pub fn emit_context(vm: &VM) {
{
use runtime::mm;
use runtime::mm::common::objectdump::*;
// persist globals
let global_locs_lock = vm.global_locations.read().unwrap();
@@ -2797,7 +2796,7 @@ pub fn emit_context(vm: &VM) {
// write ref with label
let load_ref = unsafe {cur_ref_addr.load::<Address>()};
if load_ref.is_zero() {
-            file.write("\t.quad 0\n".as_bytes());
+            file.write("\t.quad 0\n".as_bytes()).unwrap();
} else {
let label = match global_dump.relocatable_refs.get(&load_ref) {
Some(label) => label,
......
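The added `.unwrap()` silences an `unused_must_use` warning: `Write::write` returns an `io::Result<usize>`, and `Result` is `#[must_use]`, so discarding it warns. Note `write` may also succeed with a partial write; `write_all` loops until the whole buffer is out. A stricter version of the same line might look like this (hypothetical helper, not from the commit):

```rust
use std::io::Write;

// write_all retries on partial writes and hands the io::Result back to
// the caller instead of panicking the way unwrap() does
fn emit_zero_quad<W: Write>(file: &mut W) -> std::io::Result<()> {
    file.write_all("\t.quad 0\n".as_bytes())
}
```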
@@ -14,7 +14,6 @@ use runtime::entrypoints::RuntimeEntrypoint;
use compiler::CompilerPass;
use compiler::backend;
-use compiler::backend::BackendTypeInfo;
use compiler::backend::PROLOGUE_BLOCK_NAME;
use compiler::backend::x86_64;
use compiler::backend::x86_64::CodeGenerator;
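Most deletions in this commit have this shape: imports and bindings that rustc's `unused_imports` / `unused_variables` lints flag once the code using them is gone. The `ty_info` parameters removed in the hunks below fall under the same lint, since an unused function parameter warns just like an unused local. A small illustration with made-up names:

```rust
use std::collections::HashMap; // warning: unused import: `std::collections::HashMap`

// warning: unused variable: `ty_info` (prefix with `_` or remove it)
fn emit_alloc(size: u64, ty_info: &str) -> u64 {
    let tyid = size + 1; // warning: unused variable: `tyid`
    size
}
```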
@@ -1329,7 +1328,7 @@ impl <'a> InstructionSelection {
let const_size = self.make_value_int_const(size as u64, vm);
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-        let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), &ty_info, const_size, ty_align, node, f_content, f_context, vm);
+        let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), const_size, ty_align, node, f_content, f_context, vm);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
@@ -1432,7 +1431,7 @@ impl <'a> InstructionSelection {
};
let tmp_allocator = self.emit_get_allocator(node, f_content, f_context, vm);
-        let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), &ty_info, actual_size, ty_align, node, f_content, f_context, vm);
+        let tmp_res = self.emit_alloc_sequence(tmp_allocator.clone(), actual_size, ty_align, node, f_content, f_context, vm);
// ASM: call muentry_init_object(%allocator, %tmp_res, %encode)
let encode = self.make_value_int_const(mm::get_gc_type_encode(ty_info.gc_type.id), vm);
@@ -1505,13 +1504,13 @@ impl <'a> InstructionSelection {
})
}
-    fn emit_alloc_sequence (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+    fn emit_alloc_sequence (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
if size.is_int_const() {
// size known at compile time, we can choose to emit alloc_small or large now
if size.extract_int_const() > mm::LARGE_OBJECT_THRESHOLD as u64 {
-                self.emit_alloc_sequence_large(tmp_allocator, ty_info, size, align, node, f_content, f_context, vm)
+                self.emit_alloc_sequence_large(tmp_allocator, size, align, node, f_content, f_context, vm)
} else {
-                self.emit_alloc_sequence_small(tmp_allocator, ty_info, size, align, node, f_content, f_context, vm)
+                self.emit_alloc_sequence_small(tmp_allocator, size, align, node, f_content, f_context, vm)
}
} else {
// size is unknown at compile time
@@ -1532,7 +1531,7 @@ impl <'a> InstructionSelection {
self.backend.emit_jg(blk_alloc_large.clone());
// alloc small here
-            let tmp_res = self.emit_alloc_sequence_small(tmp_allocator.clone(), ty_info, size.clone(), align, node, f_content, f_context, vm);
+            let tmp_res = self.emit_alloc_sequence_small(tmp_allocator.clone(), size.clone(), align, node, f_content, f_context, vm);
self.backend.emit_jmp(blk_alloc_large_end.clone());
@@ -1546,7 +1545,7 @@ impl <'a> InstructionSelection {
self.backend.start_block(blk_alloc_large.clone());
self.backend.set_block_livein(blk_alloc_large.clone(), &vec![size.clone()]);
-            let tmp_res = self.emit_alloc_sequence_large(tmp_allocator.clone(), ty_info, size, align, node, f_content, f_context, vm);
+            let tmp_res = self.emit_alloc_sequence_large(tmp_allocator.clone(), size, align, node, f_content, f_context, vm);
self.backend.end_block(blk_alloc_large.clone());
self.backend.set_block_liveout(blk_alloc_large.clone(), &vec![tmp_res.clone()]);
@@ -1571,7 +1570,7 @@ impl <'a> InstructionSelection {
tmp_allocator
}
-    fn emit_alloc_sequence_large (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+    fn emit_alloc_sequence_large (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
let tmp_res = self.get_result_value(node);
// ASM: %tmp_res = call muentry_alloc_large(%allocator, size, align)
@@ -1587,7 +1586,7 @@ impl <'a> InstructionSelection {
tmp_res
}
-    fn emit_alloc_sequence_small (&mut self, tmp_allocator: P<Value>, ty_info: &BackendTypeInfo, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
+    fn emit_alloc_sequence_small (&mut self, tmp_allocator: P<Value>, size: P<Value>, align: usize, node: &TreeNode, f_content: &FunctionContent, f_context: &mut FunctionContext, vm: &VM) -> P<Value> {
// emit immix allocation fast path
// ASM: %tl = get_thread_local()
......
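With `ty_info` gone, `emit_alloc_sequence` keys purely off the size value: a compile-time-constant size lets the compiler pick the small or large allocation path statically against `mm::LARGE_OBJECT_THRESHOLD`, while a dynamic size forces it to emit both paths behind a runtime `cmp`/`jg`. A hedged sketch of that decision tree, with a stand-in threshold (the real constant is defined by the GC):

```rust
// stand-in for mm::LARGE_OBJECT_THRESHOLD, assumed for illustration only
const LARGE_OBJECT_THRESHOLD: u64 = 8192;

enum AllocSize {
    Const(u64), // known at compile time
    Dynamic,    // only known at run time
}

fn alloc_path(size: AllocSize) -> &'static str {
    match size {
        // constant size: choose the path now and emit a single sequence
        AllocSize::Const(n) if n > LARGE_OBJECT_THRESHOLD => "emit_alloc_sequence_large",
        AllocSize::Const(_) => "emit_alloc_sequence_small",
        // dynamic size: emit cmp size, THRESHOLD; jg blk_alloc_large,
        // then both blocks, joined again at blk_alloc_large_end
        AllocSize::Dynamic => "emit both blocks behind a runtime comparison",
    }
}
```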
@@ -56,8 +56,6 @@ pub fn allocate_hybrid(ty: P<MuType>, len: u64, vm: &VM) -> Address {
}
pub fn allocate_global(iref_global: P<Value>, vm: &VM) -> ValueLocation {
-    let tyid = iref_global.ty.id();
let referenced_type = match iref_global.ty.get_referenced_ty() {
Some(ty) => ty,
None => panic!("expected global to be an iref type, found {}", iref_global.ty)
......
@@ -4,8 +4,6 @@ use compiler::*;
use ast::ir::*;
use vm::*;
use std::sync::Arc;
-use runtime::thread::MuThread;
-use utils::Address;
use std::process::Command;
use std::process::Output;
......
@@ -421,6 +421,7 @@ impl <'a> VM {
active_handles: RwLock::new(hashmap!{})
};
// insert all internal types
{
let mut types = ret.types.write().unwrap();
for ty in INTERNAL_TYPES.iter() {
@@ -449,6 +450,9 @@ impl <'a> VM {
// init log
VM::start_logging(self.vm_options.flag_log_level);
+        // init types
+        types::init_types();
// init gc
{
let ref options = self.vm_options;
......
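Calling `types::init_types()` from `init_vm()` matters because the tag maps are `lazy_static` globals: they live for the whole process, not per VM instance. Without the reset, a second VM built in the same process, as happens when several tests run in one test binary, would start out with the previous VM's struct and hybrid tags. A hedged illustration, assuming a `VM::new()`-style constructor that runs `init_vm()` internally:

```rust
// hypothetical test, not from the repo: it only illustrates the
// process-global lifetime of the lazy_static tag maps
#[test]
fn second_vm_starts_with_clean_tag_maps() {
    let vm1 = VM::new(); // init_vm() clears STRUCT_TAG_MAP / HYBRID_TAG_MAP
    // ... vm1 registers some struct/hybrid types ...
    drop(vm1);

    let vm2 = VM::new(); // clears the maps again: no stale tags from vm1
    let _ = vm2;
}
```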
@@ -7,13 +7,10 @@ use self::mu::ast::ir::*;
use self::mu::ast::inst::*;
use self::mu::ast::op::*;
use self::mu::vm::*;
-use self::mu::compiler::*;
use self::mu::testutil;
use mu::utils::LinkedHashMap;
-use std::sync::RwLock;
-use std::sync::Arc;
use mu::testutil::aot;
#[test]
fn test_fp_add() {
......
@@ -13,7 +13,6 @@ use self::mu::ast::inst::*;
use self::mu::ast::op::*;
use utils::Address;
use utils::LinkedHashMap;
use mu::testutil;
use mu::testutil::aot;
use mu::vm::handle;
......