
Commit 8728d082 authored by qinsoon

vm.rs

parent 83c91384
......@@ -92,6 +92,7 @@ impl Instruction {
| CommonInst_SetThreadLocal(_)
| CommonInst_Pin(_)
| CommonInst_Unpin(_)
| CommonInst_GetAddr(_)
| CommonInst_Tr64IsFp(_)
| CommonInst_Tr64IsInt(_)
| CommonInst_Tr64IsRef(_)
......@@ -174,8 +175,9 @@ impl Instruction {
ExnInstruction{..} => true,
CommonInst_GetThreadLocal => true,
CommonInst_SetThreadLocal(_) => true,
CommonInst_Pin(_) => true,
CommonInst_Unpin(_) => true,
CommonInst_Pin(_)
| CommonInst_Unpin(_)
| CommonInst_GetAddr(_) => true,
CommonInst_Tr64IsFp(_)
| CommonInst_Tr64IsInt(_)
| CommonInst_Tr64IsRef(_) => false,
......@@ -241,6 +243,7 @@ impl Instruction {
| CommonInst_SetThreadLocal(_)
| CommonInst_Pin(_)
| CommonInst_Unpin(_)
| CommonInst_GetAddr(_)
| CommonInst_Tr64IsFp(_)
| CommonInst_Tr64IsInt(_)
| CommonInst_Tr64IsRef(_)
......@@ -311,6 +314,7 @@ impl Instruction {
| CommonInst_SetThreadLocal(_)
| CommonInst_Pin(_)
| CommonInst_Unpin(_)
| CommonInst_GetAddr(_)
| CommonInst_Tr64IsFp(_)
| CommonInst_Tr64IsInt(_)
| CommonInst_Tr64IsRef(_)
......@@ -572,6 +576,8 @@ pub enum Instruction_ {
CommonInst_Pin (OpIndex),
/// common inst: unpin an object (the object is automatically managed by GC)
CommonInst_Unpin(OpIndex),
/// common inst: get address of a global cell or a pinned object
CommonInst_GetAddr(OpIndex),
/// common inst: is the tagref a floating point?
CommonInst_Tr64IsFp(OpIndex),
......@@ -720,9 +726,9 @@ impl Instruction_ {
&Instruction_::CommonInst_GetThreadLocal => format!("COMMONINST GetThreadLocal"),
&Instruction_::CommonInst_SetThreadLocal(op) => format!("COMMONINST SetThreadLocal {}", ops[op]),
&Instruction_::CommonInst_Pin(op) => format!("COMMONINST Pin {}", ops[op]),
&Instruction_::CommonInst_Unpin(op) => format!("COMMONINST Unpin {}", ops[op]),
&Instruction_::CommonInst_Pin(op) => format!("COMMONINST Pin {}", ops[op]),
&Instruction_::CommonInst_Unpin(op) => format!("COMMONINST Unpin {}", ops[op]),
&Instruction_::CommonInst_GetAddr(op)=> format!("COMMONINST GetAddr {}", ops[op]),
// Tagref64
&Instruction_::CommonInst_Tr64IsFp (op) => format!("COMMONINST Tr64IsFp {}", ops[op]),
&Instruction_::CommonInst_Tr64IsInt (op) => format!("COMMONINST Tr64IsInt {}", ops[op]),
......
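Editor's note on the pattern above: the commit adds one new enum variant, `CommonInst_GetAddr(OpIndex)`, and folds it into each of the existing grouped match arms so the instruction classification methods stay exhaustive. A minimal, self-contained sketch of that pattern, with a simplified enum and a hypothetical predicate name (not the actual Mu sources):

```rust
// Sketch only: a stripped-down Instruction_ enum and one classification method,
// mirroring how the commit threads CommonInst_GetAddr through grouped match arms.
#[allow(non_camel_case_types)]
enum Instruction_ {
    CommonInst_Pin(usize),
    CommonInst_Unpin(usize),
    CommonInst_GetAddr(usize), // the variant added by this commit
    CommonInst_Tr64IsFp(usize),
}

impl Instruction_ {
    // Hypothetical predicate standing in for the real `=> true` arm in the diff.
    fn has_side_effect(&self) -> bool {
        use Instruction_::*;
        match self {
            CommonInst_Pin(_)
            | CommonInst_Unpin(_)
            | CommonInst_GetAddr(_) => true, // grouped arm, as in the diff
            CommonInst_Tr64IsFp(_) => false,
        }
    }
}

fn main() {
    assert!(Instruction_::CommonInst_GetAddr(0).has_side_effect());
}
```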
......@@ -1118,7 +1118,7 @@ impl <'a> InstructionSelection {
let hybrid_ty_ = map_lock.get(name).unwrap();
let var_ty = hybrid_ty_.get_var_ty();
vm.get_backend_type_info(var_ty.id()).size
vm.get_backend_type_size(var_ty.id())
},
_ => panic!("only expect HYBRID type here")
};
......@@ -2769,7 +2769,7 @@ impl <'a> InstructionSelection {
fn compute_return_allocation(&self, t: &P<MuType>, vm: &VM) -> usize
{
use ast::types::MuType_::*;
let size = round_up(vm.get_type_size(t.id()), 8);
let size = round_up(vm.get_backend_type_size(t.id()), 8);
match t.v {
Vector(_, _) => unimplemented!(),
Float | Double => 0, // Can return in FPR
......@@ -2792,7 +2792,7 @@ impl <'a> InstructionSelection {
fn compute_return_registers(&mut self, t: &P<MuType>, vm: &VM) -> Vec<P<Value>>
{
use ast::types::MuType_::*;
let size = round_up(vm.get_type_size(t.id()), 8);
let size = round_up(vm.get_backend_type_size(t.id()), 8);
match t.v {
Vector(_, _) => unimplemented!(),
Float | Double =>
......@@ -2835,7 +2835,7 @@ impl <'a> InstructionSelection {
fn compute_return_locations(&mut self, t: &P<MuType>, loc: &P<Value>, vm: &VM) -> P<Value>
{
use ast::types::MuType_::*;
let size = round_up(vm.get_type_size(t.id()), 8);
let size = round_up(vm.get_backend_type_size(t.id()), 8);
match t.v {
Vector(_, _) => unimplemented!(),
Float | Double => get_alias_for_length(RETURN_FPRS[0].id(), get_bit_size(t, vm)),
......@@ -2887,7 +2887,7 @@ impl <'a> InstructionSelection {
hfa_length(t.clone()) == 0 && // HFA's aren't converted to IRef's
match t.v {
Hybrid(_) => panic!("Hybrid argument not supported"), // size can't be statically determined
Struct(_) | Array(_, _) if vm.get_type_size(t.id()) > 16 => true, // type is too large
Struct(_) | Array(_, _) if vm.get_backend_type_size(t.id()) > 16 => true, // type is too large
Vector(_, _) => unimplemented!(),
_ => false
}
......@@ -2898,7 +2898,7 @@ impl <'a> InstructionSelection {
for i in 0..arg_types.len() {
let i = i as usize;
let t = if reference[i] { P(MuType::new(new_internal_id(), MuType_::IRef(arg_types[i].clone()))) } else { arg_types[i].clone() };
let size = round_up(vm.get_type_size(t.id()), 8);
let size = round_up(vm.get_backend_type_size(t.id()), 8);
let align = get_type_alignment(&t, vm);
match t.v {
Hybrid(_) => panic!("hybrid argument not supported"),
......@@ -4078,20 +4078,20 @@ impl <'a> InstructionSelection {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
let fix_part_size = vm.get_backend_type_info(struct_ty.id()).size;
let fix_part_size = vm.get_backend_type_size(struct_ty.id());
self.emit_offset_ref(&ops[base], fix_part_size as i64, f_content, f_context, vm)
}
// SHIFTIREF < T1 T2 > opnd offset = opnd + offset*size_of(T1)
Instruction_::ShiftIRef{base, offset, ..} => {
let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
let element_size = vm.get_backend_type_size(element_type.id());
self.emit_shift_ref(&ops[base], &ops[offset], element_size, f_content, f_context, vm)
}
// GETELEMIREF <T1 T2> opnd index = opnd + index*element_size(T1)
Instruction_::GetElementIRef{base, index, ..} => {
let element_type = ops[base].clone_value().ty.get_referent_ty().unwrap().get_elem_ty().unwrap();
let element_size = vm.get_backend_type_info(element_type.id()).size;
let element_size = vm.get_backend_type_size(element_type.id());
self.emit_shift_ref(&ops[base], &ops[index], element_size, f_content, f_context, vm)
}
......
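Editor's note: the repeated change in this file (and in the x86_64 backend below) replaces `vm.get_backend_type_info(id).size` with a single call, `vm.get_backend_type_size(id)`. A plausible shape for that convenience wrapper, sketched with stub types rather than the real `VM`:

```rust
// Sketch with stub types: what a get_backend_type_size convenience wrapper
// presumably looks like, so call sites no longer fetch the whole info struct.
type MuID = usize;
type ByteSize = usize;

struct BackendTypeInfo {
    size: ByteSize,
    alignment: ByteSize,
}

struct VM;

impl VM {
    fn get_backend_type_info(&self, _ty: MuID) -> BackendTypeInfo {
        // Stubbed lookup; the real VM consults its backend type table.
        BackendTypeInfo { size: 8, alignment: 8 }
    }

    // The wrapper: one call instead of a `.size` projection at every use site.
    fn get_backend_type_size(&self, ty: MuID) -> ByteSize {
        self.get_backend_type_info(ty).size
    }
}

fn main() {
    let vm = VM;
    assert_eq!(vm.get_backend_type_size(42), 8);
}
```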
......@@ -303,7 +303,7 @@ pub fn get_bit_size(ty : &P<MuType>, vm: &VM) -> usize
MuType_::Vector(ref t, n) => get_bit_size(t, vm)*n,
MuType_::Array(ref t, n) => get_bit_size(t, vm)*n,
MuType_::Void => 0,
_ => vm.get_type_size(ty.id())*8,
_ => vm.get_backend_type_size(ty.id())*8,
}
}
}
......@@ -1862,7 +1862,7 @@ fn emit_reg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &mut Fu
&Constant::FuncRef(func_id) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name(func_id), true, &ADDRESS_TYPE, vm);
let mem = make_value_symbolic(vm.get_func_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
tmp
},
......@@ -1922,7 +1922,7 @@ pub fn emit_ireg_value(backend: &mut CodeGenerator, pv: &P<Value>, f_context: &m
&Constant::FuncRef(func_id) => {
let tmp = make_temporary(f_context, pv.ty.clone(), vm);
let mem = make_value_symbolic(vm.get_func_name(func_id), true, &ADDRESS_TYPE, vm);
let mem = make_value_symbolic(vm.get_func_name_for_func(func_id), true, &ADDRESS_TYPE, vm);
emit_calculate_address(backend, &tmp, &mem, f_context, vm);
tmp
},
......
......@@ -789,7 +789,7 @@ impl <'a> InstructionSelection {
// Truncate (from int to int)
op::ConvOp::TRUNC => {
let tmp_res = self.get_result_value(node);
let to_ty_size = vm.get_backend_type_info(tmp_res.ty.id()).size;
let to_ty_size = vm.get_backend_type_size(tmp_res.ty.id());
if self.match_ireg(op) {
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
......@@ -821,8 +821,8 @@ impl <'a> InstructionSelection {
let tmp_res = self.get_result_value(node);
// movz op -> result
let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
let from_ty_size = vm.get_backend_type_size(from_ty.id());
let to_ty_size = vm.get_backend_type_size(to_ty.id());
// we treat int1 as int8, so it is possible from_ty_size == to_ty_size == 1 byte
assert!(from_ty_size <= to_ty_size);
......@@ -866,8 +866,8 @@ impl <'a> InstructionSelection {
let tmp_res = self.get_result_value(node);
// movs op -> result
let from_ty_size = vm.get_backend_type_info(from_ty.id()).size;
let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
let from_ty_size = vm.get_backend_type_size(from_ty.id());
let to_ty_size = vm.get_backend_type_size(to_ty.id());
// we treat int1 as int8, so it is possible from_ty_size == to_ty_size == 1 byte
assert!(from_ty_size <= to_ty_size);
......@@ -944,7 +944,7 @@ impl <'a> InstructionSelection {
assert!(self.match_fpreg(op), "unexpected op (expected fpreg): {}", op);
let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
let to_ty_size = vm.get_backend_type_info(to_ty.id()).size;
let to_ty_size = vm.get_backend_type_size(to_ty.id());
match to_ty_size {
1 | 2 | 4 | 8 => {
match from_ty.v {
......@@ -967,7 +967,7 @@ impl <'a> InstructionSelection {
if self.match_ireg(op) {
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
let op_ty_size = vm.get_backend_type_info(tmp_op.ty.id()).size;
let op_ty_size = vm.get_backend_type_size(tmp_op.ty.id());
if to_ty.is_double() {
match op_ty_size {
......@@ -1082,7 +1082,7 @@ impl <'a> InstructionSelection {
assert!(self.match_fpreg(op), "unexpected op (expected fpreg): {}", op);
let tmp_op = self.emit_fpreg(op, f_content, f_context, vm);
let res_ty_size = vm.get_backend_type_info(tmp_res.ty.id()).size;
let res_ty_size = vm.get_backend_type_size(tmp_res.ty.id());
if from_ty.is_double() {
match res_ty_size {
......@@ -1375,6 +1375,8 @@ impl <'a> InstructionSelection {
self.emit_store_base_offset(&tl, *thread::USER_TLS_OFFSET as i32, &tmp_op, vm);
}
// FIXME: the semantic of Pin/Unpin is different from spec
// See Issue #33
Instruction_::CommonInst_Pin(op) => {
trace!("instsel on PIN");
......@@ -1408,6 +1410,20 @@ impl <'a> InstructionSelection {
None,
Some(node), f_content, f_context, vm);
}
Instruction_::CommonInst_GetAddr(op) => {
use runtime::mm::objectmodel::GC_IREF_HAS_OFFSET;
debug_assert!(!GC_IREF_HAS_OFFSET);
trace!("instsel on GETADDR");
// assume it is pinned
let ref op = inst.ops[op];
assert!(self.match_ireg(op));
let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
let tmp_res = self.get_result_value(node);
self.emit_move_value_to_value(&tmp_res, &tmp_op);
}
Instruction_::Move(op) => {
trace!("instsel on MOVE (internal IR)");
......@@ -1911,7 +1927,7 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_info(op1.as_value().ty.id()).size;
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
......@@ -1958,7 +1974,7 @@ impl <'a> InstructionSelection {
}
// mov rax -> result
let res_size = vm.get_backend_type_info(res_tmp.ty.id()).size;
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
assert!(res_size == op_size, "op and res do not have matching type: {}", node);
match res_size {
......@@ -2028,14 +2044,14 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_info(op1.as_value().ty.id()).size;
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rax -> result
let res_size = vm.get_backend_type_info(res_tmp.ty.id()).size;
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
......@@ -2064,14 +2080,14 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_info(op1.as_value().ty.id()).size;
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rax -> result
let res_size = vm.get_backend_type_info(res_tmp.ty.id()).size;
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
......@@ -2096,14 +2112,14 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_info(op1.as_value().ty.id()).size;
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
self.emit_udiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result
let res_size = vm.get_backend_type_info(res_tmp.ty.id()).size;
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
......@@ -2128,14 +2144,14 @@ impl <'a> InstructionSelection {
let op1 = &ops[op1];
let op2 = &ops[op2];
let op_size = vm.get_backend_type_info(op1.as_value().ty.id()).size;
let op_size = vm.get_backend_type_size(op1.as_value().ty.id());
match op_size {
1 | 2 | 4 | 8 => {
self.emit_idiv(op1, op2, f_content, f_context, vm);
// mov rdx -> result
let res_size = vm.get_backend_type_info(res_tmp.ty.id()).size;
let res_size = vm.get_backend_type_size(res_tmp.ty.id());
match res_size {
8 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX),
4 => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX),
......@@ -2871,7 +2887,7 @@ impl <'a> InstructionSelection {
assert!(self.match_ireg(op1));
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let op1_size = vm.get_backend_type_info(reg_op1.ty.id()).size;
let op1_size = vm.get_backend_type_size(reg_op1.ty.id());
match op1_size {
8 => {
// div uses RDX and RAX
......@@ -2933,7 +2949,7 @@ impl <'a> InstructionSelection {
assert!(self.match_ireg(op1));
let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
let op1_size = vm.get_backend_type_info(reg_op1.ty.id()).size;
let op1_size = vm.get_backend_type_size(reg_op1.ty.id());
match op1_size {
8 => {
// idiv uses RDX and RAX
......@@ -4398,7 +4414,7 @@ impl <'a> InstructionSelection {
Some(ty) => ty,
None => panic!("expecting an iref or uptr in GetVarPartIRef")
};
let fix_part_size = vm.get_backend_type_info(struct_ty.id()).size;
let fix_part_size = vm.get_backend_type_size(struct_ty.id());
match base.v {
// GETVARPARTIREF(GETIREF) -> add FIX_PART_SIZE to old offset
......@@ -4990,7 +5006,7 @@ impl <'a> InstructionSelection {
/// returns a memory location P<Value> for a function reference
#[cfg(feature = "aot")]
fn get_mem_for_funcref(&mut self, func_id: MuID, vm: &VM) -> P<Value> {
let func_name = vm.get_func_name(func_id);
let func_name = vm.get_name_for_func(func_id);
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
......
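Editor's note: the new GETADDR lowering above is guarded by `debug_assert!(!GC_IREF_HAS_OFFSET)` and the "assume it is pinned" comment. When internal references carry no extra offset and the object cannot move, the address simply is the iref, so the instruction reduces to a register-to-register move. A self-contained illustration of that reasoning (not the backend code itself):

```rust
// Illustration only: why GETADDR can be a plain move under the commit's
// assumptions (iref == raw address, object already pinned so the GC won't move it).
const GC_IREF_HAS_OFFSET: bool = false;

fn lower_get_addr(pinned_iref: u64) -> u64 {
    // If irefs ever encoded an offset or tag, this would have to strip it
    // before handing out a raw address; the assert documents that assumption.
    assert!(!GC_IREF_HAS_OFFSET);
    pinned_iref
}

fn main() {
    let iref = 0x0000_7f00_dead_beef_u64;
    assert_eq!(lower_get_addr(iref), iref); // address and iref coincide
}
```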
......@@ -177,7 +177,7 @@ impl Inlining {
// getting the function being inlined
let inlined_func = *call_edges.get(&inst.id()).unwrap();
trace!("function being inlined is {}", inlined_func);
let inlined_fvid = match vm.get_cur_version_of(inlined_func) {
let inlined_fvid = match vm.get_cur_version_for_func(inlined_func) {
Some(fvid) => fvid,
None => panic!("cannot resolve current version of Func {}, which is supposed to be inlined", inlined_func)
};
......
......@@ -15,6 +15,8 @@
use std::sync::atomic;
use utils::ByteSize;
pub const GC_IREF_HAS_OFFSET : bool = false;
#[cfg(feature = "use-sidemap")]
mod sidemap;
#[cfg(not(feature = "use-sidemap"))]
......
......@@ -1334,7 +1334,7 @@ impl<'lb, 'lvm> BundleLoader<'lb, 'lvm> {
if let Some(impl_func) = self.built_funcs.get(&id) {
impl_func.sig.clone()
} else {
self.vm.get_func_sig_for_func(id)
self.vm.get_sig_for_func(id)
}
}
......
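Editor's note: the bundle-loader change is just the rename (`get_func_sig_for_func` to `get_sig_for_func`), but the surrounding code shows the lookup order it relies on: prefer a signature from the bundle currently being built, and only fall back to the VM for functions it already knows. A standalone sketch of that pattern with placeholder types:

```rust
// Standalone sketch (placeholder types): the "check the in-flight bundle first,
// then ask the VM" lookup the loader uses for function signatures.
use std::collections::HashMap;

type MuID = usize;

#[derive(Clone, Debug, PartialEq)]
struct FuncSig {
    args: usize,
    rets: usize,
}

struct BuiltFunc {
    sig: FuncSig,
}

struct Vm {
    sigs: HashMap<MuID, FuncSig>,
}

impl Vm {
    // Stands in for the renamed vm.get_sig_for_func(id).
    fn get_sig_for_func(&self, id: MuID) -> FuncSig {
        self.sigs[&id].clone()
    }
}

fn sig_of(built_funcs: &HashMap<MuID, BuiltFunc>, vm: &Vm, id: MuID) -> FuncSig {
    if let Some(f) = built_funcs.get(&id) {
        f.sig.clone() // defined in the bundle being loaded right now
    } else {
        vm.get_sig_for_func(id) // already registered with the VM
    }
}

fn main() {
    let mut vm_sigs = HashMap::new();
    vm_sigs.insert(1, FuncSig { args: 2, rets: 1 });
    let vm = Vm { sigs: vm_sigs };
    let built: HashMap<MuID, BuiltFunc> = HashMap::new();
    assert_eq!(sig_of(&built, &vm, 1), FuncSig { args: 2, rets: 1 });
}
```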
......@@ -70,7 +70,7 @@ impl MuVM {
}
pub fn id_of(&self, name: MuName) -> MuID {
self.vm.id_of_by_refstring(&name)
self.vm.id_of(&name)
}
pub fn name_of(&self, id: MuID) -> CMuCString {
......