To protect your data, the CISO has recommended that users enable 2FA as soon as possible.
Currently 2.7% of users enabled 2FA.

Commit ca802615 authored by qinsoon's avatar qinsoon
Browse files

[wip] GetFieldIRef on hybrid

parent 8da4a884
......@@ -292,6 +292,19 @@ impl MuType_ {
MuType_::Hybrid(tag)
}
/// Overwrites the definition of an already-registered hybrid type.
///
/// Looks up `tag` in `HYBRID_TAG_MAP` and replaces the stored fixed-part
/// types and variable-part type in place. Panics if the tag has not been
/// registered beforehand (create it with `hybrid_empty()` first).
pub fn hybrid_put(tag: HybridTag, mut fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) {
    let mut guard = HYBRID_TAG_MAP.write().unwrap();
    if let Some(entry) = guard.get_mut(&tag) {
        // Reuse the existing entry: drop the old fixed types, then move the
        // new ones in without cloning.
        entry.fix_tys.clear();
        entry.fix_tys.append(&mut fix_tys);
        entry.var_ty = var_ty;
    } else {
        panic!("call hybrid_empty() to create an empty struct before hybrid_put()")
    }
}
pub fn hybrid(tag: HybridTag, fix_tys: Vec<P<MuType>>, var_ty: P<MuType>) -> MuType_ {
let hybrid_ty_ = HybridType_{fix_tys: fix_tys, var_ty: var_ty};
......
......@@ -870,7 +870,7 @@ impl ASMCodeGen {
if cfg!(debug_assertions) {
match op.v {
Value_::Memory(_) => {},
_ => panic!("expecting register op")
_ => panic!("expecting memory op")
}
}
......@@ -890,7 +890,7 @@ impl ASMCodeGen {
match offset.v {
Value_::SSAVar(id) => {
// temp as offset
let (str, id, loc) = self.prepare_reg(offset, 0);
let (str, id, loc) = self.prepare_reg(offset, loc_cursor);
result_str.push_str(&str);
ids.push(id);
......
......@@ -133,12 +133,14 @@ pub extern fn alloc(mutator: *mut ImmixMutatorLocal, size: usize, align: usize)
/// Slow-path allocation entry point exposed to generated code via its
/// unmangled symbol name.
///
/// Dereferences the raw mutator pointer (panicking via `unwrap()` if it is
/// null), asks the immix mutator to allocate `size` bytes at `align`, and
/// returns the resulting address as an `ObjectReference`.
#[no_mangle]
#[inline(never)]
pub extern fn muentry_alloc_slow(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
    trace!("muentry_alloc_slow(mutator: {:?}, size: {}, align: {})", mutator, size, align);
    // SAFETY-note (review): caller must pass a valid, live mutator pointer.
    let local = unsafe { mutator.as_mut() }.unwrap();
    let addr = local.try_alloc_from_local(size, align);
    unsafe { addr.to_object_reference() }
}
#[no_mangle]
pub extern fn alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
pub extern fn muentry_alloc_large(mutator: *mut ImmixMutatorLocal, size: usize, align: usize) -> ObjectReference {
trace!("muentry_alloc_large(mutator: {:?}, size: {}, align: {})", mutator, size, align);
let ret = freelist::alloc_large(size, align, unsafe {mutator.as_mut().unwrap()}, MY_GC.read().unwrap().as_ref().unwrap().lo_space.clone());
unsafe {ret.to_object_reference()}
}
\ No newline at end of file
......@@ -46,11 +46,22 @@ lazy_static! {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![ADDRESS_TYPE.clone()],
arg_tys: vec![UINT64_TYPE.clone(), UINT64_TYPE.clone()]
arg_tys: vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_alloc_slow")),
jit: RwLock::new(None),
};
// impl/decl: gc/lib.rs
// Runtime entrypoint for large-object allocation; the relocatable symbol
// resolves to `muentry_alloc_large` in the GC crate.
// Signature: (mutator ptr as ADDRESS, size as UINT64, align as UINT64)
// -> ADDRESS — mirrors `muentry_alloc_large(mutator, size, align)`.
pub static ref ALLOC_LARGE : RuntimeEntrypoint = RuntimeEntrypoint {
sig: P(MuFuncSig {
hdr: MuEntityHeader::unnamed(ir::new_internal_id()),
ret_tys: vec![ADDRESS_TYPE.clone()],
arg_tys: vec![ADDRESS_TYPE.clone(), UINT64_TYPE.clone(), UINT64_TYPE.clone()]
}),
aot: ValueLocation::Relocatable(RegGroup::GPR, String::from("muentry_alloc_large")),
// JIT location is filled in lazily; None until then.
jit: RwLock::new(None)
};
// impl/decl: exception.rs
pub static ref THROW_EXCEPTION : RuntimeEntrypoint = RuntimeEntrypoint {
......
......@@ -284,5 +284,283 @@ pub fn struct_insts() -> VM {
vm.define_func_version(func_ver);
vm
}
/// End-to-end test: builds the `hybrid_fix_part_insts` bundle, compiles it,
/// links it into a standalone executable, runs it, and checks that the
/// process exits with code 1.
#[test]
fn test_hybrid_fix_part() {
    VM::start_logging_trace();

    let vm = Arc::new(hybrid_fix_part_insts());
    let compiler = Compiler::new(CompilerPolicy::default(), vm.clone());
    let func_id = vm.id_of("hybrid_fix_part_insts");

    // Compile the current version of the function; the scope keeps the VM
    // locks held only for the duration of compilation.
    {
        let funcs_map = vm.funcs().read().unwrap();
        let func_guard = funcs_map.get(&func_id).unwrap().read().unwrap();
        let vers_map = vm.func_vers().read().unwrap();
        let mut ver_guard = vers_map.get(&func_guard.cur_ver.unwrap()).unwrap().write().unwrap();
        compiler.compile(&mut ver_guard);
    }

    vm.make_primordial_thread(func_id, vec![]);
    backend::emit_context(&vm);

    // Link and execute without the default status check, so we can inspect
    // the exit code ourselves.
    let binary = aot::link_primordial(vec!["hybrid_fix_part_insts".to_string()], "hybrid_fix_part_insts_test");
    let run = aot::execute_nocheck(binary);

    let maybe_code = run.status.code();
    assert!(maybe_code.is_some());
    let ret_code = maybe_code.unwrap();
    println!("return code: {}", ret_code);
    assert!(ret_code == 1);
}
/// Builds a VM containing the `hybrid_fix_part_insts` test function.
///
/// The bundle declares `@my_hybrid = hybrid<@int64 @int64 | @int64>`, and the
/// function: allocates a hybrid with a var-part length of 10, stores 1 into
/// fixed field 0, then (in block `%check`) loads fixed fields 0 and 1, adds
/// them, and CCALLs `exit` with the sum. (The companion test expects exit
/// code 1 — presumably field 1 reads as 0 from the fresh allocation; the
/// NEWHYBRID initialization semantics are not visible here. TODO confirm.)
pub fn hybrid_fix_part_insts() -> VM {
let vm = VM::new();
// .typedef @int64 = int<64>
let int64 = vm.declare_type(vm.next_id(), MuType_::int(64));
vm.set_name(int64.as_entity(), Mu("int64"));
// .typedef @my_hybrid = hybrid<@int64 @int64 | @int64>
let my_hybrid = vm.declare_type(vm.next_id(), MuType_::hybrid("MyHybrid".to_string(), vec![int64.clone(), int64.clone()], int64.clone()));
vm.set_name(my_hybrid.as_entity(), Mu("my_hybrid"));
// .typedef @ref_hybrid = ref<@my_hybrid>
let ref_hybrid = vm.declare_type(vm.next_id(), MuType_::muref(my_hybrid.clone()));
vm.set_name(ref_hybrid.as_entity(), Mu("ref_hybrid"));
// .typedef @iref_hybrid = iref<@my_hybrid>
let iref_hybrid = vm.declare_type(vm.next_id(), MuType_::iref(my_hybrid.clone()));
vm.set_name(iref_hybrid.as_entity(), Mu("iref_hybrid"));
// .typedef @iref_int64 = iref<@int64>
let iref_int64 = vm.declare_type(vm.next_id(), MuType_::iref(int64.clone()));
vm.set_name(iref_int64.as_entity(), Mu("iref_int64"));
// .const @int64_0 <@int64> = 0
let int64_0 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(0));
vm.set_name(int64_0.as_entity(), Mu("int64_0"));
// .const @int64_1 <@int64> = 1
let int64_1 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(1));
vm.set_name(int64_1.as_entity(), Mu("int64_1"));
// .const @int64_10 <@int64> = 10
let int64_10 = vm.declare_const(vm.next_id(), int64.clone(), Constant::Int(10));
vm.set_name(int64_10.as_entity(), Mu("int64_10"));
// .funcsig @noparam_noret_sig = () -> ()
let noparam_noret_sig = vm.declare_func_sig(vm.next_id(), vec![], vec![]);
vm.set_name(noparam_noret_sig.as_entity(), Mu("noparam_noret_sig"));
// .funcdecl @hybrid_fix_part_insts <@noparam_noret_sig>
let func = MuFunction::new(vm.next_id(), noparam_noret_sig.clone());
vm.set_name(func.as_entity(), Mu("hybrid_fix_part_insts"));
let func_id = func.id();
vm.declare_func(func);
// .funcdef @hybrid_fix_part_insts VERSION @hybrid_fix_part_insts_v1 <@noparam_noret_si>
let mut func_ver = MuFunctionVersion::new(vm.next_id(), func_id, noparam_noret_sig.clone());
vm.set_name(func_ver.as_entity(), Mu("hybrid_fix_part_insts_v1"));
// %entry():
let mut blk_entry = Block::new(vm.next_id());
vm.set_name(blk_entry.as_entity(), Mu("entry"));
// %a = NEWHYBRID <@my_hybrid @int64> @int64_10
// (var-part length operand is @int64_10, i.e. 10 elements)
let blk_entry_a = func_ver.new_ssa(vm.next_id(), ref_hybrid.clone());
vm.set_name(blk_entry_a.as_entity(), Mu("blk_entry_a"));
let int64_10_local = func_ver.new_constant(int64_10.clone());
let blk_entry_inst0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_a.clone_value()]),
ops: RwLock::new(vec![int64_10_local]),
v: Instruction_::NewHybrid(my_hybrid.clone(), 0)
});
// %iref_a = GETIREF <@int64> %a
let blk_entry_iref_a = func_ver.new_ssa(vm.next_id(), iref_hybrid.clone());
vm.set_name(blk_entry_iref_a.as_entity(), Mu("blk_entry_iref_a"));
let blk_entry_inst1 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_a.clone_value()]),
ops: RwLock::new(vec![blk_entry_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %iref_x = GETFIELDIREF <@my_hybrid 0> %iref_a
let blk_entry_iref_x = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_entry_iref_x.as_entity(), Mu("blk_entry_iref_x"));
let int64_0_local = func_ver.new_constant(int64_0.clone());
let blk_entry_inst2 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_entry_iref_x.clone_value()]),
ops: RwLock::new(vec![blk_entry_iref_a.clone()]),
v: Instruction_::GetFieldIRef {
is_ptr: false,
base: 0, // 0th node in ops
index: 0 // 0th element in the struct
}
});
// STORE <@int64> %iref_x @int64_1
let int64_1_local = func_ver.new_constant(int64_1.clone());
let blk_entry_inst3 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_iref_x.clone(), int64_1_local.clone()]),
v: Instruction_::Store{
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0,
value: 1
}
});
// BRANCH %check(%a)
// blk_check_id is reserved now so the branch can target the block before
// the block itself is constructed below.
let blk_check_id = vm.next_id();
let blk_entry_branch = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![blk_entry_a]),
v: Instruction_::Branch1(Destination{
target: blk_check_id,
args: vec![DestArg::Normal(0)]
})
});
blk_entry.content = Some(BlockContent{
args: vec![],
exn_arg: None,
body: vec![blk_entry_inst0, blk_entry_inst1, blk_entry_inst2, blk_entry_inst3, blk_entry_branch],
keepalives: None
});
// %check(%a):
let blk_check_a = func_ver.new_ssa(vm.next_id(), ref_hybrid.clone());
vm.set_name(blk_check_a.as_entity(), Mu("blk_check_a"));
let mut blk_check = Block::new(blk_check_id);
vm.set_name(blk_check.as_entity(), Mu("check"));
// %blk_check_iref_a = GETIREF <@my_hybrid> a
let blk_check_iref_a = func_ver.new_ssa(vm.next_id(), iref_hybrid.clone());
vm.set_name(blk_check_iref_a.as_entity(), Mu("blk_check_iref_a"));
let blk_check_inst0 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_iref_a.clone_value()]),
ops: RwLock::new(vec![blk_check_a.clone()]),
v: Instruction_::GetIRef(0)
});
// %blk_check_iref_x = GETFIELDIREF <@my_hybrid 0> %blk_check_iref_a
let blk_check_iref_x = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_check_iref_x.as_entity(), Mu("blk_check_iref_x"));
let blk_check_inst1 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_iref_x.clone_value()]),
ops: RwLock::new(vec![blk_check_iref_a.clone()]),
v: Instruction_::GetFieldIRef {
is_ptr: false,
base: 0, // 0th node in ops
index: 0 // 0th element in the struct
}
});
// %x = LOAD <@int64> %blk_check_iref_x
let blk_check_x = func_ver.new_ssa(vm.next_id(), int64.clone());
vm.set_name(blk_check_x.as_entity(), Mu("blk_check_x"));
let blk_check_inst2 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_x.clone_value()]),
ops: RwLock::new(vec![blk_check_iref_x.clone()]),
v: Instruction_::Load {
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0
}
});
// %blk_check_iref_y = GETFIELDIREF <@my_hybrid 1> %blk_check_iref_a
let blk_check_iref_y = func_ver.new_ssa(vm.next_id(), iref_int64.clone());
vm.set_name(blk_check_iref_y.as_entity(), Mu("blk_check_iref_y"));
let blk_check_inst3 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_iref_y.clone_value()]),
ops: RwLock::new(vec![blk_check_iref_a.clone()]),
v: Instruction_::GetFieldIRef {
is_ptr: false,
base: 0, // 0th node in ops
index: 1 // 1th element in the struct
}
});
// %y = LOAD <@int64> %blk_check_iref_y
let blk_check_y = func_ver.new_ssa(vm.next_id(), int64.clone());
vm.set_name(blk_check_y.as_entity(), Mu("blk_check_y"));
let blk_check_inst4 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_y.clone_value()]),
ops: RwLock::new(vec![blk_check_iref_y.clone()]),
v: Instruction_::Load {
is_ptr: false,
order: MemoryOrder::Relaxed,
mem_loc: 0
}
});
// %res = ADD <@int64> %x %y
let blk_check_res = func_ver.new_ssa(vm.next_id(), int64.clone());
vm.set_name(blk_check_res.as_entity(), Mu("blk_check_res"));
let blk_check_inst5 = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: Some(vec![blk_check_res.clone_value()]),
ops: RwLock::new(vec![blk_check_x.clone(), blk_check_y.clone()]),
v: Instruction_::BinOp(BinOp::Add, 0, 1)
});
// CCALL exit(%res)
let blk_check_ccall = gen_ccall_exit(blk_check_res.clone(), &mut func_ver, &vm);
// RET <@int64> 0
let blk_check_ret = func_ver.new_inst(Instruction{
hdr: MuEntityHeader::unnamed(vm.next_id()),
value: None,
ops: RwLock::new(vec![int64_0_local]),
v: Instruction_::Return(vec![0])
});
blk_check.content = Some(BlockContent{
args: vec![blk_check_a.clone_value()],
exn_arg: None,
body: vec![
blk_check_inst0,
blk_check_inst1,
blk_check_inst2,
blk_check_inst3,
blk_check_inst4,
blk_check_inst5,
blk_check_ccall,
blk_check_ret
],
keepalives: None
});
func_ver.define(FunctionContent{
entry: blk_entry.id(),
blocks: hashmap!{
blk_entry.id() => blk_entry,
blk_check_id => blk_check
}
});
vm.define_func_version(func_ver);
vm
}
\ No newline at end of file
......@@ -44,7 +44,7 @@ fn test_exhaust_alloc_large() {
for _ in 0..WORK_LOAD {
mutator.yieldpoint();
let res = mm::alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
let res = mm::muentry_alloc_large(&mut mutator, LARGE_OBJECT_SIZE, OBJECT_ALIGN);
}
mutator.destroy();
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.