GitLab will be upgraded from 11.4.5-ce.0 on November 25th 2019, from 4.00pm (AEDT) to 5.00pm (AEDT), due to Critical Security Patch Availability. During the update, GitLab and Mattermost services will not be available.

Commit 7a2e02ab authored by qinsoon's avatar qinsoon

[wip] thread local with native implementation (doesn't work)

parent 19c8bba8
Pipeline #1268 canceled with stages
in 5 seconds
......@@ -1324,13 +1324,14 @@ pub enum MemoryLocation {
base: Option<P<Value>>,
label: MuName,
is_global: bool,
is_native: bool
is_native: bool,
is_threadlocal: bool
}
}
#[cfg(target_arch = "x86_64")]
rodal_enum!(MemoryLocation{{Address: scale, base, offset, index},
{Symbolic: is_global, is_native, base, label}});
{Symbolic: is_global, is_native, is_threadlocal, base, label}});
#[cfg(target_arch = "x86_64")]
impl fmt::Display for MemoryLocation {
......
......@@ -1214,13 +1214,9 @@ impl ASMCodeGen {
potentially_excepting: Option<MuName>,
use_vec: Vec<P<Value>>,
def_vec: Vec<P<Value>>,
target: Option<(MuID, ASMLocation)>
target: LinkedHashMap<MuID, Vec<ASMLocation>>
) {
let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
if target.is_some() {
let (id, loc) = target.unwrap();
uses.insert(id, vec![loc]);
}
let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = target;
for u in use_vec {
uses.insert(u.id(), vec![]);
}
......@@ -1513,20 +1509,25 @@ impl ASMCodeGen {
ref base,
ref label,
is_global,
is_native
is_native,
is_threadlocal
}) => {
let label = if is_native {
"/*C*/".to_string() + label.as_str()
String::from(label.as_str())
} else {
mangle_name(label.clone())
};
if base.is_some() && base.as_ref().unwrap().id() == x86_64::RIP.id() && is_global {
// pc relative address
let pic_symbol = pic_symbol(&label.clone());
let pic_symbol = pic_symbol(&label);
result_str.push_str(&pic_symbol);
loc_cursor += label.len();
} else if is_threadlocal {
let tls_symbol = tls_symbol(&label);
result_str.push_str(&tls_symbol);
loc_cursor += label.len();
} else {
let symbol = symbol(&label.clone());
let symbol = symbol(&label);
result_str.push_str(&symbol);
loc_cursor += label.len();
}
......@@ -3367,7 +3368,7 @@ impl CodeGenerator for ASMCodeGen {
format!("call {}@PLT", func)
};
self.add_asm_call(asm, pe, uses, defs, None);
self.add_asm_call(asm, pe, uses, defs, LinkedHashMap::new());
self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
......@@ -3385,8 +3386,14 @@ impl CodeGenerator for ASMCodeGen {
let (reg, id, loc) = self.prepare_reg(func, 6);
let asm = format!("call *{}", reg);
let target = {
let mut ret = LinkedHashMap::new();
ret.insert(id, vec![loc]);
ret
};
// the call uses the register
self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));
self.add_asm_call(asm, pe, uses, defs, target);
self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
......@@ -3402,7 +3409,13 @@ impl CodeGenerator for ASMCodeGen {
defs: Vec<P<Value>>
) -> ValueLocation {
trace!("emit: call {}", func);
unimplemented!()
let (mem, target) = self.prepare_mem(func, 6);
let asm = format!("call *{}", mem);
self.add_asm_call(asm, pe, uses, defs, target);
self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
}
fn emit_call_jmp(
......@@ -3428,7 +3441,7 @@ impl CodeGenerator for ASMCodeGen {
format!("/*CALL*/ jmp {}@PLT", func)
};
self.add_asm_call(asm, pe, uses, defs, None);
self.add_asm_call(asm, pe, uses, defs, LinkedHashMap::new());
self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
......@@ -3446,8 +3459,14 @@ impl CodeGenerator for ASMCodeGen {
let (reg, id, loc) = self.prepare_reg(func, 6);
let asm = format!("/*CALL*/ jmp *{}", reg);
let target = {
let mut ret = LinkedHashMap::new();
ret.insert(id, vec![loc]);
ret
};
// the call uses the register
self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));
self.add_asm_call(asm, pe, uses, defs, target);
self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
ValueLocation::Relocatable(RegGroup::GPR, callsite)
......@@ -4434,6 +4453,23 @@ pub fn pic_symbol(name: &String) -> String {
format!("{}@GOTPCREL", name)
}
/// Returns the assembly operand for referencing a thread-local symbol on macOS.
/// Mach-O thread-locals are reached indirectly through a thread-local variable
/// pointer, hence the @TLVP relocation suffix appended to the mangled symbol.
#[cfg(not(feature = "sel4-rumprun"))]
#[cfg(target_os = "macos")]
pub fn tls_symbol(name: &String) -> String {
    let mut operand = symbol(name);
    operand.push_str("@TLVP");
    operand
}
/// Returns the assembly operand for referencing a thread-local symbol on Linux.
/// Emits an %fs-segment-relative access with the @TPOFF relocation suffix
/// (ELF TLS, offset from the thread pointer held in %fs).
#[cfg(not(feature = "sel4-rumprun"))]
#[cfg(target_os = "linux")]
pub fn tls_symbol(name: &String) -> String {
    let mut operand = String::from("%fs:");
    operand.push_str(&symbol(name));
    operand.push_str("@TPOFF");
    operand
}
/// Returns the assembly operand for a thread-local symbol on the sel4-rumprun target.
/// NOTE(review): byte-for-byte the same form as the Linux variant
/// (%fs-relative @TPOFF) — confirm this is intentional for sel4-rumprun.
#[cfg(feature = "sel4-rumprun")]
pub fn tls_symbol(name: &String) -> String {
    let mangled = symbol(name);
    ["%fs:", mangled.as_str(), "@TPOFF"].concat()
}
use compiler::machine_code::CompiledFunction;
/// rewrites the machine code of a function version for spilling.
......
......@@ -2264,7 +2264,18 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, true, false, f_context, vm)
self.make_memory_symbolic(name, ty, true, false, false, f_context, vm)
}
/// makes a symbolic memory operand for thread-local values
/// (the original doc comment said "global values" — a copy-paste slip from
/// the sibling helper above; this one builds a thread-local operand)
fn make_memory_symbolic_threadlocal(
    &mut self,
    name: MuName,
    ty: P<MuType>,
    f_context: &mut FunctionContext,
    vm: &VM
) -> P<Value> {
    // forwards with is_global = false, is_native = true, is_threadlocal = true
    // (is_native = true presumably so the label is not Mu-mangled — confirm
    // against how the codegen treats is_native for symbols like mu_tls)
    self.make_memory_symbolic(name, ty, false, true, true, f_context, vm)
}
/// makes a symbolic memory operand for native values
......@@ -2275,7 +2286,7 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, false, true, f_context, vm)
self.make_memory_symbolic(name, ty, false, true, false, f_context, vm)
}
/// makes a symbolic memory operand for a normal value (not global, not native)
......@@ -2286,7 +2297,7 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
self.make_memory_symbolic(name, ty, false, false, f_context, vm)
self.make_memory_symbolic(name, ty, false, false, false, f_context, vm)
}
/// makes a symbolic memory operand
......@@ -2296,6 +2307,7 @@ impl<'a> InstructionSelection {
ty: P<MuType>,
is_global: bool,
is_native: bool,
is_threadlocal: bool,
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
......@@ -2310,8 +2322,9 @@ impl<'a> InstructionSelection {
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: name,
is_global: is_global,
is_native: is_native
is_global,
is_native,
is_threadlocal
})
});
......@@ -2323,12 +2336,13 @@ impl<'a> InstructionSelection {
} else if cfg!(target_os = "macos") {
P(Value {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ty,
ty,
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
label: name,
is_global: is_global,
is_native: is_native
is_global,
is_native,
is_threadlocal
})
})
} else if cfg!(target_os = "linux") {
......@@ -2339,10 +2353,15 @@ impl<'a> InstructionSelection {
hdr: MuEntityHeader::unnamed(vm.next_id()),
ty: ADDRESS_TYPE.clone(),
v: Value_::Memory(MemoryLocation::Symbolic {
base: Some(x86_64::RIP.clone()),
base: if is_threadlocal {
None
} else {
Some(x86_64::RIP.clone())
},
label: name,
is_global: is_global,
is_native: is_native
is_global,
is_native,
is_threadlocal
})
});
......@@ -3947,6 +3966,7 @@ impl<'a> InstructionSelection {
}
/// emits code to get the thread local variable (not the client thread local)
#[allow(unused_variables)]
fn emit_get_threadlocal(
&mut self,
cur_node: Option<&TreeNode>,
......@@ -3954,17 +3974,43 @@ impl<'a> InstructionSelection {
f_context: &mut FunctionContext,
vm: &VM
) -> P<Value> {
let mut rets = self.emit_runtime_entry(
&entrypoints::GET_THREAD_LOCAL,
vec![],
None,
cur_node,
f_content,
f_context,
vm
);
let res = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
let mu_tls = entrypoints::MU_TLS.clone();
if cfg!(feature = "sel4-rumprun") {
unimplemented!()
} else if cfg!(target_os = "macos") {
// movq _mu_tls@TLVP(%rip) -> %tl_reg
let tl_mem =
self.make_memory_symbolic_threadlocal(mu_tls, ADDRESS_TYPE.clone(), f_context, vm);
let tl_reg = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
self.backend.emit_mov_r_mem(&tl_reg, &tl_mem);
// call (%tl_reg)
let callsite = self.new_callsite_label(cur_node);
let tl_call_mem = self.make_memory_op_base_offset(&tl_reg, 0, ADDRESS_TYPE.clone(), vm);
self.backend.emit_call_near_mem64(
callsite,
&tl_call_mem,
None,
vec![tl_reg.clone()],
x86_64::ALL_CALLER_SAVED_REGS.to_vec()
);
// movq [%rax] -> %res
let res_mem =
self.make_memory_op_base_offset(&x86_64::RAX, 0, ADDRESS_TYPE.clone(), vm);
self.backend.emit_mov_r_mem(&res, &res_mem);
} else if cfg!(target_os = "linux") {
// movq %fs:mu_tls@TPOFF -> %res
let tl_mem =
self.make_memory_symbolic_threadlocal(mu_tls, ADDRESS_TYPE.clone(), f_context, vm);
self.backend.emit_mov_r_mem(&res, &tl_mem);
} else {
panic!("unsupported OS");
}
rets.pop().unwrap()
res
}
/// emits code to call a runtime entry function, always returns result temporaries
......@@ -6380,7 +6426,8 @@ impl<'a> InstructionSelection {
base: Some(x86_64::RIP.clone()),
label: name.clone(),
is_global: false,
is_native: false
is_native: false,
is_threadlocal: false
})
})
}
......@@ -6407,7 +6454,8 @@ impl<'a> InstructionSelection {
base: Some(x86_64::RIP.clone()),
label: func_name,
is_global: true,
is_native: false
is_native: false,
is_threadlocal: false
})
})
}
......
......@@ -23,6 +23,10 @@ use compiler::backend::RegGroup;
use std::sync::{RwLock, Arc};
pub type EntryFuncSig = MuFuncSig;
lazy_static! {
pub static ref MU_TLS : MuName = Arc::new(String::from("mu_tls"));
}
pub struct RuntimeEntrypoint {
pub sig: P<MuFuncSig>,
pub aot: ValueLocation,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment