...
 
Commits (2)
@@ -1324,13 +1324,14 @@ pub enum MemoryLocation {
         base: Option<P<Value>>,
         label: MuName,
         is_global: bool,
-        is_native: bool
+        is_native: bool,
+        is_threadlocal: bool
     }
 }
 
 #[cfg(target_arch = "x86_64")]
 rodal_enum!(MemoryLocation{{Address: scale, base, offset, index},
-    {Symbolic: is_global, is_native, base, label}});
+    {Symbolic: is_global, is_native, is_threadlocal, base, label}});
 
 #[cfg(target_arch = "x86_64")]
 impl fmt::Display for MemoryLocation {
...
@@ -1214,13 +1214,9 @@ impl ASMCodeGen {
         potentially_excepting: Option<MuName>,
         use_vec: Vec<P<Value>>,
         def_vec: Vec<P<Value>>,
-        target: Option<(MuID, ASMLocation)>
+        target: LinkedHashMap<MuID, Vec<ASMLocation>>
     ) {
-        let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = LinkedHashMap::new();
-        if target.is_some() {
-            let (id, loc) = target.unwrap();
-            uses.insert(id, vec![loc]);
-        }
+        let mut uses: LinkedHashMap<MuID, Vec<ASMLocation>> = target;
         for u in use_vec {
             uses.insert(u.id(), vec![]);
         }
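A minimal sketch (not part of these commits) of what the new `target` argument carries: a memory-operand call can reference more than one register (a base and an index, each at its own column of the ASM string), which the old `Option<(MuID, ASMLocation)>` could not express, while a map from register id to a list of locations can. The id/location types below are simplified stand-ins for `MuID` and `ASMLocation`, and `linked_hash_map` is an assumed dependency (the backend's `LinkedHashMap` may be re-exported from elsewhere).

extern crate linked_hash_map; // assumed dependency for this standalone sketch
use linked_hash_map::LinkedHashMap;

type FakeMuID = usize; // stand-in for MuID
type FakeLoc = (usize, usize); // stand-in for ASMLocation: (column, width) in the ASM string

// e.g. for `call *8(%rbx,%rcx,2)`: record where the base and index registers appear
fn build_target(base: (FakeMuID, FakeLoc), index: (FakeMuID, FakeLoc))
    -> LinkedHashMap<FakeMuID, Vec<FakeLoc>> {
    let mut target = LinkedHashMap::new();
    target.insert(base.0, vec![base.1]);
    target.insert(index.0, vec![index.1]);
    target
}

fn main() {
    let target = build_target((1, (6, 4)), (2, (11, 4)));
    assert_eq!(target.len(), 2);
}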
@@ -1513,20 +1509,25 @@ impl ASMCodeGen {
                 ref base,
                 ref label,
                 is_global,
-                is_native
+                is_native,
+                is_threadlocal
             }) => {
                 let label = if is_native {
-                    "/*C*/".to_string() + label.as_str()
+                    String::from(label.as_str())
                 } else {
                     mangle_name(label.clone())
                 };
                 if base.is_some() && base.as_ref().unwrap().id() == x86_64::RIP.id() && is_global {
                     // pc relative address
-                    let pic_symbol = pic_symbol(&label.clone());
+                    let pic_symbol = pic_symbol(&label);
                     result_str.push_str(&pic_symbol);
                     loc_cursor += label.len();
+                } else if is_threadlocal {
+                    let tls_symbol = tls_symbol(&label);
+                    result_str.push_str(&tls_symbol);
+                    loc_cursor += label.len();
                 } else {
-                    let symbol = symbol(&label.clone());
+                    let symbol = symbol(&label);
                     result_str.push_str(&symbol);
                     loc_cursor += label.len();
                 }
@@ -3367,7 +3368,7 @@ impl CodeGenerator for ASMCodeGen {
             format!("call {}@PLT", func)
         };
 
-        self.add_asm_call(asm, pe, uses, defs, None);
+        self.add_asm_call(asm, pe, uses, defs, LinkedHashMap::new());
 
         self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3385,8 +3386,14 @@ impl CodeGenerator for ASMCodeGen {
         let (reg, id, loc) = self.prepare_reg(func, 6);
         let asm = format!("call *{}", reg);
 
+        let target = {
+            let mut ret = LinkedHashMap::new();
+            ret.insert(id, vec![loc]);
+            ret
+        };
+
         // the call uses the register
-        self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));
+        self.add_asm_call(asm, pe, uses, defs, target);
 
         self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3402,7 +3409,13 @@ impl CodeGenerator for ASMCodeGen {
         defs: Vec<P<Value>>
     ) -> ValueLocation {
         trace!("emit: call {}", func);
-        unimplemented!()
+        let (mem, target) = self.prepare_mem(func, 6);
+        let asm = format!("call *{}", mem);
+
+        self.add_asm_call(asm, pe, uses, defs, target);
+
+        self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
+        ValueLocation::Relocatable(RegGroup::GPR, callsite)
     }
 
     fn emit_call_jmp(
@@ -3428,7 +3441,7 @@ impl CodeGenerator for ASMCodeGen {
             format!("/*CALL*/ jmp {}@PLT", func)
         };
 
-        self.add_asm_call(asm, pe, uses, defs, None);
+        self.add_asm_call(asm, pe, uses, defs, LinkedHashMap::new());
 
         self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -3446,8 +3459,14 @@ impl CodeGenerator for ASMCodeGen {
         let (reg, id, loc) = self.prepare_reg(func, 6);
         let asm = format!("/*CALL*/ jmp *{}", reg);
 
+        let target = {
+            let mut ret = LinkedHashMap::new();
+            ret.insert(id, vec![loc]);
+            ret
+        };
+
         // the call uses the register
-        self.add_asm_call(asm, pe, uses, defs, Some((id, loc)));
+        self.add_asm_call(asm, pe, uses, defs, target);
 
         self.add_asm_global_label(symbol(&mangle_name(callsite.clone())));
         ValueLocation::Relocatable(RegGroup::GPR, callsite)
@@ -4434,6 +4453,23 @@ pub fn pic_symbol(name: &String) -> String {
     format!("{}@GOTPCREL", name)
 }
 
+#[cfg(not(feature = "sel4-rumprun"))]
+#[cfg(target_os = "macos")]
+pub fn tls_symbol(name: &String) -> String {
+    format!("{}@TLVP", symbol(name))
+}
+
+#[cfg(not(feature = "sel4-rumprun"))]
+#[cfg(target_os = "linux")]
+pub fn tls_symbol(name: &String) -> String {
+    format!("%fs:{}@TPOFF", symbol(name))
+}
+
+#[cfg(feature = "sel4-rumprun")]
+pub fn tls_symbol(name: &String) -> String {
+    format!("%fs:{}@TPOFF", symbol(name))
+}
+
 use compiler::machine_code::CompiledFunction;
 
 /// rewrites the machine code of a function version for spilling.
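For reference, a standalone sketch (not commit code) of the operand shapes these helpers produce. `sym` stands for the output of `symbol()`, i.e. the name as the assembler sees it, assumed to already carry macOS's leading underscore.

fn linux_tls_operand(sym: &str) -> String {
    // initial-exec TLS: the linker resolves the offset from the thread
    // pointer held in %fs, so the operand can be used directly in a mov
    format!("%fs:{}@TPOFF", sym)
}

fn macos_tls_operand(sym: &str) -> String {
    // Mach-O TLV: this names a thread-local-variable descriptor; it is
    // loaded %rip-relative and then called through to obtain the address
    format!("{}@TLVP", sym)
}

fn main() {
    assert_eq!(linux_tls_operand("mu_tls"), "%fs:mu_tls@TPOFF");
    assert_eq!(macos_tls_operand("_mu_tls"), "_mu_tls@TLVP");
}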
...
@@ -2264,7 +2264,18 @@ impl<'a> InstructionSelection {
         f_context: &mut FunctionContext,
         vm: &VM
     ) -> P<Value> {
-        self.make_memory_symbolic(name, ty, true, false, f_context, vm)
+        self.make_memory_symbolic(name, ty, true, false, false, f_context, vm)
+    }
+
+    /// makes a symbolic memory operand for thread-local values
+    fn make_memory_symbolic_threadlocal(
+        &mut self,
+        name: MuName,
+        ty: P<MuType>,
+        f_context: &mut FunctionContext,
+        vm: &VM
+    ) -> P<Value> {
+        self.make_memory_symbolic(name, ty, false, true, true, f_context, vm)
     }
 
     /// makes a symbolic memory operand for native values
@@ -2275,7 +2286,7 @@ impl<'a> InstructionSelection {
         f_context: &mut FunctionContext,
         vm: &VM
     ) -> P<Value> {
-        self.make_memory_symbolic(name, ty, false, true, f_context, vm)
+        self.make_memory_symbolic(name, ty, false, true, false, f_context, vm)
     }
 
     /// makes a symbolic memory operand for a normal value (not global, not native)
@@ -2286,7 +2297,7 @@ impl<'a> InstructionSelection {
         f_context: &mut FunctionContext,
         vm: &VM
     ) -> P<Value> {
-        self.make_memory_symbolic(name, ty, false, false, f_context, vm)
+        self.make_memory_symbolic(name, ty, false, false, false, f_context, vm)
     }
 
     /// makes a symbolic memory operand
@@ -2296,6 +2307,7 @@ impl<'a> InstructionSelection {
         ty: P<MuType>,
         is_global: bool,
         is_native: bool,
+        is_threadlocal: bool,
         f_context: &mut FunctionContext,
         vm: &VM
     ) -> P<Value> {
@@ -2310,8 +2322,9 @@ impl<'a> InstructionSelection {
                 v: Value_::Memory(MemoryLocation::Symbolic {
                     base: Some(x86_64::RIP.clone()),
                     label: name,
-                    is_global: is_global,
-                    is_native: is_native
+                    is_global,
+                    is_native,
+                    is_threadlocal
                 })
             });
@@ -2323,12 +2336,13 @@ impl<'a> InstructionSelection {
         } else if cfg!(target_os = "macos") {
             P(Value {
                 hdr: MuEntityHeader::unnamed(vm.next_id()),
-                ty: ty,
+                ty,
                 v: Value_::Memory(MemoryLocation::Symbolic {
                     base: Some(x86_64::RIP.clone()),
                     label: name,
-                    is_global: is_global,
-                    is_native: is_native
+                    is_global,
+                    is_native,
+                    is_threadlocal
                 })
             })
         } else if cfg!(target_os = "linux") {
@@ -2339,10 +2353,15 @@ impl<'a> InstructionSelection {
                 hdr: MuEntityHeader::unnamed(vm.next_id()),
                 ty: ADDRESS_TYPE.clone(),
                 v: Value_::Memory(MemoryLocation::Symbolic {
-                    base: Some(x86_64::RIP.clone()),
+                    base: if is_threadlocal {
+                        None
+                    } else {
+                        Some(x86_64::RIP.clone())
+                    },
                     label: name,
-                    is_global: is_global,
-                    is_native: is_native
+                    is_global,
+                    is_native,
+                    is_threadlocal
                 })
             });
@@ -3947,6 +3966,7 @@ impl<'a> InstructionSelection {
     }
 
     /// emits code to get the thread local variable (not the client thread local)
+    #[allow(unused_variables)]
     fn emit_get_threadlocal(
         &mut self,
         cur_node: Option<&TreeNode>,
@@ -3954,17 +3974,43 @@ impl<'a> InstructionSelection {
         f_context: &mut FunctionContext,
         vm: &VM
     ) -> P<Value> {
-        let mut rets = self.emit_runtime_entry(
-            &entrypoints::GET_THREAD_LOCAL,
-            vec![],
-            None,
-            cur_node,
-            f_content,
-            f_context,
-            vm
-        );
+        let res = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
+        let mu_tls = entrypoints::MU_TLS.clone();
+
+        if cfg!(feature = "sel4-rumprun") {
+            unimplemented!()
+        } else if cfg!(target_os = "macos") {
+            // movq _mu_tls@TLVP(%rip) -> %tl_reg
+            let tl_mem =
+                self.make_memory_symbolic_threadlocal(mu_tls, ADDRESS_TYPE.clone(), f_context, vm);
+            let tl_reg = self.make_temporary(f_context, ADDRESS_TYPE.clone(), vm);
+            self.backend.emit_mov_r_mem(&tl_reg, &tl_mem);
+
+            // call (%tl_reg)
+            let callsite = self.new_callsite_label(cur_node);
+            let tl_call_mem = self.make_memory_op_base_offset(&tl_reg, 0, ADDRESS_TYPE.clone(), vm);
+            self.backend.emit_call_near_mem64(
+                callsite,
+                &tl_call_mem,
+                None,
+                vec![tl_reg.clone()],
+                x86_64::ALL_CALLER_SAVED_REGS.to_vec()
+            );
+
+            // movq [%rax] -> %res
+            let res_mem =
+                self.make_memory_op_base_offset(&x86_64::RAX, 0, ADDRESS_TYPE.clone(), vm);
+            self.backend.emit_mov_r_mem(&res, &res_mem);
+        } else if cfg!(target_os = "linux") {
+            // movq %fs:mu_tls@TPOFF -> %res
+            let tl_mem =
+                self.make_memory_symbolic_threadlocal(mu_tls, ADDRESS_TYPE.clone(), f_context, vm);
+            self.backend.emit_mov_r_mem(&res, &tl_mem);
+        } else {
+            panic!("unsupported OS");
+        }
 
-        rets.pop().unwrap()
+        res
     }
 
     /// emits code to call a runtime entry function, always returns result temporaries
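The rewritten emit_get_threadlocal no longer goes through a runtime entry; it reads the mu_tls thread-local directly. A sketch (not commit code) of the two supported instruction sequences is below; exact registers are chosen by the register allocator, and on macOS the Mach-O TLV convention has the thunk take the descriptor address as its argument and return the variable's address in %rax.

// Approximate AT&T-syntax sequences (editor's sketch, destination registers illustrative):
const GET_TLS_MACOS: &str = "\
movq    _mu_tls@TLVP(%rip), %rdi    # load the TLV descriptor address
callq   *(%rdi)                     # thunk returns &mu_tls in %rax
movq    (%rax), %rcx                # load the thread-local value itself";

const GET_TLS_LINUX: &str = "\
movq    %fs:mu_tls@TPOFF, %rcx      # initial-exec TLS: a single load via %fs";

fn main() {
    println!("macOS:\n{}\n\nLinux:\n{}", GET_TLS_MACOS, GET_TLS_LINUX);
}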
@@ -6380,7 +6426,8 @@ impl<'a> InstructionSelection {
                     base: Some(x86_64::RIP.clone()),
                     label: name.clone(),
                     is_global: false,
-                    is_native: false
+                    is_native: false,
+                    is_threadlocal: false
                 })
             })
         }
@@ -6407,7 +6454,8 @@ impl<'a> InstructionSelection {
                     base: Some(x86_64::RIP.clone()),
                     label: func_name,
                     is_global: true,
-                    is_native: false
+                    is_native: false,
+                    is_threadlocal: false
                 })
             })
         }
...
@@ -461,13 +461,15 @@ lazy_static! {
         R10.clone(),
         R11.clone(),
         // callee saved registers
-        RBX.clone(),
         R12.clone(),
         R13.clone(),
         R14.clone(),
         R15.clone(),
     ];
 
+    /// use RBX as thread local register
+    pub static ref THREAD_LOCAL_REG: P<Value> = RBX.clone();
+
     /// all the usable floating point registers for reg allocator to assign
     // order matters here (since register allocator will prioritize assigning temporaries
     // to a register that appears early)
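Removing RBX from the usable general-purpose registers is what actually reserves it: the allocator never hands it out, and since RBX is callee-saved under the System V ABI, a value parked in THREAD_LOCAL_REG is also preserved across calls into C code. A trivial sketch of that idea (hypothetical helper, not the crate's API):

// Hypothetical illustration: reserving a register = filtering it out of the
// set handed to the register allocator.
fn allocatable_gprs<'a>(all_usable: &[&'a str], reserved: &str) -> Vec<&'a str> {
    all_usable.iter().copied().filter(|r| *r != reserved).collect()
}

fn main() {
    let usable = ["rax", "rcx", "rdx", "rbx", "r12", "r13", "r14", "r15"];
    assert!(!allocatable_gprs(&usable, "rbx").contains(&"rbx"));
}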
...
@@ -23,6 +23,10 @@ use compiler::backend::RegGroup;
 use std::sync::{RwLock, Arc};
 
 pub type EntryFuncSig = MuFuncSig;
 
+lazy_static! {
+    pub static ref MU_TLS : MuName = Arc::new(String::from("mu_tls"));
+}
+
 pub struct RuntimeEntrypoint {
     pub sig: P<MuFuncSig>,
     pub aot: ValueLocation,
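MU_TLS is only the symbol name here; these commits do not show where mu_tls itself is defined. Presumably the runtime exports a thread-local, pointer-sized slot under that name. A purely hypothetical shape for such a definition, assuming nightly Rust's unstable #[thread_local] attribute (not taken from the repository):

#![feature(thread_local)]
#![allow(non_upper_case_globals)]

// Hypothetical runtime-side definition of the symbol the backend reads;
// NOT part of these commits. One pointer-sized slot per thread, exported
// under the unmangled name `mu_tls`.
#[no_mangle]
#[thread_local]
pub static mut mu_tls: *mut u8 = std::ptr::null_mut();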
...