author    Dean Michael Berris <dberris@google.com>  2018-06-05 06:12:42 +0000
committer Dean Michael Berris <dberris@google.com>  2018-06-05 06:12:42 +0000
commit    5eaaff60954f37b6db04319c4a0b65da9cd88d21 (patch)
tree      c1096a3591520481da89bd1fe9ed896b3b392e5c /compiler-rt/lib/xray/xray_interface.cc
parent    8ba925d2c35e55a48c8817194e333c9599304a78 (diff)
[XRay][compiler-rt] Remove __sanitizer:: from namespace __xray (NFC)
This is a non-functional change that removes the explicit __sanitizer:: qualification from names used inside the __xray namespace.

llvm-svn: 333983
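Why the qualification can simply be dropped is ordinary C++ name lookup: once the enclosing scope makes the __sanitizer declarations visible, for example via a using-directive, the unqualified names resolve to the same entities. A minimal sketch of that mechanism, assuming a hypothetical using-directive inside namespace __xray (the actual compiler-rt headers may arrange the visibility differently):

#include <cstdint>

// Stand-in for sanitizer_common's atomic type; not the real declaration.
namespace __sanitizer {
struct atomic_uintptr_t {
  volatile std::uintptr_t val_dont_use;
};
} // namespace __sanitizer

namespace __xray {
// Hypothetical: pulls every __sanitizer name into scope, making the
// explicit __sanitizer:: prefix redundant.
using namespace __sanitizer;

// Before this commit: __sanitizer::atomic_uintptr_t XRayPatchedFunction{0};
atomic_uintptr_t XRayPatchedFunction{0};
} // namespace __xray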
Diffstat (limited to 'compiler-rt/lib/xray/xray_interface.cc')
-rw-r--r-- compiler-rt/lib/xray/xray_interface.cc | 90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/compiler-rt/lib/xray/xray_interface.cc b/compiler-rt/lib/xray/xray_interface.cc
index a8aa630551d..01bf6ddc607 100644
--- a/compiler-rt/lib/xray/xray_interface.cc
+++ b/compiler-rt/lib/xray/xray_interface.cc
@@ -51,31 +51,31 @@ static const int16_t cSledLength = 8;
#endif /* CPU architecture */
// This is the function to call when we encounter the entry or exit sleds.
-__sanitizer::atomic_uintptr_t XRayPatchedFunction{0};
+atomic_uintptr_t XRayPatchedFunction{0};
// This is the function to call from the arg1-enabled sleds/trampolines.
-__sanitizer::atomic_uintptr_t XRayArgLogger{0};
+atomic_uintptr_t XRayArgLogger{0};
// This is the function to call when we encounter a custom event log call.
-__sanitizer::atomic_uintptr_t XRayPatchedCustomEvent{0};
+atomic_uintptr_t XRayPatchedCustomEvent{0};
// This is the function to call when we encounter a typed event log call.
-__sanitizer::atomic_uintptr_t XRayPatchedTypedEvent{0};
+atomic_uintptr_t XRayPatchedTypedEvent{0};
// This is the global status to determine whether we are currently
// patching/unpatching.
-__sanitizer::atomic_uint8_t XRayPatching{0};
+atomic_uint8_t XRayPatching{0};
struct TypeDescription {
uint32_t type_id;
std::size_t description_string_length;
};
-using TypeDescriptorMapType = __sanitizer::AddrHashMap<TypeDescription, 11>;
+using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};
-__sanitizer::atomic_uint32_t TypeEventDescriptorCounter{0};
+atomic_uint32_t TypeEventDescriptorCounter{0};
// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
// undo any successful mprotect(...) changes. This is used to make a page
@@ -145,19 +145,19 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable,
XRayPatchingStatus patchFunction(int32_t FuncId,
bool Enable) XRAY_NEVER_INSTRUMENT {
- if (!__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire))
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
uint8_t NotPatching = false;
- if (!__sanitizer::atomic_compare_exchange_strong(
- &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+ if (!atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, memory_order_acq_rel))
return XRayPatchingStatus::ONGOING; // Already patching.
// Next, we look for the function index.
XRaySledMap InstrMap;
{
- __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ SpinMutexLock Guard(&XRayInstrMapMutex);
InstrMap = XRayInstrMap;
}
@@ -181,8 +181,8 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
while (f != e)
SucceedOnce |= patchSled(*f++, Enable, FuncId);
- __sanitizer::atomic_store(&XRayPatching, false,
- __sanitizer::memory_order_release);
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
if (!SucceedOnce) {
Report("Failed patching any sled for function '%d'.", FuncId);
@@ -196,26 +196,26 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
- if (!__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire))
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
uint8_t NotPatching = false;
- if (!__sanitizer::atomic_compare_exchange_strong(
- &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+ if (!atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, memory_order_acq_rel))
return XRayPatchingStatus::ONGOING; // Already patching.
uint8_t PatchingSuccess = false;
auto XRayPatchingStatusResetter =
- __sanitizer::at_scope_exit([&PatchingSuccess] {
+ at_scope_exit([&PatchingSuccess] {
if (!PatchingSuccess)
- __sanitizer::atomic_store(&XRayPatching, false,
- __sanitizer::memory_order_release);
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
});
XRaySledMap InstrMap;
{
- __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ SpinMutexLock Guard(&XRayInstrMapMutex);
InstrMap = XRayInstrMap;
}
if (InstrMap.Entries == 0)
@@ -271,8 +271,8 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
}
patchSled(Sled, Enable, FuncId);
}
- __sanitizer::atomic_store(&XRayPatching, false,
- __sanitizer::memory_order_release);
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
PatchingSuccess = true;
return XRayPatchingStatus::SUCCESS;
}
@@ -281,7 +281,7 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
bool Enable) XRAY_NEVER_INSTRUMENT {
XRaySledMap InstrMap;
{
- __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ SpinMutexLock Guard(&XRayInstrMapMutex);
InstrMap = XRayInstrMap;
}
@@ -338,12 +338,12 @@ using namespace __xray;
int __xray_set_handler(void (*entry)(int32_t,
XRayEntryType)) XRAY_NEVER_INSTRUMENT {
- if (__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire)) {
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
- __sanitizer::atomic_store(&__xray::XRayPatchedFunction,
+ atomic_store(&__xray::XRayPatchedFunction,
reinterpret_cast<uintptr_t>(entry),
- __sanitizer::memory_order_release);
+ memory_order_release);
return 1;
}
return 0;
@@ -351,11 +351,11 @@ int __xray_set_handler(void (*entry)(int32_t,
int __xray_set_customevent_handler(void (*entry)(void *, size_t))
XRAY_NEVER_INSTRUMENT {
- if (__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire)) {
- __sanitizer::atomic_store(&__xray::XRayPatchedCustomEvent,
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
+ atomic_store(&__xray::XRayPatchedCustomEvent,
reinterpret_cast<uintptr_t>(entry),
- __sanitizer::memory_order_release);
+ memory_order_release);
return 1;
}
return 0;
@@ -363,11 +363,11 @@ int __xray_set_customevent_handler(void (*entry)(void *, size_t))
int __xray_set_typedevent_handler(void (*entry)(
uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
- if (__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire)) {
- __sanitizer::atomic_store(&__xray::XRayPatchedTypedEvent,
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
+ atomic_store(&__xray::XRayPatchedTypedEvent,
reinterpret_cast<uintptr_t>(entry),
- __sanitizer::memory_order_release);
+ memory_order_release);
return 1;
}
return 0;
@@ -389,8 +389,8 @@ uint16_t __xray_register_event_type(
const char *const event_type) XRAY_NEVER_INSTRUMENT {
TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
if (h.created()) {
- h->type_id = __sanitizer::atomic_fetch_add(
- &TypeEventDescriptorCounter, 1, __sanitizer::memory_order_acq_rel);
+ h->type_id = atomic_fetch_add(
+ &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
h->description_string_length = strnlen(event_type, 1024);
}
return h->type_id;
@@ -414,22 +414,22 @@ __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
}
int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
- if (!__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire))
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
return 0;
// A relaxed write might not be visible even if the current thread gets
// scheduled on a different CPU/NUMA node. We need to wait for everyone to
// have this handler installed for consistency of collected data across CPUs.
- __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
- __sanitizer::memory_order_release);
+ atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
+ memory_order_release);
return 1;
}
int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
- __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ SpinMutexLock Guard(&XRayInstrMapMutex);
if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
return 0;
return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
@@ -443,6 +443,6 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
}
size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
- __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ SpinMutexLock Guard(&XRayInstrMapMutex);
return XRayInstrMap.Functions;
}
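A few notes on patterns visible in the surrounding context (unchanged by this commit). First, the patching entry points share one concurrency guard: an acquire load of XRayInitialized, an acq_rel compare-exchange that claims XRayPatching, and a release store that returns it. A minimal sketch of that guard using standard <atomic> instead of sanitizer_common's primitives (names are illustrative, not the runtime's API):

#include <atomic>

std::atomic<bool> Initialized{false};
std::atomic<bool> Patching{false};

enum class PatchStatus { NOT_INITIALIZED, ONGOING, SUCCESS };

PatchStatus controlPatchingSketch() {
  if (!Initialized.load(std::memory_order_acquire))
    return PatchStatus::NOT_INITIALIZED;

  // Claim the "currently patching" flag; exactly one thread wins.
  bool NotPatching = false;
  if (!Patching.compare_exchange_strong(NotPatching, true,
                                        std::memory_order_acq_rel))
    return PatchStatus::ONGOING;

  // ... patch or unpatch the sleds here ...

  // Publish completion; pairs with the acquire half of the next
  // thread's compare-exchange.
  Patching.store(false, std::memory_order_release);
  return PatchStatus::SUCCESS;
}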
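Second, controlPatching protects that flag against early returns with at_scope_exit, releasing XRayPatching from a scope-exit callback whenever PatchingSuccess was never set. A hedged sketch of the same RAII idea in portable C++ (this ScopeExit is a local stand-in, not sanitizer_common's implementation):

#include <atomic>
#include <utility>

// Minimal scope-exit helper: runs the callback when the guard is destroyed.
template <typename Fn> class ScopeExit {
  Fn Callback;

public:
  explicit ScopeExit(Fn F) : Callback(std::move(F)) {}
  ~ScopeExit() { Callback(); }
};

template <typename Fn> ScopeExit<Fn> at_scope_exit(Fn F) {
  return ScopeExit<Fn>(std::move(F));
}

std::atomic<bool> PatchingFlag{true};

void patchWithCleanup() {
  bool PatchingSuccess = false;
  // Mirrors the diff: reset the flag unless patching succeeded, so
  // failure paths never leave later callers stuck in ONGOING.
  auto Resetter = at_scope_exit([&PatchingSuccess] {
    if (!PatchingSuccess)
      PatchingFlag.store(false, std::memory_order_release);
  });
  // ... any early return here is cleaned up by Resetter ...
  PatchingSuccess = true;
}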
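Finally, the __xray_register_event_type hunk shows how type ids are minted: descriptors are interned by address in an AddrHashMap, and a fresh id is drawn from an atomic counter only on first insertion. A sketch of the same interning scheme with standard containers (a std::mutex plus std::unordered_map stand in for the lock-free AddrHashMap; the actual data structure differs):

#include <atomic>
#include <cstdint>
#include <mutex>
#include <unordered_map>

std::atomic<std::uint32_t> TypeIdCounter{0};
std::mutex MapMutex;
std::unordered_map<const void *, std::uint16_t> TypeIds;

// Stable id per distinct descriptor address; a new id comes from the
// counter only the first time an address is seen.
std::uint16_t registerEventType(const char *EventType) {
  std::lock_guard<std::mutex> Lock(MapMutex);
  auto It = TypeIds.find(EventType);
  if (It == TypeIds.end()) {
    auto Id = static_cast<std::uint16_t>(
        TypeIdCounter.fetch_add(1, std::memory_order_acq_rel));
    It = TypeIds.emplace(EventType, Id).first;
  }
  return It->second;
}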