Diffstat (limited to 'compiler-rt/lib/xray/xray_interface.cc')
-rw-r--r--  compiler-rt/lib/xray/xray_interface.cc | 51
1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/compiler-rt/lib/xray/xray_interface.cc b/compiler-rt/lib/xray/xray_interface.cc
index ccbe7e2f2f7..5ef3fc7aac9 100644
--- a/compiler-rt/lib/xray/xray_interface.cc
+++ b/compiler-rt/lib/xray/xray_interface.cc
@@ -82,6 +82,8 @@ int __xray_set_handler(void (*entry)(int32_t, XRayEntryType)) {
return 0;
}
+int __xray_remove_handler() { return __xray_set_handler(nullptr); }
+
std::atomic<bool> XRayPatching{false};
using namespace __xray;
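For context, a minimal, hypothetical caller of the handler API touched above. This is a sketch, not part of the patch: it assumes the declarations are reachable through the public XRay interface header as xray/xray_interface.h and that the program was built with -fxray-instrument so there are sleds to patch.

#include <cstdint>
#include <cstdio>
#include <xray/xray_interface.h>

// A deliberately trivial handler; real handlers should be cheap and reentrant,
// since they run on every instrumented function entry and exit.
void MyHandler(int32_t FuncId, XRayEntryType Type) {
  std::printf("func %d: %s\n", FuncId,
              Type == XRayEntryType::ENTRY ? "entry" : "exit");
}

int main() {
  __xray_set_handler(MyHandler); // install the handler
  // ... run instrumented code ...
  __xray_remove_handler();       // same effect as __xray_set_handler(nullptr)
  return 0;
}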
@@ -104,7 +106,10 @@ template <class Function> CleanupInvoker<Function> ScopeCleanup(Function Fn) {
return CleanupInvoker<Function>{Fn};
}
-XRayPatchingStatus __xray_patch() {
+// ControlPatching implements the common internals of the patching/unpatching
+// implementation. |Enable| defines whether we're enabling or disabling the
+// runtime XRay instrumentation.
+XRayPatchingStatus ControlPatching(bool Enable) {
if (!XRayInitialized.load(std::memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
@@ -131,7 +136,9 @@ XRayPatchingStatus __xray_patch() {
int32_t FuncId = 1;
static constexpr uint8_t CallOpCode = 0xe8;
static constexpr uint16_t MovR10Seq = 0xba41;
+ static constexpr uint16_t Jmp9Seq = 0x09eb;
static constexpr uint8_t JmpOpCode = 0xe9;
+ static constexpr uint8_t RetOpCode = 0xc3;
uint64_t CurFun = 0;
for (std::size_t I = 0; I < InstrMap.Entries; I++) {
auto Sled = InstrMap.Sleds[I];
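A note on the two 16-bit constants added above: "jmp +9" assembles to the bytes eb 09, and the patched entry sled begins with 41 ba ("mov r10d, imm32"), so on a little-endian x86-64 host those byte pairs read back as the uint16_t values 0x09eb and 0xba41, which is what lets a single atomic 16-bit store flip a sled between its patched and unpatched forms. The standalone check below is an illustrative sketch, not part of the patch, and only holds on little-endian targets such as x86-64.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint8_t JmpBytes[] = {0xeb, 0x09}; // jmp +9: skip the rest of the sled
  const uint8_t MovBytes[] = {0x41, 0xba}; // first two bytes of mov r10d, imm32
  uint16_t Jmp9Seq = 0, MovR10Seq = 0;
  std::memcpy(&Jmp9Seq, JmpBytes, sizeof(Jmp9Seq));
  std::memcpy(&MovR10Seq, MovBytes, sizeof(MovR10Seq));
  assert(Jmp9Seq == 0x09eb);   // matches the constant used for unpatching
  assert(MovR10Seq == 0xba41); // matches the constant used for patching
  return 0;
}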
@@ -191,12 +198,19 @@ XRayPatchingStatus __xray_patch() {
TrampolineOffset);
continue;
}
- *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
- *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
- *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
- std::memory_order_release);
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
}
if (Sled.Kind == XRayEntryType::EXIT) {
@@ -231,15 +245,26 @@ XRayPatchingStatus __xray_patch() {
TrampolineOffset);
continue;
}
- *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
- *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
- *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
- std::memory_order_release);
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
}
}
XRayPatching.store(false, std::memory_order_release);
PatchingSuccess = true;
return XRayPatchingStatus::SUCCESS;
}
+
+XRayPatchingStatus __xray_patch() { return ControlPatching(true); }
+
+XRayPatchingStatus __xray_unpatch() { return ControlPatching(false); }
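Putting the exported pieces together, a client might drive the runtime roughly as follows. This is a hedged sketch rather than a prescribed usage: it assumes the declarations come from the public XRay interface header and treats only XRayPatchingStatus::SUCCESS as success, mirroring the checks in the diff above.

#include <cstdint>
#include <xray/xray_interface.h>

void Handler(int32_t FuncId, XRayEntryType Type) {
  // Record the event somewhere cheap (ring buffer, per-thread log, ...).
}

bool StartTracing() {
  __xray_set_handler(Handler);
  if (__xray_patch() != XRayPatchingStatus::SUCCESS) {
    // The runtime is not initialized or patching is already in flight;
    // back the handler out since nothing was patched to call it.
    __xray_remove_handler();
    return false;
  }
  return true;
}

void StopTracing() {
  __xray_unpatch();        // rewrite the sleds back to their jmp +9 / retq forms
  __xray_remove_handler(); // equivalent to __xray_set_handler(nullptr)
}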