summaryrefslogtreecommitdiffstats
path: root/compiler-rt/lib/xray/xray_buffer_queue.cc
diff options
context:
space:
mode:
authorDean Michael Berris <dberris@google.com>2018-10-22 04:53:58 +0000
committerDean Michael Berris <dberris@google.com>2018-10-22 04:53:58 +0000
commit788b17ca78e25f7eb079ce7836ce45c1dde3bb67 (patch)
treeb2d4d0db309b675e243c441b58a1fc822f3da7f7 /compiler-rt/lib/xray/xray_buffer_queue.cc
parentebfbf89000f7b698e502dcb8a8a5d8bd2ec2742f (diff)
downloadbcm5719-llvm-788b17ca78e25f7eb079ce7836ce45c1dde3bb67.tar.gz
bcm5719-llvm-788b17ca78e25f7eb079ce7836ce45c1dde3bb67.zip
[XRay][compiler-rt] Generational Buffer Management
Summary: This change updates the buffer queue implementation to support using a generation number to identify the lifetime of buffers. This first part introduces the notion of the generation number, without changing the way we handle the buffers yet. What's missing here is the cleanup of the buffers. Ideally we'll keep the two most recent generations. We need to ensure that before we do any writes to the buffers, that we check the generation number(s) first. Those changes will follow-on from this change. Depends on D52588. Reviewers: mboerger, eizan Subscribers: llvm-commits, jfb Differential Revision: https://reviews.llvm.org/D52974 llvm-svn: 344881
Diffstat (limited to 'compiler-rt/lib/xray/xray_buffer_queue.cc')
-rw-r--r--compiler-rt/lib/xray/xray_buffer_queue.cc | 157
1 file changed, 100 insertions(+), 57 deletions(-)
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.cc b/compiler-rt/lib/xray/xray_buffer_queue.cc
index 5a88ecd3399..fd41e5ff942 100644
--- a/compiler-rt/lib/xray/xray_buffer_queue.cc
+++ b/compiler-rt/lib/xray/xray_buffer_queue.cc
@@ -24,89 +24,132 @@
using namespace __xray;
using namespace __sanitizer;
-BufferQueue::BufferQueue(size_t B, size_t N,
- bool &Success) XRAY_NEVER_INSTRUMENT
- : BufferSize(B),
- BufferCount(N),
- Mutex(),
- Finalizing{0},
- BackingStore(allocateBuffer(B * N)),
- Buffers(initArray<BufferQueue::BufferRep>(N)),
- Next(Buffers),
- First(Buffers),
- LiveBuffers(0) {
- if (BackingStore == nullptr) {
- Success = false;
- return;
- }
- if (Buffers == nullptr) {
+BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
+ SpinMutexLock Guard(&Mutex);
+
+ if (!finalizing())
+ return BufferQueue::ErrorCode::AlreadyInitialized;
+
+ bool Success = false;
+ BufferSize = BS;
+ BufferCount = BC;
+ BackingStore = allocateBuffer(BufferSize * BufferCount);
+ if (BackingStore == nullptr)
+ return BufferQueue::ErrorCode::NotEnoughMemory;
+
+ auto CleanupBackingStore = __sanitizer::at_scope_exit([&, this] {
+ if (Success)
+ return;
deallocateBuffer(BackingStore, BufferSize * BufferCount);
- Success = false;
- return;
- }
+ });
+
+ Buffers = initArray<BufferRep>(BufferCount);
+ if (Buffers == nullptr)
+ return BufferQueue::ErrorCode::NotEnoughMemory;
- for (size_t i = 0; i < N; ++i) {
+ // At this point we increment the generation number to associate the buffers
+ // to the new generation.
+ atomic_fetch_add(&Generation, 1, memory_order_acq_rel);
+
+ Success = true;
+ for (size_t i = 0; i < BufferCount; ++i) {
auto &T = Buffers[i];
auto &Buf = T.Buff;
- Buf.Data = reinterpret_cast<char *>(BackingStore) + (BufferSize * i);
- Buf.Size = B;
atomic_store(&Buf.Extents, 0, memory_order_release);
+ Buf.Generation = generation();
+ Buf.Data = reinterpret_cast<char *>(BackingStore) + (BufferSize * i);
+ Buf.Size = BufferSize;
T.Used = false;
}
- Success = true;
+
+ Next = Buffers;
+ First = Buffers;
+ LiveBuffers = 0;
+ atomic_store(&Finalizing, 0, memory_order_release);
+ return BufferQueue::ErrorCode::Ok;
+}
+
+BufferQueue::BufferQueue(size_t B, size_t N,
+ bool &Success) XRAY_NEVER_INSTRUMENT
+ : BufferSize(B),
+ BufferCount(N),
+ Mutex(),
+ Finalizing{1},
+ BackingStore(nullptr),
+ Buffers(nullptr),
+ Next(Buffers),
+ First(Buffers),
+ LiveBuffers(0),
+ Generation{0} {
+ Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}
BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
if (atomic_load(&Finalizing, memory_order_acquire))
return ErrorCode::QueueFinalizing;
- SpinMutexLock Guard(&Mutex);
- if (LiveBuffers == BufferCount)
- return ErrorCode::NotEnoughMemory;
-
- auto &T = *Next;
- auto &B = T.Buff;
- auto Extents = atomic_load(&B.Extents, memory_order_acquire);
- atomic_store(&Buf.Extents, Extents, memory_order_release);
- Buf.Data = B.Data;
- Buf.Size = B.Size;
- T.Used = true;
- ++LiveBuffers;
-
- if (++Next == (Buffers + BufferCount))
- Next = Buffers;
+ BufferRep *B = nullptr;
+ {
+ SpinMutexLock Guard(&Mutex);
+ if (LiveBuffers == BufferCount)
+ return ErrorCode::NotEnoughMemory;
+ B = Next++;
+ if (Next == (Buffers + BufferCount))
+ Next = Buffers;
+ ++LiveBuffers;
+ }
+ Buf.Data = B->Buff.Data;
+ Buf.Generation = generation();
+ Buf.Size = B->Buff.Size;
+ B->Used = true;
return ErrorCode::Ok;
}
BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
// Check whether the buffer being referred to is within the bounds of the
// backing store's range.
- if (Buf.Data < BackingStore ||
- Buf.Data >
- reinterpret_cast<char *>(BackingStore) + (BufferCount * BufferSize))
- return ErrorCode::UnrecognizedBuffer;
+ BufferRep *B = nullptr;
+ {
+ SpinMutexLock Guard(&Mutex);
+ if (Buf.Data < BackingStore ||
+ Buf.Data > reinterpret_cast<char *>(BackingStore) +
+ (BufferCount * BufferSize)) {
+ if (Buf.Generation != generation()) {
+ Buf.Data = nullptr;
+ Buf.Size = 0;
+ Buf.Generation = 0;
+ return BufferQueue::ErrorCode::Ok;
+ }
+ return BufferQueue::ErrorCode::UnrecognizedBuffer;
+ }
- SpinMutexLock Guard(&Mutex);
+ // This points to a semantic bug, we really ought to not be releasing more
+ // buffers than we actually get.
+ if (LiveBuffers == 0) {
+ Buf.Data = nullptr;
+ Buf.Size = Buf.Size;
+ Buf.Generation = 0;
+ return ErrorCode::NotEnoughMemory;
+ }
- // This points to a semantic bug, we really ought to not be releasing more
- // buffers than we actually get.
- if (LiveBuffers == 0)
- return ErrorCode::NotEnoughMemory;
+ --LiveBuffers;
+ B = First++;
+ if (First == (Buffers + BufferCount))
+ First = Buffers;
+ }
// Now that the buffer has been released, we mark it as "used".
- auto Extents = atomic_load(&Buf.Extents, memory_order_acquire);
- atomic_store(&First->Buff.Extents, Extents, memory_order_release);
- First->Buff.Data = Buf.Data;
- First->Buff.Size = Buf.Size;
- First->Used = true;
+ B->Buff.Data = Buf.Data;
+ B->Buff.Size = Buf.Size;
+ B->Buff.Generation = Buf.Generation;
+ B->Used = true;
+ atomic_store(&B->Buff.Extents,
+ atomic_load(&Buf.Extents, memory_order_acquire),
+ memory_order_release);
Buf.Data = nullptr;
Buf.Size = 0;
- atomic_store(&Buf.Extents, 0, memory_order_release);
- --LiveBuffers;
- if (++First == (Buffers + BufferCount))
- First = Buffers;
-
+ Buf.Generation = 0;
return ErrorCode::Ok;
}
OpenPOWER on IntegriCloud