diff options
| author | Dean Michael Berris <dberris@google.com> | 2018-09-17 03:09:01 +0000 |
|---|---|---|
| committer | Dean Michael Berris <dberris@google.com> | 2018-09-17 03:09:01 +0000 |
| commit | 1a23d3bbce5ac663f92791b0d74bff70b7fb86ee (patch) | |
| tree | 624a2a16b91b05f5fca0faec170b1801b74ecba9 /compiler-rt/lib/xray/xray_buffer_queue.h | |
| parent | d5577aea07ccca29baf7f1fe22840cf188ad1688 (diff) | |
| download | bcm5719-llvm-1a23d3bbce5ac663f92791b0d74bff70b7fb86ee.tar.gz bcm5719-llvm-1a23d3bbce5ac663f92791b0d74bff70b7fb86ee.zip | |
[XRay] Simplify FDR buffer management
Summary:
This change makes XRay FDR mode use a single backing store for the
buffer queue, and have indexes into that backing store instead. We also
remove the reliance on the internal allocator implementation in the FDR
mode logging implementation.
In the process of making this change we found an inconsistency with the
way we're returning buffers to the queue, and how we're setting the
extents. We take the opportunity to simplify the way we're managing the
extents of each buffer. It turns out we do not need the indirection for
the extents, so we co-host the atomic 64-bit int with the buffer object.
It also seems that we have not been returning the buffers for the thread
running the flush functionality when writing out the files, so we could
run into a situation where data is missing.
We consolidate all the allocation routines now into xray_allocator.h,
where we used to have routines defined in xray_buffer_queue.cc.
Reviewers: mboerger, eizan
Subscribers: jfb, llvm-commits
Differential Revision: https://reviews.llvm.org/D52077
llvm-svn: 342356
Diffstat (limited to 'compiler-rt/lib/xray/xray_buffer_queue.h')
| -rw-r--r-- | compiler-rt/lib/xray/xray_buffer_queue.h | 25 |
1 file changed, 13 insertions, 12 deletions
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.h b/compiler-rt/lib/xray/xray_buffer_queue.h index e76fa7983c9..bfb091e3f83 100644 --- a/compiler-rt/lib/xray/xray_buffer_queue.h +++ b/compiler-rt/lib/xray/xray_buffer_queue.h @@ -18,6 +18,7 @@ #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" +#include "xray_defs.h" #include <cstddef> namespace __xray { @@ -29,14 +30,10 @@ namespace __xray { /// trace collection. class BufferQueue { public: - struct alignas(64) BufferExtents { - atomic_uint64_t Size; - }; - struct Buffer { + atomic_uint64_t Extents{0}; void *Data = nullptr; size_t Size = 0; - BufferExtents *Extents; }; struct BufferRep { @@ -76,8 +73,10 @@ private: T *operator->() const { return &(Buffers[Offset].Buff); } - Iterator(BufferRep *Root, size_t O, size_t M) - : Buffers(Root), Offset(O), Max(M) { + Iterator(BufferRep *Root, size_t O, size_t M) XRAY_NEVER_INSTRUMENT + : Buffers(Root), + Offset(O), + Max(M) { // We want to advance to the first Offset where the 'Used' property is // true, or to the end of the list/queue. while (!Buffers[Offset].Used && Offset != Max) { @@ -107,16 +106,18 @@ private: // Size of each individual Buffer. size_t BufferSize; - BufferRep *Buffers; - // Amount of pre-allocated buffers. size_t BufferCount; SpinMutex Mutex; atomic_uint8_t Finalizing; - // Pointers to buffers managed/owned by the BufferQueue. - void **OwnedBuffers; + // A pointer to a contiguous block of memory to serve as the backing store for + // all the individual buffers handed out. + void *BackingStore; + + // A dynamically allocated array of BufferRep instances. + BufferRep *Buffers; // Pointer to the next buffer to be handed out. BufferRep *Next; @@ -198,7 +199,7 @@ public: /// Applies the provided function F to each Buffer in the queue, only if the /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a /// releaseBuffer(...) operation). 
- template <class F> void apply(F Fn) { + template <class F> void apply(F Fn) XRAY_NEVER_INSTRUMENT { SpinMutexLock G(&Mutex); for (auto I = begin(), E = end(); I != E; ++I) Fn(*I); |

