-rw-r--r--  compiler-rt/lib/xray/tests/unit/buffer_queue_test.cc | 13
-rw-r--r--  compiler-rt/lib/xray/xray_buffer_queue.cc            | 95
-rw-r--r--  compiler-rt/lib/xray/xray_buffer_queue.h             | 27
3 files changed, 75 insertions(+), 60 deletions(-)
diff --git a/compiler-rt/lib/xray/tests/unit/buffer_queue_test.cc b/compiler-rt/lib/xray/tests/unit/buffer_queue_test.cc
index 8aa366a20df..a30343e188f 100644
--- a/compiler-rt/lib/xray/tests/unit/buffer_queue_test.cc
+++ b/compiler-rt/lib/xray/tests/unit/buffer_queue_test.cc
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <atomic>
@@ -19,9 +20,12 @@
#include <unistd.h>
namespace __xray {
+namespace {
static constexpr size_t kSize = 4096;
+using ::testing::Eq;
+
TEST(BufferQueueTest, API) {
bool Success = false;
BufferQueue Buffers(kSize, 1, Success);
@@ -58,8 +62,12 @@ TEST(BufferQueueTest, ReleaseUnknown) {
Buf.Data = reinterpret_cast<void *>(0xdeadbeef);
Buf.Size = kSize;
Buf.Generation = Buffers.generation();
- EXPECT_EQ(BufferQueue::ErrorCode::UnrecognizedBuffer,
- Buffers.releaseBuffer(Buf));
+
+ BufferQueue::Buffer Known;
+ EXPECT_THAT(Buffers.getBuffer(Known), Eq(BufferQueue::ErrorCode::Ok));
+ EXPECT_THAT(Buffers.releaseBuffer(Buf),
+ Eq(BufferQueue::ErrorCode::UnrecognizedBuffer));
+ EXPECT_THAT(Buffers.releaseBuffer(Known), Eq(BufferQueue::ErrorCode::Ok));
}
TEST(BufferQueueTest, ErrorsWhenFinalising) {
@@ -223,4 +231,5 @@ TEST(BufferQueueTest, GenerationalSupportAcrossThreads) {
ASSERT_EQ(Counter.load(std::memory_order_acquire), 0);
}
+} // namespace
} // namespace __xray
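The release path this test exercises depends on generation tracking, not just pointer bounds: a buffer from a previous generation is accepted and merely dropped. A minimal sketch of that behavior in the same gtest style as the file above (the test name is hypothetical; this assumes the same unit-test setup):

TEST(BufferQueueTest, StaleGenerationReleaseSketch) {
  bool Success = false;
  BufferQueue Buffers(kSize, 1, Success);
  ASSERT_TRUE(Success);
  BufferQueue::Buffer Buf;
  ASSERT_EQ(Buffers.getBuffer(Buf), BufferQueue::ErrorCode::Ok);

  // Finalize, then re-initialise; init() bumps the queue's generation, so
  // Buf now refers to the previous generation's backing store.
  ASSERT_EQ(Buffers.finalize(), BufferQueue::ErrorCode::Ok);
  ASSERT_EQ(Buffers.init(kSize, 1), BufferQueue::ErrorCode::Ok);

  // The stale buffer is released with Ok (its reference on the old backing
  // store is dropped and the handle is cleared) rather than being rejected
  // as UnrecognizedBuffer.
  EXPECT_EQ(Buffers.releaseBuffer(Buf), BufferQueue::ErrorCode::Ok);
  EXPECT_EQ(Buf.Data, nullptr);
}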
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.cc b/compiler-rt/lib/xray/xray_buffer_queue.cc
index 3f08356c3a6..3cfe0bc3dd1 100644
--- a/compiler-rt/lib/xray/xray_buffer_queue.cc
+++ b/compiler-rt/lib/xray/xray_buffer_queue.cc
@@ -27,19 +27,30 @@ using namespace __sanitizer;
namespace {
-void decRefCount(unsigned char *ControlBlock, size_t Size, size_t Count) {
- if (ControlBlock == nullptr)
+BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
+ auto B =
+ allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
+ return B == nullptr ? nullptr
+ : reinterpret_cast<BufferQueue::ControlBlock *>(B);
+}
+
+void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
+ size_t Count) {
+ deallocateBuffer(reinterpret_cast<unsigned char *>(C),
+ (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
+}
+
+void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
+ if (C == nullptr)
return;
- auto *RefCount = reinterpret_cast<atomic_uint64_t *>(ControlBlock);
- if (atomic_fetch_sub(RefCount, 1, memory_order_acq_rel) == 1)
- deallocateBuffer(ControlBlock, (Size * Count) + kCacheLineSize);
+ if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
+ deallocControlBlock(C, Size, Count);
}
-void incRefCount(unsigned char *ControlBlock) {
- if (ControlBlock == nullptr)
+void incRefCount(BufferQueue::ControlBlock *C) {
+ if (C == nullptr)
return;
- auto *RefCount = reinterpret_cast<atomic_uint64_t *>(ControlBlock);
- atomic_fetch_add(RefCount, 1, memory_order_acq_rel);
+ atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
}
} // namespace
@@ -55,14 +66,15 @@ BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
bool Success = false;
BufferSize = BS;
BufferCount = BC;
- BackingStore = allocateBuffer((BufferSize * BufferCount) + kCacheLineSize);
+
+ BackingStore = allocControlBlock(BufferSize, BufferCount);
if (BackingStore == nullptr)
return BufferQueue::ErrorCode::NotEnoughMemory;
auto CleanupBackingStore = __sanitizer::at_scope_exit([&, this] {
if (Success)
return;
- deallocateBuffer(BackingStore, (BufferSize * BufferCount) + kCacheLineSize);
+ deallocControlBlock(BackingStore, BufferSize, BufferCount);
BackingStore = nullptr;
});
@@ -74,19 +86,19 @@ BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
// to the new generation.
atomic_fetch_add(&Generation, 1, memory_order_acq_rel);
- Success = true;
-
- // First, we initialize the refcount in the RefCountedBackingStore, which we
- // treat as being at the start of the BackingStore pointer.
- auto ControlBlock = reinterpret_cast<atomic_uint64_t *>(BackingStore);
- atomic_store(ControlBlock, 1, memory_order_release);
+ // First, we initialize the refcount in the ControlBlock, which we treat as
+ // being at the start of the BackingStore pointer.
+ atomic_store(&BackingStore->RefCount, 1, memory_order_release);
+  // Then we initialise the individual buffers that sub-divide the whole backing
+  // store. Buffer i starts at the `Data` member of the ControlBlock, offset by
+  // BufferSize * i bytes.
for (size_t i = 0; i < BufferCount; ++i) {
auto &T = Buffers[i];
auto &Buf = T.Buff;
atomic_store(&Buf.Extents, 0, memory_order_release);
Buf.Generation = generation();
- Buf.Data = BackingStore + kCacheLineSize + (BufferSize * i);
+ Buf.Data = &BackingStore->Data + (BufferSize * i);
Buf.Size = BufferSize;
Buf.BackingStore = BackingStore;
Buf.Count = BufferCount;
@@ -97,6 +109,7 @@ BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
First = Buffers;
LiveBuffers = 0;
atomic_store(&Finalizing, 0, memory_order_release);
+ Success = true;
return BufferQueue::ErrorCode::Ok;
}
@@ -131,11 +144,8 @@ BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
}
incRefCount(BackingStore);
- Buf.Data = B->Buff.Data;
+ Buf = B->Buff;
Buf.Generation = generation();
- Buf.Size = B->Buff.Size;
- Buf.BackingStore = BackingStore;
- Buf.Count = BufferCount;
B->Used = true;
return ErrorCode::Ok;
}
@@ -146,31 +156,16 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
BufferRep *B = nullptr;
{
SpinMutexLock Guard(&Mutex);
- if (Buf.Data < BackingStore ||
- Buf.Data > reinterpret_cast<char *>(BackingStore) +
- (BufferCount * BufferSize)) {
- if (Buf.Generation != generation()) {
- decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
- Buf.Data = nullptr;
- Buf.Size = 0;
- Buf.Generation = 0;
- Buf.Count = 0;
- Buf.BackingStore = nullptr;
- return BufferQueue::ErrorCode::Ok;
- }
- return BufferQueue::ErrorCode::UnrecognizedBuffer;
- }
-
- if (LiveBuffers == 0) {
+ if (Buf.Generation != generation() || LiveBuffers == 0) {
decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
-      Buf.Data = nullptr;
-      Buf.Size = Buf.Size;
-      Buf.Generation = 0;
-      Buf.BackingStore = nullptr;
-      Buf.Count = 0;
-      return ErrorCode::Ok;
+      Buf = {};
+      return BufferQueue::ErrorCode::Ok;
}
+ if (Buf.Data < &BackingStore->Data ||
+ Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
+ return BufferQueue::ErrorCode::UnrecognizedBuffer;
+
--LiveBuffers;
B = First++;
if (First == (Buffers + BufferCount))
@@ -178,21 +173,13 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
}
// Now that the buffer has been released, we mark its rep as "used" (i.e. it
// has been handed out at least once).
- B->Buff.Data = Buf.Data;
- B->Buff.Size = Buf.Size;
- B->Buff.Generation = Buf.Generation;
- B->Buff.BackingStore = Buf.BackingStore;
- B->Buff.Count = Buf.Count;
+ B->Buff = Buf;
B->Used = true;
decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
atomic_store(&B->Buff.Extents,
atomic_load(&Buf.Extents, memory_order_acquire),
memory_order_release);
- Buf.Data = nullptr;
- Buf.Size = 0;
- Buf.Generation = 0;
- Buf.BackingStore = nullptr;
- Buf.Count = 0;
+ Buf = {};
return ErrorCode::Ok;
}
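The reference-counting protocol above can be sketched in isolation. The following is an illustrative stand-in, not the library code: it swaps the sanitizer's atomic_uint64_t and allocateBuffer/deallocateBuffer for std::atomic and calloc/free, and hard-codes a 64-byte cache line:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

struct ControlBlock {
  union {
    std::atomic<uint64_t> RefCount;
    char Buffer[64]; // stand-in for kCacheLineSize padding
  };
  char Data[1]; // first byte of the co-located buffer storage
};

ControlBlock *allocControlBlock(size_t Size, size_t Count) {
  // Drop the 1-byte Data placeholder, then append Count buffers of Size bytes.
  void *B = std::calloc(1, (sizeof(ControlBlock) - 1) + (Size * Count));
  return reinterpret_cast<ControlBlock *>(B);
}

void incRefCount(ControlBlock *C) {
  if (C == nullptr)
    return;
  C->RefCount.fetch_add(1, std::memory_order_acq_rel);
}

void decRefCount(ControlBlock *C, size_t Size, size_t Count) {
  if (C == nullptr)
    return;
  // In the real code, Size and Count recompute the deallocation size; free()
  // does not need them, so they are unused in this sketch.
  (void)Size;
  (void)Count;
  // Whoever drops the count to zero frees the whole block, so the backing
  // store outlives the queue's own bookkeeping while buffers are in flight.
  if (C->RefCount.fetch_sub(1, std::memory_order_acq_rel) == 1)
    std::free(C);
}

int main() {
  auto *C = allocControlBlock(4096, 10);
  new (&C->RefCount) std::atomic<uint64_t>(1); // the queue's own reference
  incRefCount(C);           // a buffer is handed out
  decRefCount(C, 4096, 10); // that buffer is released
  decRefCount(C, 4096, 10); // the queue is torn down; the block is freed here
}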
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.h b/compiler-rt/lib/xray/xray_buffer_queue.h
index 91e104e5eb5..a60f7c18eed 100644
--- a/compiler-rt/lib/xray/xray_buffer_queue.h
+++ b/compiler-rt/lib/xray/xray_buffer_queue.h
@@ -31,6 +31,26 @@ namespace __xray {
/// trace collection.
class BufferQueue {
public:
+ /// ControlBlock represents the memory layout of how we interpret the backing
+ /// store for all buffers managed by a BufferQueue instance. The ControlBlock
+ /// has the reference count as the first member, sized according to
+ /// platform-specific cache-line size. We never use the Buffer member of the
+ /// union, which is only there for compiler-supported alignment and sizing.
+ ///
+ /// This ensures that the `Data` member will be placed at least kCacheLineSize
+ /// bytes from the beginning of the structure.
+ struct ControlBlock {
+ union {
+ atomic_uint64_t RefCount;
+ char Buffer[kCacheLineSize];
+ };
+
+    /// This must have size 1 to conform to the C++ rules for array data
+    /// members. Size computations subtract this 1 byte when allocating and
+    /// deallocating the backing store (see allocControlBlock).
+ char Data[1];
+ };
+
struct Buffer {
atomic_uint64_t Extents{0};
uint64_t Generation{0};
@@ -39,7 +59,7 @@ public:
private:
friend class BufferQueue;
- unsigned char *BackingStore = nullptr;
+ ControlBlock *BackingStore = nullptr;
size_t Count = 0;
};
@@ -119,9 +139,8 @@ private:
SpinMutex Mutex;
atomic_uint8_t Finalizing;
- // A pointer to a contiguous block of memory to serve as the backing store for
- // all the individual buffers handed out.
- uint8_t *BackingStore;
+ // The collocated ControlBlock and buffer storage.
+ ControlBlock *BackingStore;
// A dynamically allocated array of BufferRep instances.
BufferRep *Buffers;
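The ControlBlock sizing can likewise be checked standalone. A minimal sketch, assuming a 64-byte cache line (the real kCacheLineSize is platform-specific) and std::atomic in place of the sanitizer type:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t kCacheLineSize = 64; // assumption for this sketch

struct ControlBlock {
  union {
    std::atomic<uint64_t> RefCount;
    char Buffer[kCacheLineSize]; // pads RefCount out to one full cache line
  };
  char Data[1]; // placeholder first byte of the buffer storage
};

int main() {
  constexpr size_t Size = 4096, Count = 10;
  // Mirrors allocControlBlock: subtract the placeholder byte, then append the
  // buffer storage itself.
  constexpr size_t Alloc = (sizeof(ControlBlock) - 1) + (Size * Count);

  alignas(ControlBlock) unsigned char Raw[sizeof(ControlBlock)];
  auto *CB = reinterpret_cast<ControlBlock *>(Raw);
  auto DataOffset =
      reinterpret_cast<char *>(&CB->Data) - reinterpret_cast<char *>(CB);

  // Data lands exactly kCacheLineSize bytes in, so the refcount and the
  // buffer payload never share a cache line. Note sizeof(ControlBlock) - 1
  // also carries the struct's tail padding, so the allocation slightly
  // over-allocates, which is harmless.
  std::printf("sizeof=%zu data-offset=%td alloc=%zu\n", sizeof(ControlBlock),
              DataOffset, Alloc);
}

With these definitions, buffer i is addressed as &CB->Data + (Size * i), exactly as init() does above.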