path: root/compiler-rt/lib/xray/xray_buffer_queue.h
author    Dean Michael Berris <dberris@google.com>  2018-11-20 01:00:26 +0000
committer Dean Michael Berris <dberris@google.com>  2018-11-20 01:00:26 +0000
commit    ba02cb58cf2ae104e6cf2c8b3d51e70cc027a470 (patch)
tree      86d5687f80d3610d0b0459393a84dedde9eb779d /compiler-rt/lib/xray/xray_buffer_queue.h
parent    8e0e35a3f5b7469f162ab14777e9cc22455214a0 (diff)
[XRay] Move buffer extents back to the heap
Summary:
This change addresses an issue that shows up in the synchronised race between threads writing into a buffer and another thread reading the buffer. In many cases we cannot guarantee that threads will always see the signal to finalise their buffers in time, despite the grace periods and the state machine maintained through atomic variables.

This change addresses that by ensuring that the instance updated by the writing thread to indicate how much of the buffer is "used" is the same instance read by the thread processing the buffer to be written out to disk or handled through the iterators. To do this, we ensure that all the "extents" instances live in their own backing store, in a contiguous page separate from the buffer-specific backing store. We also take precautions to ensure that the atomic variables are cache-line-sized, to prevent false sharing from unnecessarily causing cache contention on unrelated writes/reads.

It is feasible that in the future we may be able to move the storage of the extents objects into the single backing store, slightly changing the way we compute the size(s) of the buffers, but in the meantime we settle for the isolation afforded by having a separate backing store for the extents instances.

Reviewers: mboerger

Subscribers: jfb, llvm-commits

Differential Revision: https://reviews.llvm.org/D54684

llvm-svn: 347280
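As a rough illustration of the layout this commit describes (a sketch only, not the compiler-rt implementation: it uses standard C++ atomics rather than sanitizer_common's atomic_uint64_t, and kCacheLineSize, ExtentsRecord, and the helper functions are hypothetical names), keeping cache-line-aligned extents records in their own contiguous allocation lets the writer and the flushing thread operate on the same atomic instance without false sharing:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Assumed cache-line size; the real code sizes this per platform.
constexpr std::size_t kCacheLineSize = 64;

// One extents record per buffer, padded/aligned to a cache line so that
// adjacent records never share a line and unrelated updates do not contend.
struct alignas(kCacheLineSize) ExtentsRecord {
  std::atomic<std::uint64_t> Used{0}; // bytes written into the associated buffer
};

// The extents records live in one contiguous allocation, separate from the
// allocation that holds the buffer data itself.
ExtentsRecord *allocateExtents(std::size_t BufferCount) {
  return new ExtentsRecord[BufferCount];
}

// Writer thread: bump the shared counter as records are appended to buffer I.
void recordWrite(ExtentsRecord *Extents, std::size_t I, std::uint64_t Bytes) {
  Extents[I].Used.fetch_add(Bytes, std::memory_order_acq_rel);
}

// Reader/flushing thread: reads the very same atomic instance the writer
// updated, so it never observes a stale copy of the "used" count.
std::uint64_t readExtents(const ExtentsRecord *Extents, std::size_t I) {
  return Extents[I].Used.load(std::memory_order_acquire);
}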
Diffstat (limited to 'compiler-rt/lib/xray/xray_buffer_queue.h')
-rw-r--r--  compiler-rt/lib/xray/xray_buffer_queue.h | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/compiler-rt/lib/xray/xray_buffer_queue.h b/compiler-rt/lib/xray/xray_buffer_queue.h
index a60f7c18eed..ef2b433f9a3 100644
--- a/compiler-rt/lib/xray/xray_buffer_queue.h
+++ b/compiler-rt/lib/xray/xray_buffer_queue.h
@@ -32,10 +32,11 @@ namespace __xray {
class BufferQueue {
public:
/// ControlBlock represents the memory layout of how we interpret the backing
- /// store for all buffers managed by a BufferQueue instance. The ControlBlock
- /// has the reference count as the first member, sized according to
- /// platform-specific cache-line size. We never use the Buffer member of the
- /// union, which is only there for compiler-supported alignment and sizing.
+ /// store for all buffers and extents managed by a BufferQueue instance. The
+ /// ControlBlock has the reference count as the first member, sized according
+ /// to platform-specific cache-line size. We never use the Buffer member of
+ /// the union, which is only there for compiler-supported alignment and
+ /// sizing.
///
/// This ensures that the `Data` member will be placed at least kCacheLineSize
/// bytes from the beginning of the structure.
@@ -52,7 +53,7 @@ public:
};
struct Buffer {
- atomic_uint64_t Extents{0};
+ atomic_uint64_t *Extents = nullptr;
uint64_t Generation{0};
void *Data = nullptr;
size_t Size = 0;
@@ -60,6 +61,7 @@ public:
private:
friend class BufferQueue;
ControlBlock *BackingStore = nullptr;
+ ControlBlock *ExtentsBackingStore = nullptr;
size_t Count = 0;
};
@@ -142,6 +144,9 @@ private:
// The collocated ControlBlock and buffer storage.
ControlBlock *BackingStore;
+ // The collocated ControlBlock and extents storage.
+ ControlBlock *ExtentsBackingStore;
+
// A dynamically allocated array of BufferRep instances.
BufferRep *Buffers;
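After this patch, each Buffer refers to its extents through a pointer into the separate extents backing store instead of owning an inline counter. A minimal sketch of how a writer and a reader might share that pointer (the Buffer mirror, appendRecord, and usedBytes below are illustrative standard-C++ stand-ins, not the BufferQueue API):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative mirror of the patched layout: Extents is now a pointer to an
// atomic that lives in the extents backing store, not an inline member.
struct Buffer {
  std::atomic<std::uint64_t> *Extents = nullptr;
  std::uint64_t Generation = 0;
  void *Data = nullptr;
  std::size_t Size = 0;
};

// Writer thread: advance the shared extents counter as records are appended.
void appendRecord(Buffer &B, std::size_t RecordSize) {
  B.Extents->fetch_add(RecordSize, std::memory_order_acq_rel);
}

// Flushing/iterator thread: reads the same atomic instance the writer updated,
// so the "used" size is never a stale value copied along with the Buffer.
std::uint64_t usedBytes(const Buffer &B) {
  return B.Extents->load(std::memory_order_acquire);
}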