author    Dean Michael Berris <dberris@google.com>  2018-07-10 08:25:44 +0000
committer Dean Michael Berris <dberris@google.com>  2018-07-10 08:25:44 +0000
commit    0dd4f9f22fdba48cd30d4546f67c9ac8de3f1372 (patch)
tree      4a2172684cda45414f3f90e510def847e7ad9412 /compiler-rt/lib/xray/tests/unit/segmented_array_test.cc
parent    d1bf9ef0c7926a64a202865a6b879190c9b4cf9c (diff)
[XRay][compiler-rt] xray::Array Freelist and Iterator Updates
Summary:
We found a bug while working on a benchmark for the profiling mode, which manifests as a segmentation fault in the profiling handler's implementation. This change adds unit tests which replicate the issue in isolation.

We tracked this down to a bug in the implementation of the Freelist in the `xray::Array` type: when we trim the array by a number of elements, we had been incorrectly assigning pointers for the links in the freelist of chunk nodes. We took the chance to add more debug-only assertions to this code path so that we can verify these assumptions in debug builds.

In the process, we also took the opportunity to use iterators to implement both `front()` and `back()`, which exposed a bug in the iterator decrement operation. In particular, when we decrement past a chunk-size boundary, we end up moving too far back and reaching the `SentinelChunk` prematurely.

This change unblocks contributing the non-crashing version of the benchmarks to the test-suite as well.

Reviewers: kpw

Subscribers: mgorny, llvm-commits

Differential Revision: https://reviews.llvm.org/D48653

llvm-svn: 336644
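To make the two bug classes above concrete, here is a hypothetical, minimal C++ sketch. It is not the actual `xray::Array` implementation: `Chunk`, `Iterator`, `pushToFreelist`, and the toy chunk size of 4 are invented for illustration. It shows (a) an iterator decrement that crosses a chunk boundary exactly once, rather than stepping back too eagerly and landing on a sentinel, and (b) freelist relinking where a trimmed chunk's links are re-pointed at the current freelist head rather than left pointing into the live chain.

  #include <cassert>
  #include <cstddef>

  struct Chunk {
    Chunk *Prev = nullptr;
    Chunk *Next = nullptr;
    long Data[4]; // toy chunk holding 4 elements
  };

  struct Iterator {
    static constexpr size_t ChunkSize = 4;
    Chunk *C;
    size_t Offset; // global index of the element this iterator points at

    Iterator &operator--() {
      assert(Offset > 0 && "decrementing past the beginning");
      --Offset;
      // Cross a chunk boundary exactly once: only when the new offset lands
      // on the last slot of the previous chunk. Moving back more eagerly
      // than this is the class of bug that reaches the sentinel prematurely.
      if (Offset % ChunkSize == ChunkSize - 1)
        C = C->Prev;
      return *this;
    }

    long &operator*() { return C->Data[Offset % ChunkSize]; }
  };

  // Freelist relinking in the same spirit: a chunk released by a trim must
  // have its links re-pointed at the current freelist head, not left
  // pointing into the live chain.
  void pushToFreelist(Chunk *&Freelist, Chunk *C) {
    C->Prev = nullptr;
    C->Next = Freelist;
    if (Freelist)
      Freelist->Prev = C;
    Freelist = C;
  }

  int main() {
    Chunk C1, C2;
    C1.Next = &C2;
    C2.Prev = &C1;
    for (long i = 0; i < 4; ++i) {
      C1.Data[i] = i;
      C2.Data[i] = i + 4;
    }
    Iterator It{&C2, 7}; // points at the last element, value 7
    assert(*It == 7);
    --It;
    assert(*It == 6); // still inside the second chunk
    --It;
    --It;
    assert(*It == 4); // first slot of the second chunk
    --It;
    assert(*It == 3); // correctly crossed back into the first chunk

    Chunk *Freelist = nullptr;
    pushToFreelist(Freelist, &C2);
    assert(Freelist == &C2 && C2.Next == nullptr);
    return 0;
  }

The unit tests in the diff below exercise the same two behaviours against the real type: `IteratorTrimBehaviour` checks `back()` across a trim of exactly one chunk, and `SimulateStackBehaviour` interleaves `Append` and `trim(1)` the way the profiling handler's shadow stack does.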
Diffstat (limited to 'compiler-rt/lib/xray/tests/unit/segmented_array_test.cc')
-rw-r--r--  compiler-rt/lib/xray/tests/unit/segmented_array_test.cc | 80
1 file changed, 66 insertions(+), 14 deletions(-)
diff --git a/compiler-rt/lib/xray/tests/unit/segmented_array_test.cc b/compiler-rt/lib/xray/tests/unit/segmented_array_test.cc
index dd07e7f137b..539162d7d2f 100644
--- a/compiler-rt/lib/xray/tests/unit/segmented_array_test.cc
+++ b/compiler-rt/lib/xray/tests/unit/segmented_array_test.cc
@@ -107,32 +107,84 @@ TEST(SegmentedArrayTest, IteratorRetreat) {
}
TEST(SegmentedArrayTest, IteratorTrimBehaviour) {
- Array<TestData> data;
- ASSERT_TRUE(data.empty());
- auto I0Begin = data.begin(), I0End = data.end();
- // Add enough elements in data to have more than one chunk.
- constexpr auto ChunkX2 = Array<TestData>::ChunkSize * 2;
+ using AllocatorType = typename Array<TestData>::AllocatorType;
+ AllocatorType A(1 << 10, 0);
+ Array<TestData> Data(A);
+ ASSERT_TRUE(Data.empty());
+ auto I0Begin = Data.begin(), I0End = Data.end();
+ // Add enough elements in Data to have more than one chunk.
+ constexpr auto Chunk = Array<TestData>::ChunkSize;
+ constexpr auto ChunkX2 = Chunk * 2;
for (auto i = ChunkX2; i > 0u; --i) {
- data.Append({static_cast<s64>(i), static_cast<s64>(i)});
+ Data.AppendEmplace(static_cast<s64>(i), static_cast<s64>(i));
}
- ASSERT_EQ(data.size(), ChunkX2);
+ ASSERT_EQ(Data.size(), ChunkX2);
+ {
+ auto &Back = Data.back();
+ ASSERT_EQ(Back.First, 1);
+ ASSERT_EQ(Back.Second, 1);
+ }
+
// Trim one chunk's elements worth.
- data.trim(ChunkX2 / 2);
- ASSERT_EQ(data.size(), ChunkX2 / 2);
+ Data.trim(Chunk);
+ ASSERT_EQ(Data.size(), Chunk);
+
+ // Check that we are still able to access 'back' properly.
+ {
+ auto &Back = Data.back();
+ ASSERT_EQ(Back.First, static_cast<s64>(Chunk + 1));
+ ASSERT_EQ(Back.Second, static_cast<s64>(Chunk + 1));
+ }
+
// Then trim until it's empty.
- data.trim(ChunkX2 / 2);
- ASSERT_TRUE(data.empty());
+ Data.trim(Chunk);
+ ASSERT_TRUE(Data.empty());
// Here our iterators should be the same.
- auto I1Begin = data.begin(), I1End = data.end();
+ auto I1Begin = Data.begin(), I1End = Data.end();
EXPECT_EQ(I0Begin, I1Begin);
EXPECT_EQ(I0End, I1End);
// Then we ensure that adding elements back works just fine.
for (auto i = ChunkX2; i > 0u; --i) {
- data.Append({static_cast<s64>(i), static_cast<s64>(i)});
+ Data.AppendEmplace(static_cast<s64>(i), static_cast<s64>(i));
+ }
+ EXPECT_EQ(Data.size(), ChunkX2);
+}
+
+struct ShadowStackEntry {
+ uint64_t EntryTSC = 0;
+ uint64_t *NodePtr = nullptr;
+ ShadowStackEntry(uint64_t T, uint64_t *N) : EntryTSC(T), NodePtr(N) {}
+};
+
+TEST(SegmentedArrayTest, SimulateStackBehaviour) {
+ using AllocatorType = typename Array<ShadowStackEntry>::AllocatorType;
+ AllocatorType A(1 << 10, 0);
+ Array<ShadowStackEntry> Data(A);
+ static uint64_t Dummy = 0;
+ constexpr uint64_t Max = 9;
+
+ for (uint64_t i = 0; i < Max; ++i) {
+ auto P = Data.Append({i, &Dummy});
+ ASSERT_NE(P, nullptr);
+ ASSERT_EQ(P->NodePtr, &Dummy);
+ auto &Back = Data.back();
+ ASSERT_EQ(Back.NodePtr, &Dummy);
+ ASSERT_EQ(Back.EntryTSC, i);
+ }
+
+ // Simulate a stack by checking the data from the end as we're trimming.
+ auto Counter = Max;
+ ASSERT_EQ(Data.size(), size_t(Max));
+ while (!Data.empty()) {
+ const auto &Top = Data.back();
+ uint64_t *TopNode = Top.NodePtr;
+ EXPECT_EQ(TopNode, &Dummy) << "Counter = " << Counter;
+ Data.trim(1);
+ --Counter;
+ ASSERT_EQ(Data.size(), size_t(Counter));
}
- EXPECT_EQ(data.size(), ChunkX2);
}
} // namespace