author     Dean Michael Berris <dberris@google.com>   2018-08-30 01:43:22 +0000
committer  Dean Michael Berris <dberris@google.com>   2018-08-30 01:43:22 +0000
commit     f6c87eb9651dfdf0f9c9b35a08258be152468e16 (patch)
tree       bb0e2a238abdc457b4d691ef98e04ce015b32819 /llvm/unittests/XRay
parent     6518929569f102bafbfd2b85df4a49642a2704df (diff)
[XRay][llvm] Load XRay Profiles
Summary:
This change implements the profile loading functionality in LLVM to support
XRay's profiling mode in compiler-rt. We introduce a type named
`llvm::xray::Profile` which allows building a profile representation. We can
load an XRay profile from a file to build Profile instances, or do it manually
through the Profile type's API. The intent is to get the `llvm-xray` tool to
generate `Profile` instances and use that as the common abstraction through
which all conversion and analysis can be done. In the future we can generate
`Profile` instances from `Trace` instances as well, through conversion
functions.

Some of the key operations supported by the `Profile` API are:

- Path interning (`Profile::internPath(...)`) which returns a unique path
  identifier.
- Block appending (`Profile::addBlock(...)`) to add thread-associated profile
  information.
- Path ID to Path lookup (`Profile::expandPath(...)`) to look up a PathID and
  return the original interned path.
- Block iteration.

A 'Path' in this context represents the function call stack in leaf-to-root
order. This is represented as a path in an internally managed prefix tree in
the `Profile` instance. Having a handle (PathID) to identify the unique Paths
we encounter for a particular Profile allows us to reduce the amount of memory
required to associate profile data to a particular Path.

This is the first of a series of patches to migrate the `llvm-stacks` tool
towards using a single profile representation.

Depends on D48653.

Reviewers: kpw, eizan

Reviewed By: kpw

Subscribers: kpw, thakis, mgorny, llvm-commits, hiraditya

Differential Revision: https://reviews.llvm.org/D48370

llvm-svn: 341012
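The tests in this patch exercise the new `Profile` API end to end. As a quick orientation, here is a minimal usage sketch assembled from the calls those tests make (`internPath`, `addBlock`, `expandPath`, block iteration). The details are assumptions inferred from the test expectations below, not a statement of the header's exact signatures; in particular it assumes `Profile::ThreadID` and the path function IDs print as plain integers.

```cpp
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/Profile.h"

using namespace llvm;
using namespace llvm::xray;

int main() {
  Profile P;

  // Paths are function call stacks in leaf-to-root order; interning the same
  // sequence twice yields the same PathID.
  auto Leaf321 = P.internPath({3, 2, 1});

  // Attach (CallCount, CumulativeLocalTime) data for interned paths to a
  // thread. addBlock returns an llvm::Error, e.g. when the block is empty.
  if (Error E = P.addBlock(Profile::Block{
          Profile::ThreadID{1},
          {{P.internPath({2, 1}), Profile::Data{1, 1000}},
           {Leaf321, Profile::Data{10, 100}}}}))
    logAllUnhandledErrors(std::move(E), errs(), "addBlock: ");

  // Iterate the blocks and expand each PathID back into its function-id
  // sequence.
  for (const auto &B : P) {
    outs() << "thread " << B.Thread << "\n";
    for (const auto &PathAndData : B.PathData) {
      if (auto PathOrErr = P.expandPath(PathAndData.first)) {
        for (auto FuncId : *PathOrErr)
          outs() << " " << FuncId;
        outs() << " -> calls=" << PathAndData.second.CallCount
               << " time=" << PathAndData.second.CumulativeLocalTime << "\n";
      } else {
        logAllUnhandledErrors(PathOrErr.takeError(), errs(), "expandPath: ");
      }
    }
  }
}
```

`mergeProfilesByThread` and `mergeProfilesByStack`, also exercised below, combine two such profiles keyed by thread ID or by interned stack respectively.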
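The commit message describes paths as entries in an internally managed prefix tree. The toy interner below is only a sketch of that idea under invented names (`PathInterner`, `TrieNode`); it does not mirror the actual data structures in the XRay `Profile` implementation, but it shows why identical and shared-prefix call stacks cost little extra memory and how a path handle can be expanded back into the original sequence.

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>
#include <map>
#include <vector>

using FuncID = int32_t;
using PathID = unsigned;

struct TrieNode {
  FuncID Func = 0;
  TrieNode *Parent = nullptr;            // Followed when expanding a PathID.
  std::map<FuncID, TrieNode *> Children; // Next function, leaf-to-root order.
  PathID ID = 0;                         // 0 means no interned path ends here.
};

class PathInterner {
  std::deque<TrieNode> Storage;              // Owns all nodes; stable addresses.
  std::vector<TrieNode *> NodeByID{nullptr}; // PathID 0 is unused.

public:
  PathInterner() { Storage.emplace_back(); } // Storage.front() is the root.

  // Identical leaf-to-root sequences reach the same node and therefore get
  // the same PathID; shared prefixes share nodes, which is the memory win.
  PathID intern(const std::vector<FuncID> &Path) {
    TrieNode *Node = &Storage.front();
    for (FuncID F : Path) {
      TrieNode *&Slot = Node->Children[F];
      if (!Slot) {
        Storage.push_back(TrieNode{F, Node, {}, 0});
        Slot = &Storage.back();
      }
      Node = Slot;
    }
    if (!Node->ID) {
      Node->ID = static_cast<PathID>(NodeByID.size());
      NodeByID.push_back(Node);
    }
    return Node->ID;
  }

  // Walk parent links back to the root to recover the original sequence.
  std::vector<FuncID> expand(PathID ID) const {
    std::vector<FuncID> Path;
    for (const TrieNode *N = NodeByID[ID]; N->Parent; N = N->Parent)
      Path.push_back(N->Func);
    std::reverse(Path.begin(), Path.end());
    return Path;
  }
};
```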
Diffstat (limited to 'llvm/unittests/XRay')
-rw-r--r--  llvm/unittests/XRay/CMakeLists.txt  |   2
-rw-r--r--  llvm/unittests/XRay/ProfileTest.cpp | 267
2 files changed, 269 insertions, 0 deletions
diff --git a/llvm/unittests/XRay/CMakeLists.txt b/llvm/unittests/XRay/CMakeLists.txt
index 9872c901d13..89e15047161 100644
--- a/llvm/unittests/XRay/CMakeLists.txt
+++ b/llvm/unittests/XRay/CMakeLists.txt
@@ -1,9 +1,11 @@
 set(LLVM_LINK_COMPONENTS
   Support
+  XRay
   )
 
 add_llvm_unittest(XRayTests
   GraphTest.cpp
+  ProfileTest.cpp
   )
 
 add_dependencies(XRayTests intrinsics_gen)
diff --git a/llvm/unittests/XRay/ProfileTest.cpp b/llvm/unittests/XRay/ProfileTest.cpp
new file mode 100644
index 00000000000..8486b7b27e8
--- /dev/null
+++ b/llvm/unittests/XRay/ProfileTest.cpp
@@ -0,0 +1,267 @@
+//===- ProfileTest.cpp - XRay Profile unit tests ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/Profile.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include <numeric>
+
+namespace llvm {
+namespace xray {
+namespace {
+
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::Eq;
+using ::testing::Field;
+using ::testing::Not;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(ProfileTest, CreateProfile) { Profile P; }
+
+TEST(ProfileTest, InternPath) {
+ Profile P;
+ auto Path0 = P.internPath({3, 2, 1});
+ auto Path1 = P.internPath({3, 2, 1});
+ auto Path2 = P.internPath({2, 1});
+ EXPECT_THAT(Path0, Eq(Path1));
+ EXPECT_THAT(Path0, Not(Eq(Path2)));
+}
+
+TEST(ProfileTest, ExpandPath) {
+ Profile P;
+ auto PathID = P.internPath({3, 2, 1});
+ auto PathOrError = P.expandPath(PathID);
+ if (!PathOrError)
+ FAIL() << "Error: " << PathOrError.takeError();
+ EXPECT_THAT(PathOrError.get(), ElementsAre(3, 2, 1));
+}
+
+TEST(ProfileTest, AddBlocks) {
+ Profile P;
+ // Expect an error on adding empty blocks.
+ EXPECT_TRUE(errorToBool(P.addBlock({})));
+
+ // Thread blocks may not be empty.
+ EXPECT_TRUE(errorToBool(P.addBlock({1, {}})));
+
+ // Thread blocks with data must succeed.
+ EXPECT_FALSE(errorToBool(P.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {
+ {P.internPath({2, 1}), Profile::Data{1, 1000}},
+ {P.internPath({3, 2, 1}), Profile::Data{10, 100}},
+ }})));
+}
+
+TEST(ProfileTest, CopyProfile) {
+ Profile P0, P1;
+ EXPECT_FALSE(errorToBool(P0.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {
+ {P0.internPath({2, 1}), Profile::Data{1, 1000}},
+ {P0.internPath({3, 2, 1}), Profile::Data{10, 100}},
+ }})));
+ P1 = P0;
+ EXPECT_THAT(
+ P1, UnorderedElementsAre(AllOf(
+ Field(&Profile::Block::Thread, Eq(Profile::ThreadID{1})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(P1.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u)))),
+ Pair(P1.internPath({3, 2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(10u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(100u)))))))));
+}
+
+TEST(ProfileTest, MoveProfile) {
+ Profile P0, P1;
+ EXPECT_FALSE(errorToBool(P0.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {
+ {P0.internPath({2, 1}), Profile::Data{1, 1000}},
+ {P0.internPath({3, 2, 1}), Profile::Data{10, 100}},
+ }})));
+ P1 = std::move(P0);
+ EXPECT_THAT(
+ P1, UnorderedElementsAre(AllOf(
+ Field(&Profile::Block::Thread, Eq(Profile::ThreadID{1})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(P1.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u)))),
+ Pair(P1.internPath({3, 2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(10u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(100u)))))))));
+ EXPECT_THAT(P0, UnorderedElementsAre());
+}
+
+TEST(ProfileTest, MergeProfilesByThread) {
+ Profile P0, P1;
+
+ // Set up the blocks for two different threads in P0.
+ EXPECT_FALSE(errorToBool(P0.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {{P0.internPath({2, 1}), Profile::Data{1, 1000}},
+ {P0.internPath({4, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(P0.addBlock(
+ Profile::Block{Profile::ThreadID{2},
+ {{P0.internPath({3, 1}), Profile::Data{1, 1000}}}})));
+
+ // Set up the blocks for two different threads in P1.
+ EXPECT_FALSE(errorToBool(P1.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {{P1.internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(P1.addBlock(
+ Profile::Block{Profile::ThreadID{2},
+ {{P1.internPath({3, 1}), Profile::Data{1, 1000}},
+ {P1.internPath({4, 1}), Profile::Data{1, 1000}}}})));
+
+ Profile Merged = mergeProfilesByThread(P0, P1);
+ EXPECT_THAT(
+ Merged,
+ UnorderedElementsAre(
+ // We want to see two threads after the merge.
+ AllOf(Field(&Profile::Block::Thread, Eq(Profile::ThreadID{1})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(Merged.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(2u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(2000u)))),
+ Pair(Merged.internPath({4, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u))))))),
+ AllOf(Field(&Profile::Block::Thread, Eq(Profile::ThreadID{2})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(Merged.internPath({3, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(2u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(2000u)))),
+ Pair(Merged.internPath({4, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u)))))))));
+}
+
+TEST(ProfileTest, MergeProfilesByStack) {
+ Profile P0, P1;
+ EXPECT_FALSE(errorToBool(P0.addBlock(
+ Profile::Block{Profile::ThreadID{1},
+ {{P0.internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(P1.addBlock(
+ Profile::Block{Profile::ThreadID{2},
+ {{P1.internPath({2, 1}), Profile::Data{1, 1000}}}})));
+
+ Profile Merged = mergeProfilesByStack(P0, P1);
+ EXPECT_THAT(Merged,
+ ElementsAre(AllOf(
+ // We expect that we lose the ThreadID dimension in this
+ // algorithm.
+ Field(&Profile::Block::Thread, Eq(Profile::ThreadID{0})),
+ Field(&Profile::Block::PathData,
+ ElementsAre(Pair(
+ Merged.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(2u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(2000u)))))))));
+}
+
+TEST(ProfileTest, MergeProfilesByStackAccumulate) {
+ std::vector<Profile> Profiles(3);
+ EXPECT_FALSE(errorToBool(Profiles[0].addBlock(Profile::Block{
+ Profile::ThreadID{1},
+ {{Profiles[0].internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(Profiles[1].addBlock(Profile::Block{
+ Profile::ThreadID{2},
+ {{Profiles[1].internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(Profiles[2].addBlock(Profile::Block{
+ Profile::ThreadID{3},
+ {{Profiles[2].internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ Profile Merged = std::accumulate(Profiles.begin(), Profiles.end(), Profile(),
+ mergeProfilesByStack);
+ EXPECT_THAT(Merged,
+ ElementsAre(AllOf(
+ // We expect that we lose the ThreadID dimension in this
+ // algorithm.
+ Field(&Profile::Block::Thread, Eq(Profile::ThreadID{0})),
+ Field(&Profile::Block::PathData,
+ ElementsAre(Pair(
+ Merged.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(3u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(3000u)))))))));
+}
+
+TEST(ProfileTest, MergeProfilesByThreadAccumulate) {
+ std::vector<Profile> Profiles(2);
+
+ // Set up the blocks for two different threads in Profiles[0].
+ EXPECT_FALSE(errorToBool(Profiles[0].addBlock(Profile::Block{
+ Profile::ThreadID{1},
+ {{Profiles[0].internPath({2, 1}), Profile::Data{1, 1000}},
+ {Profiles[0].internPath({4, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(Profiles[0].addBlock(Profile::Block{
+ Profile::ThreadID{2},
+ {{Profiles[0].internPath({3, 1}), Profile::Data{1, 1000}}}})));
+
+ // Set up the blocks for two different threads in Profiles[1].
+ EXPECT_FALSE(errorToBool(Profiles[1].addBlock(Profile::Block{
+ Profile::ThreadID{1},
+ {{Profiles[1].internPath({2, 1}), Profile::Data{1, 1000}}}})));
+ EXPECT_FALSE(errorToBool(Profiles[1].addBlock(Profile::Block{
+ Profile::ThreadID{2},
+ {{Profiles[1].internPath({3, 1}), Profile::Data{1, 1000}},
+ {Profiles[1].internPath({4, 1}), Profile::Data{1, 1000}}}})));
+
+ Profile Merged = std::accumulate(Profiles.begin(), Profiles.end(), Profile(),
+ mergeProfilesByThread);
+ EXPECT_THAT(
+ Merged,
+ UnorderedElementsAre(
+ // We want to see two threads after the merge.
+ AllOf(Field(&Profile::Block::Thread, Eq(Profile::ThreadID{1})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(Merged.internPath({2, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(2u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(2000u)))),
+ Pair(Merged.internPath({4, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u))))))),
+ AllOf(Field(&Profile::Block::Thread, Eq(Profile::ThreadID{2})),
+ Field(&Profile::Block::PathData,
+ UnorderedElementsAre(
+ Pair(Merged.internPath({3, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(2u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(2000u)))),
+ Pair(Merged.internPath({4, 1}),
+ AllOf(Field(&Profile::Data::CallCount, Eq(1u)),
+ Field(&Profile::Data::CumulativeLocalTime,
+ Eq(1000u)))))))));
+}
+// FIXME: Add a test creating a Trace and generating a Profile
+// FIXME: Add tests for ranking/sorting profile blocks by dimension
+
+} // namespace
+} // namespace xray
+} // namespace llvm