author     George Burgess IV <george.burgess.iv@gmail.com>  2018-09-13 20:33:04 +0000
committer  George Burgess IV <george.burgess.iv@gmail.com>  2018-09-13 20:33:04 +0000
commit     d565b0f017e216aa5693484249628d03e98e8d27 (patch)
tree       13d6233bfdd58f7be63e0d6ece982409992fa0b2
parent     083744852ab4a95ccde9fae49377583c181247d7 (diff)
[PartiallyInlineLibCalls] Add DebugCounter support
This adds DebugCounter support to the PartiallyInlineLibCalls pass,
which should make debugging/automated bisection easier in the future.
Patch by Zhizhou Yang!
Differential Revision: https://reviews.llvm.org/D50093
llvm-svn: 342172
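
For reference, the counter added here is driven through opt's -debug-counter option, exactly as the new test below does. A minimal bisection-style invocation might look like the following sketch (the input/output file names and the skip/count values are placeholders, not part of this change):

    # Skip the first candidate sqrt call, transform only the next one,
    # and leave all remaining candidates untouched.
    opt -S -partially-inline-libcalls \
        -debug-counter=partially-inline-libcalls-transform-skip=1,partially-inline-libcalls-transform-count=1 \
        -mtriple=x86_64-unknown-linux-gnu input.ll -o transformed.ll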
-rw-r--r--  llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp       |  6
-rw-r--r--  llvm/test/Other/X86/debugcounter-partiallyinlinelibcalls.ll  | 44
2 files changed, 50 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 1748815c594..05ea9144f66 100644
--- a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/DebugCounter.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
@@ -24,6 +25,8 @@
 using namespace llvm;
 
 #define DEBUG_TYPE "partially-inline-libcalls"
+DEBUG_COUNTER(PILCounter, "partially-inline-libcalls-transform",
+              "Controls transformations in partially-inline-libcalls");
 
 static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
                          BasicBlock &CurrBB, Function::iterator &BB,
@@ -33,6 +36,9 @@ static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
   if (Call->onlyReadsMemory())
     return false;
 
+  if (!DebugCounter::shouldExecute(PILCounter))
+    return false;
+
   // Do the following transformation:
   //
   // (before)
diff --git a/llvm/test/Other/X86/debugcounter-partiallyinlinelibcalls.ll b/llvm/test/Other/X86/debugcounter-partiallyinlinelibcalls.ll
new file mode 100644
index 00000000000..f9ca2ebcaa0
--- /dev/null
+++ b/llvm/test/Other/X86/debugcounter-partiallyinlinelibcalls.ll
@@ -0,0 +1,44 @@
+; REQUIRES: asserts
+; RUN: opt -S -debug-counter=partially-inline-libcalls-transform-skip=1,partially-inline-libcalls-transform-count=1 \
+; RUN:     -partially-inline-libcalls -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+;; Test that, with debug counters on, we will skip the first optimization opportunity, perform next 1,
+;; and ignore all the others left.
+
+define float @f1(float %val) {
+; CHECK-LABEL: @f1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RES:%.*]] = tail call float @sqrtf(float [[VAL:%.*]])
+; CHECK-NEXT:    ret float [[RES:%.*]]
+entry:
+  %res = tail call float @sqrtf(float %val)
+  ret float %res
+}
+
+define float @f2(float %val) {
+; CHECK-LABEL: @f2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RES:%.*]] = tail call float @sqrtf(float [[VAL:%.*]]) #0
+; CHECK-NEXT:    [[TMP0:%.*]] = fcmp oge float [[VAL]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ENTRY_SPLIT:%.*]], label [[CALL_SQRT:%.*]]
+; CHECK:       call.sqrt:
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call float @sqrtf(float [[VAL]])
+; CHECK-NEXT:    br label [[ENTRY_SPLIT]]
+; CHECK:       entry.split:
+; CHECK-NEXT:    [[TMP2:%.*]] = phi float [ [[RES]], [[ENTRY:%.*]] ], [ [[TMP1]], [[CALL_SQRT]] ]
+; CHECK-NEXT:    ret float [[TMP2]]
+entry:
+  %res = tail call float @sqrtf(float %val)
+  ret float %res
+}
+
+define float @f3(float %val) {
+; CHECK-LABEL: @f3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RES:%.*]] = tail call float @sqrtf(float [[VAL:%.*]])
+; CHECK-NEXT:    ret float [[RES:%.*]]
+entry:
+  %res = tail call float @sqrtf(float %val)
+  ret float %res
+}
+
+declare float @sqrtf(float)