summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/CodeGen/BasicTargetTransformInfo.cpp63
-rw-r--r--llvm/lib/Target/X86/X86SchedHaswell.td3
-rw-r--r--llvm/lib/Target/X86/X86SchedSandyBridge.td3
-rw-r--r--llvm/lib/Target/X86/X86ScheduleAtom.td4
-rw-r--r--llvm/lib/Target/X86/X86ScheduleSLM.td3
-rw-r--r--llvm/lib/Target/X86/X86TargetTransformInfo.cpp76
6 files changed, 75 insertions, 77 deletions
diff --git a/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp b/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
index 4b895092d3b..763a4c0b3cf 100644
--- a/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
+++ b/llvm/lib/CodeGen/BasicTargetTransformInfo.cpp
@@ -16,11 +16,18 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>
using namespace llvm;
+static cl::opt<unsigned>
+PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
+ cl::desc("Threshold for partial unrolling"), cl::Hidden);
+
#define DEBUG_TYPE "basictti"
namespace {
@@ -187,7 +194,61 @@ bool BasicTTI::haveFastSqrt(Type *Ty) const {
return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}
-void BasicTTI::getUnrollingPreferences(Loop *, UnrollingPreferences &) const { }
+void BasicTTI::getUnrollingPreferences(Loop *L,
+ UnrollingPreferences &UP) const {
+ // This unrolling functionality is target independent, but to provide some
+  // motivation for its intended use, for x86:
+
+ // According to the Intel 64 and IA-32 Architectures Optimization Reference
+ // Manual, Intel Core models and later have a loop stream detector
+ // (and associated uop queue) that can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have no more than 4 (8 for Nehalem and later) branches
+ // taken, and none of them may be calls.
+ // - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+ // According to the Software Optimization Guide for AMD Family 15h Processors,
+ // models 30h-4fh (Steamroller and later) have a loop predictor and loop
+ // buffer which can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have fewer than 16 branches
+ // - The loop must have less than 40 uops in all executed loop branches
+
+ // The number of taken branches in a loop is hard to estimate here, and
+ // benchmarking has revealed that it is better not to be conservative when
+ // estimating the branch count. As a result, we'll ignore the branch limits
+ // until someone finds a case where it matters in practice.
+
+ unsigned MaxOps;
+ const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
+ if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+ MaxOps = PartialUnrollingThreshold;
+ else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
+ MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
+ else
+ return;
+
+ // Scan the loop: don't unroll loops with calls.
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+
+ for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+ if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+ ImmutableCallSite CS(J);
+ if (const Function *F = CS.getCalledFunction()) {
+ if (!TopTTI->isLoweredToCall(F))
+ continue;
+ }
+
+ return;
+ }
+ }
+
+ // Enable runtime and partial unrolling up to the specified size.
+ UP.Partial = UP.Runtime = true;
+ UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
+}
//===----------------------------------------------------------------------===//
//
diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
index f5b51eec05d..6966d616f8e 100644
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -20,6 +20,9 @@ def HaswellModel : SchedMachineModel {
let LoadLatency = 4;
let MispredictPenalty = 16;
+ // Based on the LSD (loop-stream detector) queue size and benchmarking data.
+ let LoopMicroOpBufferSize = 50;
+
// FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
// the scheduler to assign a default model to unrecognized opcodes.
let CompleteModel = 0;
diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
index a58859aa15f..83f053425aa 100644
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -21,6 +21,9 @@ def SandyBridgeModel : SchedMachineModel {
let LoadLatency = 4;
let MispredictPenalty = 16;
+ // Based on the LSD (loop-stream detector) queue size.
+ let LoopMicroOpBufferSize = 28;
+
// FIXME: SSE4 and AVX are unimplemented. This flag is set to allow
// the scheduler to assign a default model to unrecognized opcodes.
let CompleteModel = 0;
diff --git a/llvm/lib/Target/X86/X86ScheduleAtom.td b/llvm/lib/Target/X86/X86ScheduleAtom.td
index ba72f29910f..3256ee7c6e4 100644
--- a/llvm/lib/Target/X86/X86ScheduleAtom.td
+++ b/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -535,5 +535,9 @@ def AtomModel : SchedMachineModel {
let LoadLatency = 3; // Expected cycles, may be overriden by OperandCycles.
let HighLatency = 30;// Expected, may be overriden by OperandCycles.
+ // On the Atom, the throughput for taken branches is 2 cycles. For small
+ // simple loops, expand by a small factor to hide the backedge cost.
+ let LoopMicroOpBufferSize = 10;
+
let Itineraries = AtomItineraries;
}
diff --git a/llvm/lib/Target/X86/X86ScheduleSLM.td b/llvm/lib/Target/X86/X86ScheduleSLM.td
index 509f892deb0..823d10140e3 100644
--- a/llvm/lib/Target/X86/X86ScheduleSLM.td
+++ b/llvm/lib/Target/X86/X86ScheduleSLM.td
@@ -20,6 +20,9 @@ def SLMModel : SchedMachineModel {
let LoadLatency = 3;
let MispredictPenalty = 10;
+ // For small loops, expand by a small factor to hide the backedge cost.
+ let LoopMicroOpBufferSize = 10;
+
// FIXME: SSE4 is unimplemented. This flag is set to allow
// the scheduler to assign a default model to unrecognized opcodes.
let CompleteModel = 0;
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index cad8dfd5221..101574c84c3 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -16,11 +16,8 @@
#include "X86.h"
#include "X86TargetMachine.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -35,13 +32,6 @@ namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}
-static cl::opt<bool>
-UsePartialUnrolling("x86-use-partial-unrolling", cl::init(true),
- cl::desc("Use partial unrolling for some X86 targets"), cl::Hidden);
-static cl::opt<unsigned>
-PartialUnrollingThreshold("x86-partial-unrolling-threshold", cl::init(0),
- cl::desc("Threshold for X86 partial unrolling"), cl::Hidden);
-
namespace {
class X86TTI final : public ImmutablePass, public TargetTransformInfo {
@@ -84,8 +74,6 @@ public:
/// \name Scalar TTI Implementations
/// @{
PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
- void getUnrollingPreferences(Loop *L,
- UnrollingPreferences &UP) const override;
/// @}
@@ -150,70 +138,6 @@ X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}
-void X86TTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
- if (!UsePartialUnrolling)
- return;
- // According to the Intel 64 and IA-32 Architectures Optimization Reference
- // Manual, Intel Core models and later have a loop stream detector
- // (and associated uop queue) that can benefit from partial unrolling.
- // The relevant requirements are:
- // - The loop must have no more than 4 (8 for Nehalem and later) branches
- // taken, and none of them may be calls.
- // - The loop can have no more than 18 (28 for Nehalem and later) uops.
-
- // According to the Software Optimization Guide for AMD Family 15h Processors,
- // models 30h-4fh (Steamroller and later) have a loop predictor and loop
- // buffer which can benefit from partial unrolling.
- // The relevant requirements are:
- // - The loop must have fewer than 16 branches
- // - The loop must have less than 40 uops in all executed loop branches
-
- // The number of taken branches in a loop is hard to estimate here, and
- // benchmarking has revealed that it is better not to be conservative when
- // estimating the branch count. As a result, we'll ignore the branch limits
- // until someone finds a case where it matters in practice.
-
- unsigned MaxOps;
- if (PartialUnrollingThreshold.getNumOccurrences() > 0) {
- MaxOps = PartialUnrollingThreshold;
- } else if (ST->isAtom()) {
- // On the Atom, the throughput for taken branches is 2 cycles. For small
- // simple loops, expand by a small factor to hide the backedge cost.
- MaxOps = 10;
- } else if (ST->hasFSGSBase() && ST->hasXOP() /* Steamroller and later */) {
- MaxOps = 40;
- } else if (ST->hasFMA4() /* Any other recent AMD */) {
- return;
- } else if (ST->hasAVX() || ST->hasSSE42() /* Nehalem and later */) {
- MaxOps = 28;
- } else if (ST->hasSSSE3() /* Intel Core */) {
- MaxOps = 18;
- } else {
- return;
- }
-
- // Scan the loop: don't unroll loops with calls.
- for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
- I != E; ++I) {
- BasicBlock *BB = *I;
-
- for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
- if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
- ImmutableCallSite CS(J);
- if (const Function *F = CS.getCalledFunction()) {
- if (!isLoweredToCall(F))
- continue;
- }
-
- return;
- }
- }
-
- // Enable runtime and partial unrolling up to the specified size.
- UP.Partial = UP.Runtime = true;
- UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
-}
-
unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
if (Vector && !ST->hasSSE1())
return 0;
OpenPOWER on IntegriCloud