From 9d9cb274ea8830e0163e58a2fe0ff2b20ea367d3 Mon Sep 17 00:00:00 2001
From: Adam Nemet
Date: Thu, 18 Feb 2016 21:38:19 +0000
Subject: [PPCLoopDataPrefetch] Move pass to Transforms/Scalar/LoopDataPrefetch. NFC

This patch is part of the work to make PPCLoopDataPrefetch
target-independent
(http://thread.gmane.org/gmane.comp.compilers.llvm.devel/92758).

Obviously the pass is still only used from PPC at this point. Subsequent
patches will start driving this from ARM64 as well.

Due to the previous patch, most lines should show up as moved lines.

llvm-svn: 261265
---
 llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp | 226 ++++++++++++++++++++++++
 1 file changed, 226 insertions(+)
 create mode 100644 llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp

diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
new file mode 100644
index 00000000000..0edceec8e13
--- /dev/null
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -0,0 +1,226 @@
//===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a Loop Data Prefetching Pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-data-prefetch"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// By default, we limit this to creating 16 PHIs (which is a little over half
// of the allocatable register set).
static cl::opt<bool>
PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false),
               cl::desc("Prefetch write addresses"));

namespace llvm {
  void initializeLoopDataPrefetchPass(PassRegistry&);
}

namespace {

  class LoopDataPrefetch : public FunctionPass {
  public:
    static char ID; // Pass ID, replacement for typeid
    LoopDataPrefetch() : FunctionPass(ID) {
      initializeLoopDataPrefetchPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AssumptionCacheTracker>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      // FIXME: For some reason, preserving SE here breaks LSR (even if
      // this pass changes nothing).
      // AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addRequired<TargetTransformInfoWrapperPass>();
    }

    bool runOnFunction(Function &F) override;
    bool runOnLoop(Loop *L);

  private:
    AssumptionCache *AC;
    LoopInfo *LI;
    ScalarEvolution *SE;
    const TargetTransformInfo *TTI;
    const DataLayout *DL;
  };
}

char LoopDataPrefetch::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDataPrefetch, "loop-data-prefetch",
                      "Loop Data Prefetch", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopDataPrefetch, "loop-data-prefetch",
                    "Loop Data Prefetch", false, false)

FunctionPass *llvm::createLoopDataPrefetchPass() { return new LoopDataPrefetch(); }

bool LoopDataPrefetch::runOnFunction(Function &F) {
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DL = &F.getParent()->getDataLayout();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  assert(TTI->getCacheLineSize() && "Cache line size is not set for target");
  assert(TTI->getPrefetchDistance() &&
         "Prefetch distance is not set for target");

  bool MadeChange = false;

  for (auto I = LI->begin(), IE = LI->end(); I != IE; ++I)
    for (auto L = df_begin(*I), LE = df_end(*I); L != LE; ++L)
      MadeChange |= runOnLoop(*L);

  return MadeChange;
}

bool LoopDataPrefetch::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prefetch in the inner-most loop
  if (!L->empty())
    return MadeChange;

  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  // Calculate the number of iterations ahead to prefetch
  CodeMetrics Metrics;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {

    // If the loop already has prefetches, then assume that the user knows
    // what he or she is doing and don't add any more.
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
         J != JE; ++J)
      if (CallInst *CI = dyn_cast<CallInst>(J))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::prefetch)
            return MadeChange;

    Metrics.analyzeBasicBlock(*I, *TTI, EphValues);
  }
  unsigned LoopSize = Metrics.NumInsts;
  if (!LoopSize)
    LoopSize = 1;

  unsigned ItersAhead = TTI->getPrefetchDistance() / LoopSize;
  if (!ItersAhead)
    ItersAhead = 1;
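  // Worked example of the computation above (hypothetical numbers, not taken
  // from any real target): with TTI->getPrefetchDistance() == 64 instructions
  // and a 16-instruction loop body, ItersAhead == 64 / 16 == 4, i.e. each
  // prefetch targets the address the access will touch four iterations from
  // now.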
  SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
         J != JE; ++J) {
      Value *PtrValue;
      Instruction *MemI;

      if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
        if (!PrefetchWrites) continue;
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else continue;

      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;

      if (L->isLoopInvariant(PtrValue))
        continue;

      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LSCEVAddRec)
        continue;

      // We don't want to double prefetch individual cache lines. If this load
      // is known to be within one cache line of some other load that has
      // already been prefetched, then don't prefetch this one as well.
      bool DupPref = false;
      for (SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>,
             16>::iterator K = PrefLoads.begin(), KE = PrefLoads.end();
           K != KE; ++K) {
        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, K->second);
        if (const SCEVConstant *ConstPtrDiff =
            dyn_cast<SCEVConstant>(PtrDiff)) {
          int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
          if (PD < (int64_t) TTI->getCacheLineSize()) {
            DupPref = true;
            break;
          }
        }
      }
      if (DupPref)
        continue;
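      // Example of the check above (assuming a 64-byte cache line): if the
      // loop loads both A[i] and A[i+2] with 8-byte elements, their SCEVs
      // differ by the constant 16; since 16 < 64, the second access is
      // assumed to be covered by the prefetch already issued for the first
      // and is skipped.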
      const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr(
        SE->getConstant(LSCEVAddRec->getType(), ItersAhead),
        LSCEVAddRec->getStepRecurrence(*SE)));
      if (!isSafeToExpand(NextLSCEV, *SE))
        continue;

      PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));

      Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
      SCEVExpander SCEVE(*SE, J->getModule()->getDataLayout(), "prefaddr");
      Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);

      IRBuilder<> Builder(MemI);
      Module *M = (*I)->getParent()->getParent();
      Type *I32 = Type::getInt32Ty((*I)->getContext());
      Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch);
      Builder.CreateCall(
          PrefetchFunc,
          {PrefPtrValue,
           ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
           ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});

      MadeChange = true;
    }
  }

  return MadeChange;
}
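For reference, the rewrite this pass performs corresponds roughly to the
source-level sketch below. This is an illustrative analogy only (the function,
the array name, and the distance of 4 are made up); the pass itself operates
on LLVM IR and emits llvm.prefetch intrinsic calls, with rw = 0 for loads,
locality = 3, and cache type = 1 (data), as in runOnLoop above.

// Illustrative analogy only: a source-level view of the transformation,
// assuming a hypothetical ItersAhead of 4 (the real value is
// TTI->getPrefetchDistance() / LoopSize).
void sum(const double *A, double *Out, long N) {
  double S = 0.0;
  for (long i = 0; i < N; ++i) {
    // What the pass inserts: prefetch the address this load will touch four
    // iterations from now (rw = 0 for a read, locality = 3).
    __builtin_prefetch(&A[i + 4], /*rw=*/0, /*locality=*/3);
    S += A[i]; // The original strided access.
  }
  *Out = S;
}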