author    NAKAMURA Takumi <geek4civic@gmail.com>  2015-02-18 08:36:14 +0000
committer NAKAMURA Takumi <geek4civic@gmail.com>  2015-02-18 08:36:14 +0000
commit    a250484c4c7a7b1c88b5383fc922f64a7f70e40e (patch)
tree      de6da6151f23c2acc85d6b260f5e1f06907c62ff /llvm/lib
parent    fa520c5f494ddb4584b2b2cff255c81ed7a5c7f5 (diff)
Reformat.
llvm-svn: 229651
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp        | 86
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 50
2 files changed, 63 insertions(+), 73 deletions(-)
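The hunks below are pure layout changes in the LLVM house style: when a streaming expression wraps, the << operator moves from the end of the broken line to the start of the continuation line, aligned under the first stream operand. A minimal before/after sketch of the convention, using an excerpt taken directly from the patch (not standalone code; DEBUG and dbgs() come from LLVM's Debug.h):

    // Before: the << operator trails the broken line and the continuation
    // is indented arbitrarily.
    DEBUG(dbgs() << "LV: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *PtrScev << "\n");

    // After: the << operator leads the continuation line, aligned under
    // the first operand fed to dbgs().
    DEBUG(dbgs() << "LV: Bad stride - Not a constant strided " << *Ptr
                 << " SCEV: " << *PtrScev << "\n");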
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 35c5807be08..521b4e87fe5 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -302,7 +302,7 @@ bool AccessAnalysis::canCheckPtrAtRT(
unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
if (ASi != ASj) {
DEBUG(dbgs() << "LV: Runtime check would require comparison between"
- " different address spaces\n");
+ " different address spaces\n");
return false;
}
}
@@ -553,8 +553,8 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
// Make sure that the pointer does not point to aggregate types.
const PointerType *PtrTy = cast<PointerType>(Ty);
if (PtrTy->getElementType()->isAggregateType()) {
- DEBUG(dbgs() << "LV: Bad stride - Not a pointer to a scalar type" << *Ptr <<
- "\n");
+ DEBUG(dbgs() << "LV: Bad stride - Not a pointer to a scalar type" << *Ptr
+ << "\n");
return 0;
}
@@ -562,15 +562,15 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
if (!AR) {
- DEBUG(dbgs() << "LV: Bad stride - Not an AddRecExpr pointer "
- << *Ptr << " SCEV: " << *PtrScev << "\n");
+ DEBUG(dbgs() << "LV: Bad stride - Not an AddRecExpr pointer " << *Ptr
+ << " SCEV: " << *PtrScev << "\n");
return 0;
}
// The access function must stride over the innermost loop.
if (Lp != AR->getLoop()) {
- DEBUG(dbgs() << "LV: Bad stride - Not striding over innermost loop " <<
- *Ptr << " SCEV: " << *PtrScev << "\n");
+ DEBUG(dbgs() << "LV: Bad stride - Not striding over innermost loop " << *Ptr
+ << " SCEV: " << *PtrScev << "\n");
}
// The address calculation must not wrap. Otherwise, a dependence could be
@@ -585,7 +585,7 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
DEBUG(dbgs() << "LV: Bad stride - Pointer may wrap in the address space "
- << *Ptr << " SCEV: " << *PtrScev << "\n");
+ << *Ptr << " SCEV: " << *PtrScev << "\n");
return 0;
}
@@ -595,8 +595,8 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
// Calculate the pointer stride and check if it is consecutive.
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
if (!C) {
- DEBUG(dbgs() << "LV: Bad stride - Not a constant strided " << *Ptr <<
- " SCEV: " << *PtrScev << "\n");
+ DEBUG(dbgs() << "LV: Bad stride - Not a constant strided " << *Ptr
+ << " SCEV: " << *PtrScev << "\n");
return 0;
}
@@ -638,8 +638,9 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
// Store-load forwarding distance.
const unsigned NumCyclesForStoreLoadThroughMemory = 8*TypeByteSize;
// Maximum vector factor.
- unsigned MaxVFWithoutSLForwardIssues = VectParams.MaxVectorWidth*TypeByteSize;
- if(MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
+ unsigned MaxVFWithoutSLForwardIssues =
+ VectParams.MaxVectorWidth * TypeByteSize;
+ if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;
for (unsigned vf = 2*TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
@@ -650,14 +651,14 @@ bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
}
}
- if (MaxVFWithoutSLForwardIssues< 2*TypeByteSize) {
- DEBUG(dbgs() << "LV: Distance " << Distance <<
- " that could cause a store-load forwarding conflict\n");
+ if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
+ DEBUG(dbgs() << "LV: Distance " << Distance
+ << " that could cause a store-load forwarding conflict\n");
return true;
}
if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
- MaxVFWithoutSLForwardIssues != VectParams.MaxVectorWidth*TypeByteSize)
+ MaxVFWithoutSLForwardIssues != VectParams.MaxVectorWidth * TypeByteSize)
MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
return false;
}
@@ -705,9 +706,9 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const SCEV *Dist = SE->getMinusSCEV(Sink, Src);
DEBUG(dbgs() << "LV: Src Scev: " << *Src << "Sink Scev: " << *Sink
- << "(Induction step: " << StrideAPtr << ")\n");
+ << "(Induction step: " << StrideAPtr << ")\n");
DEBUG(dbgs() << "LV: Distance for " << *InstMap[AIdx] << " to "
- << *InstMap[BIdx] << ": " << *Dist << "\n");
+ << *InstMap[BIdx] << ": " << *Dist << "\n");
// Need consecutive accesses. We don't want to vectorize
// "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
@@ -754,18 +755,19 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
// Positive distance bigger than max vectorization factor.
if (ATy != BTy) {
- DEBUG(dbgs() <<
- "LV: ReadWrite-Write positive dependency with different types\n");
+ DEBUG(dbgs()
+ << "LV: ReadWrite-Write positive dependency with different types\n");
return false;
}
unsigned Distance = (unsigned) Val.getZExtValue();
// Bail out early if passed-in parameters make vectorization not feasible.
- unsigned ForcedFactor = (VectParams.VectorizationFactor ?
- VectParams.VectorizationFactor : 1);
- unsigned ForcedUnroll = (VectParams.VectorizationInterleave ?
- VectParams.VectorizationInterleave : 1);
+ unsigned ForcedFactor =
+ (VectParams.VectorizationFactor ? VectParams.VectorizationFactor : 1);
+ unsigned ForcedUnroll =
+ (VectParams.VectorizationInterleave ? VectParams.VectorizationInterleave
+ : 1);
// The distance must be bigger than the size needed for a vectorized version
// of the operation and the size of the vectorized operation must not be
@@ -774,7 +776,7 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2*TypeByteSize > MaxSafeDepDistBytes ||
Distance < TypeByteSize * ForcedUnroll * ForcedFactor) {
DEBUG(dbgs() << "LV: Failure because of Positive distance "
- << Val.getSExtValue() << '\n');
+ << Val.getSExtValue() << '\n');
return true;
}
@@ -786,8 +788,9 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
couldPreventStoreLoadForward(Distance, TypeByteSize))
return true;
- DEBUG(dbgs() << "LV: Positive distance " << Val.getSExtValue() <<
- " with max VF = " << MaxSafeDepDistBytes / TypeByteSize << '\n');
+ DEBUG(dbgs() << "LV: Positive distance " << Val.getSExtValue()
+ << " with max VF = " << MaxSafeDepDistBytes / TypeByteSize
+ << '\n');
return false;
}
@@ -886,8 +889,8 @@ bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
if (it->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(it);
if (!St) {
- emitAnalysis(VectorizationReport(it) <<
- "instruction cannot be vectorized");
+ emitAnalysis(VectorizationReport(it)
+ << "instruction cannot be vectorized");
return false;
}
if (!St->isSimple() && !IsAnnotatedParallel) {
@@ -953,9 +956,8 @@ bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
}
if (IsAnnotatedParallel) {
- DEBUG(dbgs()
- << "LV: A loop annotated parallel, ignore memory dependency "
- << "checks.\n");
+ DEBUG(dbgs() << "LV: A loop annotated parallel, ignore memory dependency "
+ << "checks.\n");
return true;
}
@@ -1007,8 +1009,8 @@ bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop,
Strides);
- DEBUG(dbgs() << "LV: We need to do " << NumComparisons <<
- " pointer comparisons.\n");
+ DEBUG(dbgs() << "LV: We need to do " << NumComparisons
+ << " pointer comparisons.\n");
// If we only have one set of dependences to check pointers among we don't
// need a runtime check.
@@ -1028,8 +1030,8 @@ bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
if (NeedRTCheck && !CanDoRT) {
emitAnalysis(VectorizationReport() << "cannot identify array bounds");
- DEBUG(dbgs() << "LV: We can't vectorize because we can't find " <<
- "the array bounds.\n");
+ DEBUG(dbgs() << "LV: We can't vectorize because we can't find "
+ << "the array bounds.\n");
PtrRtCheck.reset();
return false;
}
@@ -1076,11 +1078,11 @@ bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
}
if (!CanVecMem)
- emitAnalysis(VectorizationReport() <<
- "unsafe dependent memory operations in loop");
+ emitAnalysis(VectorizationReport()
+ << "unsafe dependent memory operations in loop");
- DEBUG(dbgs() << "LV: We" << (NeedRTCheck ? "" : " don't") <<
- " need a runtime memory check.\n");
+ DEBUG(dbgs() << "LV: We" << (NeedRTCheck ? "" : " don't")
+ << " need a runtime memory check.\n");
return CanVecMem;
}
@@ -1132,8 +1134,8 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) {
const SCEV *Sc = SE->getSCEV(Ptr);
if (SE->isLoopInvariant(Sc, TheLoop)) {
- DEBUG(dbgs() << "LV: Adding RT check for a loop invariant ptr:" <<
- *Ptr <<"\n");
+ DEBUG(dbgs() << "LV: Adding RT check for a loop invariant ptr:" << *Ptr
+ << "\n");
Starts.push_back(Ptr);
Ends.push_back(Ptr);
} else {
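The LoopVectorize.cpp diff below continues the same style pass and additionally collapses trivial accessors onto a single line once they fit within the 80-column limit. Diffs of this shape are typically what clang-format produces under the LLVM style, though the commit message does not say how the change was generated. The convention, sketched with a getter taken from the patch itself:

    // Before the reformat: a three-line body for a trivial forwarding getter.
    unsigned getNumStores() const {
      return LAI.getNumStores();
    }

    // After the reformat: the whole definition fits in 80 columns,
    // so it sits on one line.
    unsigned getNumStores() const { return LAI.getNumStores(); }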
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1ddb45e5df6..3945807fee7 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -107,13 +107,13 @@ STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
static cl::opt<unsigned>
-VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden,
- cl::desc("Sets the SIMD width. Zero is autoselect."));
+ VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden,
+ cl::desc("Sets the SIMD width. Zero is autoselect."));
static cl::opt<unsigned>
-VectorizationInterleave("force-vector-interleave", cl::init(0), cl::Hidden,
- cl::desc("Sets the vectorization interleave count. "
- "Zero is autoselect."));
+ VectorizationInterleave("force-vector-interleave", cl::init(0), cl::Hidden,
+ cl::desc("Sets the vectorization interleave count. "
+ "Zero is autoselect."));
static cl::opt<bool>
EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
@@ -548,9 +548,8 @@ public:
DominatorTree *DT, TargetLibraryInfo *TLI,
AliasAnalysis *AA, Function *F,
const TargetTransformInfo *TTI)
- : NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
- TLI(TLI), TheFunction(F), TTI(TTI), DT(DT), Induction(nullptr),
- WidestIndTy(nullptr),
+ : NumPredStores(0), TheLoop(L), SE(SE), DL(DL), TLI(TLI), TheFunction(F),
+ TTI(TTI), DT(DT), Induction(nullptr), WidestIndTy(nullptr),
LAI(F, L, SE, DL, TLI, AA, DT,
LoopAccessInfo::VectorizerParams(
MaxVectorWidth, VectorizationFactor, VectorizationInterleave,
@@ -744,9 +743,7 @@ public:
return LAI.getRuntimePointerCheck();
}
- LoopAccessInfo *getLAI() {
- return &LAI;
- }
+ LoopAccessInfo *getLAI() { return &LAI; }
/// This function returns the identity element (or neutral element) for
/// the operation K.
@@ -773,18 +770,11 @@ public:
}
/// Returns true if vector representation of the instruction \p I
/// requires mask.
- bool isMaskRequired(const Instruction* I) {
- return (MaskedOp.count(I) != 0);
- }
- unsigned getNumStores() const {
- return LAI.getNumStores();
- }
- unsigned getNumLoads() const {
- return LAI.getNumLoads();
- }
- unsigned getNumPredStores() const {
- return NumPredStores;
- }
+ bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
+ unsigned getNumStores() const { return LAI.getNumStores(); }
+ unsigned getNumLoads() const { return LAI.getNumLoads(); }
+ unsigned getNumPredStores() const { return NumPredStores; }
+
private:
/// Check if a single basic block loop is vectorizable.
/// At this point we know that this is a loop with a constant trip count
@@ -875,7 +865,7 @@ private:
SmallPtrSet<Value*, 4> AllowedExit;
/// This set holds the variables which are known to be uniform after
/// vectorization.
- SmallPtrSet<Instruction*, 4> Uniforms;
+ SmallPtrSet<Instruction *, 4> Uniforms;
LoopAccessInfo LAI;
/// Can we assume the absence of NaNs.
bool HasFunNoNaNAttr;
@@ -1659,9 +1649,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
return 0;
}
-bool LoopVectorizationLegality::isUniform(Value *V) {
- return LAI.isUniform(V);
-}
+bool LoopVectorizationLegality::isUniform(Value *V) { return LAI.isUniform(V); }
InnerLoopVectorizer::VectorParts&
InnerLoopVectorizer::getVectorValue(Value *V) {
@@ -3399,10 +3387,10 @@ bool LoopVectorizationLegality::canVectorize() {
// Collect all of the variables that remain uniform after vectorization.
collectLoopUniforms();
- DEBUG(dbgs() << "LV: We can vectorize this loop" <<
- (LAI.getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
- "")
- <<"!\n");
+ DEBUG(dbgs() << "LV: We can vectorize this loop"
+ << (LAI.getRuntimePointerCheck()->Need
+ ? " (with a runtime bound check)"
+ : "") << "!\n");
// Okay! We can vectorize. At this point we don't have any other mem analysis
// which may limit our maximum vectorization factor, so just return true with