path: root/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
author    Jonas Paulsson <paulsson@linux.vnet.ibm.com>  2018-10-30 14:34:15 +0000
committer Jonas Paulsson <paulsson@linux.vnet.ibm.com>  2018-10-30 14:34:15 +0000
commit    1f067c94dc003206d32fcdea3d63a004c3dc8ae7 (patch)
tree      c8bc80dcf6952961a978c12975763153d8ff6f7a /llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
parent    8b207defeac47d7394cb1e68011b031729d77791 (diff)
[LoopVectorizer] Fix for cost values of memory accesses.
This commit is a combination of two patches:

* "Fix in getScalarizationOverhead()"
  If the target returns false in TTI.prefersVectorizedAddressing(), it means the address registers will not need to be extracted. Therefore, there should be no operands scalarization overhead for a load instruction.

* "Don't pass the instruction pointer from getMemInstScalarizationCost."
  Since VF is always > 1, this is a cost query for an instruction in the vectorized loop, and it should not be evaluated within the scalar context of the instruction.

Review: Ulrich Weigand, Hal Finkel
https://reviews.llvm.org/D52351
https://reviews.llvm.org/D52417

llvm-svn: 345603
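For illustration only, below is a minimal standalone C++ sketch of the effect of the first fix. The function name and unit costs are made up for the example and are not LLVM's real cost model; the sketch only mirrors the shape of the added early return: when a target reports prefersVectorizedAddressing() == false, a scalarized load pays no extraction cost for its address operand.

#include <iostream>

// Toy model of the scalarization overhead for one memory instruction.
// All costs are assumed unit costs, not values from TargetTransformInfo.
static unsigned scalarizationOverheadSketch(unsigned VF, bool IsLoad,
                                            bool PrefersVectorizedAddressing) {
  unsigned Cost = VF; // insertelements needed to rebuild the result vector
  // Mirrors the added check: if addresses stay scalar, a load has no vector
  // operands that would need to be extracted element by element.
  if (IsLoad && !PrefersVectorizedAddressing)
    return Cost;
  return Cost + VF; // otherwise, one extractelement per address lane
}

int main() {
  std::cout << scalarizationOverheadSketch(4, true, true) << "\n";  // 8
  std::cout << scalarizationOverheadSketch(4, true, false) << "\n"; // 4
}

On a target that keeps addresses in scalar registers (such as SystemZ, which motivated this change), the lower number is the accurate one, so scalarized loads no longer look artificially expensive to the vectorizer.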
Diffstat (limited to 'llvm/lib/Transforms/Vectorize/LoopVectorize.cpp')
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f0a07eddc3b..006c13c233e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2982,6 +2982,10 @@ static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
        !TTI.supportsEfficientVectorElementLoadStore()))
     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
 
+  // Some targets keep addresses scalar.
+  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
+    return Cost;
+
   if (CallInst *CI = dyn_cast<CallInst>(I)) {
     SmallVector<const Value *, 4> Operands(CI->arg_operands());
     Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
@@ -5372,6 +5376,7 @@ static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
 
 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                                  unsigned VF) {
+  assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
   Type *ValTy = getMemInstValueType(I);
   auto SE = PSE.getSE();
 
@@ -5387,9 +5392,11 @@ unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
   // Get the cost of the scalar memory instruction and address computation.
   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
 
+  // Don't pass *I here, since it is scalar but will actually be part of a
+  // vectorized loop where the user of it is a vectorized instruction.
   Cost += VF *
           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
-                              AS, I);
+                              AS);
 
   // Get the overhead of the extractelement and insertelement instructions
   // we might create due to scalarization.