| field | value | date |
|---|---|---|
| author | Sanjoy Das <sanjoy@playingwithpointers.com> | 2015-02-25 20:02:59 +0000 |
| committer | Sanjoy Das <sanjoy@playingwithpointers.com> | 2015-02-25 20:02:59 +0000 |
| commit | dcc84db2641eaff6efece8502ccc5f3863ef9356 (patch) | |
| tree | d7b3f768f3dc130cc5a3b27c640e23f815c434fb /llvm/lib/Analysis | |
| parent | f42e1eca9fe959059079abf0dda590c23fddfb14 (diff) | |
| download | bcm5719-llvm-dcc84db2641eaff6efece8502ccc5f3863ef9356.tar.gz, bcm5719-llvm-dcc84db2641eaff6efece8502ccc5f3863ef9356.zip | |
Bugfix: SCEVExpander incorrectly marks increment operations as no-wrap
(The change was landed in r230280 and caused the regression PR22674.
This version contains a fix and a test-case for PR22674).
When emitting the increment operation, SCEVExpander marks the
operation as nuw or nsw based on the flags on the preincrement SCEV.
This is incorrect because, for instance, it is possible that {-6,+,1}
is <nuw> while {-6,+,1}+1 = {-5,+,1} is not.
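
To make the overflow concrete, consider an 8-bit version of the same recurrence: the pre-increment values of {-6,+,1} can stay below the unsigned maximum for the whole loop, while the post-increment values {-5,+,1} wrap on the last iteration. The sketch below is illustrative only; the 8-bit width and the 6-iteration trip count are assumptions, not part of the patch:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Model {-6,+,1} in 8 bits: start = 250 (-6 as an unsigned byte), step = 1,
  // and an assumed trip count of 6 iterations.
  uint8_t iv = static_cast<uint8_t>(-6); // 250
  for (int i = 0; i < 6; ++i) {
    // Pre-increment values: 250, 251, ..., 255 -- these never wrap, so the
    // recurrence {-6,+,1} can legitimately carry <nuw> for this trip count.
    std::printf("pre-increment:  %u\n", static_cast<unsigned>(iv));

    // Post-increment values ({-5,+,1} = {-6,+,1} + 1): 251, ..., 255, 0 --
    // the final add wraps, so copying <nuw> onto the add instruction is wrong.
    uint8_t next = static_cast<uint8_t>(iv + 1);
    std::printf("post-increment: %u\n", static_cast<unsigned>(next));
    iv = next;
  }
  return 0;
}
```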
This change teaches SCEVExpander to mark the increment as nuw/nsw only if it
can explicitly prove that the increment operation won't overflow.
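
The proof commutes the extension with the addition: extend the addrec and the step into a type twice as wide, add them there, and compare the result with the widened narrow add; the two expressions agree exactly when the narrow add cannot overflow. A minimal sketch of that arithmetic fact, using plain 8-/16-bit integers rather than SCEV expressions (the function names and widths are illustrative assumptions, not part of the patch):

```cpp
#include <cstdint>

// True iff a + b does not unsigned-overflow in 8 bits, proved by performing
// the addition in a type twice as wide and comparing against the widened
// narrow result (the idea IsIncrementNUW applies symbolically to SCEVs).
static bool addIsNUW8(uint8_t a, uint8_t b) {
  uint16_t opAfterExtend = uint16_t{a} + uint16_t{b};    // zext, then add
  uint16_t extendAfterOp = static_cast<uint8_t>(a + b);  // add, then zext
  return opAfterExtend == extendAfterOp;
}

// Signed variant: the add is nsw iff sign-extending before or after the
// addition yields the same wide value (mirrors IsIncrementNSW). The narrowing
// conversion wraps modularly (well-defined since C++20).
static bool addIsNSW8(int8_t a, int8_t b) {
  int16_t opAfterExtend = int16_t{a} + int16_t{b};                  // sext, then add
  int16_t extendAfterOp = static_cast<int8_t>(a + b);               // add, then sext
  return opAfterExtend == extendAfterOp;
}

int main() {
  // 250 + 5 = 255 fits in 8 bits; 250 + 6 wraps to 0.
  bool ok = addIsNUW8(250, 5) && !addIsNUW8(250, 6) && addIsNSW8(100, 20);
  return ok ? 0 : 1;
}
```

The helpers added in the patch below (IsIncrementNSW and IsIncrementNUW) run the same comparison symbolically, via getSignExtendExpr/getZeroExtendExpr and getAddExpr on the addrec and its step.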
Apart from the attached test case, another (more realistic)
manifestation of the bug can be seen in
Transforms/IndVarSimplify/pr20680.ll.
Differential Revision: http://reviews.llvm.org/D7778
llvm-svn: 230533
Diffstat (limited to 'llvm/lib/Analysis')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 36 |

1 file changed, 34 insertions(+), 2 deletions(-)
```diff
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 59f19a002ec..61527283f68 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1063,6 +1063,34 @@ static bool canBeCheaplyTransformed(ScalarEvolution &SE,
   return false;
 }
 
+static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
+  if (!isa<IntegerType>(AR->getType()))
+    return false;
+
+  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
+  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
+  const SCEV *Step = AR->getStepRecurrence(SE);
+  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
+                                            SE.getSignExtendExpr(AR, WideTy));
+  const SCEV *ExtendAfterOp =
+      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
+  return ExtendAfterOp == OpAfterExtend;
+}
+
+static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
+  if (!isa<IntegerType>(AR->getType()))
+    return false;
+
+  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
+  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
+  const SCEV *Step = AR->getStepRecurrence(SE);
+  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
+                                            SE.getZeroExtendExpr(AR, WideTy));
+  const SCEV *ExtendAfterOp =
+      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
+  return ExtendAfterOp == OpAfterExtend;
+}
+
 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
 /// the base addrec, which is the addrec without any non-loop-dominating
 /// values, and return the PHI.
@@ -1154,6 +1182,9 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
     }
   }
 
+  bool IncrementIsNUW = IsIncrementNUW(SE, Normalized);
+  bool IncrementIsNSW = IsIncrementNSW(SE, Normalized);
+
   // Save the original insertion point so we can restore it when we're done.
   BuilderType::InsertPointGuard Guard(Builder);
 
@@ -1213,10 +1244,11 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                            IVIncInsertPos : Pred->getTerminator();
     Builder.SetInsertPoint(InsertPos);
     Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+
     if (isa<OverflowingBinaryOperator>(IncV)) {
-      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
+      if (IncrementIsNUW)
         cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
-      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
+      if (IncrementIsNSW)
         cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
     }
     PN->addIncoming(IncV, Pred);
```

