summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexandros Lamprineas <alexandros.lamprineas@arm.com>2016-11-09 08:53:07 +0000
committerAlexandros Lamprineas <alexandros.lamprineas@arm.com>2016-11-09 08:53:07 +0000
commit0ee3ec2fe474d4746173518c96b4f8c894e21bf4 (patch)
tree9cbcb1b84ebd27a841fa64899a1cd23be50c1a57
parent8530137de5577bb0d77adeb853261a005ee10207 (diff)
downloadbcm5719-llvm-0ee3ec2fe474d4746173518c96b4f8c894e21bf4.tar.gz
bcm5719-llvm-0ee3ec2fe474d4746173518c96b4f8c894e21bf4.zip
[ARM] Loop Strength Reduction crashes when targeting ARM or Thumb.
Scalar Evolution asserts when not all the operands of an Add Recurrence Expression are loop invariants. Loop Strength Reduction should only create affine Add Recurrences, so that both the start and the step of the expression are loop invariants. Differential Revision: https://reviews.llvm.org/D26185 llvm-svn: 286347
-rw-r--r--llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp6
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/ARM/addrec-is-loop-invariant.ll35
2 files changed, 38 insertions, 3 deletions
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index e28c374549c..75677079e37 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -325,7 +325,7 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
// Look at addrec operands.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
- if (!AR->getStart()->isZero()) {
+ if (!AR->getStart()->isZero() && AR->isAffine()) {
DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
@@ -568,7 +568,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
// Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
- if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
+ if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
IgnoreSignificantBits);
if (!Step) return nullptr;
@@ -3196,7 +3196,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
return nullptr;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
- if (AR->getStart()->isZero())
+ if (AR->getStart()->isZero() || !AR->isAffine())
return S;
const SCEV *Remainder = CollectSubexprs(AR->getStart(),
diff --git a/llvm/test/Transforms/LoopStrengthReduce/ARM/addrec-is-loop-invariant.ll b/llvm/test/Transforms/LoopStrengthReduce/ARM/addrec-is-loop-invariant.ll
new file mode 100644
index 00000000000..261c3cceed6
--- /dev/null
+++ b/llvm/test/Transforms/LoopStrengthReduce/ARM/addrec-is-loop-invariant.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=armv8-eabi -verify-machineinstrs %s -o /dev/null
+
+; This test ensures that Loop Strength Reduction will
+; not create an Add Recurrence Expression if not all
+; its operands are loop invariants.
+
+define void @add_rec_expr() {
+entry:
+ br label %loop0
+
+loop0:
+ %c.0 = phi i32 [ 0, %entry ], [ %inc.0, %loop0 ]
+ %inc.0 = add nuw i32 %c.0, 1
+ br i1 undef, label %loop0, label %bb1
+
+bb1:
+ %mul.0 = mul i32 %c.0, %c.0
+ %gelptr.0 = getelementptr inbounds i16, i16* undef, i32 %mul.0
+ br label %loop1
+
+loop1:
+ %inc.1 = phi i32 [ %inc.2, %bb4 ], [ 0, %bb1 ]
+ %mul.1 = mul i32 %inc.1, %c.0
+ br label %bb3
+
+bb3:
+ %add.0 = add i32 undef, %mul.1
+ %gelptr.1 = getelementptr inbounds i16, i16* %gelptr.0, i32 %add.0
+ store i16 undef, i16* %gelptr.1, align 2
+ br label %bb4
+
+bb4:
+ %inc.2 = add nuw i32 %inc.1, 1
+ br label %loop1
+}
OpenPOWER on IntegriCloud