author    Silviu Baranga <silviu.baranga@arm.com>  2016-04-08 14:29:09 +0000
committer Silviu Baranga <silviu.baranga@arm.com>  2016-04-08 14:29:09 +0000
commit    6f444dfd5517d03a750918eaee4ef9f82a11c268 (patch)
tree      a1011268d57c4ccf1cc01fd74152758c0c6bd524 /llvm/test/Transforms
parent    476170384fa92eaf1258f784709b6907041d313a (diff)
Re-commit [SCEV] Introduce a guarded backedge taken count and use it in LAA and LV
This re-commits r265535, which was reverted in r265541 because it broke the Windows bots. The problem was that we had a PointerIntPair taking a pointer to a struct allocated with new, and new does not provide sufficient alignment guarantees. This pattern was already present before r265535 and just happened to work. To fix this, the PointerIntPair in the ExitNotTakenInfo struct is now split into a separate pointer and a bool.

Original commit message:

Summary:
When the backedge taken condition is computed from an icmp, SCEV can deduce the backedge taken count only if one of the sides of the icmp is an AddRecExpr. However, due to sign/zero extensions, we sometimes end up with something that is not an AddRecExpr.

In that case we can use SCEV predicates to produce a 'guarded' expression. This change adds a method to SCEV that returns this expression together with the SCEV predicate associated with it.

In HowManyGreaterThans and HowManyLessThans we will now add a SCEV predicate associated with the guarded backedge taken count when the analyzed SCEV expression is not an AddRecExpr. Note that we only do this as an alternative to returning a 'CouldNotCompute'.

We use this new feature in Loop Access Analysis and LoopVectorize to analyze and transform more loops.

Reviewers: anemet, mzolotukhin, hfinkel, sanjoy

Subscribers: flyingforyou, mcrosier, atrick, mssimpso, sanjoy, mzolotukhin, llvm-commits

Differential Revision: http://reviews.llvm.org/D17201

llvm-svn: 265786
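To make the alignment issue concrete, here is a minimal, self-contained sketch. The names are assumptions for illustration, not LLVM's actual PointerIntPair or ExitNotTakenInfo; it only shows why packing a bool into the low bit of a pointer depends on an alignment guarantee, and the shape of the fix, which keeps the pointer and the bool as two separate fields.

// A minimal, self-contained sketch (assumed names, not LLVM's real classes)
// of why the reverted pattern was fragile and what the fix looks like.
#include <cassert>
#include <cstdint>

struct Payload { unsigned Value; }; // hypothetical stand-in for the pointee struct

// Packs a bool into bit 0 of the pointer, as PointerIntPair does. This only
// works if every Payload object is at least 2-byte aligned; a plain
// 'new Payload' promises no more than the type's natural alignment, which is
// the insufficient guarantee the commit message refers to.
class PackedPtrBool {
  uintptr_t Bits = 0;
public:
  void set(Payload *P, bool B) {
    assert((reinterpret_cast<uintptr_t>(P) & 1) == 0 && "low bit must be free");
    Bits = reinterpret_cast<uintptr_t>(P) | uintptr_t(B);
  }
  Payload *pointer() const { return reinterpret_cast<Payload *>(Bits & ~uintptr_t(1)); }
  bool flag() const { return (Bits & 1) != 0; }
};

// The re-commit's fix, in spirit: store the two pieces separately, so no
// alignment assumption is needed on the pointee.
struct SplitPtrBool {
  Payload *Ptr = nullptr;
  bool Flag = false;
};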
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll        | 166
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll |   3
2 files changed, 168 insertions(+), 1 deletion(-)
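Before the patch itself, here is a hedged sketch of how a client pass such as Loop Access Analysis or the Loop Vectorizer might consume the guarded backedge taken count described in the commit message. The method name getPredicatedBackedgeTakenCount and the SCEVUnionPredicate out-parameter are assumptions inferred from that description, not verified against this exact revision.

#include "llvm/Analysis/ScalarEvolution.h"

using namespace llvm;

// Returns true if SCEV can provide a trip count, either exactly or under the
// predicates accumulated in Preds (assumed interface; see note above).
static bool hasUsableBackedgeTakenCount(ScalarEvolution &SE, const Loop *L,
                                        SCEVUnionPredicate &Preds) {
  // Plain query: gives up (SCEVCouldNotCompute) when the exit condition is an
  // icmp against a loop-varying non-AddRec expression, as in the tests below.
  const SCEV *Exact = SE.getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(Exact))
    return true;

  // Guarded query: the result is valid only under Preds (e.g. "the narrow
  // induction variable does not wrap"). The vectorizer versions the loop and
  // emits those predicates as runtime checks, which is the vector.scevcheck
  // block the new test looks for.
  const SCEV *Guarded = SE.getPredicatedBackedgeTakenCount(L, Preds);
  return !isa<SCEVCouldNotCompute>(Guarded);
}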
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll b/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll
new file mode 100644
index 00000000000..aba47f6c628
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/backedge-overflow.ll
@@ -0,0 +1,166 @@
+; RUN: opt -mtriple=aarch64--linux-gnueabi -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 < %s -S | FileCheck %s
+
+; The following tests contain loops for which SCEV cannot determine the backedge
+; taken count. This is because the backedge taken condition is produced by an
+; icmp with one of the sides being a loop-varying non-AddRec expression.
+; However, it is possible to normalize this to an AddRec expression
+; using SCEV predicates. This allows us to compute a 'guarded' backedge count.
+; The Loop Vectorizer is able to version the loop in order to use this guarded
+; backedge count and vectorize more loops.
+
+
+; CHECK-LABEL: test_sge
+; CHECK-LABEL: vector.scevcheck
+; CHECK-LABEL: vector.body
+define void @test_sge(i32* noalias %A,
+ i32* noalias %B,
+ i32* noalias %C, i32 %N) {
+entry:
+ %cmp13 = icmp eq i32 %N, 0
+ br i1 %cmp13, label %for.end, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i16 [ %indvars.next, %for.body ], [ 0, %for.body.preheader ]
+ %indvars.next = add i16 %indvars.iv, 1
+ %indvars.ext = zext i16 %indvars.iv to i32
+
+ %arrayidx = getelementptr inbounds i32, i32* %B, i32 %indvars.ext
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %C, i32 %indvars.ext
+ %1 = load i32, i32* %arrayidx3, align 4
+
+ %mul4 = mul i32 %1, %0
+
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %indvars.ext
+ store i32 %mul4, i32* %arrayidx7, align 4
+
+ %exitcond = icmp sge i32 %indvars.ext, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: test_uge
+; CHECK-LABEL: vector.scevcheck
+; CHECK-LABEL: vector.body
+define void @test_uge(i32* noalias %A,
+ i32* noalias %B,
+ i32* noalias %C, i32 %N, i32 %Offset) {
+entry:
+ %cmp13 = icmp eq i32 %N, 0
+ br i1 %cmp13, label %for.end, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i16 [ %indvars.next, %for.body ], [ 0, %for.body.preheader ]
+ %indvars.next = add i16 %indvars.iv, 1
+
+ %indvars.ext = sext i16 %indvars.iv to i32
+ %indvars.access = add i32 %Offset, %indvars.ext
+
+ %arrayidx = getelementptr inbounds i32, i32* %B, i32 %indvars.access
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %C, i32 %indvars.access
+ %1 = load i32, i32* %arrayidx3, align 4
+
+ %mul4 = add i32 %1, %0
+
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %indvars.access
+ store i32 %mul4, i32* %arrayidx7, align 4
+
+ %exitcond = icmp uge i32 %indvars.ext, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: test_ule
+; CHECK-LABEL: vector.scevcheck
+; CHECK-LABEL: vector.body
+define void @test_ule(i32* noalias %A,
+ i32* noalias %B,
+ i32* noalias %C, i32 %N,
+ i16 %M) {
+entry:
+ %cmp13 = icmp eq i32 %N, 0
+ br i1 %cmp13, label %for.end, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i16 [ %indvars.next, %for.body ], [ %M, %for.body.preheader ]
+ %indvars.next = sub i16 %indvars.iv, 1
+ %indvars.ext = zext i16 %indvars.iv to i32
+
+ %arrayidx = getelementptr inbounds i32, i32* %B, i32 %indvars.ext
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %C, i32 %indvars.ext
+ %1 = load i32, i32* %arrayidx3, align 4
+
+ %mul4 = mul i32 %1, %0
+
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %indvars.ext
+ store i32 %mul4, i32* %arrayidx7, align 4
+
+ %exitcond = icmp ule i32 %indvars.ext, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
+
+; CHECK-LABEL: test_sle
+; CHECK-LABEL: vector.scevcheck
+; CHECK-LABEL: vector.body
+define void @test_sle(i32* noalias %A,
+ i32* noalias %B,
+ i32* noalias %C, i32 %N,
+ i16 %M) {
+entry:
+ %cmp13 = icmp eq i32 %N, 0
+ br i1 %cmp13, label %for.end, label %for.body.preheader
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i16 [ %indvars.next, %for.body ], [ %M, %for.body.preheader ]
+ %indvars.next = sub i16 %indvars.iv, 1
+ %indvars.ext = sext i16 %indvars.iv to i32
+
+ %arrayidx = getelementptr inbounds i32, i32* %B, i32 %indvars.ext
+ %0 = load i32, i32* %arrayidx, align 4
+ %arrayidx3 = getelementptr inbounds i32, i32* %C, i32 %indvars.ext
+ %1 = load i32, i32* %arrayidx3, align 4
+
+ %mul4 = mul i32 %1, %0
+
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %indvars.ext
+ store i32 %mul4, i32* %arrayidx7, align 4
+
+ %exitcond = icmp sle i32 %indvars.ext, %N
+ br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:
+ br label %for.end
+
+for.end:
+ ret void
+}
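The tests above only check for the presence of the vector.scevcheck and vector.body blocks. As an illustration, the sketch below captures, approximately and only conceptually, what that runtime guard has to establish for a loop like test_sge: the 16-bit induction variable must be able to step through every iteration without wrapping, so that its zero/sign extension really behaves as an AddRec. The bound used here is a conservative assumption for illustration; the actual check is generated by the SCEV expander and may differ.

#include <cstdint>

// Conceptual stand-in for the vector.scevcheck guard: the i16 induction
// variable starts at 0 and is incremented once per iteration, so it stays
// representable only if the trip count fits in 16 bits. The precise bound
// emitted by SCEV may differ; this is only the idea.
static bool narrowInductionVariableCannotWrap(uint32_t TripCount) {
  return TripCount <= UINT16_MAX;
}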
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
index 42ec3b3ffda..740ff3682be 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
@@ -54,8 +54,9 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !16
%0 = trunc i64 %indvars.iv to i32, !dbg !16
+ %ld = load i32, i32* %arrayidx, align 4
store i32 %0, i32* %arrayidx, align 4, !dbg !16, !tbaa !18
- %cmp3 = icmp sle i32 %0, %Length, !dbg !22
+ %cmp3 = icmp sle i32 %ld, %Length, !dbg !22
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !12
%1 = trunc i64 %indvars.iv.next to i32
%cmp = icmp slt i32 %1, %Length, !dbg !12