author    Matthew Simpson <mssimpso@codeaurora.org>    2017-02-14 16:28:32 +0000
committer Matthew Simpson <mssimpso@codeaurora.org>    2017-02-14 16:28:32 +0000
commit    f09d13e5cc47b52cef8ca695aad6aa3c77dbe87e (patch)
tree      326d51ee81321c01b5d083621334241c456af299 /llvm/test/Transforms/LoopVectorize
parent    17ba44519ba85deb8d3db77b202dba75bc129f5c (diff)
download  bcm5719-llvm-f09d13e5cc47b52cef8ca695aad6aa3c77dbe87e.tar.gz
          bcm5719-llvm-f09d13e5cc47b52cef8ca695aad6aa3c77dbe87e.zip
Reapply "[LV] Extend trunc optimization to all IVs with constant integer steps"
This reapplies commit r294967 with a fix for the execution time regressions caught by the clang-cmake-aarch64-quick bot. We now extend the truncate optimization to non-primary induction variables only if the truncate isn't already free.

Differential Revision: https://reviews.llvm.org/D29847

llvm-svn: 295063
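In short, instead of creating the vector induction variable in the wide type and truncating every element, the vectorizer now builds it directly in the truncated type when the IV has a constant integer step. A minimal before/after sketch in LLVM IR, distilled from the non_primary_iv_trunc test below (the "before" form is reconstructed for contrast and is not part of this commit):

    ; Before (reconstructed): widen the i64 IV, then truncate each element.
    %vec.ind      = phi <2 x i64> [ <i64 0, i64 2>, %vector.ph ], [ %vec.ind.next, %vector.body ]
    %trunc        = trunc <2 x i64> %vec.ind to <2 x i32>
    %vec.ind.next = add <2 x i64> %vec.ind, <i64 4, i64 4>

    ; After: create the IV directly in i32 (step 2, VF 2), no truncate needed.
    %vec.ind      = phi <2 x i32> [ <i32 0, i32 2>, %vector.ph ], [ %vec.ind.next, %vector.body ]
    %vec.ind.next = add <2 x i32> %vec.ind, <i32 4, i32 4>

The new AArch64 test checks the other half of the fix: when the target already treats the i64-to-i32 truncate as free, the optimization is skipped and the scalar truncates remain in the vector body.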
Diffstat (limited to 'llvm/test/Transforms/LoopVectorize')
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll  30
-rw-r--r--  llvm/test/Transforms/LoopVectorize/induction.ll                 31
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reverse_iter.ll               5
3 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll
new file mode 100644
index 00000000000..e8ef4256235
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-trunc.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -force-vector-width=1 -force-vector-interleave=2 -loop-vectorize -S | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+; CHECK-LABEL: @non_primary_iv_trunc_free(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 5
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDUCTION]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[INDUCTION1]] to i32
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @non_primary_iv_trunc_free(i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %tmp0 = trunc i64 %i to i32
+ %i.next = add nuw nsw i64 %i, 5
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 85f00b4dfc8..da20a2a7c4d 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -773,3 +773,34 @@ for.body:
exit:
ret void
}
+
+; CHECK-LABEL: @non_primary_iv_trunc(
+; CHECK: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
+; CHECK: [[TMP3:%.*]] = add i64 %index, 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* %a, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <2 x i32>*
+; CHECK-NEXT: store <2 x i32> [[VEC_IND]], <2 x i32>* [[TMP6]], align 4
+; CHECK-NEXT: %index.next = add i64 %index, 2
+; CHECK: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 4, i32 4>
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+define void @non_primary_iv_trunc(i32* %a, i64 %n) {
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+ %j = phi i64 [ %j.next, %for.body ], [ 0, %entry ]
+ %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i
+ %tmp1 = trunc i64 %j to i32
+ store i32 %tmp1, i32* %tmp0, align 4
+ %i.next = add nuw nsw i64 %i, 1
+ %j.next = add nuw nsw i64 %j, 2
+ %cond = icmp slt i64 %i.next, %n
+ br i1 %cond, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/reverse_iter.ll b/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
index a6e2abda36d..bd057698280 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
@@ -2,7 +2,8 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-; Make sure that the reverse iterators are calculated using 64bit arithmetic, not 32.
+; PR15882: This test ensures that we do not produce wrapping arithmetic when
+; creating constant reverse step vectors.
;
; int foo(int n, int *A) {
; int sum;
@@ -13,7 +14,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
;
;CHECK-LABEL: @foo(
-;CHECK: <i64 0, i64 -1, i64 -2, i64 -3>
+;CHECK: <i32 0, i32 -1, i32 -2, i32 -3>
;CHECK: ret
define i32 @foo(i32 %n, i32* nocapture %A) {
%1 = icmp sgt i32 %n, 0