author    Wei Mi <wmi@google.com>    2016-11-15 17:34:52 +0000
committer Wei Mi <wmi@google.com>    2016-11-15 17:34:52 +0000
commit    d2948cef705997c38799a2cc8cba15d809fbf364 (patch)
tree      66e0652d285b810c4eaf8f4ae173114b183d9616 /llvm/test
parent    6d279e8e98f83dc1ed8769b1be2a846daef2e33d (diff)
[IndVars] Change the order to compute WidenAddRec in widenIVUse.
When WidenIV::getWideRecurrence and WidenIV::getExtendedOperandRecurrence both return non-null but different WideAddRec, and getWideRecurrence is called first, getExtendedOperandRecurrence is never consulted. However, after SCEV folding it may be impossible to prove the legality of widening from the SCEVAddRecExpr returned by getWideRecurrence, whereas a non-null WideAddRec from getExtendedOperandRecurrence guarantees that widening the current instruction is legal. It is therefore better to try getExtendedOperandRecurrence before getWideRecurrence, which increases the chance of successful widening.

Differential Revision: https://reviews.llvm.org/D26059

llvm-svn: 286987
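For context, the change in WidenIV::widenIVUse boils down to swapping the order of the two helper calls. The following is a minimal sketch of the before/after logic, reconstructed from the message above and not the verbatim LLVM source; the exact helper signatures in IndVarSimplify.cpp are simplified here, and both helpers are assumed to return nullptr on failure:

    // Before this patch: the SCEV-folded recurrence was computed first, and
    // any non-null result suppressed the operand-based check, even when the
    // folded SCEVAddRecExpr could not be proven legal to widen.
    //
    //   const SCEVAddRecExpr *WideAddRec = getWideRecurrence(DU);
    //   if (!WideAddRec)
    //     WideAddRec = getExtendedOperandRecurrence(DU);

    // After this patch: the operand-based recurrence is tried first, since a
    // non-null result there is known to be legal to widen; the SCEV-folded
    // form is kept only as a fallback.
    const SCEVAddRecExpr *WideAddRec = getExtendedOperandRecurrence(DU);
    if (!WideAddRec)
      WideAddRec = getWideRecurrence(DU);

The test changes below exercise exactly this: cases where the operand-based recurrence proves widening legal even though the folded form alone would not.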
Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll |  9
-rw-r--r-- llvm/test/Transforms/IndVarSimplify/iv-widen.ll          | 56
2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll b/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
index 02c5f668734..8cb3f18c3d2 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll
@@ -21,8 +21,11 @@ define void @foo(i32* %A, i32* %B, i32* %C, i32 %N) {
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* %C, i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
+; CHECK-NEXT: [[TRUNC0:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT: [[DIV0:%.*]] = udiv i32 5, [[TRUNC0]]
+; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]]
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* %A, i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX5]], align 4
+; CHECK-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX5]], align 4
; CHECK-NEXT: br label %for.inc
; CHECK: for.inc:
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -51,9 +54,11 @@ for.body: ; preds = %for.body.lr.ph, %fo
%arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1
%1 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %0, %1
+ %div0 = udiv i32 5, %add
+ %add4 = add nsw i32 %add3, %div0
%idxprom4 = zext i32 %i.02 to i64
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
- store i32 %add3, i32* %arrayidx5, align 4
+ store i32 %add4, i32* %arrayidx5, align 4
br label %for.inc
for.inc: ; preds = %for.body
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
index bf635903fdf..aa69da4b505 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
@@ -69,3 +69,59 @@ define void @loop_1(i32 %lim) {
declare void @dummy(i32)
declare void @dummy.i64(i64)
+
+
+define void @loop_2(i32 %size, i32 %nsteps, i32 %hsize, i32* %lined, i8 %tmp1) {
+; CHECK-LABEL: @loop_2(
+entry:
+ %cmp215 = icmp sgt i32 %size, 1
+ %tmp0 = bitcast i32* %lined to i8*
+ br label %for.body
+
+for.body:
+ %j = phi i32 [ 0, %entry ], [ %inc6, %for.inc ]
+ %mul = mul nsw i32 %j, %size
+ %add = add nsw i32 %mul, %hsize
+ br i1 %cmp215, label %for.body2, label %for.inc
+
+; check that the induction variable of the inner loop has been widened after indvars.
+; CHECK: [[INNERLOOPINV:%[^ ]+]] = add nsw i64
+; CHECK: for.body2:
+; CHECK-NEXT: %indvars.iv = phi i64 [ 1, %for.body2.preheader ], [ %indvars.iv.next, %for.body2 ]
+; CHECK-NEXT: [[WIDENED:%[^ ]+]] = add nsw i64 [[INNERLOOPINV]], %indvars.iv
+; CHECK-NEXT: %add.ptr = getelementptr inbounds i8, i8* %tmp0, i64 [[WIDENED]]
+for.body2:
+ %k = phi i32 [ %inc, %for.body2 ], [ 1, %for.body ]
+ %add4 = add nsw i32 %add, %k
+ %idx.ext = sext i32 %add4 to i64
+ %add.ptr = getelementptr inbounds i8, i8* %tmp0, i64 %idx.ext
+ store i8 %tmp1, i8* %add.ptr, align 1
+ %inc = add nsw i32 %k, 1
+ %cmp2 = icmp slt i32 %inc, %size
+ br i1 %cmp2, label %for.body2, label %for.body3
+
+; check that the induction variable of the inner loop has been widened after indvars.
+; CHECK: for.body3.preheader:
+; CHECK: [[INNERLOOPINV:%[^ ]+]] = zext i32
+; CHECK: for.body3:
+; CHECK-NEXT: %indvars.iv2 = phi i64 [ 1, %for.body3.preheader ], [ %indvars.iv.next3, %for.body3 ]
+; CHECK-NEXT: [[WIDENED:%[^ ]+]] = add nuw nsw i64 [[INNERLOOPINV]], %indvars.iv2
+; CHECK-NEXT: %add.ptr2 = getelementptr inbounds i8, i8* %tmp0, i64 [[WIDENED]]
+for.body3:
+ %l = phi i32 [ %inc2, %for.body3 ], [ 1, %for.body2 ]
+ %add5 = add nuw i32 %add, %l
+ %idx.ext2 = zext i32 %add5 to i64
+ %add.ptr2 = getelementptr inbounds i8, i8* %tmp0, i64 %idx.ext2
+ store i8 %tmp1, i8* %add.ptr2, align 1
+ %inc2 = add nsw i32 %l, 1
+ %cmp3 = icmp slt i32 %inc2, %size
+ br i1 %cmp3, label %for.body3, label %for.inc
+
+for.inc:
+ %inc6 = add nsw i32 %j, 1
+ %cmp = icmp slt i32 %inc6, %nsteps
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:
+ ret void
+}