author    Andrew Trick <atrick@apple.com>    2011-08-06 07:00:37 +0000
committer Andrew Trick <atrick@apple.com>    2011-08-06 07:00:37 +0000
commit    6d45a01b67df7e4e1b88bec8bc724a46caefb1a5 (patch)
tree      6784eba0d0e3e086f993136bd24712f2e0b41ece /llvm/test
parent    54d456758e3eba90f28750d8f404c15081a3b723 (diff)
Made SCEV's UDiv expressions more canonical. When dividing a
recurrence, the initial value's low bits can sometimes be ignored. To
take advantage of this, added FoldIVUser to IndVarSimplify to fold an
IV operand into a udiv/lshr if the operator doesn't affect the result.

-indvars -disable-iv-rewrite now transforms

  i = phi i4
  i1 = i0 + 1
  idx = i1 >> (2 or more)
  i4 = i + 4

into

  i = phi i4
  idx = i0 >> ...
  i4 = i + 4

llvm-svn: 137013
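The same idea as a minimal LLVM IR sketch (names here are hypothetical,
not taken from the patch): given

  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]  ; recurrence with step 2
  %inc = add i32 %iv, 1
  %idx = lshr i32 %inc, 5
  %iv.next = add i32 %iv, 2

%iv only takes even values, so adding 1 just sets bit 0 and never carries
into the bits the shift keeps (2^5 = 32 is a multiple of the step), and the
shift can use the phi directly:

  %idx = lshr i32 %iv, 5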
Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/Transforms/IndVarSimplify/iv-fold.ll | 56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-fold.ll b/llvm/test/Transforms/IndVarSimplify/iv-fold.ll
new file mode 100644
index 00000000000..7e11cdf098b
--- /dev/null
+++ b/llvm/test/Transforms/IndVarSimplify/iv-fold.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -indvars -disable-iv-rewrite -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n:32:64"
+
+; Indvars should be able to fold IV increments into shr when low bits are zero.
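+; In @foldIncShr the phi %0 steps by 2 per iteration; since 2^5 = 32 is a
+; multiple of that step and the start value is 0, %inc.1 = %0 + 1 never
+; crosses a 32-aligned boundary, so %inc.1 >> 5 always equals %0 >> 5.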
+;
+; CHECK: @foldIncShr
+; CHECK: shr.1 = lshr i32 %0, 5
+define i32 @foldIncShr(i32* %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
+entry:
+ br label %while.body
+
+while.body:
+ %0 = phi i32 [ 0, %entry ], [ %inc.2, %while.body ]
+ %shr = lshr i32 %0, 5
+ %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+ %tmp6 = load i32* %arrayidx, align 4
+ %inc.1 = add i32 %0, 1
+ %shr.1 = lshr i32 %inc.1, 5
+ %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+ %tmp6.1 = load i32* %arrayidx.1, align 4
+ %inc.2 = add i32 %inc.1, 1
+ %exitcond.3 = icmp eq i32 %inc.2, 128
+ br i1 %exitcond.3, label %while.end, label %while.body
+
+while.end:
+ %r = add i32 %tmp6, %tmp6.1
+ ret i32 %r
+}
+
+; Indvars should not fold an increment into shr unless 2^shiftBits is
+; a multiple of the recurrence step.
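+; Here %0 steps by 3 per iteration and 2^5 = 32 is not a multiple of 3, so
+; the increment can cross a 32-aligned boundary: at %0 = 63, 63 >> 5 = 1
+; but %inc.1 = 64 gives 64 >> 5 = 2, so the fold would be unsound.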
+;
+; CHECK: @noFoldIncShr
+; CHECK: shr.1 = lshr i32 %inc.1, 5
+define i32 @noFoldIncShr(i32* %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
+entry:
+ br label %while.body
+
+while.body:
+ %0 = phi i32 [ 0, %entry ], [ %inc.3, %while.body ]
+ %shr = lshr i32 %0, 5
+ %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+ %tmp6 = load i32* %arrayidx, align 4
+ %inc.1 = add i32 %0, 1
+ %shr.1 = lshr i32 %inc.1, 5
+ %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+ %tmp6.1 = load i32* %arrayidx.1, align 4
+ %inc.3 = add i32 %inc.1, 2
+ %exitcond.3 = icmp eq i32 %inc.3, 96
+ br i1 %exitcond.3, label %while.end, label %while.body
+
+while.end:
+ %r = add i32 %tmp6, %tmp6.1
+ ret i32 %r
+}