summaryrefslogtreecommitdiffstats
path: root/llvm/test/Analysis/ScalarEvolution/fold.ll
diff options
context:
space:
mode:
author: Nick Lewycky <nicholas@mxc.ca> 2014-01-27 10:04:03 +0000
committer: Nick Lewycky <nicholas@mxc.ca> 2014-01-27 10:04:03 +0000
commit 31eaca5513e9b77069f1d8390a4aba8657da4246 (patch)
tree 1a47a44a95c21095003f1b30e2b4801163adb9b6 /llvm/test/Analysis/ScalarEvolution/fold.ll
parent 55139555c4ec5a8c02a334b5a729c09b8787bbcd (diff)
download: bcm5719-llvm-31eaca5513e9b77069f1d8390a4aba8657da4246.tar.gz
download: bcm5719-llvm-31eaca5513e9b77069f1d8390a4aba8657da4246.zip
Teach SCEV to handle more cases of 'and X, CST', specifically where CST is any number of contiguous 1 bits in a row, with any number of leading and trailing 0 bits.
Unfortunately, this in turn led to some lower quality SCEVs due to some different paths through expression simplification, so add getUDivExactExpr and use it. This fixes all instances of the problems that I found, but we can make that function smarter as necessary. Merge test "xor-and.ll" into "and-xor.ll" since I needed to update it anyway. Test 'nsw-offset.ll' analyzes a little deeper: %n now gets a SCEV in terms of %no instead of a SCEVUnknown. llvm-svn: 200203
Diffstat (limited to 'llvm/test/Analysis/ScalarEvolution/fold.ll')
-rw-r--r-- llvm/test/Analysis/ScalarEvolution/fold.ll | 17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Analysis/ScalarEvolution/fold.ll b/llvm/test/Analysis/ScalarEvolution/fold.ll
index 57006dd9bb4..84b657050c5 100644
--- a/llvm/test/Analysis/ScalarEvolution/fold.ll
+++ b/llvm/test/Analysis/ScalarEvolution/fold.ll
@@ -60,3 +60,20 @@ loop:
exit:
ret void
}
+
+define void @test5(i32 %i) {
+; CHECK-LABEL: @test5
+ %A = and i32 %i, 1
+; CHECK: --> (zext i1 (trunc i32 %i to i1) to i32)
+ %B = and i32 %i, 2
+; CHECK: --> (2 * (zext i1 (trunc i32 (%i /u 2) to i1) to i32))
+ %C = and i32 %i, 63
+; CHECK: --> (zext i6 (trunc i32 %i to i6) to i32)
+ %D = and i32 %i, 126
+; CHECK: --> (2 * (zext i6 (trunc i32 (%i /u 2) to i6) to i32))
+ %E = and i32 %i, 64
+; CHECK: --> (64 * (zext i1 (trunc i32 (%i /u 64) to i1) to i32))
+ %F = and i32 %i, -2147483648
+; CHECK: --> (-2147483648 * (%i /u -2147483648))
+ ret void
+}
OpenPOWER on IntegriCloud