author    Roman Lebedev <lebedev.ri@gmail.com>  2019-09-25 19:06:26 +0000
committer Roman Lebedev <lebedev.ri@gmail.com>  2019-09-25 19:06:26 +0000
commit    dfda7d2d90df4034cb9d28f178dbbef93a57abe0 (patch)
tree      dc3c9c42bcea0d4f90d7295f268f20258091ca3f /llvm
parent    efcad77431e10b615469595eff301804a5027f18 (diff)
[NFC][InstCombine] Add tests for (X - Y) < X --> Y <= X iff Y != 0
https://rise4fun.com/Alive/KtL

This should go to InstCombiner::foldICmpBinOp(), next to "Convert
sub-with-unsigned-overflow comparisons into a comparison of args."

llvm-svn: 372911
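The claimed equivalence is easy to sanity-check outside of Alive. The
following standalone C++ program (purely illustrative, not part of this
commit) exhaustively verifies, for every i8 pair with Y != 0, the four
predicate spellings exercised by the tests below:

    // Brute-force check of (X - Y) u< X  <=>  Y u<= X, given Y != 0,
    // plus the inverted and operand-swapped spellings from the tests.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      for (unsigned x = 0; x <= 255; ++x) {
        for (unsigned y = 1; y <= 255; ++y) { // precondition: Y != 0
          const uint8_t X = (uint8_t)x, Y = (uint8_t)y;
          const uint8_t Adj = (uint8_t)(X - Y); // wraps, like `sub i8`
          assert((Adj <  X) == (Y <= X)); // t0: (X-Y) u<  X <=> Y u<= X
          assert((Adj >= X) == (Y >  X)); // t1: inverted predicate
          assert((X >  Adj) == (Y <= X)); // t2: t0, operands swapped
          assert((X <= Adj) == (Y >  X)); // t3: t1, operands swapped
        }
      }
      puts("OK: equivalence holds for every i8 pair with Y != 0");
    }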
Diffstat (limited to 'llvm')
-rw-r--r-- llvm/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll b/llvm/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll
new file mode 100644
index 00000000000..ceef17a3737
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/strict-sub-underflow-check-to-comparison-of-sub-operands.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare i8 @gen8()
+declare void @use8(i8)
+
+define i1 @t0(i8 %base, i8 %offset) {
+; CHECK-LABEL: @t0(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %cmp = icmp slt i8 %offset, 0
+ call void @llvm.assume(i1 %cmp)
+
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp ult i8 %adjusted, %base
+ ret i1 %res
+}
+define i1 @t1(i8 %base, i8 %offset) {
+; CHECK-LABEL: @t1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %cmp = icmp slt i8 %offset, 0
+ call void @llvm.assume(i1 %cmp)
+
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp uge i8 %adjusted, %base
+ ret i1 %res
+}
+define i1 @t2(i8 %offset) {
+; CHECK-LABEL: @t2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %cmp = icmp slt i8 %offset, 0
+ call void @llvm.assume(i1 %cmp)
+
+ %base = call i8 @gen8()
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp ugt i8 %base, %adjusted
+ ret i1 %res
+}
+define i1 @t3(i8 %offset) {
+; CHECK-LABEL: @t3(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %cmp = icmp slt i8 %offset, 0
+ call void @llvm.assume(i1 %cmp)
+
+ %base = call i8 @gen8()
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp ule i8 %base, %adjusted
+ ret i1 %res
+}
+
+; Here we don't know that %offset is non-zero. Can't fold.
+define i1 @n4_maybezero(i8 %base, i8 %offset) {
+; CHECK-LABEL: @n4_maybezero(
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp ult i8 %adjusted, %base
+ ret i1 %res
+}
+; We need to know that %offset is non-zero; knowing it about %base won't do. Can't fold.
+define i1 @n5_wrongnonzero(i8 %base, i8 %offset) {
+; CHECK-LABEL: @n5_wrongnonzero(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[BASE:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET:%.*]]
+; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
+; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: ret i1 [[RES]]
+;
+ %cmp = icmp sgt i8 %base, 0
+ call void @llvm.assume(i1 %cmp)
+
+ %adjusted = sub i8 %base, %offset
+ call void @use8(i8 %adjusted)
+ %res = icmp ult i8 %adjusted, %base
+ ret i1 %res
+}
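Why the precondition matters: the negative tests @n4_maybezero and
@n5_wrongnonzero above leave %offset possibly zero, and Y == 0 is exactly
the failure mode. A concrete counterexample (again illustrative, not part
of the commit):

    // With Y == 0 the fold would be wrong: (X - 0) u< X is false,
    // while the folded form Y u<= X is true. Knowing %base > 0 instead
    // (as in @n5_wrongnonzero) does not rule this out.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t X = 42, Y = 0;
      assert(!((uint8_t)(X - Y) < X)); // original compare: false
      assert(Y <= X);                  // proposed fold:    true -- mismatch
      // Same mismatch with a positive base, so X > 0 alone can't justify it:
      const uint8_t X2 = 1, Y2 = 0;
      assert(!((uint8_t)(X2 - Y2) < X2) && Y2 <= X2);
      return 0;
    }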