author    Roman Lebedev <lebedev.ri@gmail.com>    2019-09-24 16:10:50 +0000
committer Roman Lebedev <lebedev.ri@gmail.com>    2019-09-24 16:10:50 +0000
commit    45fd1e9d503c2f300a7d52ca2c468131c381dceb (patch)
tree      e76a0a7920d7a81b53ced05ad5219dd78d7f08f3
parent    5b881f356cfecd2849fd5063c1fd816e7bbb35f7 (diff)
download  bcm5719-llvm-45fd1e9d503c2f300a7d52ca2c468131c381dceb.tar.gz
          bcm5719-llvm-45fd1e9d503c2f300a7d52ca2c468131c381dceb.zip
[InstCombine] (a+b) < a && (a+b) != 0 -> (0-b) < a iff a/b != 0 (PR43259)
Summary:
This is again motivated by the D67122 sanitizer check enhancement. That patch
seemingly worsens `-fsanitize=pointer-overflow` overhead from 25% to 50%,
which strongly implies missing folds.

For
```
#include <cassert>
char* test(char& base, signed long offset) {
  __builtin_assume(offset < 0);
  return &base + offset;
}
```
we produce https://godbolt.org/z/r40U47 and again those two icmp's can be
merged:
```
Name: 0
Pre: C != 0
  %adjusted = add i8 %base, C
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
=>
  %neg_offset = sub i8 0, C
  %r = icmp ugt i8 %base, %neg_offset
```
https://rise4fun.com/Alive/ALap
https://rise4fun.com/Alive/slnN

There are 3 other variants of this pattern; I believe they will all go into
InstSimplify.

https://bugs.llvm.org/show_bug.cgi?id=43259

Reviewers: spatel, xbolva00, nikic

Reviewed By: spatel

Subscribers: efriedma, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67849

llvm-svn: 372768
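Beyond the Alive proofs linked above, the subject-line fold is small enough to cross-check by brute force: it is width-independent, so sweeping every i8 pair covers it. The harness below is an editorial sketch, not part of the commit; the variable names merely mirror the Alive transform:
```
// Exhaustive i8 check of:
//   %adjusted != 0 && %adjusted u< %base  <=>  %base u> (0 - C),  given C != 0
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned base = 0; base < 256; ++base) {
    for (unsigned c = 1; c < 256; ++c) {              // precondition: C != 0
      uint8_t adjusted = uint8_t(base + c);           // %adjusted = add i8 %base, C
      bool not_null = adjusted != 0;                  // icmp ne i8 %adjusted, 0
      bool no_underflow = adjusted < uint8_t(base);   // icmp ult i8 %adjusted, %base
      uint8_t neg_offset = uint8_t(0u - c);           // %neg_offset = sub i8 0, C
      bool folded = uint8_t(base) > neg_offset;       // icmp ugt i8 %base, %neg_offset
      assert((not_null && no_underflow) == folded);
    }
  }
}
```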
-rw-r--r--    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp    27
-rw-r--r--    llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll    61
2 files changed, 50 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 7eb1e980fb7..3eeb2272613 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1061,6 +1061,10 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
       !ICmpInst::isEquality(EqPred))
     return nullptr;
 
+  auto IsKnownNonZero = [&](Value *V) {
+    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+  };
+
   ICmpInst::Predicate UnsignedPred;
 
   Value *A, *B;
@@ -1071,15 +1075,32 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
     if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
       UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
 
+    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
+      if (!IsKnownNonZero(NonZero))
+        std::swap(NonZero, Other);
+      return IsKnownNonZero(NonZero);
+    };
+
     // Given  ZeroCmpOp = (A + B)
     //   ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A
     //   ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A
+    //
+    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y
+    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y
+    //     where X is whichever of A/B is known to be non-zero,
+    //     and Y is the remaining value.
     if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
         IsAnd)
       return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
+        IsAnd && GetKnownNonZeroAndOther(B, A))
+      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
     if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
         !IsAnd)
       return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
+        !IsAnd && GetKnownNonZeroAndOther(B, A))
+      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
   }
 
   Value *Base, *Offset;
@@ -1094,12 +1115,10 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
       UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
 
     if (UnsignedPred == ICmpInst::ICMP_ULT && IsAnd &&
-        EqPred == ICmpInst::ICMP_NE &&
-        isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+        EqPred == ICmpInst::ICMP_NE && IsKnownNonZero(Offset))
       return Builder.CreateICmpUGT(Base, Offset);
 
     if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd &&
-        EqPred == ICmpInst::ICMP_EQ &&
-        isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+        EqPred == ICmpInst::ICMP_EQ && IsKnownNonZero(Offset))
       return Builder.CreateICmpULE(Base, Offset);
   }
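One detail worth calling out in the hunk above: GetKnownNonZeroAndOther does not merely test its first argument, it canonicalizes the pair, swapping the references so that on success the caller can always negate B and compare against A. Below is a standalone illustration of that contract, using a trivial stand-in predicate rather than ValueTracking's isKnownNonZero (hypothetical sketch, not LLVM code):
```
#include <cassert>
#include <utility>

// Stand-in for ValueTracking's isKnownNonZero(); here a "value" is just an
// int, and "known non-zero" is literal.
static bool isKnownNonZeroStub(int v) { return v != 0; }

// Mirrors GetKnownNonZeroAndOther from the patch: if `nonZero` is not provably
// non-zero, swap the operands and re-test, so a caller that succeeds can
// uniformly negate `nonZero` and compare against `other`.
static bool getKnownNonZeroAndOther(int &nonZero, int &other) {
  if (!isKnownNonZeroStub(nonZero))
    std::swap(nonZero, other);
  return isKnownNonZeroStub(nonZero);
}

int main() {
  int b = 0, a = 7;
  // b is zero, so the helper swaps: afterwards nonZero==7 sits in b.
  assert(getKnownNonZeroAndOther(b, a) && b == 7 && a == 0);
  int x = 0, y = 0;
  assert(!getKnownNonZeroAndOther(x, y)); // neither side is non-zero
}
```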
diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
index 00a37a5822d..5b48b5e30ff 100644
--- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
@@ -31,10 +31,9 @@ define i1 @t1(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -54,10 +53,9 @@ define i1 @t2(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %offset, 0
   call void @llvm.assume(i1 %cmp)
@@ -79,9 +77,9 @@ define i1 @t3_oneuse0(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -100,11 +98,11 @@ define i1 @t4_oneuse1(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -149,10 +147,9 @@ define i1 @t6_commutativity0(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -170,10 +167,9 @@ define i1 @t7_commutativity1(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -191,10 +187,9 @@ define i1 @t7_commutativity3(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -214,10 +209,9 @@ define i1 @t8(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -237,10 +231,9 @@ define i1 @t9(i8 %base, i8 %offset) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[OFFSET]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
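For completeness, the disjunction variant exercised by @t8 above (the `or` form, with both predicates inverted) can be cross-checked the same way as the `and` form; again an editorial sketch, not part of the commit:
```
#include <cassert>
#include <cstdint>

int main() {
  // Exhaustive i8 check of the @t8 shape, with %base known non-zero
  // (enforced here by starting the outer loop at 1):
  //   %adjusted == 0 || %adjusted u>= %base  <=>  (0 - %base) u>= %offset
  for (unsigned base = 1; base < 256; ++base) {
    for (unsigned offset = 0; offset < 256; ++offset) {
      uint8_t adjusted = uint8_t(base + offset);      // add i8 %base, %offset
      bool lhs = adjusted == 0 || adjusted >= uint8_t(base);
      bool rhs = uint8_t(0u - base) >= uint8_t(offset); // icmp uge (sub 0, %base), %offset
      assert(lhs == rhs);
    }
  }
}
```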