-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp                                     | 27
-rw-r--r--  llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll  | 61
2 files changed, 50 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 7eb1e980fb7..3eeb2272613 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1061,6 +1061,10 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
!ICmpInst::isEquality(EqPred))
return nullptr;
+ auto IsKnownNonZero = [&](Value *V) {
+ return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ };
+
ICmpInst::Predicate UnsignedPred;
Value *A, *B;
@@ -1071,15 +1075,32 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
+ auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
+ if (!IsKnownNonZero(NonZero))
+ std::swap(NonZero, Other);
+ return IsKnownNonZero(NonZero);
+ };
+
// Given ZeroCmpOp = (A + B)
// ZeroCmpOp <= A && ZeroCmpOp != 0 --> (0-B) < A
// ZeroCmpOp > A || ZeroCmpOp == 0 --> (0-B) >= A
+ //
+ // ZeroCmpOp < A && ZeroCmpOp != 0 --> (0-X) < Y
+ // ZeroCmpOp >= A || ZeroCmpOp == 0 --> (0-X) >= Y
+ //   iff X is whichever of A/B is known to be non-zero,
+ //   with Y being the remaining value.
if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
IsAnd)
return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
+ if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
+ IsAnd && GetKnownNonZeroAndOther(B, A))
+ return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
!IsAnd)
return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
+ if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
+ !IsAnd && GetKnownNonZeroAndOther(B, A))
+ return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
}
Value *Base, *Offset;
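The new strict-predicate cases above are only valid because GetKnownNonZeroAndOther guarantees that the operand being negated is non-zero. As a standalone sanity check (not part of the patch; plain C++ with hypothetical names), the identity stated in the comment can be verified exhaustively over i8:

// Exhaustively verify, for ZeroCmpOp = (A + B) over all i8 values:
//   ZeroCmpOp u< A && ZeroCmpOp != 0  <=>  (0-X) u< Y
// where X is whichever of A/B is known non-zero and Y is the other value.
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A) {
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t ZeroCmpOp = static_cast<uint8_t>(A + B);
      bool Unfolded = ZeroCmpOp < A && ZeroCmpOp != 0;
      if (B != 0) // X = B, Y = A
        assert(Unfolded == (static_cast<uint8_t>(0 - B) < A));
      if (A != 0) // X = A, Y = B
        assert(Unfolded == (static_cast<uint8_t>(0 - A) < B));
    }
  }
  return 0;
}
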
@@ -1094,12 +1115,10 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
if (UnsignedPred == ICmpInst::ICMP_ULT && IsAnd &&
- EqPred == ICmpInst::ICMP_NE &&
- isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_NE && IsKnownNonZero(Offset))
return Builder.CreateICmpUGT(Base, Offset);
if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd &&
- EqPred == ICmpInst::ICMP_EQ &&
- isKnownNonZero(Offset, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_EQ && IsKnownNonZero(Offset))
return Builder.CreateICmpULE(Base, Offset);
}
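
The final hunk is a pure refactor: the two open-coded isKnownNonZero() calls become uses of the IsKnownNonZero lambda, and the folds themselves are unchanged. Assuming ZeroCmpOp is matched as Base - Offset just above this hunk (the match itself is outside the shown context), the equivalences being relied on can be checked the same way; again a standalone sketch rather than anything in the patch:

// Exhaustively verify the Base/Offset form over i8, with Offset non-zero:
//   (Base - Offset) u< Base  && (Base - Offset) != 0  <=>  Base u>  Offset
//   (Base - Offset) u>= Base || (Base - Offset) == 0  <=>  Base u<= Offset
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned Base = 0; Base < 256; ++Base) {
    for (unsigned Offset = 1; Offset < 256; ++Offset) { // Offset known non-zero
      uint8_t Diff = static_cast<uint8_t>(Base - Offset);
      assert((Diff < Base && Diff != 0) == (Base > Offset));
      assert((Diff >= Base || Diff == 0) == (Base <= Offset));
    }
  }
  return 0;
}
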
diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
index 00a37a5822d..5b48b5e30ff 100644
--- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
@@ -31,10 +31,9 @@ define i1 @t1(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -54,10 +53,9 @@ define i1 @t2(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %offset, 0
call void @llvm.assume(i1 %cmp)
@@ -79,9 +77,9 @@ define i1 @t3_oneuse0(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -100,11 +98,11 @@ define i1 @t4_oneuse1(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -149,10 +147,9 @@ define i1 @t6_commutativity0(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -170,10 +167,9 @@ define i1 @t7_commutativity1(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -191,10 +187,9 @@ define i1 @t7_commutativity3(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -214,10 +209,9 @@ define i1 @t8(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
@@ -237,10 +231,9 @@ define i1 @t9(i8 %base, i8 %offset) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%cmp = icmp slt i8 %base, 0
call void @llvm.assume(i1 %cmp)
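
In every updated test the non-zero fact is supplied by llvm.assume: the operand assumed to be signed-negative is necessarily non-zero, so it is the one that gets negated in the new CHECK lines. A host-side mirror of @t1 (standalone, not part of the test file) showing that the replaced and the new CHECK sequences compute the same i1:

// Mirror of @t1: %base is assumed signed-negative (hence non-zero),
// %adjusted = %base + %offset, and the original
//   %adjusted != 0 && %adjusted u< %base
// must equal the folded
//   (0 - %base) u< %offset
#include <cassert>
#include <cstdint>

int main() {
  for (int Base = -128; Base < 0; ++Base) {            // %base s< 0 per the assume
    for (unsigned Offset = 0; Offset < 256; ++Offset) {
      uint8_t B = static_cast<uint8_t>(Base);          // %base as an i8 bit pattern
      uint8_t Adjusted = static_cast<uint8_t>(B + Offset);
      bool Old = Adjusted != 0 && Adjusted < B;        // %not_null & %no_underflow
      bool New = static_cast<uint8_t>(0 - B) < Offset; // new sub + icmp ult CHECKs
      assert(Old == New);
    }
  }
  return 0;
}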