diff options
| author | Roman Lebedev <lebedev.ri@gmail.com> | 2019-09-24 16:10:38 +0000 | 
|---|---|---|
| committer | Roman Lebedev <lebedev.ri@gmail.com> | 2019-09-24 16:10:38 +0000 | 
| commit | 5b881f356cfecd2849fd5063c1fd816e7bbb35f7 (patch) | |
| tree | 3e96716d5a7c5857c936ba119a436d5a72d7acf3 | |
| parent | 682d41a506dec31c24ea7c5e3440155ef8f6110b (diff) | |
| download | bcm5719-llvm-5b881f356cfecd2849fd5063c1fd816e7bbb35f7.tar.gz bcm5719-llvm-5b881f356cfecd2849fd5063c1fd816e7bbb35f7.zip  | |
[InstCombine] (a+b) <= a && (a+b) != 0 -> (0-b) < a (PR43259)
Summary:
This is again motivated by D67122 sanitizer check enhancement.
That patch seemingly worsens `-fsanitize=pointer-overflow`
overhead from 25% to 50%, which strongly implies missing folds.
This pattern isn't exactly what we get there
(strict vs. non-strict predicate), but this pattern does not
require known-bits analysis, so it is best to handle it first.
```
Name: 0
  %adjusted = add i8 %base, %offset
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ule i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
=>
  %neg_offset = sub i8 0, %offset
  %r = icmp ugt i8 %base, %neg_offset
```
https://rise4fun.com/Alive/knp
There are 3 other variants of this pattern;
they will all go into InstSimplify:
https://rise4fun.com/Alive/bIDZ
https://bugs.llvm.org/show_bug.cgi?id=43259
Reviewers: spatel, xbolva00, nikic
Reviewed By: spatel
Subscribers: hiraditya, majnemer, vsk, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67846
llvm-svn: 372767
| -rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 23 | ||||
| -rw-r--r-- | llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll | 47 | 
2 files changed, 42 insertions, 28 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index cde4cc1e842..7eb1e980fb7 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -1061,12 +1061,31 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,        !ICmpInst::isEquality(EqPred))      return nullptr; +  ICmpInst::Predicate UnsignedPred; + +  Value *A, *B; +  if (match(UnsignedICmp, +            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) && +      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) && +      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) { +    if (UnsignedICmp->getOperand(0) != ZeroCmpOp) +      UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); + +    // Given  ZeroCmpOp = (A + B) +    //   ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A +    //   ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A +    if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && +        IsAnd) +      return Builder.CreateICmpULT(Builder.CreateNeg(B), A); +    if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && +        !IsAnd) +      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A); +  } +    Value *Base, *Offset;    if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))      return nullptr; -  ICmpInst::Predicate UnsignedPred; -    // ZeroCmpOp <  Base && ZeroCmpOp != 0  --> Base >  Offset  iff Offset != 0    // ZeroCmpOp >= Base || ZeroCmpOp == 0  --> Base <= Offset  iff Offset != 0    if (match(UnsignedICmp, diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll index de4fd42b5bf..b8a29e4fab2 --- 
a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll +++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll @@ -10,10 +10,9 @@ define i1 @t0(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t0(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -30,9 +29,9 @@ define i1 @t1_oneuse0(i8 %base, i8 %offset) {  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])  ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0  ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -46,11 +45,11 @@ define i1 @t2_oneuse1(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t2_oneuse1(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0  ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]  ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]] -; CHECK-NEXT:  
  ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -85,10 +84,9 @@ define i1 @t4_commutativity0(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t4_commutativity0(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -101,10 +99,9 @@ define i1 @t5_commutativity1(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t5_commutativity1(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -117,10 +114,9 @@ define i1 @t6_commutativity3(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t6_commutativity3(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], 
[[BASE]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted) @@ -152,10 +148,9 @@ define i1 @t8(i8 %base, i8 %offset) {  ; CHECK-LABEL: @t8(  ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]  ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 -; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[OFFSET]] -; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]] -; CHECK-NEXT:    ret i1 [[R]] +; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]] +; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT:    ret i1 [[TMP2]]  ;    %adjusted = add i8 %base, %offset    call void @use8(i8 %adjusted)  | 

