author     Nicola Zaghen <nicola.zaghen@imgtec.com>   2018-09-04 10:29:48 +0000
committer  Nicola Zaghen <nicola.zaghen@imgtec.com>   2018-09-04 10:29:48 +0000
commit     9588ad9611368d65a6f619ae5e79fff70854a1fc (patch)
tree       f51068c83cdebf1b82bf5bdfd2512383917f2fb0
parent     ef16ea7f7ade8dbe3647e7c829a13d1dbeee0f0e (diff)
[InstCombine] Fold icmp ugt/ult (add nuw X, C2), C --> icmp ugt/ult X, (C - C2)
Support for sgt/slt was added in rL294898; this adds the same cases for unsigned compares.

Alive proof: https://rise4fun.com/Alive/nyY

Differential Revision: https://reviews.llvm.org/D50972

llvm-svn: 341353
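For intuition, here is a minimal standalone sketch (hypothetical, not part of the patch) that brute-forces the fold over i8: when the add carries nuw, so X + C2 cannot wrap, icmp ult (add nuw X, C2), C is equivalent to icmp ult X, (C - C2). The constants 3 and 12 mirror the @reduce_add_ult test below.

    #include <cstdio>

    int main() {
      const unsigned C2 = 3, C = 12;  // constants from @reduce_add_ult
      for (unsigned X = 0; X <= 255; ++X) {
        if (X + C2 > 255)
          continue;                    // 'add nuw' excludes wrapping inputs
        bool Before = (X + C2) < C;    // icmp ult (add nuw X, C2), C
        bool After  = X < (C - C2);    // icmp ult X, (C - C2)
        if (Before != After) {
          printf("mismatch at X=%u\n", X);
          return 1;
        }
      }
      printf("fold is sound for all non-wrapping i8 inputs\n");
      return 0;
    }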
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp      13
-rwxr-xr-x  llvm/test/Analysis/ValueTracking/non-negative-phi-bits.ll     2
-rw-r--r--  llvm/test/Transforms/InstCombine/icmp-add.ll                  43
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll    6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll       2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/runtime-check.ll            2
6 files changed, 49 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 2926e268190..e0a1083b927 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2338,12 +2338,15 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
CmpInst::Predicate Pred = Cmp.getPredicate();
// If the add does not wrap, we can always adjust the compare by subtracting
- // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
- // canonicalized to SGT/SLT.
- if (Add->hasNoSignedWrap() &&
- (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
+ // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
+ // are canonicalized to SGT/SLT/UGT/ULT.
+ if ((Add->hasNoSignedWrap() &&
+ (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
+ (Add->hasNoUnsignedWrap() &&
+ (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
bool Overflow;
- APInt NewC = C.ssub_ov(*C2, Overflow);
+ APInt NewC =
+ Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
// If there is overflow, the result must be true or false.
// TODO: Can we assert there is no overflow because InstSimplify always
// handles those cases?
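When C - C2 underflows, usub_ov sets Overflow and the fold produces a constant result instead; the @ult_add_ssubov test below exercises this. A minimal sketch of that path (assumes LLVM's ADT headers are available; not part of the patch):

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    int main() {
      // Constants from @ult_add_ssubov: icmp ult (add nuw X, 71), 3.
      llvm::APInt C(32, 3), C2(32, 71);
      bool Overflow;
      llvm::APInt NewC = C.usub_ov(C2, Overflow);
      // Overflow is true here: C u< C2. Since X + 71 with nuw is always
      // u>= 71, the compare can never be true and folds to 'false'.
      printf("Overflow=%d NewC=%llu\n", Overflow ? 1 : 0,
             (unsigned long long)NewC.getZExtValue());
      return 0;
    }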
diff --git a/llvm/test/Analysis/ValueTracking/non-negative-phi-bits.ll b/llvm/test/Analysis/ValueTracking/non-negative-phi-bits.ll
index 059bbaa3c4e..3b1c43df5a7 100755
--- a/llvm/test/Analysis/ValueTracking/non-negative-phi-bits.ll
+++ b/llvm/test/Analysis/ValueTracking/non-negative-phi-bits.ll
@@ -8,7 +8,7 @@ define void @test() #0 {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 40
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[INDVARS_IV]], 39
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index 49517e580ef..901f98c8295 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -283,8 +283,7 @@ define i1 @slt_zero_add_nuw_signbit(i8 %x) {
define i1 @reduce_add_ult(i32 %in) {
; CHECK-LABEL: @reduce_add_ult(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[A6]], 12
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 9
; CHECK-NEXT: ret i1 [[A18]]
;
%a6 = add nuw i32 %in, 3
@@ -294,8 +293,7 @@ define i1 @reduce_add_ult(i32 %in) {
define i1 @reduce_add_ugt(i32 %in) {
; CHECK-LABEL: @reduce_add_ugt(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 12
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 9
; CHECK-NEXT: ret i1 [[A18]]
;
%a6 = add nuw i32 %in, 3
@@ -305,8 +303,7 @@ define i1 @reduce_add_ugt(i32 %in) {
define i1 @reduce_add_ule(i32 %in) {
; CHECK-LABEL: @reduce_add_ule(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[A6]], 13
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 10
; CHECK-NEXT: ret i1 [[A18]]
;
%a6 = add nuw i32 %in, 3
@@ -316,11 +313,41 @@ define i1 @reduce_add_ule(i32 %in) {
define i1 @reduce_add_uge(i32 %in) {
; CHECK-LABEL: @reduce_add_uge(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 11
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 8
; CHECK-NEXT: ret i1 [[A18]]
;
%a6 = add nuw i32 %in, 3
%a18 = icmp uge i32 %a6, 12
ret i1 %a18
}
+
+define i1 @ult_add_ssubov(i32 %in) {
+; CHECK-LABEL: @ult_add_ssubov(
+; CHECK-NEXT: ret i1 false
+;
+ %a6 = add nuw i32 %in, 71
+ %a18 = icmp ult i32 %a6, 3
+ ret i1 %a18
+}
+
+define i1 @ult_add_nonuw(i8 %in) {
+; CHECK-LABEL: @ult_add_nonuw(
+; CHECK-NEXT: [[A6:%.*]] = add i8 [[IN:%.*]], 71
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i8 [[A6]], 12
+; CHECK-NEXT: ret i1 [[A18]]
+;
+ %a6 = add i8 %in, 71
+ %a18 = icmp ult i8 %a6, 12
+ ret i1 %a18
+}
+
+define i1 @uge_add_nonuw(i32 %in) {
+; CHECK-LABEL: @uge_add_nonuw(
+; CHECK-NEXT: [[A6:%.*]] = add i32 [[IN:%.*]], 3
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 11
+; CHECK-NEXT: ret i1 [[A18]]
+;
+ %a6 = add i32 %in, 3
+ %a18 = icmp uge i32 %a6, 12
+ ret i1 %a18
+}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
index 8e948639ba1..b3387435f2b 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
@@ -1878,7 +1878,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
; AVX1-NEXT: br label [[FOR_INC]]
; AVX1: for.inc:
; AVX1-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 16
-; AVX1-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
+; AVX1-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV]], 9984
; AVX1-NEXT: br i1 [[CMP]], label [[FOR_BODY_1:%.*]], label [[FOR_END:%.*]]
; AVX1: for.end:
; AVX1-NEXT: ret void
@@ -1920,7 +1920,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
; AVX2-NEXT: br label [[FOR_INC]]
; AVX2: for.inc:
; AVX2-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 16
-; AVX2-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
+; AVX2-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV]], 9984
; AVX2-NEXT: br i1 [[CMP]], label [[FOR_BODY_1:%.*]], label [[FOR_END:%.*]]
; AVX2: for.end:
; AVX2-NEXT: ret void
@@ -2119,7 +2119,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
; AVX512-NEXT: br label [[FOR_INC_3]]
; AVX512: for.inc.3:
; AVX512-NEXT: [[INDVARS_IV_NEXT_3]] = add nsw i64 [[INDVARS_IV]], 64
-; AVX512-NEXT: [[CMP_3:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT_3]], 10000
+; AVX512-NEXT: [[CMP_3:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT_2]], 9984
; AVX512-NEXT: br i1 [[CMP_3]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop !52
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
index 311d92b94af..458e350a5e0 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -12,7 +12,7 @@ define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 3
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[N]], -1
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index 88489faa831..de43a613c09 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -18,7 +18,7 @@ define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtab
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1, !dbg !9
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg !9
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1, !dbg !9
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4, !dbg !9
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 3, !dbg !9
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]], !dbg !9
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[N]], -1, !dbg !9