Diffstat (limited to 'llvm/test/Transforms/InstCombine')
6 files changed, 854 insertions, 8 deletions
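For context (not part of the patch): the tests below exercise InstCombine's recognition of the unsigned-add overflow-check idiom. A minimal standalone sketch of that idiom and of the canonical form the file comments name, assuming the i8 variant of the intrinsic and hypothetical function names:

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)

; The idiom: for unsigned i8 addition, (%x + %y) u< %x holds exactly when
; the add wrapped, so the comparison is an overflow check.
define i1 @overflow_check_idiom(i8 %x, i8 %y) {
  %t0 = add i8 %x, %y
  %r = icmp ult i8 %t0, %x
  ret i1 %r
}

; The target form described in the file comments:
; @llvm.uadd.with.overflow(%x, %y) + extractvalue of the overflow bit.
define i1 @overflow_check_canonical(i8 %x, i8 %y) {
  %agg = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
  %ov = extractvalue { i8, i1 } %agg, 1
  ret i1 %ov
}
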
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll new file mode 100644 index 00000000000..b39f2bb8bec --- /dev/null +++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll @@ -0,0 +1,236 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt %s -instcombine -S | FileCheck %s + +; Should fold +; (%x + %y) u>= %x +; or +; (%x + %y) u>= %y +; to +; @llvm.uadd.with.overflow(%x, %y) + extractvalue + not +; +; All tests here have extra uses, to ensure that the pattern isn't perturbed. + +declare void @use8(i8) +declare void @use2x8(<2 x i8>) + +define i1 @t0_basic(i8 %x, i8 %y) { +; CHECK-LABEL: @t0_basic( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp uge i8 %t0, %y + ret i1 %r +} + +define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t1_vec( +; CHECK-NEXT: [[T0:%.*]] = add <2 x i8> [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[Y]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = add <2 x i8> %x, %y + call void @use2x8(<2 x i8> %t0) + %r = icmp uge <2 x i8> %t0, %y + ret <2 x i1> %r +} + +; Commutativity + +define i1 @t2_symmetry(i8 %x, i8 %y) { +; CHECK-LABEL: @t2_symmetry( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp uge i8 %t0, %x ; can check against either of `add` arguments + ret i1 %r +} + +declare i8 @gen8() + +define i1 @t3_commutative(i8 %x) { +; CHECK-LABEL: @t3_commutative( +; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[T0:%.*]] = add i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %y = call i8 @gen8() + %t0 = add i8 %y, %x ; swapped + call void @use8(i8 %t0) + %r = icmp uge i8 %t0, %y + ret i1 %r +} + +define i1 @t4_commutative() { +; CHECK-LABEL: @t4_commutative( +; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X]], [[Y]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y]], [[T0]] +; CHECK-NEXT: ret i1 [[R]] +; + %x = call i8 @gen8() + %y = call i8 @gen8() + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ule i8 %y, %t0 ; swapped + ret i1 %r +} + +define i1 @t5_commutative(i8 %x) { +; CHECK-LABEL: @t5_commutative( +; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[T0:%.*]] = add i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y]], [[T0]] +; CHECK-NEXT: ret i1 [[R]] +; + %y = call i8 @gen8() + %t0 = add i8 %y, %x ; swapped + call void @use8(i8 %t0) + %r = icmp ule i8 %y, %t0 ; swapped + ret i1 %r +} + +; Extra-use tests + +define i1 @t6_no_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t6_no_extrause( +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + %r = icmp uge i8 %t0, %y + ret i1 %r +} + +; Negative tests + +define i1 
@n7_different_y(i8 %x, i8 %y0, i8 %y1) { +; CHECK-LABEL: @n7_different_y( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y0:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[Y1:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y0 + call void @use8(i8 %t0) + %r = icmp uge i8 %t0, %y1 + ret i1 %r +} + +define i1 @n8_wrong_pred0(i8 %x, i8 %y) { +; CHECK-LABEL: @n8_wrong_pred0( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ule i8 %t0, %y + ret i1 %r +} + +define i1 @n9_wrong_pred1(i8 %x, i8 %y) { +; CHECK-LABEL: @n9_wrong_pred1( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ugt i8 %t0, %y + ret i1 %r +} + +define i1 @n10_wrong_pred2(i8 %x, i8 %y) { +; CHECK-LABEL: @n10_wrong_pred2( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp eq i8 %t0, %y + ret i1 %r +} + +define i1 @n11_wrong_pred3(i8 %x, i8 %y) { +; CHECK-LABEL: @n11_wrong_pred3( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X]], 0 +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ne i8 %t0, %y + ret i1 %r +} + +define i1 @n12_wrong_pred4(i8 %x, i8 %y) { +; CHECK-LABEL: @n12_wrong_pred4( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp slt i8 %t0, %y + ret i1 %r +} + +define i1 @n13_wrong_pred5(i8 %x, i8 %y) { +; CHECK-LABEL: @n13_wrong_pred5( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sle i8 %t0, %y + ret i1 %r +} + +define i1 @n14_wrong_pred6(i8 %x, i8 %y) { +; CHECK-LABEL: @n14_wrong_pred6( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sgt i8 %t0, %y + ret i1 %r +} + +define i1 @n15_wrong_pred7(i8 %x, i8 %y) { +; CHECK-LABEL: @n15_wrong_pred7( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sge i8 %t0, %y + ret i1 %r +} diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll new file mode 100644 index 00000000000..f48bd6c9abc --- /dev/null +++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll @@ -0,0 +1,189 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py +; RUN: opt %s -instcombine -S | FileCheck %s + +; Should fold +; (%y ^ -1) u>= %x +; to +; @llvm.uadd.with.overflow(%x, %y) + extractvalue + not +; +; All tests here have extra uses, to ensure that the pattern isn't perturbed. + +declare void @use8(i8) +declare void @use2x8(<2 x i8>) + +define i1 @t0_basic(i8 %x, i8 %y) { +; CHECK-LABEL: @t0_basic( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp uge i8 %t0, %x + ret i1 %r +} + +define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t1_vec( +; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = xor <2 x i8> %y, <i8 -1, i8 -1> + call void @use2x8(<2 x i8> %t0) + %r = icmp uge <2 x i8> %t0, %x + ret <2 x i1> %r +} + +; Commutativity + +declare i8 @gen8() + +define i1 @t2_commutative(i8 %y) { +; CHECK-LABEL: @t2_commutative( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[T0]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %x = call i8 @gen8() + %r = icmp ule i8 %x, %t0 ; swapped + ret i1 %r +} + +; Extra-use tests + +define i1 @t3_no_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t3_no_extrause( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + %r = icmp uge i8 %t0, %x + ret i1 %r +} + +; Negative tests + +define i1 @n4_wrong_pred0(i8 %x, i8 %y) { +; CHECK-LABEL: @n4_wrong_pred0( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ule i8 %t0, %x + ret i1 %r +} + +define i1 @n5_wrong_pred1(i8 %x, i8 %y) { +; CHECK-LABEL: @n5_wrong_pred1( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ugt i8 %t0, %x + ret i1 %r +} + +define i1 @n6_wrong_pred2(i8 %x, i8 %y) { +; CHECK-LABEL: @n6_wrong_pred2( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp eq i8 %t0, %x + ret i1 %r +} + +define i1 @n7_wrong_pred3(i8 %x, i8 %y) { +; CHECK-LABEL: @n7_wrong_pred3( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ne i8 %t0, %x + ret i1 %r +} + +define i1 @n8_wrong_pred4(i8 %x, i8 %y) { +; CHECK-LABEL: @n8_wrong_pred4( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp slt i8 
%t0, %x + ret i1 %r +} + +define i1 @n9_wrong_pred5(i8 %x, i8 %y) { +; CHECK-LABEL: @n9_wrong_pred5( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sle i8 %t0, %x + ret i1 %r +} + +define i1 @n10_wrong_pred6(i8 %x, i8 %y) { +; CHECK-LABEL: @n10_wrong_pred6( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sgt i8 %t0, %x + ret i1 %r +} + +define i1 @n11_wrong_pred7(i8 %x, i8 %y) { +; CHECK-LABEL: @n11_wrong_pred7( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sge i8 %t0, %x + ret i1 %r +} + +define <2 x i1> @n12_vec_nonsplat(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @n12_vec_nonsplat( +; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -2> +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = xor <2 x i8> %y, <i8 -1, i8 -2> ; must be -1. + call void @use2x8(<2 x i8> %t0) + %r = icmp uge <2 x i8> %t0, %x + ret <2 x i1> %r +} diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll index 30e7d541615..29c195f3a6c 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt %s -instcombine -S | FileCheck %s -; Fold +; Should fold ; (%x + %y) u>= %x ; or ; (%x + %y) u>= %y ; to -; @llvm.add.with.overflow(%x, %y) + extractvalue + not +; @llvm.uadd.with.overflow(%x, %y) + extractvalue + not define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( @@ -86,8 +86,8 @@ define i1 @t5_commutative(i8 %x) { declare void @use8(i8) -define i1 @t6_extrause0(i8 %x, i8 %y) { -; CHECK-LABEL: @t6_extrause0( +define i1 @t6_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t6_extrause( ; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[Y]] diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll new file mode 100644 index 00000000000..a614142ef45 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll @@ -0,0 +1,232 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt %s -instcombine -S | FileCheck %s + +; Should fold +; (%x + %y) u< %x +; or +; (%x + %y) u< %y +; to +; @llvm.uadd.with.overflow(%x, %y) + extractvalue +; +; All tests here have extra uses, to ensure that the pattern isn't perturbed. 
+ +declare void @use8(i8) +declare void @use2x8(<2 x i8>) + +define i1 @t0_basic(i8 %x, i8 %y) { +; CHECK-LABEL: @t0_basic( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ult i8 %t0, %y + ret i1 %r +} + +define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t1_vec( +; CHECK-NEXT: [[T0:%.*]] = add <2 x i8> [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T0]], [[Y]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = add <2 x i8> %x, %y + call void @use2x8(<2 x i8> %t0) + %r = icmp ult <2 x i8> %t0, %y + ret <2 x i1> %r +} + +; Commutativity + +define i1 @t2_symmetry(i8 %x, i8 %y) { +; CHECK-LABEL: @t2_symmetry( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ult i8 %t0, %x ; can check against either of `add` arguments + ret i1 %r +} + +declare i8 @gen8() + +define i1 @t3_commutative(i8 %x) { +; CHECK-LABEL: @t3_commutative( +; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[T0:%.*]] = add i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %y = call i8 @gen8() + %t0 = add i8 %y, %x ; swapped + call void @use8(i8 %t0) + %r = icmp ult i8 %t0, %y + ret i1 %r +} + +define i1 @t4_commutative(i8 %x, i8 %y) { +; CHECK-LABEL: @t4_commutative( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ugt i8 %y, %t0 ; swapped + ret i1 %r +} + +define i1 @t5_commutative(i8 %x) { +; CHECK-LABEL: @t5_commutative( +; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[T0:%.*]] = add i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y]], [[T0]] +; CHECK-NEXT: ret i1 [[R]] +; + %y = call i8 @gen8() + %t0 = add i8 %y, %x ; swapped + call void @use8(i8 %t0) + %r = icmp ugt i8 %y, %t0 ; swapped + ret i1 %r +} + +; Extra-use tests + +define i1 @t6_no_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t6_no_extrause( +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + %r = icmp ult i8 %t0, %y + ret i1 %r +} + +; Negative tests + +define i1 @n7_different_y(i8 %x, i8 %y0, i8 %y1) { +; CHECK-LABEL: @n7_different_y( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y0:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y1:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y0 + call void @use8(i8 %t0) + %r = icmp ult i8 %t0, %y1 + ret i1 %r +} + +define i1 @n8_wrong_pred0(i8 %x, i8 %y) { +; CHECK-LABEL: @n8_wrong_pred0( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ule i8 %t0, %y + ret i1 %r +} + +define i1 @n9_wrong_pred1(i8 %x, i8 %y) { +; CHECK-LABEL: @n9_wrong_pred1( +; 
CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ugt i8 %t0, %y + ret i1 %r +} + +define i1 @n10_wrong_pred2(i8 %x, i8 %y) { +; CHECK-LABEL: @n10_wrong_pred2( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], 0 +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp eq i8 %t0, %y + ret i1 %r +} + +define i1 @n11_wrong_pred3(i8 %x, i8 %y) { +; CHECK-LABEL: @n11_wrong_pred3( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X]], 0 +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp ne i8 %t0, %y + ret i1 %r +} + +define i1 @n12_wrong_pred4(i8 %x, i8 %y) { +; CHECK-LABEL: @n12_wrong_pred4( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp slt i8 %t0, %y + ret i1 %r +} + +define i1 @n13_wrong_pred5(i8 %x, i8 %y) { +; CHECK-LABEL: @n13_wrong_pred5( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sle i8 %t0, %y + ret i1 %r +} + +define i1 @n14_wrong_pred6(i8 %x, i8 %y) { +; CHECK-LABEL: @n14_wrong_pred6( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sgt i8 %t0, %y + ret i1 %r +} + +define i1 @n15_wrong_pred7(i8 %x, i8 %y) { +; CHECK-LABEL: @n15_wrong_pred7( +; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[Y]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = add i8 %x, %y + call void @use8(i8 %t0) + %r = icmp sge i8 %t0, %y + ret i1 %r +} diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll new file mode 100644 index 00000000000..e8fa5c84d14 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll @@ -0,0 +1,189 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt %s -instcombine -S | FileCheck %s + +; Should fold +; (%y ^ -1) u< %x +; to +; @llvm.uadd.with.overflow(%x, %y) + extractvalue + not +; +; All tests here have extra uses, to ensure that the pattern isn't perturbed. 
+ +declare void @use8(i8) +declare void @use2x8(<2 x i8>) + +define i1 @t0_basic(i8 %x, i8 %y) { +; CHECK-LABEL: @t0_basic( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ult i8 %t0, %x + ret i1 %r +} + +define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @t1_vec( +; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = xor <2 x i8> %y, <i8 -1, i8 -1> + call void @use2x8(<2 x i8> %t0) + %r = icmp ult <2 x i8> %t0, %x + ret <2 x i1> %r +} + +; Commutativity + +declare i8 @gen8() + +define i1 @t2_commutative(i8 %y) { +; CHECK-LABEL: @t2_commutative( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[X:%.*]] = call i8 @gen8() +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[T0]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %x = call i8 @gen8() + %r = icmp ule i8 %x, %t0 ; swapped + ret i1 %r +} + +; Extra-use tests + +define i1 @t3_no_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t3_no_extrause( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + %r = icmp ult i8 %t0, %x + ret i1 %r +} + +; Negative tests + +define i1 @n4_wrong_pred0(i8 %x, i8 %y) { +; CHECK-LABEL: @n4_wrong_pred0( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ule i8 %t0, %x + ret i1 %r +} + +define i1 @n5_wrong_pred1(i8 %x, i8 %y) { +; CHECK-LABEL: @n5_wrong_pred1( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ugt i8 %t0, %x + ret i1 %r +} + +define i1 @n6_wrong_pred2(i8 %x, i8 %y) { +; CHECK-LABEL: @n6_wrong_pred2( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp eq i8 %t0, %x + ret i1 %r +} + +define i1 @n7_wrong_pred3(i8 %x, i8 %y) { +; CHECK-LABEL: @n7_wrong_pred3( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp ne i8 %t0, %x + ret i1 %r +} + +define i1 @n8_wrong_pred4(i8 %x, i8 %y) { +; CHECK-LABEL: @n8_wrong_pred4( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp slt i8 %t0, %x + ret i1 %r +} + +define i1 @n9_wrong_pred5(i8 %x, i8 %y) { +; CHECK-LABEL: @n9_wrong_pred5( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]] +; 
CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sle i8 %t0, %x + ret i1 %r +} + +define i1 @n10_wrong_pred6(i8 %x, i8 %y) { +; CHECK-LABEL: @n10_wrong_pred6( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sgt i8 %t0, %x + ret i1 %r +} + +define i1 @n11_wrong_pred7(i8 %x, i8 %y) { +; CHECK-LABEL: @n11_wrong_pred7( +; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 +; CHECK-NEXT: call void @use8(i8 [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: ret i1 [[R]] +; + %t0 = xor i8 %y, -1 + call void @use8(i8 %t0) + %r = icmp sge i8 %t0, %x + ret i1 %r +} + +define <2 x i1> @n12_vec_nonsplat(<2 x i8> %x, <2 x i8> %y) { +; CHECK-LABEL: @n12_vec_nonsplat( +; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -2> +; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) +; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: ret <2 x i1> [[R]] +; + %t0 = xor <2 x i8> %y, <i8 -1, i8 -2> ; must be -1. + call void @use2x8(<2 x i8> %t0) + %r = icmp ult <2 x i8> %t0, %x + ret <2 x i1> %r +} diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll index cbc7ee24890..d1ab9ae41d6 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll @@ -1,12 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt %s -instcombine -S | FileCheck %s -; Fold +; Should fold ; (%x + %y) u< %x ; or ; (%x + %y) u< %y ; to -; @llvm.add.with.overflow(%x, %y) + extractvalue +; @llvm.uadd.with.overflow(%x, %y) + extractvalue define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( @@ -86,8 +86,8 @@ define i1 @t5_commutative(i8 %x) { declare void @use8(i8) -define i1 @t6_extrause0(i8 %x, i8 %y) { -; CHECK-LABEL: @t6_extrause0( +define i1 @t6_extrause(i8 %x, i8 %y) { +; CHECK-LABEL: @t6_extrause( ; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]] |
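
As above, a sketch (not part of the patch, hypothetical names) of the lack-of-overflow counterpart the other files describe: (%x + %y) u>= %x is the negated check, hence the extra "not" (xor with true) applied to the extracted overflow bit.

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)

; Lack-of-overflow form: @llvm.uadd.with.overflow + extractvalue + not.
define i1 @no_overflow_check_canonical(i8 %x, i8 %y) {
  %agg = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
  %ov = extractvalue { i8, i1 } %agg, 1
  %r = xor i1 %ov, true   ; the "+ not" from the file comments
  ret i1 %r
}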