author    Roman Lebedev <lebedev.ri@gmail.com>  2019-07-22 22:08:35 +0000
committer Roman Lebedev <lebedev.ri@gmail.com>  2019-07-22 22:08:35 +0000
commit    6b248fca333831baa0bc3a0de169befd2a832628 (patch)
tree      a38711c5c1136dc99918bdd668d46c2fd2230cd8
parent    d5a52aeab6a58db33fe269b50a5ae0fe1ee149d0 (diff)
[NFC][PhaseOrdering] Add tests showcasing the problems of unsigned multiply overflow check
While we can form the @llvm.umul.with.overflow intrinsic easily, we are still left with the check that was guarding against division by zero. And in the second case we won't even flatten the CFG.

llvm-svn: 366747
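For reference, here is a minimal sketch of the form these tests would ideally reach once both issues are fixed: the whole divide-and-branch idiom collapses to a single call to the @llvm.umul.with.overflow intrinsic with no remaining control flow. This target shape is an assumption for illustration (the function name @will_not_overflow_ideal is hypothetical), not output produced by this commit:

; Hypothetical fully-folded form (assumption, not generated by this patch).
define i1 @will_not_overflow_ideal(i64 %arg, i64 %arg1) {
bb:
  ; The intrinsic returns { i64 product, i1 overflowed }; only the i1 is needed.
  %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %arg, i64 %arg1)
  %ov = extractvalue { i64, i1 } %res, 1
  ret i1 %ov
}

declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)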
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll  85
1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
new file mode 100644
index 00000000000..b3e5f9b2e4a
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,SIMPLIFYCFG
+; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINEONLY
+; RUN: opt -instcombine -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,BOTH
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+; #include <limits>
+; #include <cstdint>
+;
+; using size_type = std::size_t;
+; bool will_not_overflow(size_type size, size_type nmemb) {
+; return (size != 0 && (nmemb > std::numeric_limits<size_type>::max() / size));
+; }
+
+define i1 @will_not_overflow(i64 %arg, i64 %arg1) {
+; ALL-LABEL: @will_not_overflow(
+; ALL-NEXT: bb:
+; ALL-NEXT: [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; ALL-NEXT: br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; ALL: bb2:
+; ALL-NEXT: [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; ALL-NEXT: [[T4:%.*]] = icmp ult i64 [[T3]], [[ARG1:%.*]]
+; ALL-NEXT: br label [[BB5]]
+; ALL: bb5:
+; ALL-NEXT: [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; ALL-NEXT: ret i1 [[T6]]
+;
+bb:
+ %t0 = icmp eq i64 %arg, 0
+ br i1 %t0, label %bb5, label %bb2
+
+bb2: ; preds = %bb
+ %t3 = udiv i64 -1, %arg
+ %t4 = icmp ult i64 %t3, %arg1
+ br label %bb5
+
+bb5: ; preds = %bb2, %bb
+ %t6 = phi i1 [ false, %bb ], [ %t4, %bb2 ]
+ ret i1 %t6
+}
+
+; Same as @will_not_overflow, but with the return value inverted.
+
+define i1 @will_overflow(i64 %arg, i64 %arg1) {
+; SIMPLIFYCFG-LABEL: @will_overflow(
+; SIMPLIFYCFG-NEXT: bb:
+; SIMPLIFYCFG-NEXT: [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; SIMPLIFYCFG-NEXT: br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; SIMPLIFYCFG: bb2:
+; SIMPLIFYCFG-NEXT: [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; SIMPLIFYCFG-NEXT: [[T4:%.*]] = icmp ult i64 [[T3]], [[ARG1:%.*]]
+; SIMPLIFYCFG-NEXT: br label [[BB5]]
+; SIMPLIFYCFG: bb5:
+; SIMPLIFYCFG-NEXT: [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; SIMPLIFYCFG-NEXT: [[T7:%.*]] = xor i1 [[T6]], true
+; SIMPLIFYCFG-NEXT: ret i1 [[T7]]
+;
+; INSTCOMBINE-LABEL: @will_overflow(
+; INSTCOMBINE-NEXT: bb:
+; INSTCOMBINE-NEXT: [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; INSTCOMBINE-NEXT: br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; INSTCOMBINE: bb2:
+; INSTCOMBINE-NEXT: [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; INSTCOMBINE-NEXT: [[T4:%.*]] = icmp uge i64 [[T3]], [[ARG1:%.*]]
+; INSTCOMBINE-NEXT: br label [[BB5]]
+; INSTCOMBINE: bb5:
+; INSTCOMBINE-NEXT: [[T6:%.*]] = phi i1 [ true, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; INSTCOMBINE-NEXT: ret i1 [[T6]]
+;
+bb:
+ %t0 = icmp eq i64 %arg, 0
+ br i1 %t0, label %bb5, label %bb2
+
+bb2: ; preds = %bb
+ %t3 = udiv i64 -1, %arg
+ %t4 = icmp ult i64 %t3, %arg1
+ br label %bb5
+
+bb5: ; preds = %bb2, %bb
+ %t6 = phi i1 [ false, %bb ], [ %t4, %bb2 ]
+ %t7 = xor i1 %t6, true
+ ret i1 %t7
+}