author     Sanjay Patel <spatel@rotateright.com>    2017-08-21 13:55:49 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2017-08-21 13:55:49 +0000
commit     7756edfa9368feaf400b49dc58247cf9f2f4d17c (patch)
tree       2c033ef1a76030a035b7a67ab33f8bc83edbdda2 /llvm/test/Transforms
parent     9495f33e457c1db54a412dbd3d839965acdd1268 (diff)
[LibCallSimplifier] try harder to fold memcmp with constant arguments
Try to fold:
memcmp(X, C, ConstantLength) == 0 --> load X == *C

Without this change, we're unnecessarily checking the alignment of the constant data, so we miss the transform in the first 2 tests in the patch.

I noted this shortcoming of LibCallSimplifier in one of the recent CGP memcmp expansion patches. This doesn't help the example in:
https://bugs.llvm.org/show_bug.cgi?id=34032#c13
...directly, but it's worth short-circuiting more of these simple cases since we're already trying to do that.

The benefit of transforming to load+cmp is that existing IR analysis/transforms may further simplify that code. For example, if the load of the variable is common to multiple memcmp calls, CSE can remove the duplicate instructions.

Differential Revision: https://reviews.llvm.org/D36922

llvm-svn: 311333
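For reference, here is a minimal before/after sketch of the fold on the first test's data (the function names @before and @after are hypothetical, not from the patch). The "before" shape is what InstCombine previously left alone because @charbuf is only align 1; the "after" shape is what it now produces under a little-endian data layout, and can be reproduced with opt -instcombine -S -data-layout=e-n32:

declare i32 @memcmp(i8*, i8*, i64)

@charbuf = private unnamed_addr constant [4 x i8] [i8 0, i8 0, i8 0, i8 1], align 1

; Before: the call survived because we checked the alignment of the constant data.
define i1 @before(i8* align 4 %x) {
  %call = call i32 @memcmp(i8* %x, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @charbuf, i64 0, i64 0), i64 4)
  %cmp = icmp eq i32 %call, 0
  ret i1 %cmp
}

; After (little-endian): one load plus one compare. The constant side folds at
; compile time: the bytes [0, 0, 0, 1] read LSB-first are 0x01000000 = 16777216.
define i1 @after(i8* align 4 %x) {
  %p = bitcast i8* %x to i32*
  %v = load i32, i32* %p, align 4
  %cmp = icmp eq i32 %v, 16777216
  ret i1 %cmp
}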
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--  llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll | 65
1 file changed, 65 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll b/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
new file mode 100644
index 00000000000..84ad96161f4
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
@@ -0,0 +1,65 @@
+; RUN: opt < %s -instcombine -S -data-layout=e-n32 | FileCheck %s --check-prefix=ALL --check-prefix=LE
+; RUN: opt < %s -instcombine -S -data-layout=E-n32 | FileCheck %s --check-prefix=ALL --check-prefix=BE
+
+declare i32 @memcmp(i8*, i8*, i64)
+
+; The alignment of this constant does not matter. We constant fold the load.
+
+@charbuf = private unnamed_addr constant [4 x i8] [i8 0, i8 0, i8 0, i8 1], align 1
+
+define i1 @memcmp_4bytes_unaligned_constant_i8(i8* align 4 %x) {
+; LE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
+; LE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
+; LE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; LE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[LHSV]], 16777216
+; LE-NEXT: ret i1 [[TMP2]]
+;
+; BE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
+; BE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
+; BE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; BE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[LHSV]], 1
+; BE-NEXT: ret i1 [[TMP2]]
+;
+ %call = tail call i32 @memcmp(i8* %x, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @charbuf, i64 0, i64 0), i64 4)
+ %cmpeq0 = icmp eq i32 %call, 0
+ ret i1 %cmpeq0
+}
+
+; We still don't care about alignment of the constant. We are not limited to constant folding only i8 arrays.
+; It doesn't matter if the constant operand is the first operand to the memcmp.
+
+@intbuf_unaligned = private unnamed_addr constant [4 x i16] [i16 1, i16 2, i16 3, i16 4], align 1
+
+define i1 @memcmp_4bytes_unaligned_constant_i16(i8* align 4 %x) {
+; LE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
+; LE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
+; LE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; LE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[RHSV]], 131073
+; LE-NEXT: ret i1 [[TMP2]]
+;
+; BE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
+; BE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
+; BE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
+; BE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[RHSV]], 65538
+; BE-NEXT: ret i1 [[TMP2]]
+;
+ %call = tail call i32 @memcmp(i8* bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @intbuf_unaligned, i64 0, i64 0) to i8*), i8* %x, i64 4)
+ %cmpeq0 = icmp eq i32 %call, 0
+ ret i1 %cmpeq0
+}
+
+; TODO: Any memcmp where all arguments are constants should be constant folded. Currently, we only handle i8 array constants.
+
+@intbuf = private unnamed_addr constant [2 x i32] [i32 0, i32 1], align 4
+
+define i1 @memcmp_3bytes_aligned_constant_i32(i8* align 4 %x) {
+; ALL-LABEL: @memcmp_3bytes_aligned_constant_i32(
+; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast ([2 x i32]* @intbuf to i8*), i64 3)
+; ALL-NEXT: [[CMPEQ0:%.*]] = icmp eq i32 [[CALL]], 0
+; ALL-NEXT: ret i1 [[CMPEQ0]]
+;
+ %call = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 0) to i8*), i64 3)
+ %cmpeq0 = icmp eq i32 %call, 0
+ ret i1 %cmpeq0
+}
+
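To illustrate the CSE benefit mentioned in the commit message, here is a hypothetical follow-up sketch (not part of this patch): once two memcmp calls on the same pointer are folded, both become loads of the same i32, and a pass such as EarlyCSE can remove the duplicate load:

; Hypothetical IR after folding two memcmp(%x, <constant>, 4) == 0 checks.
define i1 @either_constant(i8* align 4 %x) {
  %p = bitcast i8* %x to i32*
  %v1 = load i32, i32* %p, align 4   ; from the first folded memcmp
  %cmp1 = icmp eq i32 %v1, 16777216
  %v2 = load i32, i32* %p, align 4   ; redundant; EarlyCSE replaces %v2 with %v1
  %cmp2 = icmp eq i32 %v2, 33554432  ; 0x02000000, i.e. bytes [0,0,0,2] little-endian
  %or = or i1 %cmp1, %cmp2
  ret i1 %or
}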