Diffstat (limited to 'llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll')
 -rw-r--r--  llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll | 995 +
 1 file changed, 995 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll b/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll
new file mode 100644
index 00000000000..0002f21d0e5
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll
@@ -0,0 +1,995 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -O2 -S -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X86
+; RUN: opt < %s -O2 -S -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; This tests interaction between the MergeICmp and ExpandMemCmp IR transform
+; passes.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @bcmp(i8*, i8*, i64)
+
+define i32 @length0(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length0(
+; ALL-NEXT:    ret i32 0
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  ret i32 %m
+}
+
+define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length0_eq(
+; ALL-NEXT:    ret i1 true
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length0_lt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length0_lt(
+; ALL-NEXT:    ret i1 false
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  %c = icmp slt i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length2(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length2(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
+; ALL-NEXT:    [[TMP7:%.*]] = zext i16 [[TMP5]] to i32
+; ALL-NEXT:    [[TMP8:%.*]] = zext i16 [[TMP6]] to i32
+; ALL-NEXT:    [[TMP9:%.*]] = sub nsw i32 [[TMP7]], [[TMP8]]
+; ALL-NEXT:    ret i32 [[TMP9]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length2_eq(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    ret i1 [[TMP5]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length2_lt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length2_lt(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
+; ALL-NEXT:    [[C:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  %c = icmp slt i32 %m, 0
+  ret i1 %c
+}
+
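+; As in @length2_lt above, only the sign of the three-way result is used, so
+; the expansion can compare the byte-swapped values directly with an unsigned
+; predicate instead of materializing -1/0/+1.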
+define i1 @length2_gt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length2_gt(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]])
+; ALL-NEXT:    [[C:%.*]] = icmp ugt i16 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  %c = icmp sgt i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length2_eq_const(i8* %X) nounwind {
+; ALL-LABEL: @length2_eq_const(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP3:%.*]] = icmp ne i16 [[TMP2]], 12849
+; ALL-NEXT:    ret i1 [[TMP3]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length2_eq_nobuiltin_attr(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 2) #5
+; ALL-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length3(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length3(
+; ALL-NEXT:  loadbb:
+; ALL-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = load i16, i16* [[TMP0]], align 2
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = icmp eq i16 [[TMP2]], [[TMP3]]
+; ALL-NEXT:    br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; ALL:       res_block:
+; ALL-NEXT:    [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; ALL-NEXT:    br label [[ENDBLOCK:%.*]]
+; ALL:       loadbb1:
+; ALL-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 2
+; ALL-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 2
+; ALL-NEXT:    [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
+; ALL-NEXT:    [[TMP13:%.*]] = zext i8 [[TMP11]] to i32
+; ALL-NEXT:    [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
+; ALL-NEXT:    [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]]
+; ALL-NEXT:    br label [[ENDBLOCK]]
+; ALL:       endblock:
+; ALL-NEXT:    [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; ALL-NEXT:    ret i32 [[PHI_RES]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+  ret i32 %m
+}
+
+define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length3_eq(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2
+; ALL-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2
+; ALL-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
+; ALL-NEXT:    [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
+; ALL-NEXT:    [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]]
+; ALL-NEXT:    [[TMP11:%.*]] = zext i8 [[TMP10]] to i16
+; ALL-NEXT:    [[TMP12:%.*]] = or i16 [[TMP5]], [[TMP11]]
+; ALL-NEXT:    [[TMP13:%.*]] = icmp ne i16 [[TMP12]], 0
+; ALL-NEXT:    ret i1 [[TMP13]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
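+; Equality-only uses expand branch-free: the legs are XORed, the differences
+; ORed together, and a single compare against zero decides the result (see
+; @length3_eq above); no byte swapping is needed.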
+define i32 @length4(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length4(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    [[TMP8:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
+; ALL-NEXT:    [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
+; ALL-NEXT:    [[TMP11:%.*]] = sub nsw i32 [[TMP9]], [[TMP10]]
+; ALL-NEXT:    ret i32 [[TMP11]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+  ret i32 %m
+}
+
+define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length4_eq(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    ret i1 [[TMP5]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length4_lt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length4_lt(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    ret i1 [[TMP7]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+  %c = icmp slt i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length4_gt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length4_gt(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    ret i1 [[TMP7]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+  %c = icmp sgt i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length4_eq_const(i8* %X) nounwind {
+; ALL-LABEL: @length4_eq_const(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 875770417
+; ALL-NEXT:    ret i1 [[TMP3]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
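+; Odd sizes are split into a wide leading leg plus a narrower trailing leg,
+; compared in a loadbb/res_block/endblock chain that falls through to the
+; next leg on equality.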
+define i32 @length5(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length5(
+; ALL-NEXT:  loadbb:
+; ALL-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]]
+; ALL-NEXT:    br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; ALL:       res_block:
+; ALL-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; ALL-NEXT:    br label [[ENDBLOCK:%.*]]
+; ALL:       loadbb1:
+; ALL-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; ALL-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 4
+; ALL-NEXT:    [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
+; ALL-NEXT:    [[TMP13:%.*]] = zext i8 [[TMP11]] to i32
+; ALL-NEXT:    [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
+; ALL-NEXT:    [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]]
+; ALL-NEXT:    br label [[ENDBLOCK]]
+; ALL:       endblock:
+; ALL-NEXT:    [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; ALL-NEXT:    ret i32 [[PHI_RES]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+  ret i32 %m
+}
+
+define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length5_eq(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; ALL-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
+; ALL-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
+; ALL-NEXT:    [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
+; ALL-NEXT:    [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]]
+; ALL-NEXT:    [[TMP11:%.*]] = zext i8 [[TMP10]] to i32
+; ALL-NEXT:    [[TMP12:%.*]] = or i32 [[TMP5]], [[TMP11]]
+; ALL-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
+; ALL-NEXT:    ret i1 [[TMP13]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length5_lt(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length5_lt(
+; ALL-NEXT:  loadbb:
+; ALL-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]]
+; ALL-NEXT:    br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; ALL:       res_block:
+; ALL-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; ALL-NEXT:    [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; ALL-NEXT:    [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]]
+; ALL-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; ALL-NEXT:    br label [[ENDBLOCK:%.*]]
+; ALL:       loadbb1:
+; ALL-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; ALL-NEXT:    [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 4
+; ALL-NEXT:    [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1
+; ALL-NEXT:    [[TMP13:%.*]] = zext i8 [[TMP11]] to i32
+; ALL-NEXT:    [[TMP14:%.*]] = zext i8 [[TMP12]] to i32
+; ALL-NEXT:    [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]]
+; ALL-NEXT:    br label [[ENDBLOCK]]
+; ALL:       endblock:
+; ALL-NEXT:    [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; ALL-NEXT:    [[C:%.*]] = icmp slt i32 [[PHI_RES]], 0
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+  %c = icmp slt i32 %m, 0
+  ret i1 %c
+}
+
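+; 7 bytes are covered by two overlapping i32 loads (offsets 0 and 3), so the
+; equality test needs no scalar tail.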
+define i1 @length7_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length7_eq(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; ALL-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; ALL-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; ALL-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 3
+; ALL-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i32*
+; ALL-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
+; ALL-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
+; ALL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP6]], align 4
+; ALL-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 4
+; ALL-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    [[TMP12:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]]
+; ALL-NEXT:    [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
+; ALL-NEXT:    ret i1 [[TMP13]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 7) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length8(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length8(
+; X86-NEXT:  loadbb:
+; X86-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X86-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X86-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
+; X86-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; X86-NEXT:    [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]])
+; X86-NEXT:    [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]])
+; X86-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]]
+; X86-NEXT:    br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X86:       res_block:
+; X86-NEXT:    [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ]
+; X86-NEXT:    [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ]
+; X86-NEXT:    [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]]
+; X86-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X86-NEXT:    br label [[ENDBLOCK:%.*]]
+; X86:       loadbb1:
+; X86-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; X86-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
+; X86-NEXT:    [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 4
+; X86-NEXT:    [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
+; X86-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP10]], align 4
+; X86-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 4
+; X86-NEXT:    [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X86-NEXT:    [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X86-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP13]], [[TMP14]]
+; X86-NEXT:    br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X86:       endblock:
+; X86-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X86-NEXT:    ret i32 [[PHI_RES]]
+;
+; X64-LABEL: @length8(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT:    [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]])
+; X64-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP5]], [[TMP6]]
+; X64-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]]
+; X64-NEXT:    [[TMP9:%.*]] = zext i1 [[TMP7]] to i32
+; X64-NEXT:    [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
+; X64-NEXT:    [[TMP11:%.*]] = sub nsw i32 [[TMP9]], [[TMP10]]
+; X64-NEXT:    ret i32 [[TMP11]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+  ret i32 %m
+}
+
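+; The targets diverge at 8 bytes, as @length8 above shows: i686 needs a
+; two-leg i32 chain for the three-way compare, while x86_64 handles it with a
+; single i64 load pair.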
+define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length8_eq(
+; X86-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X86-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X86-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
+; X86-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4
+; X86-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; X86-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i32*
+; X86-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4
+; X86-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
+; X86-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP6]], align 4
+; X86-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 4
+; X86-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP3]], [[TMP4]]
+; X86-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP9]], [[TMP10]]
+; X86-NEXT:    [[C:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length8_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    ret i1 [[TMP5]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length8_eq_const(i8* %X) nounwind {
+; X86-LABEL: @length8_eq_const(
+; X86-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X86-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
+; X86-NEXT:    [[TMP3:%.*]] = getelementptr i8, i8* [[X]], i64 4
+; X86-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32*
+; X86-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
+; X86-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP2]], 858927408
+; X86-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 926299444
+; X86-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
+; X86-NEXT:    ret i1 [[TMP8]]
+;
+; X64-LABEL: @length8_eq_const(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = icmp ne i64 [[TMP2]], 3978425819141910832
+; X64-NEXT:    ret i1 [[TMP3]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length9_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length9_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9) [[X:%.*]], i8* dereferenceable(9) [[Y:%.*]], i64 9) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length9_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8
+; X64-NEXT:    [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1
+; X64-NEXT:    [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1
+; X64-NEXT:    [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]]
+; X64-NEXT:    [[TMP11:%.*]] = zext i8 [[TMP10]] to i64
+; X64-NEXT:    [[TMP12:%.*]] = or i64 [[TMP5]], [[TMP11]]
+; X64-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[TMP12]], 0
+; X64-NEXT:    ret i1 [[TMP13]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
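+; @length9_eq above shows that from 9 bytes up i686 no longer expands the
+; call; the libcall that remains is annotated with a dereferenceable
+; attribute inferred from the constant size.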
+define i1 @length10_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length10_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(10) [[X:%.*]], i8* dereferenceable(10) [[Y:%.*]], i64 10) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length10_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
+; X64-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
+; X64-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8
+; X64-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
+; X64-NEXT:    [[TMP10:%.*]] = load i16, i16* [[TMP7]], align 2
+; X64-NEXT:    [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 2
+; X64-NEXT:    [[TMP12:%.*]] = xor i16 [[TMP10]], [[TMP11]]
+; X64-NEXT:    [[TMP13:%.*]] = zext i16 [[TMP12]] to i64
+; X64-NEXT:    [[TMP14:%.*]] = or i64 [[TMP5]], [[TMP13]]
+; X64-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[TMP14]], 0
+; X64-NEXT:    ret i1 [[TMP15]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 10) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length11_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length11_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(11) [[X:%.*]], i8* dereferenceable(11) [[Y:%.*]], i64 11) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length11_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 3
+; X64-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64*
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3
+; X64-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64*
+; X64-NEXT:    [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8
+; X64-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]]
+; X64-NEXT:    [[C:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X64-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 11) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length12_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(12) [[X:%.*]], i8* dereferenceable(12) [[Y:%.*]], i64 12) #3
+; X86-NEXT:    [[C:%.*]] = icmp ne i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length12_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8
+; X64-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X64-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8
+; X64-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X64-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]], align 4
+; X64-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 4
+; X64-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X64-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; X64-NEXT:    [[TMP14:%.*]] = or i64 [[TMP5]], [[TMP13]]
+; X64-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
+; X64-NEXT:    ret i1 [[TMP15]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length12(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length12(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(12) [[X:%.*]], i8* dereferenceable(12) [[Y:%.*]], i64 12) #3
+; X86-NEXT:    ret i32 [[M]]
+;
+; X64-LABEL: @length12(
+; X64-NEXT:  loadbb:
+; X64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP0]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT:    [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[TMP2]], [[TMP3]]
+; X64-NEXT:    br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64:       res_block:
+; X64-NEXT:    [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ]
+; X64-NEXT:    [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ]
+; X64-NEXT:    [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64:       loadbb1:
+; X64-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 8
+; X64-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
+; X64-NEXT:    [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
+; X64-NEXT:    [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
+; X64-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP10]], align 4
+; X64-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 4
+; X64-NEXT:    [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]])
+; X64-NEXT:    [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]])
+; X64-NEXT:    [[TMP17]] = zext i32 [[TMP15]] to i64
+; X64-NEXT:    [[TMP18]] = zext i32 [[TMP16]] to i64
+; X64-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[TMP13]], [[TMP14]]
+; X64-NEXT:    br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64:       endblock:
+; X64-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT:    ret i32 [[PHI_RES]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+  ret i32 %m
+}
+
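+; Lengths 13-15 expand on x86_64 as two overlapping i64 loads (offsets 0 and
+; len-8), the same overlap trick used for @length7_eq.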
+define i1 @length13_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length13_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(13) [[X:%.*]], i8* dereferenceable(13) [[Y:%.*]], i64 13) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length13_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 5
+; X64-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64*
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 5
+; X64-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64*
+; X64-NEXT:    [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8
+; X64-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]]
+; X64-NEXT:    [[C:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X64-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 13) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length14_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length14_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(14) [[X:%.*]], i8* dereferenceable(14) [[Y:%.*]], i64 14) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length14_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 6
+; X64-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64*
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 6
+; X64-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64*
+; X64-NEXT:    [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8
+; X64-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]]
+; X64-NEXT:    [[C:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X64-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 14) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length15_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length15_eq(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(15) [[X:%.*]], i8* dereferenceable(15) [[Y:%.*]], i64 15) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length15_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 7
+; X64-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64*
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 7
+; X64-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64*
+; X64-NEXT:    [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8
+; X64-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]]
+; X64-NEXT:    [[C:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X64-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 15) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
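+; At 16 bytes, x86_64 equality tests use a single i128 load pair, while the
+; three-way compare is split into two i64 legs; i686 keeps the libcall.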
+; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
+
+define i32 @length16(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: @length16(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) [[Y:%.*]], i64 16) #3
+; X86-NEXT:    ret i32 [[M]]
+;
+; X64-LABEL: @length16(
+; X64-NEXT:  loadbb:
+; X64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64*
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64*
+; X64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP0]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]])
+; X64-NEXT:    [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; X64-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[TMP2]], [[TMP3]]
+; X64-NEXT:    br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]]
+; X64:       res_block:
+; X64-NEXT:    [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ]
+; X64-NEXT:    [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ]
+; X64-NEXT:    [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]]
+; X64-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1
+; X64-NEXT:    br label [[ENDBLOCK:%.*]]
+; X64:       loadbb1:
+; X64-NEXT:    [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 8
+; X64-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i64*
+; X64-NEXT:    [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8
+; X64-NEXT:    [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i64*
+; X64-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP10]], align 8
+; X64-NEXT:    [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 8
+; X64-NEXT:    [[TMP15]] = call i64 @llvm.bswap.i64(i64 [[TMP13]])
+; X64-NEXT:    [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]])
+; X64-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[TMP13]], [[TMP14]]
+; X64-NEXT:    br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]]
+; X64:       endblock:
+; X64-NEXT:    [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ]
+; X64-NEXT:    ret i32 [[PHI_RES]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
+  ret i32 %m
+}
+
+define i1 @length16_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: @length16_eq(
+; X86-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) [[Y:%.*]], i64 16) #3
+; X86-NEXT:    [[CMP:%.*]] = icmp ne i32 [[CALL]], 0
+; X86-NEXT:    ret i1 [[CMP]]
+;
+; X64-LABEL: @length16_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
+; X64-NEXT:    [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]]
+; X64-NEXT:    ret i1 [[TMP5]]
+;
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+  %cmp = icmp ne i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length16_eq_const(i8* %X) nounwind {
+; X86-LABEL: @length16_eq_const(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 16) #3
+; X86-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length16_eq_const(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = icmp eq i128 [[TMP2]], 70720121592765328381466889075544961328
+; X64-NEXT:    ret i1 [[TMP3]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914
+
+define i32 @length24(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length24(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) [[Y:%.*]], i64 24) #3
+; ALL-NEXT:    ret i32 [[M]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
+  ret i32 %m
+}
+
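+; The 24-byte three-way compare stays a libcall on both targets (PR33914),
+; but the equality form still expands on x86_64 as an i128 leg plus an i64
+; leg.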
+define i1 @length24_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: @length24_eq(
+; X86-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) [[Y:%.*]], i64 24) #3
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X86-NEXT:    ret i1 [[CMP]]
+;
+; X64-LABEL: @length24_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
+; X64-NEXT:    [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = xor i128 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 16
+; X64-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64*
+; X64-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 16
+; X64-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64*
+; X64-NEXT:    [[TMP10:%.*]] = load i64, i64* [[TMP7]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 8
+; X64-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]]
+; X64-NEXT:    [[TMP13:%.*]] = zext i64 [[TMP12]] to i128
+; X64-NEXT:    [[TMP14:%.*]] = or i128 [[TMP5]], [[TMP13]]
+; X64-NEXT:    [[TMP15:%.*]] = icmp eq i128 [[TMP14]], 0
+; X64-NEXT:    ret i1 [[TMP15]]
+;
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind
+  %cmp = icmp eq i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length24_eq_const(i8* %X) nounwind {
+; X86-LABEL: @length24_eq_const(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 24) #3
+; X86-NEXT:    [[C:%.*]] = icmp ne i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length24_eq_const(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = xor i128 [[TMP2]], 70720121592765328381466889075544961328
+; X64-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* [[X]], i64 16
+; X64-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i64*
+; X64-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
+; X64-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 3689065127958034230
+; X64-NEXT:    [[TMP8:%.*]] = zext i64 [[TMP7]] to i128
+; X64-NEXT:    [[TMP9:%.*]] = or i128 [[TMP3]], [[TMP8]]
+; X64-NEXT:    [[TMP10:%.*]] = icmp ne i128 [[TMP9]], 0
+; X64-NEXT:    ret i1 [[TMP10]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length32(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) [[Y:%.*]], i64 32) #3
+; ALL-NEXT:    ret i32 [[M]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+  ret i32 %m
+}
+
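+; 32-byte equality expands on x86_64 into two i128 legs (PR33325); i686 keeps
+; the call here as well.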
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: @length32_eq(
+; X86-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) [[Y:%.*]], i64 32) #3
+; X86-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X86-NEXT:    ret i1 [[CMP]]
+;
+; X64-LABEL: @length32_eq(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
+; X64-NEXT:    [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8
+; X64-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 16
+; X64-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i128*
+; X64-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 16
+; X64-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
+; X64-NEXT:    [[TMP9:%.*]] = load i128, i128* [[TMP6]], align 8
+; X64-NEXT:    [[TMP10:%.*]] = load i128, i128* [[TMP8]], align 8
+; X64-NEXT:    [[TMP11:%.*]] = icmp eq i128 [[TMP3]], [[TMP4]]
+; X64-NEXT:    [[TMP12:%.*]] = icmp eq i128 [[TMP9]], [[TMP10]]
+; X64-NEXT:    [[CMP:%.*]] = and i1 [[TMP12]], [[TMP11]]
+; X64-NEXT:    ret i1 [[CMP]]
+;
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+  %cmp = icmp eq i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind {
+; X86-LABEL: @length32_eq_const(
+; X86-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 32) #3
+; X86-NEXT:    [[C:%.*]] = icmp ne i32 [[M]], 0
+; X86-NEXT:    ret i1 [[C]]
+;
+; X64-LABEL: @length32_eq_const(
+; X64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
+; X64-NEXT:    [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8
+; X64-NEXT:    [[TMP3:%.*]] = getelementptr i8, i8* [[X]], i64 16
+; X64-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i128*
+; X64-NEXT:    [[TMP5:%.*]] = load i128, i128* [[TMP4]], align 8
+; X64-NEXT:    [[TMP6:%.*]] = icmp ne i128 [[TMP2]], 70720121592765328381466889075544961328
+; X64-NEXT:    [[TMP7:%.*]] = icmp ne i128 [[TMP5]], 65382562593882267225249597816672106294
+; X64-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
+; X64-NEXT:    ret i1 [[TMP8]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+  %c = icmp ne i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @length64(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) [[Y:%.*]], i64 64) #3
+; ALL-NEXT:    ret i32 [[M]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+  ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind {
+; ALL-LABEL: @length64_eq(
+; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) [[Y:%.*]], i64 64) #3
+; ALL-NEXT:    [[CMP:%.*]] = icmp ne i32 [[CALL]], 0
+; ALL-NEXT:    ret i1 [[CMP]]
+;
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+  %cmp = icmp ne i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length64_eq_const(i8* %X) nounwind {
+; ALL-LABEL: @length64_eq_const(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 64) #3
+; ALL-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
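+; 64 bytes is beyond the expansion limit on both targets even for equality,
+; as checked above, so everything from here on must stay a plain call.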
+; This checks that we do not do stupid things with huge sizes.
+define i32 @huge_length(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @huge_length(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9223372036854775807) [[X:%.*]], i8* dereferenceable(9223372036854775807) [[Y:%.*]], i64 9223372036854775807) #3
+; ALL-NEXT:    ret i32 [[M]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind
+  ret i32 %m
+}
+
+define i1 @huge_length_eq(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @huge_length_eq(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9223372036854775807) [[X:%.*]], i8* dereferenceable(9223372036854775807) [[Y:%.*]], i64 9223372036854775807) #3
+; ALL-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+; This checks non-constant sizes.
+define i32 @nonconst_length(i8* %X, i8* %Y, i64 %size) nounwind {
+; ALL-LABEL: @nonconst_length(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 [[SIZE:%.*]]) #3
+; ALL-NEXT:    ret i32 [[M]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind
+  ret i32 %m
+}
+
+define i1 @nonconst_length_eq(i8* %X, i8* %Y, i64 %size) nounwind {
+; ALL-LABEL: @nonconst_length_eq(
+; ALL-NEXT:    [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 [[SIZE:%.*]]) #3
+; ALL-NEXT:    [[C:%.*]] = icmp eq i32 [[M]], 0
+; ALL-NEXT:    ret i1 [[C]]
+;
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @bcmp_length2(i8* %X, i8* %Y) nounwind {
+; ALL-LABEL: @bcmp_length2(
+; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16*
+; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16*
+; ALL-NEXT:    [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2
+; ALL-NEXT:    [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2
+; ALL-NEXT:    [[TMP5:%.*]] = icmp eq i16 [[TMP3]], [[TMP4]]
+; ALL-NEXT:    ret i1 [[TMP5]]
+;
+  %m = tail call i32 @bcmp(i8* %X, i8* %Y, i64 2) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
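Since the CHECK lines in this file carry the autogeneration NOTE, they should be regenerated rather than hand-edited after any pipeline change. A typical invocation (assuming a build tree at build/; the exact paths are illustrative) looks like:

    llvm/utils/update_test_checks.py --opt-binary=build/bin/opt llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll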