Diffstat (limited to 'llvm/test/Transforms')
13 files changed, 2032 insertions, 3 deletions
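For orientation before the per-file diffs: the tests added below all pin down the same IR-level rewrite. ExpandMemCmp turns a small, constant-length memcmp/bcmp whose result only feeds a comparison against zero into direct wide loads plus an integer compare, so no libcall remains. The sketch below is illustrative only (function names are not from the patch, and the exact load widths, block structure, and check prefixes depend on the target and pass options, as the per-target CHECK lines show); it shows roughly the 4-byte equality case in the same typed-pointer IR the tests use.

; Input: an equality-only memcmp of 4 bytes.
declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)

define i1 @eq4_before(i8* %x, i8* %y) {
  %m = tail call i32 @memcmp(i8* %x, i8* %y, i64 4)
  %c = icmp eq i32 %m, 0
  ret i1 %c
}

; Roughly what -expandmemcmp produces for it: one i32 load per operand and a
; single compare, with no remaining call.
define i1 @eq4_after(i8* %x, i8* %y) {
  %px = bitcast i8* %x to i32*
  %py = bitcast i8* %y to i32*
  %vx = load i32, i32* %px
  %vy = load i32, i32* %py
  %eq = icmp eq i32 %vx, %vy
  ret i1 %eq
}

Larger sizes are split into several loads whose differences are combined with xor/or (see cmp_eq32 and the strict-align AArch64 cases below), and ordered comparisons additionally byte-swap the loaded values on little-endian targets before comparing, which is why the little-endian PowerPC checks contain llvm.bswap calls while the big-endian ones do not.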
diff --git a/llvm/test/Transforms/ExpandMemCmp/AArch64/memcmp.ll b/llvm/test/Transforms/ExpandMemCmp/AArch64/memcmp.ll new file mode 100644 index 00000000000..b6762863ad3 --- /dev/null +++ b/llvm/test/Transforms/ExpandMemCmp/AArch64/memcmp.ll @@ -0,0 +1,124 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -S -expandmemcmp -verify-dom-info -mtriple=aarch64-linux-gnu -data-layout="e-m:e-i64:64-n32:64" | FileCheck %s +; RUN: opt < %s -S -expandmemcmp -verify-dom-info -mtriple=aarch64-linux-gnu -mattr=strict-align -data-layout="E-m:e-i64:64-n32:64" | FileCheck %s --check-prefix=CHECK-STRICTALIGN + +declare i32 @bcmp(i8*, i8*, i64) nounwind readonly +declare i32 @memcmp(i8*, i8*, i64) nounwind readonly + +define i1 @bcmp_b2(i8* %s1, i8* %s2) { +; CHECK-LABEL: @bcmp_b2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[S1:%.*]] to i64* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S2:%.*]] to i64* +; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[S1]], i8 7 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[S2]], i8 7 +; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]] +; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = or i64 [[TMP4]], [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP12]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = zext i1 [[TMP13]] to i32 +; CHECK-NEXT: [[RET:%.*]] = icmp eq i32 [[TMP14]], 0 +; CHECK-NEXT: ret i1 [[RET]] +; +; CHECK-STRICTALIGN-LABEL: @bcmp_b2( +; CHECK-STRICTALIGN-NEXT: entry: +; CHECK-STRICTALIGN-NEXT: [[TMP0:%.*]] = bitcast i8* [[S1:%.*]] to i64* +; CHECK-STRICTALIGN-NEXT: [[TMP1:%.*]] = bitcast i8* [[S2:%.*]] to i64* +; CHECK-STRICTALIGN-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]] +; CHECK-STRICTALIGN-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] +; CHECK-STRICTALIGN-NEXT: [[TMP4:%.*]] = xor i64 [[TMP2]], [[TMP3]] +; CHECK-STRICTALIGN-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[S1]], i8 8 +; CHECK-STRICTALIGN-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i32* +; CHECK-STRICTALIGN-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[S2]], i8 8 +; CHECK-STRICTALIGN-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +; CHECK-STRICTALIGN-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP6]] +; CHECK-STRICTALIGN-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]] +; CHECK-STRICTALIGN-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP12:%.*]] = zext i32 [[TMP10]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP13:%.*]] = xor i64 [[TMP11]], [[TMP12]] +; CHECK-STRICTALIGN-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[S1]], i8 12 +; CHECK-STRICTALIGN-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16* +; CHECK-STRICTALIGN-NEXT: [[TMP16:%.*]] = getelementptr i8, i8* [[S2]], i8 12 +; CHECK-STRICTALIGN-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i16* +; CHECK-STRICTALIGN-NEXT: [[TMP18:%.*]] = load i16, i16* [[TMP15]] +; CHECK-STRICTALIGN-NEXT: [[TMP19:%.*]] = load i16, i16* [[TMP17]] +; CHECK-STRICTALIGN-NEXT: [[TMP20:%.*]] = zext i16 [[TMP18]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP21:%.*]] = zext i16 [[TMP19]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP22:%.*]] = xor i64 [[TMP20]], [[TMP21]] +; CHECK-STRICTALIGN-NEXT: [[TMP23:%.*]] = 
getelementptr i8, i8* [[S1]], i8 14 +; CHECK-STRICTALIGN-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[S2]], i8 14 +; CHECK-STRICTALIGN-NEXT: [[TMP25:%.*]] = load i8, i8* [[TMP23]] +; CHECK-STRICTALIGN-NEXT: [[TMP26:%.*]] = load i8, i8* [[TMP24]] +; CHECK-STRICTALIGN-NEXT: [[TMP27:%.*]] = zext i8 [[TMP25]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP28:%.*]] = zext i8 [[TMP26]] to i64 +; CHECK-STRICTALIGN-NEXT: [[TMP29:%.*]] = xor i64 [[TMP27]], [[TMP28]] +; CHECK-STRICTALIGN-NEXT: [[TMP30:%.*]] = or i64 [[TMP4]], [[TMP13]] +; CHECK-STRICTALIGN-NEXT: [[TMP31:%.*]] = or i64 [[TMP22]], [[TMP29]] +; CHECK-STRICTALIGN-NEXT: [[TMP32:%.*]] = or i64 [[TMP30]], [[TMP31]] +; CHECK-STRICTALIGN-NEXT: [[TMP33:%.*]] = icmp ne i64 [[TMP32]], 0 +; CHECK-STRICTALIGN-NEXT: [[TMP34:%.*]] = zext i1 [[TMP33]] to i32 +; CHECK-STRICTALIGN-NEXT: [[RET:%.*]] = icmp eq i32 [[TMP34]], 0 +; CHECK-STRICTALIGN-NEXT: ret i1 [[RET]] +; +entry: + %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, i64 15) + %ret = icmp eq i32 %bcmp, 0 + ret i1 %ret +} + +define i1 @bcmp_bs(i8* %s1, i8* %s2) optsize { +; CHECK-LABEL: @bcmp_bs( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[S1:%.*]] to i64* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[S2:%.*]] to i64* +; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[S1]], i8 8 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[S2]], i8 8 +; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]] +; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]] +; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[S1]], i8 16 +; CHECK-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i64* +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[S2]], i8 16 +; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i64* +; CHECK-NEXT: [[TMP16:%.*]] = load i64, i64* [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = xor i64 [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[S1]], i8 23 +; CHECK-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i64* +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, i8* [[S2]], i8 23 +; CHECK-NEXT: [[TMP22:%.*]] = bitcast i8* [[TMP21]] to i64* +; CHECK-NEXT: [[TMP23:%.*]] = load i64, i64* [[TMP20]] +; CHECK-NEXT: [[TMP24:%.*]] = load i64, i64* [[TMP22]] +; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP26:%.*]] = or i64 [[TMP4]], [[TMP11]] +; CHECK-NEXT: [[TMP27:%.*]] = or i64 [[TMP18]], [[TMP25]] +; CHECK-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = icmp ne i64 [[TMP28]], 0 +; CHECK-NEXT: [[TMP30:%.*]] = zext i1 [[TMP29]] to i32 +; CHECK-NEXT: [[RET:%.*]] = icmp eq i32 [[TMP30]], 0 +; CHECK-NEXT: ret i1 [[RET]] +; +; CHECK-STRICTALIGN-LABEL: @bcmp_bs( +; CHECK-STRICTALIGN-NEXT: entry: +; CHECK-STRICTALIGN-NEXT: [[MEMCMP:%.*]] = call i32 @memcmp(i8* [[S1:%.*]], i8* [[S2:%.*]], i64 31) +; CHECK-STRICTALIGN-NEXT: [[RET:%.*]] = icmp eq i32 [[MEMCMP]], 0 +; CHECK-STRICTALIGN-NEXT: ret i1 [[RET]] +; +entry: + %memcmp = call i32 @memcmp(i8* %s1, i8* %s2, i64 31) + %ret = icmp eq i32 %memcmp, 0 + ret i1 %ret +} + + diff --git a/llvm/test/Transforms/ExpandMemCmp/PowerPC/lit.local.cfg 
b/llvm/test/Transforms/ExpandMemCmp/PowerPC/lit.local.cfg new file mode 100644 index 00000000000..5d33887ff0a --- /dev/null +++ b/llvm/test/Transforms/ExpandMemCmp/PowerPC/lit.local.cfg @@ -0,0 +1,3 @@ +if not 'PowerPC' in config.root.targets: + config.unsupported = True + diff --git a/llvm/test/Transforms/ExpandMemCmp/PowerPC/memcmpIR.ll b/llvm/test/Transforms/ExpandMemCmp/PowerPC/memcmpIR.ll new file mode 100644 index 00000000000..c09601f969b --- /dev/null +++ b/llvm/test/Transforms/ExpandMemCmp/PowerPC/memcmpIR.ll @@ -0,0 +1,294 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -S -expandmemcmp -verify-dom-info -mtriple=powerpc64le-unknown-gnu-linux -data-layout="e-m:e-i64:64-n32:64" | FileCheck %s +; RUN: opt < %s -S -expandmemcmp -verify-dom-info -mtriple=powerpc64-unknown-gnu-linux -data-layout="E-m:e-i64:64-n32:64" | FileCheck %s --check-prefix=CHECK-BE + +define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @test1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: br label [[LOADBB:%.*]] +; CHECK: res_block: +; CHECK-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP17:%.*]], [[LOADBB1:%.*]] ] +; CHECK-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP9:%.*]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 -1, i32 1 +; CHECK-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK: loadbb: +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i64* +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP1]] to i64* +; CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP5]] +; CHECK-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]]) +; CHECK-NEXT: [[TMP9]] = call i64 @llvm.bswap.i64(i64 [[TMP7]]) +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP8]], [[TMP9]] +; CHECK-NEXT: br i1 [[TMP10]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] +; CHECK: loadbb1: +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[TMP0]], i8 8 +; CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i64* +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[TMP1]], i8 8 +; CHECK-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i64* +; CHECK-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP12]] +; CHECK-NEXT: [[TMP16:%.*]] = load i64, i64* [[TMP14]] +; CHECK-NEXT: [[TMP17]] = call i64 @llvm.bswap.i64(i64 [[TMP15]]) +; CHECK-NEXT: [[TMP18]] = call i64 @llvm.bswap.i64(i64 [[TMP16]]) +; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP17]], [[TMP18]] +; CHECK-NEXT: br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; CHECK: endblock: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP3]], [[RES_BLOCK]] ] +; CHECK-NEXT: ret i32 [[PHI_RES]] +; +; CHECK-BE-LABEL: @test1( +; CHECK-BE-NEXT: entry: +; CHECK-BE-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-BE-NEXT: br label [[LOADBB:%.*]] +; CHECK-BE: res_block: +; CHECK-BE-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP6:%.*]], [[LOADBB]] ], [ [[TMP13:%.*]], [[LOADBB1:%.*]] ] +; CHECK-BE-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP14:%.*]], [[LOADBB1]] ] +; CHECK-BE-NEXT: [[TMP2:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; CHECK-BE-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], 
i32 -1, i32 1 +; CHECK-BE-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK-BE: loadbb: +; CHECK-BE-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i64* +; CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP1]] to i64* +; CHECK-BE-NEXT: [[TMP6]] = load i64, i64* [[TMP4]] +; CHECK-BE-NEXT: [[TMP7]] = load i64, i64* [[TMP5]] +; CHECK-BE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]] +; CHECK-BE-NEXT: br i1 [[TMP8]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] +; CHECK-BE: loadbb1: +; CHECK-BE-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[TMP0]], i8 8 +; CHECK-BE-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i64* +; CHECK-BE-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[TMP1]], i8 8 +; CHECK-BE-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i64* +; CHECK-BE-NEXT: [[TMP13]] = load i64, i64* [[TMP10]] +; CHECK-BE-NEXT: [[TMP14]] = load i64, i64* [[TMP12]] +; CHECK-BE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[TMP13]], [[TMP14]] +; CHECK-BE-NEXT: br i1 [[TMP15]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; CHECK-BE: endblock: +; CHECK-BE-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP3]], [[RES_BLOCK]] ] +; CHECK-BE-NEXT: ret i32 [[PHI_RES]] +; +entry: + + + + + + + %0 = bitcast i32* %buffer1 to i8* + %1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 16) + ret i32 %call +} + +declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1 + +define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @test2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to i32* +; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32* +; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]]) +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP5]]) +; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i32 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32 +; CHECK-NEXT: [[TMP11:%.*]] = zext i1 [[TMP9]] to i32 +; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP10]], [[TMP11]] +; CHECK-NEXT: ret i32 [[TMP12]] +; +; CHECK-BE-LABEL: @test2( +; CHECK-BE-NEXT: entry: +; CHECK-BE-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to i32* +; CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32* +; CHECK-BE-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] +; CHECK-BE-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]] +; CHECK-BE-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP4]], [[TMP5]] +; CHECK-BE-NEXT: [[TMP7:%.*]] = icmp ult i32 [[TMP4]], [[TMP5]] +; CHECK-BE-NEXT: [[TMP8:%.*]] = zext i1 [[TMP6]] to i32 +; CHECK-BE-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32 +; CHECK-BE-NEXT: [[TMP10:%.*]] = sub i32 [[TMP8]], [[TMP9]] +; CHECK-BE-NEXT: ret i32 [[TMP10]] +; + + +entry: + %0 = bitcast i32* %buffer1 to i8* + %1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 4) + ret i32 %call +} + +define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @test3( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: 
[[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: br label [[LOADBB:%.*]] +; CHECK: res_block: +; CHECK-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP8:%.*]], [[LOADBB]] ], [ [[TMP19:%.*]], [[LOADBB1:%.*]] ], [ [[TMP30:%.*]], [[LOADBB2:%.*]] ] +; CHECK-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP9:%.*]], [[LOADBB]] ], [ [[TMP20:%.*]], [[LOADBB1]] ], [ [[TMP31:%.*]], [[LOADBB2]] ] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 -1, i32 1 +; CHECK-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK: loadbb: +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i64* +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP1]] to i64* +; CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP5]] +; CHECK-NEXT: [[TMP8]] = call i64 @llvm.bswap.i64(i64 [[TMP6]]) +; CHECK-NEXT: [[TMP9]] = call i64 @llvm.bswap.i64(i64 [[TMP7]]) +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP8]], [[TMP9]] +; CHECK-NEXT: br i1 [[TMP10]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] +; CHECK: loadbb1: +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[TMP0]], i8 8 +; CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[TMP1]], i8 8 +; CHECK-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32* +; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP12]] +; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP14]] +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP15]]) +; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP16]]) +; CHECK-NEXT: [[TMP19]] = zext i32 [[TMP17]] to i64 +; CHECK-NEXT: [[TMP20]] = zext i32 [[TMP18]] to i64 +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[TMP19]], [[TMP20]] +; CHECK-NEXT: br i1 [[TMP21]], label [[LOADBB2]], label [[RES_BLOCK]] +; CHECK: loadbb2: +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, i8* [[TMP0]], i8 12 +; CHECK-NEXT: [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i16* +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, i8* [[TMP1]], i8 12 +; CHECK-NEXT: [[TMP25:%.*]] = bitcast i8* [[TMP24]] to i16* +; CHECK-NEXT: [[TMP26:%.*]] = load i16, i16* [[TMP23]] +; CHECK-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP25]] +; CHECK-NEXT: [[TMP28:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP26]]) +; CHECK-NEXT: [[TMP29:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP27]]) +; CHECK-NEXT: [[TMP30]] = zext i16 [[TMP28]] to i64 +; CHECK-NEXT: [[TMP31]] = zext i16 [[TMP29]] to i64 +; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[TMP30]], [[TMP31]] +; CHECK-NEXT: br i1 [[TMP32]], label [[LOADBB3:%.*]], label [[RES_BLOCK]] +; CHECK: loadbb3: +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr i8, i8* [[TMP0]], i8 14 +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i8, i8* [[TMP1]], i8 14 +; CHECK-NEXT: [[TMP35:%.*]] = load i8, i8* [[TMP33]] +; CHECK-NEXT: [[TMP36:%.*]] = load i8, i8* [[TMP34]] +; CHECK-NEXT: [[TMP37:%.*]] = zext i8 [[TMP35]] to i32 +; CHECK-NEXT: [[TMP38:%.*]] = zext i8 [[TMP36]] to i32 +; CHECK-NEXT: [[TMP39:%.*]] = sub i32 [[TMP37]], [[TMP38]] +; CHECK-NEXT: br label [[ENDBLOCK]] +; CHECK: endblock: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP39]], [[LOADBB3]] ], [ [[TMP3]], [[RES_BLOCK]] ] +; CHECK-NEXT: ret i32 [[PHI_RES]] +; +; CHECK-BE-LABEL: @test3( +; CHECK-BE-NEXT: entry: +; CHECK-BE-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-BE-NEXT: br label [[LOADBB:%.*]] +; CHECK-BE: res_block: +; CHECK-BE-NEXT: [[PHI_SRC1:%.*]] = phi i64 
[ [[TMP6:%.*]], [[LOADBB]] ], [ [[TMP15:%.*]], [[LOADBB1:%.*]] ], [ [[TMP24:%.*]], [[LOADBB2:%.*]] ] +; CHECK-BE-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP7:%.*]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ], [ [[TMP25:%.*]], [[LOADBB2]] ] +; CHECK-BE-NEXT: [[TMP2:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; CHECK-BE-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 -1, i32 1 +; CHECK-BE-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK-BE: loadbb: +; CHECK-BE-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP0]] to i64* +; CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP1]] to i64* +; CHECK-BE-NEXT: [[TMP6]] = load i64, i64* [[TMP4]] +; CHECK-BE-NEXT: [[TMP7]] = load i64, i64* [[TMP5]] +; CHECK-BE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP6]], [[TMP7]] +; CHECK-BE-NEXT: br i1 [[TMP8]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] +; CHECK-BE: loadbb1: +; CHECK-BE-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[TMP0]], i8 8 +; CHECK-BE-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32* +; CHECK-BE-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[TMP1]], i8 8 +; CHECK-BE-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* +; CHECK-BE-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP10]] +; CHECK-BE-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]] +; CHECK-BE-NEXT: [[TMP15]] = zext i32 [[TMP13]] to i64 +; CHECK-BE-NEXT: [[TMP16]] = zext i32 [[TMP14]] to i64 +; CHECK-BE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[TMP15]], [[TMP16]] +; CHECK-BE-NEXT: br i1 [[TMP17]], label [[LOADBB2]], label [[RES_BLOCK]] +; CHECK-BE: loadbb2: +; CHECK-BE-NEXT: [[TMP18:%.*]] = getelementptr i8, i8* [[TMP0]], i8 12 +; CHECK-BE-NEXT: [[TMP19:%.*]] = bitcast i8* [[TMP18]] to i16* +; CHECK-BE-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[TMP1]], i8 12 +; CHECK-BE-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16* +; CHECK-BE-NEXT: [[TMP22:%.*]] = load i16, i16* [[TMP19]] +; CHECK-BE-NEXT: [[TMP23:%.*]] = load i16, i16* [[TMP21]] +; CHECK-BE-NEXT: [[TMP24]] = zext i16 [[TMP22]] to i64 +; CHECK-BE-NEXT: [[TMP25]] = zext i16 [[TMP23]] to i64 +; CHECK-BE-NEXT: [[TMP26:%.*]] = icmp eq i64 [[TMP24]], [[TMP25]] +; CHECK-BE-NEXT: br i1 [[TMP26]], label [[LOADBB3:%.*]], label [[RES_BLOCK]] +; CHECK-BE: loadbb3: +; CHECK-BE-NEXT: [[TMP27:%.*]] = getelementptr i8, i8* [[TMP0]], i8 14 +; CHECK-BE-NEXT: [[TMP28:%.*]] = getelementptr i8, i8* [[TMP1]], i8 14 +; CHECK-BE-NEXT: [[TMP29:%.*]] = load i8, i8* [[TMP27]] +; CHECK-BE-NEXT: [[TMP30:%.*]] = load i8, i8* [[TMP28]] +; CHECK-BE-NEXT: [[TMP31:%.*]] = zext i8 [[TMP29]] to i32 +; CHECK-BE-NEXT: [[TMP32:%.*]] = zext i8 [[TMP30]] to i32 +; CHECK-BE-NEXT: [[TMP33:%.*]] = sub i32 [[TMP31]], [[TMP32]] +; CHECK-BE-NEXT: br label [[ENDBLOCK]] +; CHECK-BE: endblock: +; CHECK-BE-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP33]], [[LOADBB3]] ], [ [[TMP3]], [[RES_BLOCK]] ] +; CHECK-BE-NEXT: ret i32 [[PHI_RES]] +; +entry: + %0 = bitcast i32* %buffer1 to i8* + %1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 15) + ret i32 %call +} + +define signext i32 @test4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @test4( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: [[CALL:%.*]] = tail call signext i32 @memcmp(i8* [[TMP0]], i8* [[TMP1]], i64 65) +; CHECK-NEXT: ret i32 [[CALL]] +; +; CHECK-BE-LABEL: @test4( +; CHECK-BE-NEXT: entry: +; CHECK-BE-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast i32* 
[[BUFFER2:%.*]] to i8* +; CHECK-BE-NEXT: [[CALL:%.*]] = tail call signext i32 @memcmp(i8* [[TMP0]], i8* [[TMP1]], i64 65) +; CHECK-BE-NEXT: ret i32 [[CALL]] +; +entry: + %0 = bitcast i32* %buffer1 to i8* + %1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 65) + ret i32 %call +} + +define signext i32 @test5(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2, i32 signext %SIZE) { +; CHECK-LABEL: @test5( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[SIZE:%.*]] to i64 +; CHECK-NEXT: [[CALL:%.*]] = tail call signext i32 @memcmp(i8* [[TMP0]], i8* [[TMP1]], i64 [[CONV]]) +; CHECK-NEXT: ret i32 [[CALL]] +; +; CHECK-BE-LABEL: @test5( +; CHECK-BE-NEXT: entry: +; CHECK-BE-NEXT: [[TMP0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-BE-NEXT: [[CONV:%.*]] = sext i32 [[SIZE:%.*]] to i64 +; CHECK-BE-NEXT: [[CALL:%.*]] = tail call signext i32 @memcmp(i8* [[TMP0]], i8* [[TMP1]], i64 [[CONV]]) +; CHECK-BE-NEXT: ret i32 [[CALL]] +; +entry: + %0 = bitcast i32* %buffer1 to i8* + %1 = bitcast i32* %buffer2 to i8* + %conv = sext i32 %SIZE to i64 + %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 %conv) + ret i32 %call +} diff --git a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll b/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll index c1cbcc3272c..3c050223b53 100644 --- a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll +++ b/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -expandmemcmp -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32 -; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_1LD -; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_2LD +; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32 +; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 -mattr=+avx2 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_1LD +; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 -mattr=+avx2 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_2LD declare i32 @memcmp(i8* nocapture, i8* nocapture, i64) @@ -1217,3 +1217,86 @@ define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) { ret i32 %conv } +define i32 @cmp_eq32(i8* nocapture readonly %x, i8* nocapture readonly %y) { +; X32-LABEL: @cmp_eq32( +; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 32) +; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0 +; X32-NEXT: [[CONV:%.*]] = zext i1 
[[CMP]] to i32 +; X32-NEXT: ret i32 [[CONV]] +; +; X64-LABEL: @cmp_eq32( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i256* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i256* +; X64-NEXT: [[TMP3:%.*]] = load i256, i256* [[TMP1]] +; X64-NEXT: [[TMP4:%.*]] = load i256, i256* [[TMP2]] +; X64-NEXT: [[TMP5:%.*]] = icmp ne i256 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32 +; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0 +; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +; X64-NEXT: ret i32 [[CONV]] +; + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) + %cmp = icmp eq i32 %call, 0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + +define i32 @cmp_eq32_prefer128(i8* nocapture readonly %x, i8* nocapture readonly %y) "prefer-vector-width"="128" { +; X32-LABEL: @cmp_eq32_prefer128( +; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 32) +; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0 +; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +; X32-NEXT: ret i32 [[CONV]] +; +; X64_1LD-LABEL: @cmp_eq32_prefer128( +; X64_1LD-NEXT: br label [[LOADBB:%.*]] +; X64_1LD: res_block: +; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]] +; X64_1LD: loadbb: +; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128* +; X64_1LD-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]] +; X64_1LD-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]] +; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]] +; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] +; X64_1LD: loadbb1: +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 16 +; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i128* +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 16 +; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i128* +; X64_1LD-NEXT: [[TMP10:%.*]] = load i128, i128* [[TMP7]] +; X64_1LD-NEXT: [[TMP11:%.*]] = load i128, i128* [[TMP9]] +; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i128 [[TMP10]], [[TMP11]] +; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]] +; X64_1LD: endblock: +; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ] +; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0 +; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +; X64_1LD-NEXT: ret i32 [[CONV]] +; +; X64_2LD-LABEL: @cmp_eq32_prefer128( +; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128* +; X64_2LD-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]] +; X64_2LD-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]] +; X64_2LD-NEXT: [[TMP5:%.*]] = xor i128 [[TMP3]], [[TMP4]] +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 16 +; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i128* +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 16 +; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i128* +; X64_2LD-NEXT: [[TMP10:%.*]] = load i128, i128* [[TMP7]] +; X64_2LD-NEXT: [[TMP11:%.*]] = load i128, i128* [[TMP9]] +; X64_2LD-NEXT: [[TMP12:%.*]] = xor i128 [[TMP10]], [[TMP11]] +; X64_2LD-NEXT: [[TMP13:%.*]] = or i128 [[TMP5]], [[TMP12]] +; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0 +; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32 +; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0 +; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +; X64_2LD-NEXT: ret i32 [[CONV]] +; + %call = tail call i32 @memcmp(i8* %x, i8* 
%y, i64 32) + %cmp = icmp eq i32 %call, 0 + %conv = zext i1 %cmp to i32 + ret i32 %conv +} + diff --git a/llvm/test/Transforms/ExpandMemCmp/X86/pr36421.ll b/llvm/test/Transforms/ExpandMemCmp/X86/pr36421.ll new file mode 100644 index 00000000000..c64d37a3757 --- /dev/null +++ b/llvm/test/Transforms/ExpandMemCmp/X86/pr36421.ll @@ -0,0 +1,79 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -domtree -expandmemcmp -verify-dom-info -S | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-unknown-unknown" + +@.str = private unnamed_addr constant [7 x i8] c"abcdef\00", align 1 +@.str.1 = private unnamed_addr constant [7 x i8] c"ABCDEF\00", align 1 + +define i32 @test(i8* nocapture readonly %string, i32 %len) local_unnamed_addr #0 { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[LEN:%.*]], 6 +; CHECK-NEXT: br i1 [[COND]], label [[SW_BB:%.*]], label [[RETURN:%.*]] +; CHECK: sw.bb: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[STRING:%.*]] to i32* +; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 1684234849 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, i8* [[STRING]], i8 4 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i16* +; CHECK-NEXT: [[TMP5:%.*]] = load i16, i16* [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32 +; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[TMP6]], 26213 +; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP2]], [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TMP9]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP10]], 0 +; CHECK-NEXT: br i1 [[CMP]], label [[RETURN]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[TMP11:%.*]] = bitcast i8* [[STRING]] to i32* +; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = xor i32 [[TMP12]], 1145258561 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[STRING]], i8 4 +; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16* +; CHECK-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP15]] +; CHECK-NEXT: [[TMP17:%.*]] = zext i16 [[TMP16]] to i32 +; CHECK-NEXT: [[TMP18:%.*]] = xor i32 [[TMP17]], 17989 +; CHECK-NEXT: [[TMP19:%.*]] = or i32 [[TMP13]], [[TMP18]] +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 +; CHECK-NEXT: [[TMP21:%.*]] = zext i1 [[TMP20]] to i32 +; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP21]], 0 +; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP2]], i32 64, i32 0 +; CHECK-NEXT: br label [[RETURN]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 61, [[SW_BB]] ], [ [[DOT]], [[IF_END]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cond = icmp eq i32 %len, 6 + br i1 %cond, label %sw.bb, label %return + +sw.bb: ; preds = %entry + %call = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 6) + %cmp = icmp eq i32 %call, 0 + br i1 %cmp, label %return, label %if.end + +if.end: ; preds = %sw.bb + %call1 = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.1, i64 0, i64 0), i64 6) + %cmp2 = icmp eq i32 %call1, 0 + %. 
= select i1 %cmp2, i32 64, i32 0 + br label %return + +return: ; preds = %entry, %if.end8, %if.end4, %if.end, %sw.bb + %retval.0 = phi i32 [ 61, %sw.bb ], [ %., %if.end ], [ 0, %entry ] + ret i32 %retval.0 +} + +; Function Attrs: nounwind readonly +declare i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1 + +attributes #0 = { nounwind readonly ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"clang version 7.0.0 (trunk 325350)"} diff --git a/llvm/test/Transforms/PhaseOrdering/PowerPC/lit.local.cfg b/llvm/test/Transforms/PhaseOrdering/PowerPC/lit.local.cfg new file mode 100644 index 00000000000..091332439b1 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/PowerPC/lit.local.cfg @@ -0,0 +1,2 @@ +if not 'PowerPC' in config.root.targets: + config.unsupported = True diff --git a/llvm/test/Transforms/PhaseOrdering/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/llvm/test/Transforms/PhaseOrdering/PowerPC/memCmpUsedInZeroEqualityComparison.ll new file mode 100644 index 00000000000..c32f6b2a16a --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/PowerPC/memCmpUsedInZeroEqualityComparison.ll @@ -0,0 +1,174 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -O2 -S -mcpu=pwr8 < %s | FileCheck %s +target datalayout = "e-m:e-i64:64-n32:64" +target triple = "powerpc64le-unknown-linux-gnu" + +@zeroEqualityTest01.buffer1 = private unnamed_addr constant [3 x i32] [i32 1, i32 2, i32 4], align 4 +@zeroEqualityTest01.buffer2 = private unnamed_addr constant [3 x i32] [i32 1, i32 2, i32 3], align 4 +@zeroEqualityTest02.buffer1 = private unnamed_addr constant [4 x i32] [i32 4, i32 0, i32 0, i32 0], align 4 +@zeroEqualityTest02.buffer2 = private unnamed_addr constant [4 x i32] [i32 3, i32 0, i32 0, i32 0], align 4 +@zeroEqualityTest03.buffer1 = private unnamed_addr constant [4 x i32] [i32 0, i32 0, i32 0, i32 3], align 4 +@zeroEqualityTest03.buffer2 = private unnamed_addr constant [4 x i32] [i32 0, i32 0, i32 0, i32 4], align 4 +@zeroEqualityTest04.buffer1 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14], align 4 +@zeroEqualityTest04.buffer2 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 13], align 4 + +declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1 + +; Check 4 bytes - requires 1 load for 
each param. +define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) { +; CHECK-LABEL: @zeroEqualityTest02( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32 +; CHECK-NEXT: ret i32 [[TMP6]] +; + %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 4) + %not.cmp = icmp ne i32 %call, 0 + %. = zext i1 %not.cmp to i32 + ret i32 %. +} + +; Check 16 bytes - requires 2 loads for each param (or use vectors?). +define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) { +; CHECK-LABEL: @zeroEqualityTest01( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[TMP2]], [[TMP3]] +; CHECK-NEXT: br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; CHECK: res_block: +; CHECK-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK: loadbb1: +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]] +; CHECK-NEXT: br i1 [[TMP11]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; CHECK: endblock: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ] +; CHECK-NEXT: ret i32 [[PHI_RES]] +; + %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 16) + %not.tobool = icmp ne i32 %call, 0 + %. = zext i1 %not.tobool to i32 + ret i32 %. +} + +; Check 7 bytes - requires 3 loads for each param. 
+define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) { +; CHECK-LABEL: @zeroEqualityTest03( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]] +; CHECK-NEXT: br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; CHECK: res_block: +; CHECK-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK: loadbb1: +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i16* +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i16* +; CHECK-NEXT: [[TMP9:%.*]] = load i16, i16* [[TMP6]], align 2 +; CHECK-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP8]], align 2 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i16 [[TMP9]], [[TMP10]] +; CHECK-NEXT: br i1 [[TMP11]], label [[LOADBB2:%.*]], label [[RES_BLOCK]] +; CHECK: loadbb2: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[X]], i64 6 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[Y]], i64 6 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, i8* [[TMP12]], align 1 +; CHECK-NEXT: [[TMP15:%.*]] = load i8, i8* [[TMP13]], align 1 +; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i8 [[TMP14]], [[TMP15]] +; CHECK-NEXT: br i1 [[TMP16]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; CHECK: endblock: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB2]] ], [ 1, [[RES_BLOCK]] ] +; CHECK-NEXT: ret i32 [[PHI_RES]] +; + %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 7) + %not.lnot = icmp ne i32 %call, 0 + %cond = zext i1 %not.lnot to i32 + ret i32 %cond +} + +; Validate with > 0 +define signext i32 @zeroEqualityTest04() { +; CHECK-LABEL: @zeroEqualityTest04( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: ret i32 0 +; + %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer2 to i8*), i64 16) + %not.cmp = icmp slt i32 %call, 1 + %. = zext i1 %not.cmp to i32 + ret i32 %. 
+} + +; Validate with < 0 +define signext i32 @zeroEqualityTest05() { +; CHECK-LABEL: @zeroEqualityTest05( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: ret i32 0 +; + %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer2 to i8*), i64 16) + %call.lobit = lshr i32 %call, 31 + %call.lobit.not = xor i32 %call.lobit, 1 + ret i32 %call.lobit.not +} + +; Validate with memcmp()?: +define signext i32 @equalityFoldTwoConstants() { +; CHECK-LABEL: @equalityFoldTwoConstants( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: ret i32 1 +; + %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16) + %not.tobool = icmp eq i32 %call, 0 + %cond = zext i1 %not.tobool to i32 + ret i32 %cond +} + +define signext i32 @equalityFoldOneConstant(i8* %X) { +; CHECK-LABEL: @equalityFoldOneConstant( +; CHECK-NEXT: loadbb: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* +; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 4294967296 +; CHECK-NEXT: br i1 [[TMP2]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; CHECK: res_block: +; CHECK-NEXT: br label [[ENDBLOCK:%.*]] +; CHECK: loadbb1: +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64* +; CHECK-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 12884901890 +; CHECK-NEXT: br i1 [[TMP6]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; CHECK: endblock: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi i32 [ 1, [[LOADBB1]] ], [ 0, [[RES_BLOCK]] ] +; CHECK-NEXT: ret i32 [[PHI_RES]] +; + %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* %X, i64 16) + %not.tobool = icmp eq i32 %call, 0 + %cond = zext i1 %not.tobool to i32 + ret i32 %cond +} + +define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) { +; CHECK-LABEL: @length2_eq_nobuiltin_attr( +; CHECK-NEXT: [[M:%.*]] = tail call signext i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 2) #2 +; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; CHECK-NEXT: ret i1 [[C]] +; + %m = tail call signext i32 @memcmp(i8* %X, i8* %Y, i64 2) nobuiltin + %c = icmp eq i32 %m, 0 + ret i1 %c +} + diff --git a/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp-mergeexpand.ll b/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp-mergeexpand.ll new file mode 100644 index 00000000000..cab9ad0fefb --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp-mergeexpand.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -mergeicmps -expandmemcmp -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux < %s | FileCheck %s --check-prefix=PPC64LE + +; This tests interaction between MergeICmp and ExpandMemCmp. 
+ +%"struct.std::pair" = type { i32, i32 } + +define zeroext i1 @opeq1( +; PPC64LE-LABEL: @opeq1( +; PPC64LE-NEXT: "entry+land.rhs.i": +; PPC64LE-NEXT: [[TMP0:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[A:%.*]], i64 0, i32 0 +; PPC64LE-NEXT: [[TMP1:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[B:%.*]], i64 0, i32 0 +; PPC64LE-NEXT: [[CSTR:%.*]] = bitcast i32* [[TMP0]] to i8* +; PPC64LE-NEXT: [[CSTR1:%.*]] = bitcast i32* [[TMP1]] to i8* +; PPC64LE-NEXT: [[TMP2:%.*]] = bitcast i8* [[CSTR]] to i64* +; PPC64LE-NEXT: [[TMP3:%.*]] = bitcast i8* [[CSTR1]] to i64* +; PPC64LE-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] +; PPC64LE-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]] +; PPC64LE-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4]], [[TMP5]] +; PPC64LE-NEXT: [[TMP7:%.*]] = zext i1 [[TMP6]] to i32 +; PPC64LE-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0 +; PPC64LE-NEXT: br label [[OPEQ1_EXIT:%.*]] +; PPC64LE: opeq1.exit: +; PPC64LE-NEXT: ret i1 [[TMP8]] +; + %"struct.std::pair"* nocapture readonly dereferenceable(8) %a, + %"struct.std::pair"* nocapture readonly dereferenceable(8) %b) local_unnamed_addr #0 { +entry: + %first.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 0 + %0 = load i32, i32* %first.i, align 4 + %first1.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 0 + %1 = load i32, i32* %first1.i, align 4 + %cmp.i = icmp eq i32 %0, %1 + br i1 %cmp.i, label %land.rhs.i, label %opeq1.exit + +land.rhs.i: + %second.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 1 + %2 = load i32, i32* %second.i, align 4 + %second2.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 1 + %3 = load i32, i32* %second2.i, align 4 + %cmp3.i = icmp eq i32 %2, %3 + br label %opeq1.exit + +opeq1.exit: + %4 = phi i1 [ false, %entry ], [ %cmp3.i, %land.rhs.i ] + ret i1 %4 +} + + diff --git a/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp.ll b/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp.ll new file mode 100644 index 00000000000..e881e685555 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/PowerPC/memcmp.ll @@ -0,0 +1,80 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -O2 -S -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux | FileCheck %s -check-prefix=CHECK + +define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @memcmp8( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i64* +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i64* +; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]]) +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]]) +; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP5]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]] +; CHECK-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32 +; CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32 +; CHECK-NEXT: [[TMP11:%.*]] = sub nsw i32 [[TMP9]], [[TMP10]] +; CHECK-NEXT: ret i32 [[TMP11]] +; + %t0 = bitcast i32* %buffer1 to i8* + %t1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 8) + ret i32 %call +} + +define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: 
@memcmp4( +; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[BUFFER1:%.*]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[BUFFER2:%.*]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]]) +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ult i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TMP5]] to i32 +; CHECK-NEXT: [[TMP8:%.*]] = zext i1 [[TMP6]] to i32 +; CHECK-NEXT: [[TMP9:%.*]] = sub nsw i32 [[TMP7]], [[TMP8]] +; CHECK-NEXT: ret i32 [[TMP9]] +; + %t0 = bitcast i32* %buffer1 to i8* + %t1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 4) + ret i32 %call +} + +define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @memcmp2( +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i16* +; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i16* +; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; CHECK-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]]) +; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]]) +; CHECK-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32 +; CHECK-NEXT: [[TMP8:%.*]] = zext i16 [[TMP6]] to i32 +; CHECK-NEXT: [[TMP9:%.*]] = sub nsw i32 [[TMP7]], [[TMP8]] +; CHECK-NEXT: ret i32 [[TMP9]] +; + %t0 = bitcast i32* %buffer1 to i8* + %t1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 2) + ret i32 %call +} + +define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { +; CHECK-LABEL: @memcmp1( +; CHECK-NEXT: [[T0:%.*]] = bitcast i32* [[BUFFER1:%.*]] to i8* +; CHECK-NEXT: [[T1:%.*]] = bitcast i32* [[BUFFER2:%.*]] to i8* +; CHECK-NEXT: [[LHSC:%.*]] = load i8, i8* [[T0]], align 1 +; CHECK-NEXT: [[LHSV:%.*]] = zext i8 [[LHSC]] to i32 +; CHECK-NEXT: [[RHSC:%.*]] = load i8, i8* [[T1]], align 1 +; CHECK-NEXT: [[RHSV:%.*]] = zext i8 [[RHSC]] to i32 +; CHECK-NEXT: [[CHARDIFF:%.*]] = sub nsw i32 [[LHSV]], [[RHSV]] +; CHECK-NEXT: ret i32 [[CHARDIFF]] +; + %t0 = bitcast i32* %buffer1 to i8* + %t1 = bitcast i32* %buffer2 to i8* + %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 1) #2 + ret i32 %call +} + +declare signext i32 @memcmp(i8*, i8*, i64) diff --git a/llvm/test/Transforms/PhaseOrdering/X86/lit.local.cfg b/llvm/test/Transforms/PhaseOrdering/X86/lit.local.cfg new file mode 100644 index 00000000000..c8625f4d9d2 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/X86/lit.local.cfg @@ -0,0 +1,2 @@ +if not 'X86' in config.root.targets: + config.unsupported = True diff --git a/llvm/test/Transforms/PhaseOrdering/X86/memcmp-mergeexpand.ll b/llvm/test/Transforms/PhaseOrdering/X86/memcmp-mergeexpand.ll new file mode 100644 index 00000000000..68005ab99d7 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/X86/memcmp-mergeexpand.ll @@ -0,0 +1,76 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -mergeicmps -expandmemcmp -mtriple=i386-unknown-linux < %s | FileCheck %s --check-prefix=X86 +; RUN: opt -S -mergeicmps -expandmemcmp -mtriple=x86_64-unknown-linux < %s | FileCheck %s --check-prefix=X64 + +; This tests interaction between MergeICmp and ExpandMemCmp. 
+ +%"struct.std::pair" = type { i32, i32 } + +define zeroext i1 @opeq1( +; X86-LABEL: @opeq1( +; X86-NEXT: "entry+land.rhs.i": +; X86-NEXT: [[TMP0:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[A:%.*]], i64 0, i32 0 +; X86-NEXT: [[TMP1:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[B:%.*]], i64 0, i32 0 +; X86-NEXT: [[CSTR:%.*]] = bitcast i32* [[TMP0]] to i8* +; X86-NEXT: [[CSTR1:%.*]] = bitcast i32* [[TMP1]] to i8* +; X86-NEXT: [[TMP2:%.*]] = bitcast i8* [[CSTR]] to i32* +; X86-NEXT: [[TMP3:%.*]] = bitcast i8* [[CSTR1]] to i32* +; X86-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] +; X86-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP3]] +; X86-NEXT: [[TMP6:%.*]] = xor i32 [[TMP4]], [[TMP5]] +; X86-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[CSTR]], i8 4 +; X86-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +; X86-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[CSTR1]], i8 4 +; X86-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32* +; X86-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP8]] +; X86-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]] +; X86-NEXT: [[TMP13:%.*]] = xor i32 [[TMP11]], [[TMP12]] +; X86-NEXT: [[TMP14:%.*]] = or i32 [[TMP6]], [[TMP13]] +; X86-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +; X86-NEXT: [[TMP16:%.*]] = zext i1 [[TMP15]] to i32 +; X86-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 0 +; X86-NEXT: br label [[OPEQ1_EXIT:%.*]] +; X86: opeq1.exit: +; X86-NEXT: ret i1 [[TMP17]] +; +; X64-LABEL: @opeq1( +; X64-NEXT: "entry+land.rhs.i": +; X64-NEXT: [[TMP0:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[A:%.*]], i64 0, i32 0 +; X64-NEXT: [[TMP1:%.*]] = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* [[B:%.*]], i64 0, i32 0 +; X64-NEXT: [[CSTR:%.*]] = bitcast i32* [[TMP0]] to i8* +; X64-NEXT: [[CSTR1:%.*]] = bitcast i32* [[TMP1]] to i8* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[CSTR]] to i64* +; X64-NEXT: [[TMP3:%.*]] = bitcast i8* [[CSTR1]] to i64* +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] +; X64-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP3]] +; X64-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4]], [[TMP5]] +; X64-NEXT: [[TMP7:%.*]] = zext i1 [[TMP6]] to i32 +; X64-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0 +; X64-NEXT: br label [[OPEQ1_EXIT:%.*]] +; X64: opeq1.exit: +; X64-NEXT: ret i1 [[TMP8]] +; + %"struct.std::pair"* nocapture readonly dereferenceable(8) %a, + %"struct.std::pair"* nocapture readonly dereferenceable(8) %b) local_unnamed_addr #0 { +entry: + %first.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 0 + %0 = load i32, i32* %first.i, align 4 + %first1.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 0 + %1 = load i32, i32* %first1.i, align 4 + %cmp.i = icmp eq i32 %0, %1 + br i1 %cmp.i, label %land.rhs.i, label %opeq1.exit + +land.rhs.i: + %second.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 1 + %2 = load i32, i32* %second.i, align 4 + %second2.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 1 + %3 = load i32, i32* %second2.i, align 4 + %cmp3.i = icmp eq i32 %2, %3 + br label %opeq1.exit + +opeq1.exit: + %4 = phi i1 [ false, %entry ], [ %cmp3.i, %land.rhs.i ] + ret i1 %4 +} + + diff --git a/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll b/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll new file mode 100644 index 00000000000..0002f21d0e5 --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/X86/memcmp.ll @@ -0,0 +1,995 
@@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -O2 -S -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X86 +; RUN: opt < %s -O2 -S -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=X64 + +; This tests interaction between the MergeICmp and ExpandMemCmp IR transform +; passes. + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" + + +@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1 + +declare i32 @memcmp(i8*, i8*, i64) +declare i32 @bcmp(i8*, i8*, i64) + +define i32 @length0(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length0( +; ALL-NEXT: ret i32 0 +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind + ret i32 %m +} + +define i1 @length0_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length0_eq( +; ALL-NEXT: ret i1 true +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length0_lt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length0_lt( +; ALL-NEXT: ret i1 false +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i32 @length2(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length2( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]]) +; ALL-NEXT: [[TMP7:%.*]] = zext i16 [[TMP5]] to i32 +; ALL-NEXT: [[TMP8:%.*]] = zext i16 [[TMP6]] to i32 +; ALL-NEXT: [[TMP9:%.*]] = sub nsw i32 [[TMP7]], [[TMP8]] +; ALL-NEXT: ret i32 [[TMP9]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + ret i32 %m +} + +define i1 @length2_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length2_eq( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = icmp eq i16 [[TMP3]], [[TMP4]] +; ALL-NEXT: ret i1 [[TMP5]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length2_lt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length2_lt( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]]) +; ALL-NEXT: [[C:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]] +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i1 @length2_gt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length2_gt( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = 
call i16 @llvm.bswap.i16(i16 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP4]]) +; ALL-NEXT: [[C:%.*]] = icmp ugt i16 [[TMP5]], [[TMP6]] +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp sgt i32 %m, 0 + ret i1 %c +} + +define i1 @length2_eq_const(i8* %X) nounwind { +; ALL-LABEL: @length2_eq_const( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP3:%.*]] = icmp ne i16 [[TMP2]], 12849 +; ALL-NEXT: ret i1 [[TMP3]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length2_eq_nobuiltin_attr( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 2) #5 +; ALL-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length3(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length3( +; ALL-NEXT: loadbb: +; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = load i16, i16* [[TMP0]], align 2 +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = icmp eq i16 [[TMP2]], [[TMP3]] +; ALL-NEXT: br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; ALL: res_block: +; ALL-NEXT: [[TMP5:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP2]]) +; ALL-NEXT: [[TMP6:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP3]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]] +; ALL-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; ALL-NEXT: br label [[ENDBLOCK:%.*]] +; ALL: loadbb1: +; ALL-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 2 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 2 +; ALL-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1 +; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1 +; ALL-NEXT: [[TMP13:%.*]] = zext i8 [[TMP11]] to i32 +; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 +; ALL-NEXT: [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]] +; ALL-NEXT: br label [[ENDBLOCK]] +; ALL: endblock: +; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; ALL-NEXT: ret i32 [[PHI_RES]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind + ret i32 %m +} + +define i1 @length3_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length3_eq( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]] +; ALL-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2 +; ALL-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2 +; ALL-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1 +; ALL-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1 +; ALL-NEXT: [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]] +; ALL-NEXT: [[TMP11:%.*]] = zext i8 [[TMP10]] to i16 +; ALL-NEXT: [[TMP12:%.*]] = or i16 [[TMP5]], [[TMP11]] +; ALL-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP12]], 0 +; ALL-NEXT: ret i1 [[TMP13]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind + %c = icmp ne 
i32 %m, 0 + ret i1 %c +} + +define i32 @length4(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length4( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32 +; ALL-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32 +; ALL-NEXT: [[TMP11:%.*]] = sub nsw i32 [[TMP9]], [[TMP10]] +; ALL-NEXT: ret i32 [[TMP11]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + ret i32 %m +} + +define i1 @length4_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length4_eq( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] +; ALL-NEXT: ret i1 [[TMP5]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length4_lt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length4_lt( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: ret i1 [[TMP7]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i1 @length4_gt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length4_gt( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP4]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: ret i1 [[TMP7]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp sgt i32 %m, 0 + ret i1 %c +} + +define i1 @length4_eq_const(i8* %X) nounwind { +; ALL-LABEL: @length4_eq_const( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 875770417 +; ALL-NEXT: ret i1 [[TMP3]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length5(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length5( +; ALL-NEXT: loadbb: +; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]] +; ALL-NEXT: br i1 [[TMP4]], label 
[[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; ALL: res_block: +; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]]) +; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; ALL-NEXT: br label [[ENDBLOCK:%.*]] +; ALL: loadbb1: +; ALL-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; ALL-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1 +; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1 +; ALL-NEXT: [[TMP13:%.*]] = zext i8 [[TMP11]] to i32 +; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 +; ALL-NEXT: [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]] +; ALL-NEXT: br label [[ENDBLOCK]] +; ALL: endblock: +; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; ALL-NEXT: ret i32 [[PHI_RES]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + ret i32 %m +} + +define i1 @length5_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length5_eq( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] +; ALL-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; ALL-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; ALL-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1 +; ALL-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1 +; ALL-NEXT: [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]] +; ALL-NEXT: [[TMP11:%.*]] = zext i8 [[TMP10]] to i32 +; ALL-NEXT: [[TMP12:%.*]] = or i32 [[TMP5]], [[TMP11]] +; ALL-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 +; ALL-NEXT: ret i1 [[TMP13]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length5_lt(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length5_lt( +; ALL-NEXT: loadbb: +; ALL-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]] +; ALL-NEXT: br i1 [[TMP4]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; ALL: res_block: +; ALL-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]]) +; ALL-NEXT: [[TMP6:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; ALL-NEXT: [[TMP7:%.*]] = icmp ult i32 [[TMP5]], [[TMP6]] +; ALL-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; ALL-NEXT: br label [[ENDBLOCK:%.*]] +; ALL: loadbb1: +; ALL-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; ALL-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1 +; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]], align 1 +; ALL-NEXT: [[TMP13:%.*]] = zext i8 [[TMP11]] to i32 +; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 +; ALL-NEXT: [[TMP15:%.*]] = sub nsw i32 [[TMP13]], [[TMP14]] +; ALL-NEXT: br label [[ENDBLOCK]] +; ALL: endblock: +; ALL-NEXT: [[PHI_RES:%.*]] = phi i32 [ [[TMP15]], [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; ALL-NEXT: [[C:%.*]] = icmp slt i32 [[PHI_RES]], 0 +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + %c = icmp slt i32 
%m, 0 + ret i1 %c +} + +define i1 @length7_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length7_eq( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; ALL-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; ALL-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; ALL-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 3 +; ALL-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i32* +; ALL-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3 +; ALL-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +; ALL-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP6]], align 4 +; ALL-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 4 +; ALL-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] +; ALL-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP9]], [[TMP10]] +; ALL-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] +; ALL-NEXT: ret i1 [[TMP13]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 7) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length8(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length8( +; X86-NEXT: loadbb: +; X86-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i32* +; X86-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; X86-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 +; X86-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; X86-NEXT: [[TMP4:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP2]]) +; X86-NEXT: [[TMP5:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP3]]) +; X86-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP2]], [[TMP3]] +; X86-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; X86: res_block: +; X86-NEXT: [[PHI_SRC1:%.*]] = phi i32 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], [[LOADBB1]] ] +; X86-NEXT: [[PHI_SRC2:%.*]] = phi i32 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ] +; X86-NEXT: [[TMP7:%.*]] = icmp ult i32 [[PHI_SRC1]], [[PHI_SRC2]] +; X86-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; X86-NEXT: br label [[ENDBLOCK:%.*]] +; X86: loadbb1: +; X86-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X86-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32* +; X86-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; X86-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* +; X86-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP10]], align 4 +; X86-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 4 +; X86-NEXT: [[TMP15]] = call i32 @llvm.bswap.i32(i32 [[TMP13]]) +; X86-NEXT: [[TMP16]] = call i32 @llvm.bswap.i32(i32 [[TMP14]]) +; X86-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP13]], [[TMP14]] +; X86-NEXT: br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; X86: endblock: +; X86-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; X86-NEXT: ret i32 [[PHI_RES]] +; +; X64-LABEL: @length8( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]]) +; X64-NEXT: [[TMP6:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]]) +; X64-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP5]], [[TMP6]] +; X64-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP5]], [[TMP6]] +; X64-NEXT: [[TMP9:%.*]] = zext i1 [[TMP7]] to i32 +; X64-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32 +; X64-NEXT: [[TMP11:%.*]] = sub nsw i32 [[TMP9]], [[TMP10]] +; X64-NEXT: ret i32 [[TMP11]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 
8) nounwind + ret i32 %m +} + +define i1 @length8_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length8_eq( +; X86-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; X86-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32* +; X86-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4 +; X86-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 +; X86-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X86-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i32* +; X86-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 +; X86-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +; X86-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP6]], align 4 +; X86-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP8]], align 4 +; X86-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP3]], [[TMP4]] +; X86-NEXT: [[TMP12:%.*]] = icmp eq i32 [[TMP9]], [[TMP10]] +; X86-NEXT: [[C:%.*]] = and i1 [[TMP12]], [[TMP11]] +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length8_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]] +; X64-NEXT: ret i1 [[TMP5]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length8_eq_const(i8* %X) nounwind { +; X86-LABEL: @length8_eq_const( +; X86-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32* +; X86-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 +; X86-NEXT: [[TMP3:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X86-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i32* +; X86-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 +; X86-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP2]], 858927408 +; X86-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 926299444 +; X86-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] +; X86-NEXT: ret i1 [[TMP8]] +; +; X64-LABEL: @length8_eq_const( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP3:%.*]] = icmp ne i64 [[TMP2]], 3978425819141910832 +; X64-NEXT: ret i1 [[TMP3]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length9_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length9_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9) [[X:%.*]], i8* dereferenceable(9) [[Y:%.*]], i64 9) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length9_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; X64-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]], align 1 +; X64-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]], align 1 +; X64-NEXT: [[TMP10:%.*]] = xor i8 [[TMP8]], [[TMP9]] +; X64-NEXT: [[TMP11:%.*]] = zext i8 [[TMP10]] to i64 +; X64-NEXT: [[TMP12:%.*]] = or i64 [[TMP5]], [[TMP11]] +; X64-NEXT: [[TMP13:%.*]] = icmp eq i64 [[TMP12]], 0 +; X64-NEXT: ret i1 [[TMP13]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9) nounwind + %c = icmp eq i32 %m, 
0 + ret i1 %c +} + +define i1 @length10_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length10_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(10) [[X:%.*]], i8* dereferenceable(10) [[Y:%.*]], i64 10) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length10_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* +; X64-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* +; X64-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]], align 2 +; X64-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]], align 2 +; X64-NEXT: [[TMP12:%.*]] = xor i16 [[TMP10]], [[TMP11]] +; X64-NEXT: [[TMP13:%.*]] = zext i16 [[TMP12]] to i64 +; X64-NEXT: [[TMP14:%.*]] = or i64 [[TMP5]], [[TMP13]] +; X64-NEXT: [[TMP15:%.*]] = icmp eq i64 [[TMP14]], 0 +; X64-NEXT: ret i1 [[TMP15]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 10) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length11_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length11_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(11) [[X:%.*]], i8* dereferenceable(11) [[Y:%.*]], i64 11) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length11_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 3 +; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 3 +; X64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; X64-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8 +; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8 +; X64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]] +; X64-NEXT: [[C:%.*]] = and i1 [[TMP12]], [[TMP11]] +; X64-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 11) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length12_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length12_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(12) [[X:%.*]], i8* dereferenceable(12) [[Y:%.*]], i64 12) #3 +; X86-NEXT: [[C:%.*]] = icmp ne i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length12_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* +; X64-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* +; X64-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]], align 4 +; X64-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]], align 4 +; X64-NEXT: 
[[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]] +; X64-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +; X64-NEXT: [[TMP14:%.*]] = or i64 [[TMP5]], [[TMP13]] +; X64-NEXT: [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0 +; X64-NEXT: ret i1 [[TMP15]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length12(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length12( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(12) [[X:%.*]], i8* dereferenceable(12) [[Y:%.*]], i64 12) #3 +; X86-NEXT: ret i32 [[M]] +; +; X64-LABEL: @length12( +; X64-NEXT: loadbb: +; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]], align 8 +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]]) +; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]]) +; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP2]], [[TMP3]] +; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; X64: res_block: +; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP17:%.*]], [[LOADBB1]] ] +; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP18:%.*]], [[LOADBB1]] ] +; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; X64-NEXT: br label [[ENDBLOCK:%.*]] +; X64: loadbb1: +; X64-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32* +; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* +; X64-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP10]], align 4 +; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP12]], align 4 +; X64-NEXT: [[TMP15:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP13]]) +; X64-NEXT: [[TMP16:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP14]]) +; X64-NEXT: [[TMP17]] = zext i32 [[TMP15]] to i64 +; X64-NEXT: [[TMP18]] = zext i32 [[TMP16]] to i64 +; X64-NEXT: [[TMP19:%.*]] = icmp eq i32 [[TMP13]], [[TMP14]] +; X64-NEXT: br i1 [[TMP19]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; X64: endblock: +; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; X64-NEXT: ret i32 [[PHI_RES]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind + ret i32 %m +} + +define i1 @length13_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length13_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(13) [[X:%.*]], i8* dereferenceable(13) [[Y:%.*]], i64 13) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length13_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 5 +; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 5 +; X64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; X64-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8 +; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8 +; X64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]] +; X64-NEXT: [[C:%.*]] = 
and i1 [[TMP12]], [[TMP11]] +; X64-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 13) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length14_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length14_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(14) [[X:%.*]], i8* dereferenceable(14) [[Y:%.*]], i64 14) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length14_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 6 +; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 6 +; X64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; X64-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8 +; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8 +; X64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]] +; X64-NEXT: [[C:%.*]] = and i1 [[TMP12]], [[TMP11]] +; X64-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 14) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length15_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length15_eq( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(15) [[X:%.*]], i8* dereferenceable(15) [[Y:%.*]], i64 15) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length15_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 7 +; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 7 +; X64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i64* +; X64-NEXT: [[TMP9:%.*]] = load i64, i64* [[TMP6]], align 8 +; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP8]], align 8 +; X64-NEXT: [[TMP11:%.*]] = icmp eq i64 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP12:%.*]] = icmp eq i64 [[TMP9]], [[TMP10]] +; X64-NEXT: [[C:%.*]] = and i1 [[TMP12]], [[TMP11]] +; X64-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 15) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329 + +define i32 @length16(i8* %X, i8* %Y) nounwind { +; X86-LABEL: @length16( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) [[Y:%.*]], i64 16) #3 +; X86-NEXT: ret i32 [[M]] +; +; X64-LABEL: @length16( +; X64-NEXT: loadbb: +; X64-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[Y:%.*]] to i64* +; X64-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP0]], align 8 +; X64-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP2]]) +; X64-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP3]]) +; X64-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP2]], [[TMP3]] +; X64-NEXT: br i1 [[TMP6]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] +; X64: res_block: +; X64-NEXT: [[PHI_SRC1:%.*]] = phi i64 [ [[TMP4]], [[LOADBB:%.*]] ], [ [[TMP15:%.*]], 
[[LOADBB1]] ] +; X64-NEXT: [[PHI_SRC2:%.*]] = phi i64 [ [[TMP5]], [[LOADBB]] ], [ [[TMP16:%.*]], [[LOADBB1]] ] +; X64-NEXT: [[TMP7:%.*]] = icmp ult i64 [[PHI_SRC1]], [[PHI_SRC2]] +; X64-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 -1, i32 1 +; X64-NEXT: br label [[ENDBLOCK:%.*]] +; X64: loadbb1: +; X64-NEXT: [[TMP9:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i64* +; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8 +; X64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i64* +; X64-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP10]], align 8 +; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP12]], align 8 +; X64-NEXT: [[TMP15]] = call i64 @llvm.bswap.i64(i64 [[TMP13]]) +; X64-NEXT: [[TMP16]] = call i64 @llvm.bswap.i64(i64 [[TMP14]]) +; X64-NEXT: [[TMP17:%.*]] = icmp eq i64 [[TMP13]], [[TMP14]] +; X64-NEXT: br i1 [[TMP17]], label [[ENDBLOCK]], label [[RES_BLOCK]] +; X64: endblock: +; X64-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ [[TMP8]], [[RES_BLOCK]] ] +; X64-NEXT: ret i32 [[PHI_RES]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind + ret i32 %m +} + +define i1 @length16_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: @length16_eq( +; X86-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) [[Y:%.*]], i64 16) #3 +; X86-NEXT: [[CMP:%.*]] = icmp ne i32 [[CALL]], 0 +; X86-NEXT: ret i1 [[CMP]] +; +; X64-LABEL: @length16_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128* +; X64-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]] +; X64-NEXT: ret i1 [[TMP5]] +; + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length16_eq_const(i8* %X) nounwind { +; X86-LABEL: @length16_eq_const( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(16) [[X:%.*]], i8* dereferenceable(16) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 16) #3 +; X86-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length16_eq_const( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP3:%.*]] = icmp eq i128 [[TMP2]], 70720121592765328381466889075544961328 +; X64-NEXT: ret i1 [[TMP3]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914 + +define i32 @length24(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length24( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) [[Y:%.*]], i64 24) #3 +; ALL-NEXT: ret i32 [[M]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind + ret i32 %m +} + +define i1 @length24_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: @length24_eq( +; X86-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) [[Y:%.*]], i64 24) #3 +; X86-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0 +; X86-NEXT: ret i1 [[CMP]] +; +; X64-LABEL: @length24_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128* +; X64-NEXT: [[TMP3:%.*]] = load 
i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = xor i128 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 16 +; X64-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* +; X64-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 16 +; X64-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* +; X64-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]], align 8 +; X64-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]], align 8 +; X64-NEXT: [[TMP12:%.*]] = xor i64 [[TMP10]], [[TMP11]] +; X64-NEXT: [[TMP13:%.*]] = zext i64 [[TMP12]] to i128 +; X64-NEXT: [[TMP14:%.*]] = or i128 [[TMP5]], [[TMP13]] +; X64-NEXT: [[TMP15:%.*]] = icmp eq i128 [[TMP14]], 0 +; X64-NEXT: ret i1 [[TMP15]] +; + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind + %cmp = icmp eq i32 %call, 0 + ret i1 %cmp +} + +define i1 @length24_eq_const(i8* %X) nounwind { +; X86-LABEL: @length24_eq_const( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(24) [[X:%.*]], i8* dereferenceable(24) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 24) #3 +; X86-NEXT: [[C:%.*]] = icmp ne i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length24_eq_const( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], 70720121592765328381466889075544961328 +; X64-NEXT: [[TMP4:%.*]] = getelementptr i8, i8* [[X]], i64 16 +; X64-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i64* +; X64-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8 +; X64-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 3689065127958034230 +; X64-NEXT: [[TMP8:%.*]] = zext i64 [[TMP7]] to i128 +; X64-NEXT: [[TMP9:%.*]] = or i128 [[TMP3]], [[TMP8]] +; X64-NEXT: [[TMP10:%.*]] = icmp ne i128 [[TMP9]], 0 +; X64-NEXT: ret i1 [[TMP10]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length32(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length32( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) [[Y:%.*]], i64 32) #3 +; ALL-NEXT: ret i32 [[M]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind + ret i32 %m +} + +; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325 + +define i1 @length32_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: @length32_eq( +; X86-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) [[Y:%.*]], i64 32) #3 +; X86-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0 +; X86-NEXT: ret i1 [[CMP]] +; +; X64-LABEL: @length32_eq( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128* +; X64-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]], align 8 +; X64-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[X]], i64 16 +; X64-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i128* +; X64-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 16 +; X64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128* +; X64-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP6]], align 8 +; X64-NEXT: [[TMP10:%.*]] = load i128, i128* [[TMP8]], align 8 +; X64-NEXT: [[TMP11:%.*]] = icmp eq i128 [[TMP3]], [[TMP4]] +; X64-NEXT: [[TMP12:%.*]] = icmp eq i128 [[TMP9]], [[TMP10]] +; X64-NEXT: [[CMP:%.*]] = and i1 
[[TMP12]], [[TMP11]] +; X64-NEXT: ret i1 [[CMP]] +; + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind + %cmp = icmp eq i32 %call, 0 + ret i1 %cmp +} + +define i1 @length32_eq_const(i8* %X) nounwind { +; X86-LABEL: @length32_eq_const( +; X86-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(32) [[X:%.*]], i8* dereferenceable(32) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 32) #3 +; X86-NEXT: [[C:%.*]] = icmp ne i32 [[M]], 0 +; X86-NEXT: ret i1 [[C]] +; +; X64-LABEL: @length32_eq_const( +; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128* +; X64-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP1]], align 8 +; X64-NEXT: [[TMP3:%.*]] = getelementptr i8, i8* [[X]], i64 16 +; X64-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i128* +; X64-NEXT: [[TMP5:%.*]] = load i128, i128* [[TMP4]], align 8 +; X64-NEXT: [[TMP6:%.*]] = icmp ne i128 [[TMP2]], 70720121592765328381466889075544961328 +; X64-NEXT: [[TMP7:%.*]] = icmp ne i128 [[TMP5]], 65382562593882267225249597816672106294 +; X64-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] +; X64-NEXT: ret i1 [[TMP8]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length64(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @length64( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) [[Y:%.*]], i64 64) #3 +; ALL-NEXT: ret i32 [[M]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind + ret i32 %m +} + +define i1 @length64_eq(i8* %x, i8* %y) nounwind { +; ALL-LABEL: @length64_eq( +; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) [[Y:%.*]], i64 64) #3 +; ALL-NEXT: [[CMP:%.*]] = icmp ne i32 [[CALL]], 0 +; ALL-NEXT: ret i1 [[CMP]] +; + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length64_eq_const(i8* %X) nounwind { +; ALL-LABEL: @length64_eq_const( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(64) [[X:%.*]], i8* dereferenceable(64) getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i64 0, i64 0), i64 64) #3 +; ALL-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; This checks that we do not do stupid things with huge sizes. +define i32 @huge_length(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @huge_length( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9223372036854775807) [[X:%.*]], i8* dereferenceable(9223372036854775807) [[Y:%.*]], i64 9223372036854775807) #3 +; ALL-NEXT: ret i32 [[M]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind + ret i32 %m +} + +define i1 @huge_length_eq(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @huge_length_eq( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* dereferenceable(9223372036854775807) [[X:%.*]], i8* dereferenceable(9223372036854775807) [[Y:%.*]], i64 9223372036854775807) #3 +; ALL-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; This checks non-constant sizes. 
+define i32 @nonconst_length(i8* %X, i8* %Y, i64 %size) nounwind { +; ALL-LABEL: @nonconst_length( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 [[SIZE:%.*]]) #3 +; ALL-NEXT: ret i32 [[M]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind + ret i32 %m +} + +define i1 @nonconst_length_eq(i8* %X, i8* %Y, i64 %size) nounwind { +; ALL-LABEL: @nonconst_length_eq( +; ALL-NEXT: [[M:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 [[SIZE:%.*]]) #3 +; ALL-NEXT: [[C:%.*]] = icmp eq i32 [[M]], 0 +; ALL-NEXT: ret i1 [[C]] +; + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @bcmp_length2(i8* %X, i8* %Y) nounwind { +; ALL-LABEL: @bcmp_length2( +; ALL-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i16* +; ALL-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i16* +; ALL-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]], align 2 +; ALL-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]], align 2 +; ALL-NEXT: [[TMP5:%.*]] = icmp eq i16 [[TMP3]], [[TMP4]] +; ALL-NEXT: ret i1 [[TMP5]] +; + %m = tail call i32 @bcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr36421.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr36421.ll new file mode 100644 index 00000000000..55b4f8cb94a --- /dev/null +++ b/llvm/test/Transforms/PhaseOrdering/X86/pr36421.ll @@ -0,0 +1,68 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -O2 -S | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" +target triple = "x86_64-unknown-unknown" + +@.str = private unnamed_addr constant [7 x i8] c"abcdef\00", align 1 +@.str.1 = private unnamed_addr constant [7 x i8] c"ABCDEF\00", align 1 + +define i32 @test(i8* nocapture readonly %string, i32 %len) local_unnamed_addr #0 { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[LEN:%.*]], 6 +; CHECK-NEXT: br i1 [[COND]], label [[SW_BB:%.*]], label [[RETURN:%.*]] +; CHECK: sw.bb: +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[STRING:%.*]] to i32* +; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, i8* [[STRING]], i64 4 +; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i16* +; CHECK-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP3]], align 2 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP1]], 1684234849 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i16 [[TMP4]], 26213 +; CHECK-NEXT: [[CMP:%.*]] = and i1 [[TMP6]], [[TMP5]] +; CHECK-NEXT: br i1 [[CMP]], label [[RETURN]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[TMP1]], 1145258561 +; CHECK-NEXT: [[TMP8:%.*]] = xor i16 [[TMP4]], 17989 +; CHECK-NEXT: [[TMP9:%.*]] = zext i16 [[TMP8]] to i32 +; CHECK-NEXT: [[TMP10:%.*]] = or i32 [[TMP7]], [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP10]], 0 +; CHECK-NEXT: [[DOT:%.*]] = select i1 [[TMP11]], i32 64, i32 0 +; CHECK-NEXT: br label [[RETURN]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 61, [[SW_BB]] ], [ [[DOT]], [[IF_END]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cond = icmp eq i32 %len, 6 + br i1 %cond, label %sw.bb, label %return + +sw.bb: ; preds = %entry + %call = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), 
i64 6) + %cmp = icmp eq i32 %call, 0 + br i1 %cmp, label %return, label %if.end + +if.end: ; preds = %sw.bb + %call1 = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.1, i64 0, i64 0), i64 6) + %cmp2 = icmp eq i32 %call1, 0 + %. = select i1 %cmp2, i32 64, i32 0 + br label %return + +return: ; preds = %entry, %if.end8, %if.end4, %if.end, %sw.bb + %retval.0 = phi i32 [ 61, %sw.bb ], [ %., %if.end ], [ 0, %entry ] + ret i32 %retval.0 +} + +; Function Attrs: nounwind readonly +declare i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1 + +attributes #0 = { nounwind readonly ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } + +!llvm.module.flags = !{!0, !1} +!llvm.ident = !{!2} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 7, !"PIC Level", i32 2} +!2 = !{!"clang version 7.0.0 (trunk 325350)"} |
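For readers reproducing these checks by hand, the sketch below is a minimal standalone variant of the @length4_eq pattern added above. It is illustrative only and not part of this patch: the file name sketch_memcmp4.ll, the function name @sketch_length4_eq, and the hand-written loose CHECK lines are assumptions. It reuses the x86-64 RUN invocation and data layout from the new PhaseOrdering memcmp test; whether the expansion actually fires at -O2 depends on the pass pipeline of the checkout under test, which is exactly what these new tests pin down.

; RUN: opt < %s -O2 -S -mtriple=x86_64-unknown-unknown | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

declare i32 @memcmp(i8*, i8*, i64)

; memcmp(X, Y, 4) != 0 is expected to lower to two i32 loads and a single
; icmp ne, mirroring the X64 checks for @length4_eq in the test above.
define i1 @sketch_length4_eq(i8* %X, i8* %Y) nounwind {
  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
  %c = icmp ne i32 %m, 0
  ret i1 %c
}
; Loose, hand-written checks (unlike the autogenerated CHECK-NEXT lines above).
; CHECK-LABEL: @sketch_length4_eq(
; CHECK: load i32
; CHECK: load i32
; CHECK: icmp ne i32
; CHECK: ret i1

Outside of lit, the same check can be run directly with the file path substituted for %s, e.g. opt < sketch_memcmp4.ll -O2 -S -mtriple=x86_64-unknown-unknown | FileCheck sketch_memcmp4.ll.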