author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-06-26 17:56:53 +0000
---|---|---
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-06-26 17:56:53 +0000
commit | 16ba077a2d5fce2ad8e094a08ad045a31021d8dd (patch) |
tree | ed641b0a14f78549b627b6bfa2d62dd7357df62c |
parent | 908b697e78ffa16e49294a8323bc67da6888197d (diff) |
[X86][AVX] Add reduced test case for PR41545
llvm-svn: 364454
-rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-combining.ll | 76
1 file changed, 76 insertions(+), 0 deletions(-)
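The new function is the reduced reproducer for PR41545. It tears the bytes of the `<16 x i8>` argument apart with four deinterleaving shuffles, zero-extends each `<4 x i8>` slice to `<4 x i32>`, shifts the slices back to byte positions 0/8/16/24, and ORs everything together before the add. On a little-endian target such as x86-64 that chain merely reassembles the original vector, so the whole thing should fold to a bitcast. A minimal sketch of the equivalent IR, hand-written here for illustration (`@PR41545_folded` is a hypothetical name, not part of the commit):

```llvm
; Assuming a little-endian target such as x86-64:
; byte0 | byte1<<8 | byte2<<16 | byte3<<24 recreates each original i32
; lane, so the shuffle/zext/shl/or chain in @PR41545 is just a bitcast.
define <4 x i32> @PR41545_folded(<4 x i32> %a0, <16 x i8> %a1) {
  %cast = bitcast <16 x i8> %a1 to <4 x i32>
  %sum = add <4 x i32> %a0, %cast
  ret <4 x i32> %sum
}
```

That single-add form is exactly what the SSE2/SSSE3 CHECK lines below capture (one `paddd %xmm1, %xmm0`); the SSE4.1 and AVX paths instead keep a blend/shift/or sequence, which is the missed shuffle combine the test pins down.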
```diff
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 75fcfdbfcb1..1e74e18718c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2858,3 +2858,79 @@ define <8 x i16> @PR39549(<16 x i8> %x) {
   %d = ashr <8 x i16> %c, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   ret <8 x i16> %d
 }
+
+define <4 x i32> @PR41545(<4 x i32> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: PR41545:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: PR41545:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    paddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: PR41545:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0]
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; SSE41-NEXT:    psrld $24, %xmm1
+; SSE41-NEXT:    pslld $24, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    por %xmm2, %xmm3
+; SSE41-NEXT:    paddd %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: PR41545:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $24, %xmm1, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-SLOW-LABEL: PR41545:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vpsrld $24, %xmm1, %xmm2
+; AVX2-SLOW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX2-SLOW-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX2-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: PR41545:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm2
+; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX2-FAST-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX2-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    retq
+  %1 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %2 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %3 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %4 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %5 = zext <4 x i8> %1 to <4 x i32>
+  %6 = zext <4 x i8> %2 to <4 x i32>
+  %7 = zext <4 x i8> %3 to <4 x i32>
+  %8 = zext <4 x i8> %4 to <4 x i32>
+  %9 = shl <4 x i32> %6, <i32 8, i32 8, i32 8, i32 8>
+  %10 = shl <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
+  %11 = shl <4 x i32> %8, <i32 24, i32 24, i32 24, i32 24>
+  %12 = or <4 x i32> %5, %9
+  %13 = or <4 x i32> %12, %10
+  %14 = or <4 x i32> %13, %11
+  %15 = add <4 x i32> %a0, %14
+  ret <4 x i32> %15
+}
```
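For anyone re-running the test: the hunk doesn't include the file's RUN lines, and the `# %bb.0:` labels and `{{.*#+}}` markers indicate the CHECK blocks were generated with `llvm/utils/update_llc_test_checks.py`. Assuming the file follows the usual conventions of this test suite, the prefixes above correspond to invocations along these lines (illustrative sketch, not copied from the file):

```llvm
; Illustrative only -- the real RUN lines live at the top of
; vector-shuffle-combining.ll and are not part of this hunk.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX1
```

After a codegen change, the CHECK lines are regenerated by re-running update_llc_test_checks.py on the file rather than edited by hand.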