-rw-r--r-- | llvm/test/CodeGen/X86/x86-interleaved-access.ll | 396
1 file changed, 198 insertions, 198 deletions
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index a131cc1d626..311868554ef 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX3
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512

 define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
 ; AVX1-LABEL: load_factorf64_4:
@@ -204,22 +204,22 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: store_factorf64_4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX3-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX3-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX3-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX3-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX3-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX3-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX3-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX3-NEXT: vinsertf64x4 $1, %ymm4, %zmm2, %zmm1
-; AVX3-NEXT: vinsertf64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX3-NEXT: vmovupd %zmm0, 64(%rdi)
-; AVX3-NEXT: vmovupd %zmm1, (%rdi)
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: store_factorf64_4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX512-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX512-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512-NEXT: vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX512-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm2, %zmm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512-NEXT: vmovupd %zmm0, 64(%rdi)
+; AVX512-NEXT: vmovupd %zmm1, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %s1 = shufflevector <4 x double> %v2, <4 x double> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %interleaved.vec = shufflevector <8 x double> %s0, <8 x double> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
@@ -262,22 +262,22 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: store_factori64_4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
-; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX3-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX3-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX3-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX3-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX3-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm1
-; AVX3-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX3-NEXT: vmovdqu64 %zmm0, 64(%rdi)
-; AVX3-NEXT: vmovdqu64 %zmm1, (%rdi)
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: store_factori64_4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX512-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512-NEXT: vmovdqu64 %zmm0, 64(%rdi)
+; AVX512-NEXT: vmovdqu64 %zmm1, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %s1 = shufflevector <4 x i64> %v2, <4 x i64> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 %interleaved.vec = shufflevector <8 x i64> %s0, <8 x i64> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
@@ -345,26 +345,26 @@ define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: interleaved_store_vf32_i8_stride4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX3-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX3-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
-; AVX3-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX3-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
-; AVX3-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
-; AVX3-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
-; AVX3-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
-; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm2
-; AVX3-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm4
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
-; AVX3-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX3-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX3-NEXT: vmovdqu8 %zmm0, 64(%rdi)
-; AVX3-NEXT: vmovdqu8 %zmm2, (%rdi)
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: interleaved_store_vf32_i8_stride4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
+; AVX512-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
+; AVX512-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
+; AVX512-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm2
+; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm4
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vmovdqu8 %zmm0, 64(%rdi)
+; AVX512-NEXT: vmovdqu8 %zmm2, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %v1 = shufflevector <32 x i8> %x1, <32 x i8> %x2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
 %v2 = shufflevector <32 x i8> %x3, <32 x i8> %x4, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
 %interleaved.vec = shufflevector <64 x i8> %v1, <64 x i8> %v2, <128 x i32> <i32 0, i32 32, i32 64, i32 96, i32 1, i32 33, i32 65, i32 97, i32 2, i32 34, i32 66, i32 98, i32 3, i32 35, i32 67, i32 99, i32 4, i32 36, i32 68, i32 100, i32 5, i32 37, i32 69, i32 101, i32 6, i32 38, i32 70, i32 102, i32 7, i32 39, i32 71, i32 103, i32 8, i32 40, i32 72, i32 104, i32 9, i32 41, i32 73, i32 105, i32 10, i32 42, i32 74, i32 106, i32 11, i32 43, i32 75, i32 107, i32 12, i32 44, i32 76, i32 108, i32 13, i32 45, i32 77, i32 109, i32 14, i32 46, i32 78, i32 110, i32 15, i32 47, i32 79, i32 111, i32 16, i32 48, i32 80, i32 112, i32 17, i32 49, i32 81, i32 113, i32 18, i32 50, i32 82, i32 114, i32 19, i32 51, i32 83, i32 115, i32 20, i32 52, i32 84, i32 116, i32 21, i32 53, i32 85, i32 117, i32 22, i32 54, i32 86, i32 118, i32 23, i32 55, i32 87, i32 119, i32 24, i32 56, i32 88, i32 120, i32 25, i32 57, i32 89, i32 121, i32 26, i32 58, i32 90, i32 122, i32 27, i32 59, i32 91, i32 123, i32 28, i32 60, i32 92, i32 124, i32 29, i32 61, i32 93, i32 125, i32 30, i32 62, i32 94, i32 126, i32 31, i32 63, i32 95, i32 127>
@@ -419,34 +419,34 @@ define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX3: # BB#0:
-; AVX3-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
-; AVX3-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; AVX3-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX3-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX3-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31]
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX3-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u]
-; AVX3-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255>
-; AVX3-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
-; AVX3-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u]
-; AVX3-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3,0,1]
-; AVX3-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u,u]
-; AVX3-NEXT: vmovdqa {{.*#+}} ymm8 = <255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255,u,u>
-; AVX3-NEXT: vpblendvb %ymm8, %ymm4, %ymm7, %ymm4
-; AVX3-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7],ymm4[8],ymm2[9],ymm4[10],ymm2[11],ymm4[12],ymm2[13],ymm4[14],ymm2[15]
-; AVX3-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23]
-; AVX3-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u]
-; AVX3-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
-; AVX3-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u]
-; AVX3-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u,u]
-; AVX3-NEXT: vpblendvb %ymm8, %ymm0, %ymm3, %ymm0
-; AVX3-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX3-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX3-NEXT: vmovdqu8 %zmm0, (%rdi)
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: interleaved_store_vf16_i8_stride4:
+; AVX512: # BB#0:
+; AVX512-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31]
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255>
+; AVX512-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2
+; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm0[8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u]
+; AVX512-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,8,u,u,u,9,u,u,u,10,u,u,u,11,u,u,28,u,u,u,29,u,u,u,30,u,u,u,31,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = <255,0,u,u,255,0,u,u,255,0,u,u,255,0,u,u,0,255,u,u,0,255,u,u,0,255,u,u,0,255,u,u>
+; AVX512-NEXT: vpblendvb %ymm8, %ymm4, %ymm7, %ymm4
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7],ymm4[8],ymm2[9],ymm4[10],ymm2[11],ymm4[12],ymm2[13],ymm4[14],ymm2[15]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u]
+; AVX512-NEXT: vpblendvb %ymm5, %ymm1, %ymm3, %ymm1
+; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm6[u,0,u,u,u,1,u,u,u,2,u,u,u,3,u,u,20,u,u,u,21,u,u,u,22,u,u,u,23,u,u,u]
+; AVX512-NEXT: vpblendvb %ymm8, %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmovdqu8 %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %v1 = shufflevector <16 x i8> %x1, <16 x i8> %x2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 %v2 = shufflevector <16 x i8> %x3, <16 x i8> %x4, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 %interleaved.vec = shufflevector <32 x i8> %v1, <32 x i8> %v2, <64 x i32> <i32 0,i32 16,i32 32,i32 48,i32 1,i32 17,i32 33,i32 49,i32 2,i32 18,i32 34,i32 50,i32 3,i32 19,i32 35,i32 51,i32 4,i32 20,i32 36,i32 52,i32 5,i32 21,i32 37,i32 53,i32 6,i32 22,i32 38,i32 54,i32 7,i32 23,i32 39,i32 55,i32 8,i32 24,i32 40,i32 56,i32 9,i32 25,i32 41,i32 57,i32 10,i32 26,i32 42,i32 58,i32 11,i32 27,i32 43,i32 59,i32 12,i32 28,i32 44,i32 60,i32 13,i32 29,i32 45,i32 61,i32 14,i32 30,i32 46,i32 62,i32 15,i32 31,i32 47,i32 63>
@@ -500,21 +500,21 @@ define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: interleaved_load_vf8_i8_stride4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vmovdqu (%rdi), %ymm0
-; AVX3-NEXT: vpmovdw %zmm0, %ymm1
-; AVX3-NEXT: vpsrlw $8, %ymm0, %ymm2
-; AVX3-NEXT: vpmovdw %zmm2, %ymm2
-; AVX3-NEXT: vpsrld $16, %ymm0, %ymm3
-; AVX3-NEXT: vpmovdw %zmm3, %ymm3
-; AVX3-NEXT: vpsrld $24, %ymm0, %ymm0
-; AVX3-NEXT: vpmovdw %zmm0, %ymm0
-; AVX3-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX3-NEXT: vpaddw %xmm3, %xmm0, %xmm0
-; AVX3-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: interleaved_load_vf8_i8_stride4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm1
+; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm2
+; AVX512-NEXT: vpmovdw %zmm2, %ymm2
+; AVX512-NEXT: vpsrld $16, %ymm0, %ymm3
+; AVX512-NEXT: vpmovdw %zmm3, %ymm3
+; AVX512-NEXT: vpsrld $24, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpaddw %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpmullw %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %wide.vec = load <32 x i8>, <32 x i8>* %ptr, align 16
 %v1 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
 %v2 = shufflevector <32 x i8> %wide.vec, <32 x i8> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
@@ -629,27 +629,27 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: interleaved_load_vf16_i8_stride4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX3-NEXT: vpmovdb %zmm0, %xmm1
-; AVX3-NEXT: vpsrlw $8, %zmm0, %zmm2
-; AVX3-NEXT: vpmovdb %zmm2, %xmm2
-; AVX3-NEXT: vpsrld $16, %zmm0, %zmm3
-; AVX3-NEXT: vpmovdb %zmm3, %xmm3
-; AVX3-NEXT: vpsrld $24, %zmm0, %zmm0
-; AVX3-NEXT: vpmovdb %zmm0, %xmm0
-; AVX3-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
-; AVX3-NEXT: vpsllw $7, %xmm1, %xmm1
-; AVX3-NEXT: vpmovb2m %zmm1, %k0
-; AVX3-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
-; AVX3-NEXT: vpsllw $7, %xmm0, %xmm0
-; AVX3-NEXT: vpmovb2m %zmm0, %k1
-; AVX3-NEXT: kxnorw %k1, %k0, %k0
-; AVX3-NEXT: vpmovm2b %k0, %zmm0
-; AVX3-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX3-NEXT: vzeroupper
-; AVX3-NEXT: retq
+; AVX512-LABEL: interleaved_load_vf16_i8_stride4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm1
+; AVX512-NEXT: vpsrlw $8, %zmm0, %zmm2
+; AVX512-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512-NEXT: vpsrld $16, %zmm0, %zmm3
+; AVX512-NEXT: vpmovdb %zmm3, %xmm3
+; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $7, %xmm1, %xmm1
+; AVX512-NEXT: vpmovb2m %zmm1, %k0
+; AVX512-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
+; AVX512-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512-NEXT: vpmovb2m %zmm0, %k1
+; AVX512-NEXT: kxnorw %k1, %k0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %zmm0
+; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %wide.vec = load <64 x i8>, <64 x i8>* %ptr
 %v1 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
 %v2 = shufflevector <64 x i8> %wide.vec, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
@@ -852,87 +852,87 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
 ; AVX2-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT: retq
 ;
-; AVX3-LABEL: interleaved_load_vf32_i8_stride4:
-; AVX3: # BB#0:
-; AVX3-NEXT: vmovdqa64 (%rdi), %zmm0
-; AVX3-NEXT: vmovdqa64 64(%rdi), %zmm1
-; AVX3-NEXT: vpmovdw %zmm0, %ymm2
-; AVX3-NEXT: vpmovdw %zmm1, %ymm3
-; AVX3-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX3-NEXT: vpmovwb %zmm2, %ymm8
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vextracti64x4 $1, %zmm1, %ymm14
-; AVX3-NEXT: vextracti128 $1, %ymm14, %xmm9
-; AVX3-NEXT: vpshufb %xmm7, %xmm9, %xmm4
-; AVX3-NEXT: vpshufb %xmm7, %xmm14, %xmm5
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm5
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vextracti128 $1, %ymm1, %xmm10
-; AVX3-NEXT: vpshufb %xmm3, %xmm10, %xmm6
-; AVX3-NEXT: vpshufb %xmm3, %xmm1, %xmm4
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX3-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5],ymm5[6,7]
-; AVX3-NEXT: vextracti64x4 $1, %zmm0, %ymm5
-; AVX3-NEXT: vextracti128 $1, %ymm5, %xmm12
-; AVX3-NEXT: vpshufb %xmm7, %xmm12, %xmm4
-; AVX3-NEXT: vpshufb %xmm7, %xmm5, %xmm7
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
-; AVX3-NEXT: vextracti128 $1, %ymm0, %xmm13
-; AVX3-NEXT: vpshufb %xmm3, %xmm13, %xmm6
-; AVX3-NEXT: vpshufb %xmm3, %xmm0, %xmm3
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
-; AVX3-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
-; AVX3-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
-; AVX3-NEXT: vpcmpeqb %ymm3, %ymm8, %ymm8
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vpshufb %xmm3, %xmm9, %xmm4
-; AVX3-NEXT: vpshufb %xmm3, %xmm14, %xmm6
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vpshufb %xmm6, %xmm10, %xmm7
-; AVX3-NEXT: vpshufb %xmm6, %xmm1, %xmm2
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
-; AVX3-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX3-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
-; AVX3-NEXT: vpshufb %xmm3, %xmm12, %xmm4
-; AVX3-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX3-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX3-NEXT: vpshufb %xmm6, %xmm0, %xmm6
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX3-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX3-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vpshufb %xmm3, %xmm9, %xmm4
-; AVX3-NEXT: vpshufb %xmm3, %xmm14, %xmm6
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX3-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX3-NEXT: vmovdqa {{.*#+}} xmm6 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX3-NEXT: vpshufb %xmm6, %xmm10, %xmm7
-; AVX3-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
-; AVX3-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX3-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
-; AVX3-NEXT: vpshufb %xmm3, %xmm12, %xmm4
-; AVX3-NEXT: vpshufb %xmm3, %xmm5, %xmm3
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX3-NEXT: vpshufb %xmm6, %xmm13, %xmm4
-; AVX3-NEXT: vpshufb %xmm6, %xmm0, %xmm0
-; AVX3-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; AVX3-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
-; AVX3-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX3-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
-; AVX3-NEXT: vpsllw $7, %ymm8, %ymm1
-; AVX3-NEXT: vpmovb2m %zmm1, %k0
-; AVX3-NEXT: vpsllw $7, %ymm0, %ymm0
-; AVX3-NEXT: vpmovb2m %zmm0, %k1
-; AVX3-NEXT: kxnord %k1, %k0, %k0
-; AVX3-NEXT: vpmovm2b %k0, %zmm0
-; AVX3-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX3-NEXT: retq
+; AVX512-LABEL: interleaved_load_vf32_i8_stride4:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
+; AVX512-NEXT: vpmovdw %zmm0, %ymm2
+; AVX512-NEXT: vpmovdw %zmm1, %ymm3
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512-NEXT: vpmovwb %zmm2, %ymm8
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm14
+; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm9
+; AVX512-NEXT: vpshufb %xmm7, %xmm9, %xmm4
+; AVX512-NEXT: vpshufb %xmm7, %xmm14, %xmm5
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm5
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX512-NEXT: vpshufb %xmm3, %xmm10, %xmm6
+; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm4
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512-NEXT: vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5],ymm5[6,7]
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm5
+; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm12
+; AVX512-NEXT: vpshufb %xmm7, %xmm12, %xmm4
+; AVX512-NEXT: vpshufb %xmm7, %xmm5, %xmm7
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm13
+; AVX512-NEXT: vpshufb %xmm3, %xmm13, %xmm6
+; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm3
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
+; AVX512-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm11[4,5,6,7]
+; AVX512-NEXT: vpcmpeqb %ymm3, %ymm8, %ymm8
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vpshufb %xmm3, %xmm9, %xmm4
+; AVX512-NEXT: vpshufb %xmm3, %xmm14, %xmm6
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vpshufb %xmm6, %xmm10, %xmm7
+; AVX512-NEXT: vpshufb %xmm6, %xmm1, %xmm2
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512-NEXT: vpshufb %xmm3, %xmm12, %xmm4
+; AVX512-NEXT: vpshufb %xmm3, %xmm5, %xmm3
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX512-NEXT: vpshufb %xmm6, %xmm13, %xmm4
+; AVX512-NEXT: vpshufb %xmm6, %xmm0, %xmm6
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX512-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vpshufb %xmm3, %xmm9, %xmm4
+; AVX512-NEXT: vpshufb %xmm3, %xmm14, %xmm6
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT: vpshufb %xmm6, %xmm10, %xmm7
+; AVX512-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
+; AVX512-NEXT: vpshufb %xmm3, %xmm12, %xmm4
+; AVX512-NEXT: vpshufb %xmm3, %xmm5, %xmm3
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX512-NEXT: vpshufb %xmm6, %xmm13, %xmm4
+; AVX512-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512-NEXT: vpcmpeqb %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vpsllw $7, %ymm8, %ymm1
+; AVX512-NEXT: vpmovb2m %zmm1, %k0
+; AVX512-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512-NEXT: vpmovb2m %zmm0, %k1
+; AVX512-NEXT: kxnord %k1, %k0, %k0
+; AVX512-NEXT: vpmovm2b %k0, %zmm0
+; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: retq
 %wide.vec = load <128 x i8>, <128 x i8>* %ptr
 %v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
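As the NOTE line at the top of the test records, these CHECK blocks are autogenerated, so a prefix rename like AVX3 -> AVX512 is normally done by editing the RUN lines and re-running the update script rather than by hand-editing each assertion. A minimal sketch of that workflow (the build directory path is an assumption, not something recorded in this commit):

  # Regenerate every autogenerated CHECK block in the test: the script runs
  # llc for each RUN line and rewrites the assertions under whatever
  # --check-prefix names those RUN lines declare.
  python llvm/utils/update_llc_test_checks.py \
      --llc-binary build/bin/llc \
      llvm/test/CodeGen/X86/x86-interleaved-access.ll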