-rw-r--r--  llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll |   1
-rw-r--r--  llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll |  81
-rw-r--r--  llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll | 243
3 files changed, 325 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
index bc0660b44fb..9c5344562a9 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
@@ -8,6 +8,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL

 ; PR31551
 ; Pairs of shufflevector:trunc functions with functional equivalence.
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index 0205c064d05..13d5d41a470 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -6,6 +6,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMIVL

 ; PR31551
 ; Pairs of shufflevector:trunc functions with functional equivalence.
@@ -106,6 +107,13 @@ define void @trunc_v16i16_to_v16i8(<32 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v16i16_to_v16i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovwb %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i8>, <32 x i8>* %L
   %bc = bitcast <32 x i8> %vec to <16 x i16>
   %strided.vec = trunc <16 x i16> %bc to <16 x i8>
@@ -206,6 +214,13 @@ define void @trunc_v8i32_to_v8i16(<16 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i16:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovdw %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <16 x i16>, <16 x i16>* %L
   %bc = bitcast <16 x i16> %vec to <8 x i32>
   %strided.vec = trunc <8 x i32> %bc to <8 x i16>
@@ -292,6 +307,13 @@ define void @trunc_v4i64_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i32:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovqd %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <8 x i32>, <8 x i32>* %L
   %bc = bitcast <8 x i32> %vec to <4 x i64>
   %strided.vec = trunc <4 x i64> %bc to <4 x i32>
@@ -371,6 +393,18 @@ define void @shuffle_v32i8_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v32i8_to_v8i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX512VBMIVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512VBMIVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX512VBMIVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VBMIVL-NEXT: vpmovwb %xmm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i8>, <32 x i8>* %L
   %strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   store <8 x i8> %strided.vec, <8 x i8>* %S
@@ -431,6 +465,13 @@ define void @trunc_v8i32_to_v8i8(<32 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovdb %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i8>, <32 x i8>* %L
   %bc = bitcast <32 x i8> %vec to <8 x i32>
   %strided.vec = trunc <8 x i32> %bc to <8 x i8>
@@ -519,6 +560,15 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v16i16_to_v4i16:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovaps (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512VBMIVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX512VBMIVL-NEXT: vpmovdw %xmm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <16 x i16>, <16 x i16>* %L
   %strided.vec = shufflevector <16 x i16> %vec, <16 x i16> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   store <4 x i16> %strided.vec, <4 x i16>* %S
@@ -585,6 +635,13 @@ define void @trunc_v4i64_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovqw %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <16 x i16>, <16 x i16>* %L
   %bc = bitcast <16 x i16> %vec to <4 x i64>
   %strided.vec = trunc <4 x i64> %bc to <4 x i16>
@@ -658,6 +715,15 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v32i8_to_v4i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovaps (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512VBMIVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX512VBMIVL-NEXT: vpmovdb %xmm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i8>, <32 x i8>* %L
   %strided.vec = shufflevector <32 x i8> %vec, <32 x i8> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
   store <4 x i8> %strided.vec, <4 x i8>* %S
@@ -724,6 +790,13 @@ define void @trunc_v4i64_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512VBMIVL-NEXT: vpmovqb %ymm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i8>, <32 x i8>* %L
   %bc = bitcast <32 x i8> %vec to <4 x i64>
   %strided.vec = trunc <4 x i64> %bc to <4 x i8>
@@ -795,6 +868,14 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
 ; AVX512BWVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: negative:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [32,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,48,18,20,22,24,26,28,30,16,18,20,22,24,26,28,30]
+; AVX512VBMIVL-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
+; AVX512VBMIVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %w0 = extractelement <32 x i8> %w, i32 0
   %merged = insertelement <16 x i8> %strided.vec, i8 %w0, i32 0
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index bfffb28d2b8..066ea7032fa 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -3,6 +3,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VBMIVL

 ; PR31551
 ; Pairs of shufflevector:trunc functions with functional equivalence.
@@ -56,6 +58,28 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vmovdqa %ymm2, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v64i8_to_v32i8:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30]
+; AVX512VBMI-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512VBMI-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512VBMI-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v64i8_to_v32i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62]
+; AVX512VBMIVL-NEXT: vpermi2b %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa %ymm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <64 x i8>, <64 x i8>* %L
   %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
   store <32 x i8> %strided.vec, <32 x i8>* %S
@@ -98,6 +122,20 @@ define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: trunc_v32i16_to_v32i8:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vpmovwb %zmm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_v32i16_to_v32i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vpmovwb %zmm0, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <64 x i8>, <64 x i8>* %L
   %bc = bitcast <64 x i8> %vec to <32 x i16>
   %strided.vec = trunc <32 x i16> %bc to <32 x i8>
@@ -153,6 +191,29 @@ define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
 ; AVX512BWVL-NEXT: vmovdqa %ymm2, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v32i16_to_v16i16:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,4,5,6,7,8,9,12,13,12,13,14,15,16,17,20,21,20,21,22,23,24,25,28,29,28,29,30,31]
+; AVX512VBMI-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX512VBMI-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX512VBMI-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX512VBMI-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512VBMI-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v32i16_to_v16i16:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; AVX512VBMIVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa %ymm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i16>, <32 x i16>* %L
   %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   store <16 x i16> %strided.vec, <16 x i16>* %S
@@ -213,6 +274,26 @@ define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
 ; AVX512BWVL-NEXT: vmovdqa %ymm2, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v16i32_to_v8i32:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovaps (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX512VBMI-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512VBMI-NEXT: vmovaps %ymm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v16i32_to_v8i32:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14]
+; AVX512VBMIVL-NEXT: vpermi2d %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa %ymm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <16 x i32>, <16 x i32>* %L
   %strided.vec = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   store <8 x i32> %strided.vec, <8 x i32>* %S
@@ -309,6 +390,35 @@ define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vmovdqa %xmm0, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v64i8_to_v16i8:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VBMI-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v64i8_to_v16i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2b %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <64 x i8>, <64 x i8>* %L
   %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
   store <16 x i8> %strided.vec, <16 x i8>* %S
@@ -398,6 +508,35 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
 ; AVX512BWVL-NEXT: vmovdqa %xmm2, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v32i16_to_v8i16:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,8,9,8,9,10,11,12,13,14,15]
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VBMI-NEXT: vmovdqa %xmm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v32i16_to_v8i16:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa %xmm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <32 x i16>, <32 x i16>* %L
   %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
   store <8 x i16> %strided.vec, <8 x i16>* %S
@@ -485,6 +624,35 @@ define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
 ; AVX512BWVL-NEXT: vpmovwb %xmm2, (%rsi)
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: shuffle_v64i8_to_v8i8:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512VBMI-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512VBMI-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512VBMI-NEXT: vmovq %xmm0, (%rsi)
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: shuffle_v64i8_to_v8i8:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,4,8,12,16,20,24,28,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vpmovwb %xmm2, (%rsi)
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %vec = load <64 x i8>, <64 x i8>* %L
   %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
   store <8 x i8> %strided.vec, <8 x i8>* %S
@@ -571,6 +739,32 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
 ; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512VBMI-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_61:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm1 = <1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2b %ymm2, %ymm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa %xmm1, %xmm0
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
   ret <16 x i8> %res
 }
@@ -637,6 +831,31 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
 ; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512VBMI-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMI-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512VBMI-NEXT: vpshufb %xmm2, %xmm0, %xmm2
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
+; AVX512VBMI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
+; AVX512VBMI-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512VBMI-NEXT: vzeroupper
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm1 = <1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,62,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2b %ymm2, %ymm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa %xmm1, %xmm0
+; AVX512VBMIVL-NEXT: vzeroupper
+; AVX512VBMIVL-NEXT: retq
   %res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 62>
   ret <16 x i8> %res
 }
@@ -693,6 +912,30 @@ define <4 x double> @PR34175(<32 x i16>* %p) {
 ; AVX512BWVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; AVX512BWVL-NEXT: vcvtdq2pd %xmm0, %ymm0
 ; AVX512BWVL-NEXT: retq
+;
+; AVX512VBMI-LABEL: PR34175:
+; AVX512VBMI: # %bb.0:
+; AVX512VBMI-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512VBMI-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VBMI-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512VBMI-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX512VBMI-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX512VBMI-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512VBMI-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512VBMI-NEXT: retq
+;
+; AVX512VBMIVL-LABEL: PR34175:
+; AVX512VBMIVL: # %bb.0:
+; AVX512VBMIVL-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512VBMIVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VBMIVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,8,16,24,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512VBMIVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512VBMIVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512VBMIVL-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512VBMIVL-NEXT: retq
   %v = load <32 x i16>, <32 x i16>* %p, align 2
   %shuf = shufflevector <32 x i16> %v, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
   %tofp = uitofp <4 x i16> %shuf to <4 x double>
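
Note: the equivalence these tests exercise (PR31551) follows from x86's little-endian lane order: truncating each wide element keeps its low-order narrow element, which is exactly an even-index strided shuffle of the narrow-element view. A minimal standalone sketch of the pattern in LLVM IR (not part of this commit; the function names are illustrative) is:

; Both functions return the even-indexed bytes of %v; since x86 is
; little-endian, the low byte of each i16 lane is the even-indexed byte,
; so on AVX512BW/VBMI targets both should ideally lower to one vpmovwb.
define <16 x i8> @even_bytes_shuffle(<32 x i8> %v) {
  %r = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  ret <16 x i8> %r
}

define <16 x i8> @even_bytes_trunc(<32 x i8> %v) {
  ; Reinterpret the 32 bytes as 16 words, then keep each word's low byte.
  %bc = bitcast <32 x i8> %v to <16 x i16>
  %r = trunc <16 x i16> %bc to <16 x i8>
  ret <16 x i8> %r
}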