; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BWVL

; PR31551
; Pairs of shufflevector:trunc functions with functional equivalence.
; Ideally, the shuffles should be lowered to code with the same quality as the truncates.

define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v32i8:
; AVX512F:       # BB#0:
; AVX512F-NEXT:    vpmovsxwd (%rdi), %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vpmovsxwd 32(%rdi), %zmm1
; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT:    vmovdqa %ymm0, (%rsi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: shuffle_v64i8_to_v32i8:
; AVX512VL:       # BB#0:
; AVX512VL-NEXT:    vpmovsxwd (%rdi), %zmm0
; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT:    vpmovsxwd 32(%rdi), %zmm1
; AVX512VL-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT:    vmovdqa %ymm0, (%rsi)
; AVX512VL-NEXT:    vzeroupper
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8:
; AVX512BW:       # BB#0:
; AVX512BW-NEXT:    vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8:
; AVX512BWVL:       # BB#0:
; AVX512BWVL-NEXT:    vmovdqu16 (%rdi), %zmm0
; AVX512BWVL-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BWVL-NEXT:    vzeroupper
; AVX512BWVL-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  store <32 x i8> %strided.vec, <32 x i8>* %S
  ret void
}

define void @trunc_v32i16_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind {
; AVX512F-LABEL: trunc_v32i16_to_v32i8:
; AVX512F:       # BB#0:
; AVX512F-NEXT:    vpmovsxwd (%rdi), %zmm0
; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512F-NEXT:    vpmovsxwd 32(%rdi), %zmm1
; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT:    vmovdqa %ymm0, (%rsi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: trunc_v32i16_to_v32i8:
; AVX512VL:       # BB#0:
; AVX512VL-NEXT:    vpmovsxwd (%rdi), %zmm0
; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT:    vpmovsxwd 32(%rdi), %zmm1
; AVX512VL-NEXT:    vpmovdb %zmm1, %xmm1
; AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT:    vmovdqa %ymm0, (%rsi)
; AVX512VL-NEXT:    vzeroupper
; AVX512VL-NEXT:    retq
;
; AVX512BW-LABEL: trunc_v32i16_to_v32i8:
; AVX512BW:       # BB#0:
; AVX512BW-NEXT:    vmovdqu16 (%rdi), %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: trunc_v32i16_to_v32i8:
; AVX512BWVL:       # BB#0:
; AVX512BWVL-NEXT:    vmovdqu16 (%rdi), %zmm0
; AVX512BWVL-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BWVL-NEXT:    vzeroupper
; AVX512BWVL-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %bc = bitcast <64 x i8> %vec to <32 x i16>
  %strided.vec = trunc <32 x i16> %bc to <32 x i8>
  store <32 x i8> %strided.vec, <32 x i8>* %S
  ret void
}
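; Note: without AVX512BW there is no vpmovwb, so the two v32i16 -> v32i8
; functions above emulate the word-to-byte truncate by sign-extending each
; <16 x i16> half to <16 x i32> (vpmovsxwd) and truncating with vpmovdb; the
; extension is harmless because trunc keeps only the low byte of each element.
; On little-endian x86 the even-byte shuffle and the trunc are the same
; operation: byte 2*i of the <64 x i8> load is the low byte of word i of its
; <32 x i16> reinterpretation, which is why both functions should lower
; identically.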
define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v16i16:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT:    vpmovdw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, <32 x i16>* %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  store <16 x i16> %strided.vec, <16 x i16>* %S
  ret void
}

define void @trunc_v16i32_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind {
; AVX512-LABEL: trunc_v16i32_to_v16i16:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT:    vpmovdw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, <32 x i16>* %L
  %bc = bitcast <32 x i16> %vec to <16 x i32>
  %strided.vec = trunc <16 x i32> %bc to <16 x i16>
  store <16 x i16> %strided.vec, <16 x i16>* %S
  ret void
}

define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: shuffle_v16i32_to_v8i32:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqd %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <16 x i32>, <16 x i32>* %L
  %strided.vec = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  store <8 x i32> %strided.vec, <8 x i32>* %S
  ret void
}

define void @trunc_v8i64_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i32:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqd %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <16 x i32>, <16 x i32>* %L
  %bc = bitcast <16 x i32> %vec to <8 x i64>
  %strided.vec = trunc <8 x i64> %bc to <8 x i32>
  store <8 x i32> %strided.vec, <8 x i32>* %S
  ret void
}

define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v16i8:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  store <16 x i8> %strided.vec, <16 x i8>* %S
  ret void
}

define void @trunc_v16i32_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
; AVX512-LABEL: trunc_v16i32_to_v16i8:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa32 (%rdi), %zmm0
; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %bc = bitcast <64 x i8> %vec to <16 x i32>
  %strided.vec = trunc <16 x i32> %bc to <16 x i8>
  store <16 x i8> %strided.vec, <16 x i8>* %S
  ret void
}

define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v8i16:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, <32 x i16>* %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  store <8 x i16> %strided.vec, <8 x i16>* %S
  ret void
}

define void @trunc_v8i64_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i16:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, <32 x i16>* %L
  %bc = bitcast <32 x i16> %vec to <8 x i64>
  %strided.vec = trunc <8 x i64> %bc to <8 x i16>
  store <8 x i16> %strided.vec, <8 x i16>* %S
  ret void
}
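; All of the stride-2 and stride-4 pairs above collapse to a single 512-bit
; load plus a truncating store with a memory destination (vpmovdw, vpmovqd,
; vpmovdb, vpmovqw). The final pair below is the stride-8 case: every eighth
; byte of a <64 x i8> vector, i.e. the low byte of each 64-bit lane, which
; lowers to vpmovqb.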
define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}

define void @trunc_v8i64_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
; AVX512-LABEL: trunc_v8i64_to_v8i8:
; AVX512:       # BB#0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, <64 x i8>* %L
  %bc = bitcast <64 x i8> %vec to <8 x i64>
  %strided.vec = trunc <8 x i64> %bc to <8 x i8>
  store <8 x i8> %strided.vec, <8 x i8>* %S
  ret void
}
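; For reference, a sketch of the stride-8 equivalence exercised by the last
; pair (illustrative comment only, not a checked test): on a little-endian
; target, byte 8*i of a <64 x i8> vector is the low byte of its i-th i64 lane,
; so
;
;   %bc = bitcast <64 x i8> %vec to <8 x i64>
;   %t  = trunc <8 x i64> %bc to <8 x i8>   ; low byte of each i64 lane
;
; produces the same <8 x i8> as
;
;   shufflevector <64 x i8> %vec, <64 x i8> undef,
;                 <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>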