Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll    | 150
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll  |  11
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll   |  13
-rw-r--r--  llvm/test/CodeGen/X86/x86-interleaved-access.ll  |  62
4 files changed, 120 insertions(+), 116 deletions(-)
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index bdc7e1cbd53..430097e6dc9 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -311,83 +311,93 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
;
; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512BW-NEXT: vpextrb $1, %xmm0, %ecx
-; AVX512BW-NEXT: vmovd %ecx, %xmm1
-; AVX512BW-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm2
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512BWVL-NEXT: vpextrb $1, %xmm0, %ecx
-; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
-; AVX512BWVL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm2
+; AVX512BWVL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
+; AVX512BWVL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 62>
ret <16 x i8> %res
}
+
+define <4 x double> @PR34175(<32 x i16>* %p) {
+; AVX512F-LABEL: PR34175:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512F-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512F-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512F-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: PR34175:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512VL-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512VL-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: PR34175:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BW-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512BW-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: PR34175:
+; AVX512BWVL: # BB#0:
+; AVX512BWVL-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,8,16,24,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
+; AVX512BWVL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512BWVL-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX512BWVL-NEXT: retq
+ %v = load <32 x i16>, <32 x i16>* %p, align 2
+ %shuf = shufflevector <32 x i16> %v, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
+ %tofp = uitofp <4 x i16> %shuf to <4 x double>
+ ret <4 x double> %tofp
+}
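The two hunks above replace a per-element vpextrb/vpinsrb chain (one extract and one insert for each of the 16 output bytes) with four lane-wise vpshufb byte gathers stitched together by vpunpckldq and a single vpblendd. The added PR34175 test pins down a stride-8 <32 x i16> shuffle feeding uitofp. A minimal standalone reproducer sketch, assuming the x86_64 triple and +avx512bw attribute implied by the check prefixes:

; Sketch: build with something like
;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx512bw repro.ll
define <4 x double> @pr34175_repro(<32 x i16>* %p) {
  %v = load <32 x i16>, <32 x i16>* %p, align 2   ; under-aligned 64-byte load
  %shuf = shufflevector <32 x i16> %v, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
  %tofp = uitofp <4 x i16> %shuf to <4 x double>  ; zero-extend, then int->fp
  ret <4 x double> %tofp
}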
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index c86a1192f41..665c5110e7c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -286,13 +286,10 @@ define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
; ALL-LABEL: test_v16i32_0_1_2_12:
; ALL: # BB#0:
-; ALL-NEXT: vpextrd $1, %xmm0, %eax
-; ALL-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; ALL-NEXT: vpextrd $2, %xmm0, %eax
-; ALL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; ALL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; ALL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; ALL-NEXT: vpbroadcastd %xmm1, %xmm1
+; ALL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; ALL-NEXT: vzeroupper
; ALL-NEXT: retq
%res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
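The same simplification applies to this dword shuffle: element 12 of a <16 x i32> sits at position 0 of the zmm register's top 128-bit lane, so the new lowering extracts that lane, splats it with vpbroadcastd, and blends it into position 3, replacing the old vpextrd/vpinsrd rebuild. The function under test, as a standalone sketch (same llc invocation as above; +avx512f should suffice, since every AVX-512 run line shares the ALL prefix):

; Element 12 = lane 3, position 0, hence broadcast of the extracted lane + one blend.
define <4 x i32> @repro_v16i32_0_1_2_12(<16 x i32> %v) {
  %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
  ret <4 x i32> %res
}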
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
index d20d8669432..f8268cb6bc1 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2726,20 +2726,17 @@ define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
; AVX512F-LABEL: test_v8i64_2_5:
; AVX512F: # BB#0:
-; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
-; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: test_v8i64_2_5:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax
-; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512F-32-NEXT: vpextrd $3, %xmm0, %eax
-; AVX512F-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512F-32-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-32-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512F-32-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512F-32-NEXT: vzeroupper
; AVX512F-32-NEXT: retl
%res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
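For test_v8i64_2_5, both the 64-bit and 32-bit targets now extract the two relevant 128-bit lanes directly (element 2 lives in the second lane, element 5 in the third) and combine them with one vpblendd; the 32-bit target previously reassembled the upper i64 through vpextrd/vpinsrd pairs. The pattern, as a sketch:

define <2 x i64> @repro_v8i64_2_5(<8 x i64> %v) {
  %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
  ret <2 x i64> %res
}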
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 6a3fe0c93ab..b2760f9ad82 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -567,37 +567,37 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
-; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm6
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
-; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm1
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
-; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm5
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm6
+; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm7
+; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX2-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm5
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm7
+; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm6, %xmm5
+; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: vpcmpeqb %xmm0, %xmm4, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX2-NEXT: vpand %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -836,15 +836,15 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX512-NEXT: vpmovdw %zmm1, %ymm3
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512-NEXT: vpmovwb %zmm2, %ymm8
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm14
; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm9
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vpshufb %xmm7, %xmm9, %xmm4
; AVX512-NEXT: vpshufb %xmm7, %xmm14, %xmm5
; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm5
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm10
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vpshufb %xmm3, %xmm10, %xmm6
; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm4
; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
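The x86-interleaved-access.ll hunks are codegen-neutral churn from the same change: they only reorder the vpshufb mask loads relative to the vextracti128 lane extracts and renumber the xmm registers; the instruction count and the stride-4 deinterleave structure are unchanged. For reference, one of the four stride-4 byte gathers these checks cover, as a sketch (the full test body is outside this diff; the masks <1,5,9,13>, <2,6,10,14>, and <3,7,11,15> above handle the other three groups):

define <16 x i8> @stride4_group0_sketch(<64 x i8>* %ptr) {
  %wide = load <64 x i8>, <64 x i8>* %ptr
  %g0 = shufflevector <64 x i8> %wide, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  ret <16 x i8> %g0
}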