Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll | 334
1 file changed, 230 insertions, 104 deletions
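The hunks below contain only the updated FileCheck assembly lines; the IR bodies of the test functions are not part of this diff. As a rough sketch of the pattern these functions exercise (extracting lanes at run-time indices and rebuilding the vector with insertelement), the first function plausibly looks like the following — an assumed sketch for orientation, not the verbatim test body:

define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
  ; extract each lane at a run-time index (assumed body, matching the signature in the first hunk)
  %x0 = extractelement <4 x double> %x, i64 %i0
  %x1 = extractelement <4 x double> %x, i64 %i1
  %x2 = extractelement <4 x double> %x, i64 %i2
  %x3 = extractelement <4 x double> %x, i64 %i3
  ; rebuild the result vector lane by lane
  %r0 = insertelement <4 x double> undef, double %x0, i32 0
  %r1 = insertelement <4 x double> %r0, double %x1, i32 1
  %r2 = insertelement <4 x double> %r1, double %x2, i32 2
  %r3 = insertelement <4 x double> %r2, double %x3, i32 3
  ret <4 x double> %r3
}

Against that pattern, the CHECK-line changes in the hunks show the variable indices now being masked into range with andl ($1, $3, $7, or $15 depending on the element count) before addressing the spilled vector on the stack, in place of, or in addition to, the previous movslq sign extensions.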
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
index b43ec058ed9..42b3c11d3d6 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -13,6 +13,10 @@ define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0,
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
+; ALL-NEXT: andl $3, %ecx
+; ALL-NEXT: andl $3, %edx
+; ALL-NEXT: andl $3, %esi
+; ALL-NEXT: andl $3, %edi
; ALL-NEXT: vmovaps %ymm0, (%rsp)
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -40,6 +44,8 @@ define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0,
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
+; ALL-NEXT: andl $3, %edx
+; ALL-NEXT: andl $3, %esi
; ALL-NEXT: vmovaps %ymm0, (%rsp)
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
@@ -62,6 +68,10 @@ define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0,
define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
; ALL: # BB#0:
+; ALL-NEXT: andl $1, %ecx
+; ALL-NEXT: andl $1, %edx
+; ALL-NEXT: andl $1, %esi
+; ALL-NEXT: andl $1, %edi
; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
@@ -87,6 +97,10 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: andl $3, %ecx
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: andl $3, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -105,6 +119,10 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: andl $3, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -134,6 +152,8 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: andl $3, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -150,6 +170,8 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: andl $3, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -173,6 +195,10 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
; AVX1-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
; AVX1: # BB#0:
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: andl $1, %esi
+; AVX1-NEXT: andl $1, %edi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -185,6 +211,10 @@ define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i
;
; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
; AVX2: # BB#0:
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: andl $1, %esi
+; AVX2-NEXT: andl $1, %edi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -212,15 +242,23 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: movslq %edi, %rax
-; AVX1-NEXT: movslq %esi, %rsi
-; AVX1-NEXT: movslq %edx, %rdx
-; AVX1-NEXT: movslq %ecx, %r11
-; AVX1-NEXT: movslq %r8d, %r10
+; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: andl $7, %edi
+; AVX1-NEXT: andl $7, %esi
+; AVX1-NEXT: andl $7, %edx
+; AVX1-NEXT: andl $7, %ecx
+; AVX1-NEXT: andl $7, %r8d
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: movslq %r9d, %r8
-; AVX1-NEXT: movslq 16(%rbp), %rdi
-; AVX1-NEXT: movslq 24(%rbp), %rcx
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: movl 16(%rbp), %r10d
+; AVX1-NEXT: andl $7, %r10d
+; AVX1-NEXT: movl 24(%rbp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -284,15 +322,23 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # BB#0:
-; ALL-NEXT: movslq %edi, %rax
-; ALL-NEXT: movslq %esi, %rsi
-; ALL-NEXT: movslq %edx, %rdx
-; ALL-NEXT: movslq %ecx, %r11
-; ALL-NEXT: movslq %r8d, %r10
+; ALL-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; ALL-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; ALL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: andl $3, %edi
+; ALL-NEXT: andl $3, %esi
+; ALL-NEXT: andl $3, %edx
+; ALL-NEXT: andl $3, %ecx
+; ALL-NEXT: andl $3, %r8d
; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; ALL-NEXT: movslq %r9d, %r8
-; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdi
-; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rcx
+; ALL-NEXT: andl $3, %r9d
+; ALL-NEXT: movl {{[0-9]+}}(%rsp), %r10d
+; ALL-NEXT: andl $3, %r10d
+; ALL-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; ALL-NEXT: andl $3, %eax
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ALL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
@@ -331,48 +377,64 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: movslq 32(%rbp), %rax
+; AVX1-NEXT: movl 32(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: movslq 40(%rbp), %rax
+; AVX1-NEXT: movl 40(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 48(%rbp), %rax
+; AVX1-NEXT: movl 48(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 56(%rbp), %rax
+; AVX1-NEXT: movl 56(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 64(%rbp), %rax
+; AVX1-NEXT: movl 64(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 72(%rbp), %rax
+; AVX1-NEXT: movl 72(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 80(%rbp), %rax
+; AVX1-NEXT: movl 80(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq 88(%rbp), %rax
+; AVX1-NEXT: movl 88(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq %edi, %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: andl $15, %edi
+; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: movslq %esi, %rax
-; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %edx, %rax
-; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %ecx, %rax
-; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %r8d, %rax
-; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %r9d, %rax
-; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq 16(%rbp), %rax
+; AVX1-NEXT: andl $15, %esi
+; AVX1-NEXT: vpinsrw $1, (%rsp,%rsi,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $15, %edx
+; AVX1-NEXT: vpinsrw $2, (%rsp,%rdx,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $15, %ecx
+; AVX1-NEXT: vpinsrw $3, (%rsp,%rcx,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $15, %r8d
+; AVX1-NEXT: vpinsrw $4, (%rsp,%r8,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $15, %r9d
+; AVX1-NEXT: vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
+; AVX1-NEXT: movl 16(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movslq 24(%rbp), %rax
+; AVX1-NEXT: movl 24(%rbp), %eax
+; AVX1-NEXT: andl $15, %eax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -386,48 +448,64 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
+; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
-; AVX2-NEXT: movslq 32(%rbp), %rax
+; AVX2-NEXT: movl 32(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: movslq 40(%rbp), %rax
+; AVX2-NEXT: movl 40(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 48(%rbp), %rax
+; AVX2-NEXT: movl 48(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 56(%rbp), %rax
+; AVX2-NEXT: movl 56(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 64(%rbp), %rax
+; AVX2-NEXT: movl 64(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 72(%rbp), %rax
+; AVX2-NEXT: movl 72(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 80(%rbp), %rax
+; AVX2-NEXT: movl 80(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq 88(%rbp), %rax
+; AVX2-NEXT: movl 88(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq %edi, %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: andl $15, %edi
+; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: movslq %esi, %rax
-; AVX2-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %edx, %rax
-; AVX2-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %ecx, %rax
-; AVX2-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %r8d, %rax
-; AVX2-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %r9d, %rax
-; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq 16(%rbp), %rax
+; AVX2-NEXT: andl $15, %esi
+; AVX2-NEXT: vpinsrw $1, (%rsp,%rsi,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $15, %edx
+; AVX2-NEXT: vpinsrw $2, (%rsp,%rdx,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $15, %ecx
+; AVX2-NEXT: vpinsrw $3, (%rsp,%rcx,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $15, %r8d
+; AVX2-NEXT: vpinsrw $4, (%rsp,%r8,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $15, %r9d
+; AVX2-NEXT: vpinsrw $5, (%rsp,%r9,2), %xmm1, %xmm1
+; AVX2-NEXT: movl 16(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movslq 24(%rbp), %rax
+; AVX2-NEXT: movl 24(%rbp), %eax
+; AVX2-NEXT: andl $15, %eax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -472,48 +550,64 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX1: # BB#0:
+; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: movslq %edi, %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT: andl $7, %edi
+; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: movslq %esi, %rax
-; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %edx, %rax
-; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %ecx, %rax
-; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %r8d, %rax
-; AVX1-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq %r9d, %rax
-; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: andl $7, %esi
+; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $7, %edx
+; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rdx,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $7, %ecx
+; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $7, %r8d
+; AVX1-NEXT: vpinsrw $4, -24(%rsp,%r8,2), %xmm1, %xmm1
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: andl $7, %eax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -521,48 +615,64 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX2: # BB#0:
+; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: movslq %edi, %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT: andl $7, %edi
+; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: movslq %esi, %rax
-; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %edx, %rax
-; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %ecx, %rax
-; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %r8d, %rax
-; AVX2-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq %r9d, %rax
-; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $7, %esi
+; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $7, %edx
+; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rdx,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $7, %ecx
+; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $7, %r8d
+; AVX2-NEXT: vpinsrw $4, -24(%rsp,%r8,2), %xmm1, %xmm1
+; AVX2-NEXT: andl $7, %r9d
+; AVX2-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm1, %xmm1
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movl {{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: andl $7, %eax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -615,8 +725,12 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq 8(%rdi), %rcx
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: andl $3, %ecx
; AVX1-NEXT: movq 16(%rdi), %rdx
+; AVX1-NEXT: andl $3, %edx
; AVX1-NEXT: movq 24(%rdi), %rsi
+; AVX1-NEXT: andl $3, %esi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -637,8 +751,12 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq 8(%rdi), %rcx
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: andl $3, %ecx
; AVX2-NEXT: movq 16(%rdi), %rdx
+; AVX2-NEXT: andl $3, %edx
; AVX2-NEXT: movq 24(%rdi), %rsi
+; AVX2-NEXT: andl $3, %esi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -674,8 +792,12 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX1: # BB#0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: movq 8(%rdi), %rcx
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: andl $1, %ecx
; AVX1-NEXT: movq 16(%rdi), %rdx
+; AVX1-NEXT: andl $1, %edx
; AVX1-NEXT: movq 24(%rdi), %rsi
+; AVX1-NEXT: andl $1, %esi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
@@ -690,8 +812,12 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX2: # BB#0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: movq 8(%rdi), %rcx
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: andl $1, %ecx
; AVX2-NEXT: movq 16(%rdi), %rdx
+; AVX2-NEXT: andl $1, %edx
; AVX2-NEXT: movq 24(%rdi), %rsi
+; AVX2-NEXT: andl $1, %esi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero