diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-07-31 12:55:39 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-07-31 12:55:39 +0000 |
| commit | 24ad2b5e7d8ceecb045bfc499107b9ed90439adb (patch) | |
| tree | ccecf7fb87e76b58d171ad4fbabe2a6da14f7c44 /llvm/test/CodeGen/X86/oddsubvector.ll | |
| parent | a36d31478c182903523e04eb271bbf102bfab2cc (diff) | |
| download | bcm5719-llvm-24ad2b5e7d8ceecb045bfc499107b9ed90439adb.tar.gz bcm5719-llvm-24ad2b5e7d8ceecb045bfc499107b9ed90439adb.zip | |
[X86][AVX] Ensure chained subvector insertions are the same size (PR42833)
Before combining insert_subvector(insert_subvector(vec, sub0, c0), sub1, c1) patterns, ensure that the subvectors are all the same type. On AVX512 targets especially we might have a mixture of 128/256 subvector insertions.
llvm-svn: 367429
Diffstat (limited to 'llvm/test/CodeGen/X86/oddsubvector.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/oddsubvector.ll | 20 |
1 file changed, 11 insertions(+), 9 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index d38f21e82ad..5bcfbe61d7b 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -349,19 +349,21 @@ define void @PR42833() {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movl {{.*}}(%rip), %eax
 ; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX512-NEXT:    vmovdqu64 c+{{.*}}(%rip), %zmm1
 ; AVX512-NEXT:    addl c+{{.*}}(%rip), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
-; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7]
-; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm2
-; AVX512-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7]
-; AVX512-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm1
+; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],mem[1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm3
+; AVX512-NEXT:    vpsllvd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7]
+; AVX512-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm2
 ; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
 ; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX512-NEXT:    vmovdqu64 d+{{.*}}(%rip), %zmm2
-; AVX512-NEXT:    vpinsrd $0, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vmovdqu64 d+{{.*}}(%rip), %zmm3
+; AVX512-NEXT:    vpinsrd $0, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
-; AVX512-NEXT:    vpsubd %zmm1, %zmm2, %zmm1
+; AVX512-NEXT:    vpsubd %zmm1, %zmm3, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, d+{{.*}}(%rip)
 ; AVX512-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
```

