diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-02-19 16:33:17 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-02-19 16:33:17 +0000 |
| commit | 9d575db85ed52e87e759cb0fc88c221ef19898a4 (patch) | |
| tree | 71e87a2dcd06f4fa54c0d07edcaabd86760f513d /llvm/test/CodeGen/X86 | |
| parent | 26b7e859efb0dd76028003720594869c7ed64ced (diff) | |
| download | bcm5719-llvm-9d575db85ed52e87e759cb0fc88c221ef19898a4.tar.gz bcm5719-llvm-9d575db85ed52e87e759cb0fc88c221ef19898a4.zip | |
[X86][AVX] Update VBROADCAST folds to always use v2i64 X86vzload
The VBROADCAST combines and SimplifyDemandedVectorElts improvements mean that we now more consistently use shorter (128-bit) X86vzload input operands.
Follow up to D58053
llvm-svn: 354346
Diffstat (limited to 'llvm/test/CodeGen/X86')
| -rw-r--r-- | llvm/test/CodeGen/X86/insertelement-shuffle.ll | 3 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll | 3 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll | 3 |
3 files changed, 3 insertions, 6 deletions
diff --git a/llvm/test/CodeGen/X86/insertelement-shuffle.ll b/llvm/test/CodeGen/X86/insertelement-shuffle.ll index 8880dda4984..5b44337785e 100644 --- a/llvm/test/CodeGen/X86/insertelement-shuffle.ll +++ b/llvm/test/CodeGen/X86/insertelement-shuffle.ll @@ -95,8 +95,7 @@ define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind { ; ; X86_AVX512-LABEL: insert_subvector_into_undef: ; X86_AVX512: # %bb.0: -; X86_AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; X86_AVX512-NEXT: vbroadcastsd %xmm0, %zmm0 +; X86_AVX512-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0 ; X86_AVX512-NEXT: retl ; ; X64_AVX512-LABEL: insert_subvector_into_undef: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index 963fb98f56a..bedf6823277 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -655,8 +655,7 @@ define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) { define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) { ; X86-LABEL: combine_permd_insertion_as_broadcast_v4i64: ; X86: # %bb.0: -; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; X86-NEXT: vbroadcastsd %xmm0, %ymm0 +; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_permd_insertion_as_broadcast_v4i64: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll index 6a295ba8cc5..b8efb26d6fc 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll @@ -975,8 +975,7 @@ define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x floa define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) { ; X86-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64: ; X86: # %bb.0: -; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero -; X86-NEXT: vbroadcastsd %xmm0, %zmm0 +; X86-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %zmm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:

