| author | Sanjay Patel <spatel@rotateright.com> | 2019-06-07 13:17:46 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2019-06-07 13:17:46 +0000 |
| commit | 6880bceda2df17f68e319c86a78642125086e0b8 (patch) | |
| tree | 936eca4e86abcdbd7788df22c7cbd579b61b21c0 /llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll | |
| parent | 0723c659f5838a5f67cd6ef5133f7d0e9464b122 (diff) | |
[x86] narrow extract subvector of vector select
This is a potentially large perf win for AVX1 targets because of the way we
auto-vectorize to 256-bit but then expect the backend to legalize/optimize
for the half-implemented AVX1 ISA.
On the motivating example from PR37428 (https://bugs.llvm.org/show_bug.cgi?id=37428),
there's a 16% speedup when compiling with "-mavx" (perf tested on Haswell) because we
eliminate the remaining 256-bit vblendv ops, even though this patch doesn't solve the
vector shift issue from that report.
I added comments on a couple of tests that require further work. If we have
256-bit logic ops separating the vselect and extract, we should probably narrow
everything to 128-bit, but that requires a larger pattern match.
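To illustrate the combine, here is a hypothetical reduced example (not taken from the patch or its tests): when a 128-bit extract is the only use of a 256-bit vselect, the select can be performed at 128 bits, so AVX1 codegen can use a single xmm vblendvpd instead of building a ymm mask with vinsertf128 and blending the full 256-bit value:

```llvm
; Hypothetical reduced example: only the low half of a 256-bit select
; is used, so the blend only needs to happen at 128 bits.
define <2 x i64> @narrow_select_extract(<4 x i1> %c, <4 x i64> %x, <4 x i64> %y) {
  %sel = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %y
  ; extract the low 128-bit subvector of the 256-bit select result
  %lo = shufflevector <4 x i64> %sel, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x i64> %lo
}
```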
Differential Revision: https://reviews.llvm.org/D62969
llvm-svn: 362797
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll | 116 |
1 file changed, 53 insertions(+), 63 deletions(-)
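For context, the AVX1 output below implements unsigned 64-bit max without a native instruction: both operands are XORed with the sign-bit constant 0x8000000000000000 so that the signed VPCMPGTQ compare acts as an unsigned compare, and VBLENDVPD selects the larger elements. The reduction pattern these tests exercise looks roughly like the following IR (an assumed sketch of the test's shape, not a verbatim copy of the test file):

```llvm
; Assumed sketch of a v4i64 umax reduction: pairwise
; shuffle/compare/select steps, then extract the scalar result.
define i64 @umax_v4i64(<4 x i64> %a0) {
  ; compare the low half against the high half
  %hi = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %c1 = icmp ugt <4 x i64> %a0, %hi
  %m1 = select <4 x i1> %c1, <4 x i64> %a0, <4 x i64> %hi
  ; compare element 0 against element 1
  %sw = shufflevector <4 x i64> %m1, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %c2 = icmp ugt <4 x i64> %m1, %sw
  %m2 = select <4 x i1> %c2, <4 x i64> %m1, <4 x i64> %sw
  %r = extractelement <4 x i64> %m2, i32 0
  ret i64 %r
}
```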
```diff
diff --git a/llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll b/llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll
index a2c0653e126..4290994e740 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-umax-widen.ll
@@ -155,15 +155,13 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -333,29 +331,26 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vxorpd %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -640,49 +635,44 @@ define i64 @test_v16i64(<16 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm8
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm8, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm10
+; AVX1-NEXT:    vpxor %xmm4, %xmm10, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT:    vpxor %xmm4, %xmm11, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm12
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm13
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm7, %xmm11, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm10, %xmm8, %xmm6
+; AVX1-NEXT:    vxorpd %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
```

