diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-08-09 12:30:02 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-08-09 12:30:02 +0000 |
commit | 01ae462fef748212bfe42c2555bc3eb1f4309d0f (patch) | |
tree | 1a211d8b95013ae17c8f0c57a2a438c83b142a62 /llvm/test/CodeGen/X86/bitcast-setcc-128.ll | |
parent | bf7f18b79c1315eb482a580a9be81e3aa7dd55ac (diff) | |
download | bcm5719-llvm-01ae462fef748212bfe42c2555bc3eb1f4309d0f.tar.gz bcm5719-llvm-01ae462fef748212bfe42c2555bc3eb1f4309d0f.zip |
[X86][SSE] Combine (some) target shuffles with multiple uses
As discussed on D41794, we have many cases where we fail to combine shuffles as the input operands have other uses.
This patch permits these shuffles to be combined as long as they don't introduce additional variable shuffle masks, which should reduce instruction dependencies and allow the total number of shuffles to still drop without increasing the constant pool.
However, this may mean that some memory folds no longer occur and, on pre-AVX targets, an occasional extra register move is required.
This also exposes some poor PMULDQ/PMULUDQ codegen which was doing unnecessary upper/lower calculations which will in fact fold to zero/undef - the fix will be added in a followup commit.
Differential Revision: https://reviews.llvm.org/D50328
llvm-svn: 339335
Diffstat (limited to 'llvm/test/CodeGen/X86/bitcast-setcc-128.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/bitcast-setcc-128.ll | 20 |
1 file changed, 8 insertions, 12 deletions
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll index 599e907251b..adef5e4c8e2 100644 --- a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll +++ b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll @@ -360,13 +360,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) { ; ; AVX1-LABEL: v2i32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX1-NEXT: vpsllq $32, %xmm1, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] -; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 -; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovmskpd %xmm0, %eax @@ -375,13 +373,11 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) { ; ; AVX2-LABEL: v2i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2 -; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; AVX2-NEXT: vpsllq $32, %xmm1, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] -; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0 -; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2 -; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vmovmskpd %xmm0, %eax |