| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-02-16 12:21:08 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-02-16 12:21:08 +0000 |
| commit | 0ffde50f9c599c9f289552c0e5e0efc184dc7a55 (patch) | |
| tree | 9992a54d62e95bb87068ef007c70d3ecbcbfa710 /llvm/test/CodeGen | |
| parent | 7d60d20d5793a1e9270534b358c54cd1a9e2b0e1 (diff) | |
[SelectionDAG] Add initial SimplifyDemandedVectorElts support for simplifying VSELECT operands
This just adds a basic pass-through - we can add constant selection mask handling in a future patch to fully match InstCombine.
llvm-svn: 325338
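
For context, SimplifyDemandedVectorElts walks the DAG with a mask of the vector elements a user actually needs and tries to simplify each operand against that mask. A basic pass-through for VSELECT simply forwards the unchanged mask to both select arms. The sketch below is a hypothetical reconstruction in the style of TargetLowering::SimplifyDemandedVectorElts, not the verbatim code from this commit; the surrounding names (DemandedElts, KnownUndef, KnownZero, TLO, Depth) are assumed from LLVM's conventions.

```cpp
// Hypothetical sketch of a pass-through VSELECT case inside
// TargetLowering::SimplifyDemandedVectorElts (a reconstruction,
// not the verbatim r325338 code).
case ISD::VSELECT: {
  // Without inspecting the selection mask we cannot prove any lane of
  // either arm is unused, so forward the full demanded mask to both.
  // TODO: with a constant mask, demand only the lanes each arm supplies,
  // matching the InstCombine version.
  APInt UndefLHS, ZeroLHS;
  if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefLHS,
                                 ZeroLHS, TLO, Depth + 1))
    return true;
  APInt UndefRHS, ZeroRHS;
  if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, UndefRHS,
                                 ZeroRHS, TLO, Depth + 1))
    return true;
  // A result element is known undef/zero only if it is so in both arms.
  KnownUndef = UndefLHS & UndefRHS;
  KnownZero = ZeroLHS & ZeroRHS;
  break;
}
```

Even a pass-through pays off in the test change below: once demanded elements propagate through the select, apparently only the lanes produced by the shift-by-5 are demanded, so the vpsrld $1 and the vpblendw that merged the two shifts drop out of the checked output.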
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/known-bits-vector.ll | 4 |
|---|---|---|

1 file changed, 0 insertions, 4 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 46a888f3b9b..9c1346d1be7 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -651,9 +651,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
 ; X32-NEXT: andl $-16, %esp
 ; X32-NEXT: subl $16, %esp
 ; X32-NEXT: vmovaps 8(%ebp), %xmm3
-; X32-NEXT: vpsrld $1, %xmm2, %xmm4
 ; X32-NEXT: vpsrld $5, %xmm2, %xmm2
-; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
 ; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3
 ; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
@@ -665,9 +663,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
 ;
 ; X64-LABEL: knownbits_lshr_and_select_shuffle_uitofp:
 ; X64: # %bb.0:
-; X64-NEXT: vpsrld $1, %xmm2, %xmm4
 ; X64-NEXT: vpsrld $5, %xmm2, %xmm2
-; X64-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
 ; X64-NEXT: vandps {{.*}}(%rip), %xmm3, %xmm3
 ; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X64-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
```

