summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/vec_extract.ll
diff options
context:
space:
mode:
authorSimon Pilgrim <llvm-dev@redking.me.uk>2016-08-22 12:56:54 +0000
committerSimon Pilgrim <llvm-dev@redking.me.uk>2016-08-22 12:56:54 +0000
commit2279e595737524c6133fddad91634a28c12cd07c (patch)
treea37d3d4c092eb008896bf2ab08b04a3a7cb3f579 /llvm/test/CodeGen/X86/vec_extract.ll
parentf0ed16eae58479155639e601bbfbff961f2cc49a (diff)
downloadbcm5719-llvm-2279e595737524c6133fddad91634a28c12cd07c.tar.gz
bcm5719-llvm-2279e595737524c6133fddad91634a28c12cd07c.zip
[X86][SSE] Avoid specifying unused arguments in SHUFPD lowering
As discussed on PR26491, we are missing the opportunity to make use of the smaller MOVHLPS instruction because we set both arguments of a SHUFPD when using it to lower a single input shuffle. This patch sets the lowered argument to UNDEF if that shuffle element is undefined. This in turn makes it easier for target shuffle combining to decode UNDEF shuffle elements, allowing combines to MOVHLPS to occur. A fix to match against MOVHPD stores was necessary as well. This builds on the improved MOVLHPS/MOVHLPS lowering and memory folding support added in D16956. Adding similar support for SHUFPS will have to wait until we have better support for target combining of binary shuffles. Differential Revision: https://reviews.llvm.org/D23027 llvm-svn: 279430
Diffstat (limited to 'llvm/test/CodeGen/X86/vec_extract.ll')
-rw-r--r--llvm/test/CodeGen/X86/vec_extract.ll8
1 files changed, 4 insertions, 4 deletions
diff --git a/llvm/test/CodeGen/X86/vec_extract.ll b/llvm/test/CodeGen/X86/vec_extract.ll
index 47f719d9e32..58d8392b235 100644
--- a/llvm/test/CodeGen/X86/vec_extract.ll
+++ b/llvm/test/CodeGen/X86/vec_extract.ll
@@ -33,7 +33,7 @@ define float @test2(<4 x float>* %F, float* %f) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movaps (%eax), %xmm0
; X32-NEXT: addps %xmm0, %xmm0
-; X32-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp)
; X32-NEXT: popl %eax
@@ -43,7 +43,7 @@ define float @test2(<4 x float>* %F, float* %f) nounwind {
; X64: # BB#0: # %entry
; X64-NEXT: movaps (%rdi), %xmm0
; X64-NEXT: addps %xmm0, %xmm0
-; X64-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT: retq
entry:
%tmp = load <4 x float>, <4 x float>* %F
@@ -78,7 +78,7 @@ define double @test4(double %A) nounwind {
; X32: # BB#0: # %entry
; X32-NEXT: subl $12, %esp
; X32-NEXT: calll foo
-; X32-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X32-NEXT: addsd {{[0-9]+}}(%esp), %xmm0
; X32-NEXT: movsd %xmm0, (%esp)
; X32-NEXT: fldl (%esp)
@@ -90,7 +90,7 @@ define double @test4(double %A) nounwind {
; X64-NEXT: pushq %rax
; X64-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
; X64-NEXT: callq foo
-; X64-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
+; X64-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-NEXT: addsd (%rsp), %xmm0 # 8-byte Folded Reload
; X64-NEXT: popq %rax
; X64-NEXT: retq
OpenPOWER on IntegriCloud