diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-07-02 14:16:25 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-07-02 14:16:25 +0000 |
| commit | 8971b2904ed494684e4029ea86a013ccb02c44ec (patch) | |
| tree | 7754bae56211399522d5444ebb6cb9357d5424ba /llvm/test/CodeGen | |
| parent | 4cb5613c386ebd7b70bc78925a3badf164159b8f (diff) | |
| download | bcm5719-llvm-8971b2904ed494684e4029ea86a013ccb02c44ec.tar.gz bcm5719-llvm-8971b2904ed494684e4029ea86a013ccb02c44ec.zip | |
[X86][SSE] Attempt to combine 64-bit and 32-bit shuffles to unary shuffles before bit shifts
We are combining shuffles to bit shifts before unary permutes, which means we can't fold loads, and the destination register is clobbered (the operation is destructive).
llvm-svn: 306978
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/cast-vsel.ll | 2 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/extract-store.ll | 2 |
2 files changed, 2 insertions, 2 deletions
diff --git a/llvm/test/CodeGen/X86/cast-vsel.ll b/llvm/test/CodeGen/X86/cast-vsel.ll
index 83ab2fac2f1..260535985e2 100644
--- a/llvm/test/CodeGen/X86/cast-vsel.ll
+++ b/llvm/test/CodeGen/X86/cast-vsel.ll
@@ -148,7 +148,7 @@ define <4 x double> @fpext(<4 x double> %a, <4 x double> %b, <4 x float> %c, <4
 ; SSE2-NEXT: andnps %xmm5, %xmm0
 ; SSE2-NEXT: orps %xmm4, %xmm0
 ; SSE2-NEXT: cvtps2pd %xmm0, %xmm2
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE2-NEXT: cvtps2pd %xmm0, %xmm1
 ; SSE2-NEXT: movaps %xmm2, %xmm0
 ; SSE2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/extract-store.ll b/llvm/test/CodeGen/X86/extract-store.ll
index 48cb8d70b97..4ea6b7801fb 100644
--- a/llvm/test/CodeGen/X86/extract-store.ll
+++ b/llvm/test/CodeGen/X86/extract-store.ll
@@ -345,7 +345,7 @@ define void @extract_i64_1(i64* nocapture %dst, <2 x i64> %foo) nounwind {
 ; SSE-X32-LABEL: extract_i64_1:
 ; SSE-X32: # BB#0:
 ; SSE-X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; SSE-X32-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-X32-NEXT: movq %xmm0, (%eax)
 ; SSE-X32-NEXT: retl
 ;

