author    Craig Topper <craig.topper@intel.com>  2019-07-02 17:51:02 +0000
committer Craig Topper <craig.topper@intel.com>  2019-07-02 17:51:02 +0000
commit    cffbaa93b72b307904935c380f90d49d00c7ecdc (patch)
tree      a31e28a995d70ea4c8c32a9287f21210880fe0eb /llvm/test/CodeGen/X86/build-vector-512.ll
parent    36face4c1df75c1e4e82c3f26b0b98495af9359e (diff)
[X86] Add patterns to select (scalar_to_vector (loadf32)) as (V)MOVSSrm instead of COPY_TO_REGCLASS + (V)MOVSSrm_alt.
Similar for (V)MOVSD. Ultimately, I'd like to see about folding scalar_to_vector+load to vzload, which would select as (V)MOVSSrm, so this is a step closer to that.

llvm-svn: 364948
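A minimal sketch (hypothetical function name, not taken from this commit) of the IR shape the new patterns target: a scalar float load inserted into lane 0 of a vector builds (scalar_to_vector (loadf32)) in the SelectionDAG, which can now be selected directly as (V)MOVSSrm instead of COPY_TO_REGCLASS + (V)MOVSSrm_alt.

; Hypothetical example for illustration only; not part of build-vector-512.ll.
define <4 x float> @load_into_lane0(float* %p) {
  ; Load a scalar float from memory.
  %f = load float, float* %p
  ; Insert it into element 0 of an otherwise-undef vector; the DAG represents
  ; this as (scalar_to_vector (loadf32 %p)), which the new patterns select as (V)MOVSSrm.
  %v = insertelement <4 x float> undef, float %f, i32 0
  ret <4 x float> %v
}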
Diffstat (limited to 'llvm/test/CodeGen/X86/build-vector-512.ll')
-rw-r--r--  llvm/test/CodeGen/X86/build-vector-512.ll | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/X86/build-vector-512.ll b/llvm/test/CodeGen/X86/build-vector-512.ll
index f4c2065e1cb..aba8b13db96 100644
--- a/llvm/test/CodeGen/X86/build-vector-512.ll
+++ b/llvm/test/CodeGen/X86/build-vector-512.ll
@@ -39,8 +39,6 @@ define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, fl
;
; AVX-64-LABEL: test_buildvector_v16f32:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
-; AVX-64-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
@@ -48,10 +46,12 @@ define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, fl
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm9[0],mem[0],xmm9[2,3]
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
-; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm8[0],mem[0],xmm8[2,3]
+; AVX-64-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1