Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-fmul.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-reduce-fmul.ll | 145
1 file changed, 53 insertions, 92 deletions
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmul.ll b/llvm/test/CodeGen/X86/vector-reduce-fmul.ll
index 5b68c8003c1..b3a1d440a1c 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmul.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmul.ll
@@ -647,31 +647,26 @@ define float @test_v16f32_one(<16 x float> %a0) {
 define float @test_v2f32_undef(<2 x float> %a0) {
 ; SSE2-LABEL: test_v2f32_undef:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: mulss %xmm0, %xmm1
 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: mulss %xmm1, %xmm0
+; SSE2-NEXT: mulss {{.*}}(%rip), %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: test_v2f32_undef:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: mulss %xmm0, %xmm0
-; SSE41-NEXT: mulss %xmm1, %xmm0
+; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: mulss {{.*}}(%rip), %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_v2f32_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulss %xmm0, %xmm0, %xmm1
 ; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_v2f32_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulss %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX512-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = call float @llvm.experimental.vector.reduce.fmul.f32.f32.v2f32(float undef, <2 x float> %a0)
   ret float %1
@@ -681,23 +676,19 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ; SSE2-LABEL: test_v4f32_undef:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: mulss %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
+; SSE2-NEXT: mulss {{.*}}(%rip), %xmm1
 ; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE2-NEXT: mulss %xmm1, %xmm2
-; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
-; SSE2-NEXT: mulss %xmm2, %xmm1
 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT: mulss %xmm1, %xmm0
+; SSE2-NEXT: mulss %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: test_v4f32_undef:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movaps %xmm0, %xmm1
-; SSE41-NEXT: mulss %xmm0, %xmm1
-; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT: mulss %xmm2, %xmm1
+; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: mulss {{.*}}(%rip), %xmm1
 ; SSE41-NEXT: movaps %xmm0, %xmm2
 ; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE41-NEXT: mulss %xmm1, %xmm2
@@ -707,9 +698,8 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ;
 ; AVX-LABEL: test_v4f32_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -718,9 +708,8 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v4f32_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -734,15 +723,13 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ; SSE2-LABEL: test_v8f32_undef:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: mulss %xmm0, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE2-NEXT: mulss {{.*}}(%rip), %xmm2
 ; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
+; SSE2-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
 ; SSE2-NEXT: mulss %xmm2, %xmm3
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; SSE2-NEXT: mulss %xmm3, %xmm2
 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT: mulss %xmm2, %xmm0
+; SSE2-NEXT: mulss %xmm3, %xmm0
 ; SSE2-NEXT: mulss %xmm1, %xmm0
 ; SSE2-NEXT: movaps %xmm1, %xmm2
 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
@@ -756,10 +743,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; SSE41-LABEL: test_v8f32_undef:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movaps %xmm0, %xmm2
-; SSE41-NEXT: mulss %xmm0, %xmm2
-; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: mulss %xmm3, %xmm2
+; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: mulss {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: movaps %xmm0, %xmm3
 ; SSE41-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
 ; SSE41-NEXT: mulss %xmm2, %xmm3
@@ -777,9 +762,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; AVX-LABEL: test_v8f32_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -797,9 +781,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v8f32_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -822,15 +805,13 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ; SSE2-LABEL: test_v16f32_undef:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: mulss %xmm0, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[2,3]
+; SSE2-NEXT: mulss {{.*}}(%rip), %xmm4
 ; SSE2-NEXT: movaps %xmm0, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[2,3]
+; SSE2-NEXT: movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
 ; SSE2-NEXT: mulss %xmm4, %xmm5
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
-; SSE2-NEXT: mulss %xmm5, %xmm4
 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT: mulss %xmm4, %xmm0
+; SSE2-NEXT: mulss %xmm5, %xmm0
 ; SSE2-NEXT: mulss %xmm1, %xmm0
 ; SSE2-NEXT: movaps %xmm1, %xmm4
 ; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
@@ -862,10 +843,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; SSE41-LABEL: test_v16f32_undef:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movaps %xmm0, %xmm4
-; SSE41-NEXT: mulss %xmm0, %xmm4
-; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: mulss %xmm5, %xmm4
+; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: mulss {{.*}}(%rip), %xmm4
 ; SSE41-NEXT: movaps %xmm0, %xmm5
 ; SSE41-NEXT: movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
 ; SSE41-NEXT: mulss %xmm4, %xmm5
@@ -899,9 +878,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; AVX-LABEL: test_v16f32_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulss %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vmulss %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
 ; AVX-NEXT: vmulss %xmm3, %xmm2, %xmm2
 ; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
@@ -934,9 +912,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v16f32_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT: vmulss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -1426,24 +1403,20 @@ define double @test_v16f64_one(<16 x double> %a0) {
 define double @test_v2f64_undef(<2 x double> %a0) {
 ; SSE-LABEL: test_v2f64_undef:
 ; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm1
-; SSE-NEXT: mulsd %xmm0, %xmm1
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: mulsd %xmm1, %xmm0
+; SSE-NEXT: mulsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: test_v2f64_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulsd %xmm0, %xmm0, %xmm1
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
 ; AVX512-LABEL: test_v2f64_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulsd %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vmulsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vmulsd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT: retq
   %1 = call double @llvm.experimental.vector.reduce.fmul.f64.f64.v2f64(double undef, <2 x double> %a0)
   ret double %1
@@ -1452,10 +1425,8 @@ define double @test_v2f64_undef(<2 x double> %a0) {
 define double @test_v4f64_undef(<4 x double> %a0) {
 ; SSE-LABEL: test_v4f64_undef:
 ; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm2
-; SSE-NEXT: mulsd %xmm0, %xmm2
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: mulsd %xmm2, %xmm0
+; SSE-NEXT: mulsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: mulsd %xmm1, %xmm0
 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: mulsd %xmm1, %xmm0
@@ -1463,9 +1434,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 ;
 ; AVX-LABEL: test_v4f64_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulsd %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vmulsd %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm1
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1475,9 +1445,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v4f64_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulsd %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vmulsd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vmulsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX512-NEXT: vmulsd %xmm0, %xmm1, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1491,10 +1460,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 define double @test_v8f64_undef(<8 x double> %a0) {
 ; SSE-LABEL: test_v8f64_undef:
 ; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm4
-; SSE-NEXT: mulsd %xmm0, %xmm4
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: mulsd %xmm4, %xmm0
+; SSE-NEXT: mulsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: mulsd %xmm1, %xmm0
 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: mulsd %xmm1, %xmm0
@@ -1508,9 +1475,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 ;
 ; AVX-LABEL: test_v8f64_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulsd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vmulsd %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm2, %xmm2
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vmulsd %xmm0, %xmm2, %xmm2
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1527,9 +1493,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v8f64_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulsd %xmm0, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vmulsd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vmulsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; AVX512-NEXT: vmulsd %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
@@ -1551,10 +1516,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 define double @test_v16f64_undef(<16 x double> %a0) {
 ; SSE-LABEL: test_v16f64_undef:
 ; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm8
-; SSE-NEXT: mulsd %xmm0, %xmm8
 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: mulsd %xmm8, %xmm0
+; SSE-NEXT: mulsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: mulsd %xmm1, %xmm0
 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT: mulsd %xmm1, %xmm0
@@ -1580,9 +1543,8 @@ define double @test_v16f64_undef(<16 x double> %a0) {
 ;
 ; AVX-LABEL: test_v16f64_undef:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmulsd %xmm0, %xmm0, %xmm4
-; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT: vmulsd %xmm5, %xmm4, %xmm4
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm4, %xmm4
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT: vmulsd %xmm0, %xmm4, %xmm4
 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1613,9 +1575,8 @@ define double @test_v16f64_undef(<16 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v16f64_undef:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vmulsd %xmm0, %xmm0, %xmm2
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT: vmulsd %xmm3, %xmm2, %xmm2
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vmulsd {{.*}}(%rip), %xmm2, %xmm2
 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; AVX512-NEXT: vmulsd %xmm3, %xmm2, %xmm2
 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
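Context for the checks above: each *_undef test reduces its vector through the llvm.experimental.vector.reduce.fmul intrinsic with an undef start value, and the updated CHECK lines replace the old leading register-with-itself multiply with a single multiply against a constant-pool operand (mulss/mulsd {{.*}}(%rip)), reflecting how the undef seed is now folded. A minimal IR sketch of the tested pattern; the function name sketch_v2f32_undef is illustrative only, while the intrinsic declaration matches the call in the context lines above:

declare float @llvm.experimental.vector.reduce.fmul.f32.f32.v2f32(float, <2 x float>)

define float @sketch_v2f32_undef(<2 x float> %a0) {
  ; Seed the fmul reduction with an undef accumulator, as the tests above do.
  %r = call float @llvm.experimental.vector.reduce.fmul.f32.f32.v2f32(float undef, <2 x float> %a0)
  ret float %r
}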