Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-fadd.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/vector-reduce-fadd.ll | 145
1 file changed, 53 insertions(+), 92 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fadd.ll b/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
index aeeb4791373..fbf9dbc03a6 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fadd.ll
@@ -755,31 +755,26 @@ define float @test_v16f32_zero(<16 x float> %a0) {
 define float @test_v2f32_undef(<2 x float> %a0) {
 ; SSE2-LABEL: test_v2f32_undef:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    addss %xmm0, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT:    addss %xmm1, %xmm0
+; SSE2-NEXT:    addss {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v2f32_undef:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT:    addss %xmm0, %xmm0
-; SSE41-NEXT:    addss %xmm1, %xmm0
+; SSE41-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT:    addss {{.*}}(%rip), %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f32_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm1
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v2f32_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddss %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float undef, <2 x float> %a0)
   ret float %1
@@ -789,23 +784,19 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ; SSE2-LABEL: test_v4f32_undef:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    addss %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
+; SSE2-NEXT:    addss {{.*}}(%rip), %xmm1
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE2-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE2-NEXT:    addss %xmm1, %xmm2
-; SSE2-NEXT:    movaps %xmm0, %xmm1
-; SSE2-NEXT:    movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
-; SSE2-NEXT:    addss %xmm2, %xmm1
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT:    addss %xmm1, %xmm0
+; SSE2-NEXT:    addss %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test_v4f32_undef:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movaps %xmm0, %xmm1
-; SSE41-NEXT:    addss %xmm0, %xmm1
-; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE41-NEXT:    addss %xmm2, %xmm1
+; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT:    addss {{.*}}(%rip), %xmm1
 ; SSE41-NEXT:    movaps %xmm0, %xmm2
 ; SSE41-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
 ; SSE41-NEXT:    addss %xmm1, %xmm2
@@ -815,9 +806,8 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ;
 ; AVX-LABEL: test_v4f32_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX-NEXT:    vaddss %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -826,9 +816,8 @@ define float @test_v4f32_undef(<4 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v4f32_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
@@ -842,15 +831,13 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ; SSE2-LABEL: test_v8f32_undef:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    addss %xmm0, %xmm2
+; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
+; SSE2-NEXT:    addss {{.*}}(%rip), %xmm2
 ; SSE2-NEXT:    movaps %xmm0, %xmm3
-; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
+; SSE2-NEXT:    movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
 ; SSE2-NEXT:    addss %xmm2, %xmm3
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
-; SSE2-NEXT:    addss %xmm3, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT:    addss %xmm2, %xmm0
+; SSE2-NEXT:    addss %xmm3, %xmm0
 ; SSE2-NEXT:    addss %xmm1, %xmm0
 ; SSE2-NEXT:    movaps %xmm1, %xmm2
 ; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
@@ -864,10 +851,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; SSE41-LABEL: test_v8f32_undef:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movaps %xmm0, %xmm2
-; SSE41-NEXT:    addss %xmm0, %xmm2
-; SSE41-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT:    addss %xmm3, %xmm2
+; SSE41-NEXT:    movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT:    addss {{.*}}(%rip), %xmm2
 ; SSE41-NEXT:    movaps %xmm0, %xmm3
 ; SSE41-NEXT:    movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
 ; SSE41-NEXT:    addss %xmm2, %xmm3
@@ -885,9 +870,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; AVX-LABEL: test_v8f32_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX-NEXT:    vaddss %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -905,9 +889,8 @@ define float @test_v8f32_undef(<8 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v8f32_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -930,15 +913,13 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ; SSE2-LABEL: test_v16f32_undef:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps %xmm0, %xmm4
-; SSE2-NEXT:    addss %xmm0, %xmm4
+; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[2,3]
+; SSE2-NEXT:    addss {{.*}}(%rip), %xmm4
 ; SSE2-NEXT:    movaps %xmm0, %xmm5
-; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[2,3]
+; SSE2-NEXT:    movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
 ; SSE2-NEXT:    addss %xmm4, %xmm5
-; SSE2-NEXT:    movaps %xmm0, %xmm4
-; SSE2-NEXT:    movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
-; SSE2-NEXT:    addss %xmm5, %xmm4
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE2-NEXT:    addss %xmm4, %xmm0
+; SSE2-NEXT:    addss %xmm5, %xmm0
 ; SSE2-NEXT:    addss %xmm1, %xmm0
 ; SSE2-NEXT:    movaps %xmm1, %xmm4
 ; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
@@ -970,10 +951,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; SSE41-LABEL: test_v16f32_undef:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movaps %xmm0, %xmm4
-; SSE41-NEXT:    addss %xmm0, %xmm4
-; SSE41-NEXT:    movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT:    addss %xmm5, %xmm4
+; SSE41-NEXT:    movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT:    addss {{.*}}(%rip), %xmm4
 ; SSE41-NEXT:    movaps %xmm0, %xmm5
 ; SSE41-NEXT:    movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
 ; SSE41-NEXT:    addss %xmm4, %xmm5
@@ -1007,9 +986,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; AVX-LABEL: test_v16f32_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm2
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vaddss {{.*}}(%rip), %xmm2, %xmm2
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
@@ -1042,9 +1020,8 @@ define float @test_v16f32_undef(<16 x float> %a0) {
 ;
 ; AVX512-LABEL: test_v16f32_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddss %xmm0, %xmm0, %xmm1
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT:    vaddss %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
@@ -1629,24 +1606,20 @@ define double @test_v16f64_zero(<16 x double> %a0) {
 define double @test_v2f64_undef(<2 x double> %a0) {
 ; SSE-LABEL: test_v2f64_undef:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm1
-; SSE-NEXT:    addsd %xmm0, %xmm1
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT:    addsd %xmm1, %xmm0
+; SSE-NEXT:    addsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_v2f64_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddsd %xmm0, %xmm0, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vaddsd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v2f64_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddsd %xmm0, %xmm0, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vaddsd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v2f64(double undef, <2 x double> %a0)
   ret double %1
@@ -1655,10 +1628,8 @@ define double @test_v2f64_undef(<2 x double> %a0) {
 define double @test_v4f64_undef(<4 x double> %a0) {
 ; SSE-LABEL: test_v4f64_undef:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm2
-; SSE-NEXT:    addsd %xmm0, %xmm2
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT:    addsd %xmm2, %xmm0
+; SSE-NEXT:    addsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT:    addsd %xmm1, %xmm0
@@ -1666,9 +1637,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 ;
 ; AVX-LABEL: test_v4f64_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddsd %xmm0, %xmm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vaddsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1678,9 +1648,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v4f64_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddsd %xmm0, %xmm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vaddsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX512-NEXT:    vaddsd %xmm0, %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1694,10 +1663,8 @@ define double @test_v4f64_undef(<4 x double> %a0) {
 define double @test_v8f64_undef(<8 x double> %a0) {
 ; SSE-LABEL: test_v8f64_undef:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm4
-; SSE-NEXT:    addsd %xmm0, %xmm4
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT:    addsd %xmm4, %xmm0
+; SSE-NEXT:    addsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT:    addsd %xmm1, %xmm0
@@ -1711,9 +1678,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 ;
 ; AVX-LABEL: test_v8f64_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddsd %xmm0, %xmm0, %xmm2
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT:    vaddsd {{.*}}(%rip), %xmm2, %xmm2
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm0, %xmm2, %xmm2
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1730,9 +1696,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v8f64_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddsd %xmm0, %xmm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT:    vaddsd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX512-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
@@ -1754,10 +1719,8 @@ define double @test_v8f64_undef(<8 x double> %a0) {
 define double @test_v16f64_undef(<16 x double> %a0) {
 ; SSE-LABEL: test_v16f64_undef:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm8
-; SSE-NEXT:    addsd %xmm0, %xmm8
 ; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT:    addsd %xmm8, %xmm0
+; SSE-NEXT:    addsd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
 ; SSE-NEXT:    addsd %xmm1, %xmm0
@@ -1783,9 +1746,8 @@ define double @test_v16f64_undef(<16 x double> %a0) {
 ;
 ; AVX-LABEL: test_v16f64_undef:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vaddsd %xmm0, %xmm0, %xmm4
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm5, %xmm4, %xmm4
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT:    vaddsd {{.*}}(%rip), %xmm4, %xmm4
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm0, %xmm4, %xmm4
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
@@ -1816,9 +1778,8 @@ define double @test_v16f64_undef(<16 x double> %a0) {
 ;
 ; AVX512-LABEL: test_v16f64_undef:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vaddsd %xmm0, %xmm0, %xmm2
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT:    vaddsd %xmm3, %xmm2, %xmm2
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT:    vaddsd {{.*}}(%rip), %xmm2, %xmm2
 ; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
 ; AVX512-NEXT:    vaddsd %xmm3, %xmm2, %xmm2
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
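Note: throughout the hunks above, the updated checks replace the old two-instruction start of each reduction on the undef accumulator (a register copy plus an addss/addsd of %xmm0 with itself) with a single addss/addsd against a constant-pool operand loaded via (%rip). A minimal standalone reproducer for one of the affected functions is sketched below; the function body is taken verbatim from the diff's context lines, while the declare line and the llc invocation in the comment are illustrative assumptions, not part of the patch:

; Hypothetical reproducer, e.g.: llc -mtriple=x86_64-unknown-unknown < reduce-fadd.ll
; With the patched backend, the undef accumulator's initial add should fold
; into a single addss against a constant-pool operand.
declare float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float, <2 x float>)

define float @test_v2f32_undef(<2 x float> %a0) {
  %1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float undef, <2 x float> %a0)
  ret float %1
}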