Diffstat (limited to 'llvm/test/CodeGen/X86/haddsub-undef.ll')
-rw-r--r--  llvm/test/CodeGen/X86/haddsub-undef.ll | 199
1 file changed, 132 insertions(+), 67 deletions(-)
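The diff below splits the shared SSE/AVX FileCheck prefixes into -SLOW and -FAST variants so that subtargets with fast horizontal ops are checked separately. The RUN lines themselves fall outside the excerpted hunks; a plausible reconstruction, assuming the x86 fast-hops subtarget attribute, would be:

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,SSE-SLOW
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE-FAST
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST

With fast-hops, a single haddps/haddpd replaces the shuffle-plus-scalar-add sequence even when only one lane of the result is used.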
diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
index 3e5851bacb2..4fcc38d5db9 100644
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -84,17 +84,27 @@ define <4 x float> @test3_undef(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test4_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test4_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
;
-; AVX-LABEL: test4_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test4_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test4_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test4_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
%vecext1 = extractelement <4 x float> %a, i32 1
%add = fadd float %vecext, %vecext1
@@ -103,19 +113,29 @@ define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
}
define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
-; SSE-LABEL: test5_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE-NEXT: addsd %xmm0, %xmm1
-; SSE-NEXT: movapd %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test5_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
;
-; AVX-LABEL: test5_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test5_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test5_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test5_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
%vecext = extractelement <2 x double> %a, i32 0
%vecext1 = extractelement <2 x double> %a, i32 1
%add = fadd double %vecext, %vecext1
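Each of these tests builds only lane 0 of the result from a horizontal pair, which is exactly the pattern the SLOW/FAST split exercises. An illustrative standalone version of the pattern (hypothetical function name, not part of the test file):

  define <2 x double> @hadd_lane0(<2 x double> %a) {
    %e0 = extractelement <2 x double> %a, i32 0
    %e1 = extractelement <2 x double> %a, i32 1
    %sum = fadd double %e0, %e1
    %r = insertelement <2 x double> undef, double %sum, i32 0
    ret <2 x double> %r
  }

Only element 0 of %r is defined, so the slow-hops lowering is free to use a scalar addsd, while fast-hops prefers a full haddpd.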
@@ -166,27 +186,48 @@ define <4 x float> @test7_undef(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test8_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: addss %xmm2, %xmm0
-; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test8_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm0, %xmm1
+; SSE-SLOW-NEXT: movaps %xmm0, %xmm2
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-SLOW-NEXT: addss %xmm2, %xmm0
+; SSE-SLOW-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-SLOW-NEXT: movaps %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
;
-; AVX-LABEL: test8_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test8_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: movaps %xmm0, %xmm1
+; SSE-FAST-NEXT: haddps %xmm0, %xmm1
+; SSE-FAST-NEXT: movaps %xmm0, %xmm2
+; SSE-FAST-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-FAST-NEXT: addss %xmm2, %xmm0
+; SSE-FAST-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-FAST-NEXT: movaps %xmm1, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test8_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test8_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-FAST-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
%vecext1 = extractelement <4 x float> %a, i32 1
%add = fadd float %vecext, %vecext1
@@ -241,14 +282,21 @@ define <8 x float> @test10_undef(<8 x float> %a, <8 x float> %b) {
}
define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
-; SSE-LABEL: test11_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE-NEXT: addss %xmm3, %xmm1
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test11_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm3, %xmm1
+; SSE-SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-SLOW-NEXT: retq
+;
+; SSE-FAST-LABEL: test11_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: haddps %xmm3, %xmm3
+; SSE-FAST-NEXT: movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT: retq
;
; AVX-LABEL: test11_undef:
; AVX: # %bb.0:
@@ -334,23 +382,40 @@ define <16 x float> @test13_v16f32_undef(<16 x float> %a, <16 x float> %b) {
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
-; AVX512-LABEL: test13_v16f32_undef:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; AVX512-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm2
-; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: retq
+; AVX512-SLOW-LABEL: test13_v16f32_undef:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-SLOW-NEXT: retq
+;
+; AVX512-FAST-LABEL: test13_v16f32_undef:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-FAST-NEXT: retq
%vecext = extractelement <16 x float> %a, i32 0
%vecext1 = extractelement <16 x float> %a, i32 1
%add1 = fadd float %vecext, %vecext1
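These CHECK lines follow the autogenerated style (the "# %bb.0:" block labels and "{{.*#+}}" shuffle annotations are its signature), so after a RUN-line change they can be regenerated rather than edited by hand, e.g.:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/haddsub-undef.ll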