Diffstat (limited to 'llvm/test/CodeGen/X86/vector-trunc-math.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-trunc-math.ll | 103
1 file changed, 46 insertions(+), 57 deletions(-)
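For orientation (the hunks below show only the updated SSE CHECK lines), the functions under test follow this general shape. This is a sketch reconstructed from the signature in the first hunk header; the sign-extension and the exact add operand are assumptions, not copied from the test:

    define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
      ; widen to 64 bits, multiply, add a constant, truncate back to 32 bits
      %1 = sext <4 x i32> %a0 to <4 x i64>
      %2 = sext <4 x i32> %a1 to <4 x i64>
      %3 = mul <4 x i64> %1, %2
      %4 = add <4 x i64> %3, <i64 -3, i64 -1, i64 1, i64 3>   ; assumed constant
      %5 = trunc <4 x i64> %4 to <4 x i32>
      ret <4 x i32> %5
    }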
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index fb1b91efb78..d5202a279c9 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -5515,22 +5515,18 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm2, %xmm5
-; SSE-NEXT: paddq %xmm4, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm5
+; SSE-NEXT: movdqa %xmm2, %xmm6
+; SSE-NEXT: pmuludq %xmm4, %xmm6
+; SSE-NEXT: paddq %xmm5, %xmm6
+; SSE-NEXT: psllq $32, %xmm6
; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: paddq %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pmuludq %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm4, %xmm1
+; SSE-NEXT: pmuludq %xmm3, %xmm4
; SSE-NEXT: paddq %xmm1, %xmm4
; SSE-NEXT: psllq $32, %xmm4
; SSE-NEXT: pmuludq %xmm3, %xmm0
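Both the old and the new sequences implement the 32-bit schoolbook expansion of a 64x64-bit multiply. Writing a = (a_hi << 32) | a_lo and b = (b_hi << 32) | b_lo:

    a*b mod 2^64 = a_lo*b_lo + ((a_lo*b_hi + a_hi*b_lo) << 32)   (mod 2^64)

pmuludq forms the 32x32->64 partial products from the low dword of each qword lane, psrlq $32 previously re-extracted the high halves for the cross terms, and paddq/psllq $32 combined them. In the new output those psrlq $32 extractions are gone: the function truncates the v4i64 product back to v4i32, so only the low 32 bits of each lane are demanded, and the cross terms, which land entirely in bits 63:32, can be fed from a pxor-zeroed register instead. This reads like a demanded-bits simplification; the now-redundant multiplies by zero are left in place at this stage.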
@@ -5559,37 +5555,34 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm0, %xmm5
-; SSE-NEXT: paddq %xmm4, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: psrad $31, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: psrad $31, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
+; SSE-NEXT: pxor %xmm8, %xmm8
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm1, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm0, %xmm7
+; SSE-NEXT: paddq %xmm6, %xmm7
+; SSE-NEXT: psllq $32, %xmm7
; SSE-NEXT: pmuludq %xmm0, %xmm1
-; SSE-NEXT: paddq %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: psrlq $32, %xmm0
-; SSE-NEXT: pmuludq %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: paddq %xmm7, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: paddq %xmm3, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: paddq %xmm0, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: paddq %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE-NEXT: paddq %xmm5, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
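In this sign-extended mul_add_self case the psrlq $32 re-extractions disappear for a reason visible in the diff itself: the high 32 bits of each sign-extended lane are exactly the psrad $31 sign words already computed while widening, so the new sequence zero-extends those sign vectors (punpckldq with the pxor-zeroed %xmm8) and feeds them directly into the cross-term pmuludq instructions rather than shifting the widened values back down.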
@@ -5614,22 +5607,18 @@ define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nou
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm1, %xmm5
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: psrlq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm3, %xmm6
-; SSE-NEXT: paddq %xmm5, %xmm6
-; SSE-NEXT: psllq $32, %xmm6
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm6
+; SSE-NEXT: pmuludq %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm3, %xmm7
+; SSE-NEXT: pmuludq %xmm5, %xmm7
+; SSE-NEXT: paddq %xmm6, %xmm7
+; SSE-NEXT: psllq $32, %xmm7
; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: paddq %xmm6, %xmm3
+; SSE-NEXT: paddq %xmm7, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrlq $32, %xmm1
-; SSE-NEXT: pmuludq %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: pmuludq %xmm5, %xmm1
+; SSE-NEXT: pmuludq %xmm4, %xmm5
; SSE-NEXT: paddq %xmm1, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm4, %xmm2
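The mul_add_multiuse_v4i64_v4i32 hunk mirrors the first one: a single pxor-zeroed register replaces both psrlq $32 high-half extractions and is shared by the two cross-term multiplies.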