path: root/llvm/test/CodeGen/X86/vector-trunc-math.ll
author    Craig Topper <craig.topper@intel.com>  2018-11-19 04:33:20 +0000
committer Craig Topper <craig.topper@intel.com>  2018-11-19 04:33:20 +0000
commit    3616891046e7f13a758e53dcc6fa73a7c3232b35 (patch)
tree      8126bffd4f9009478ef780060293bc37d774e892 /llvm/test/CodeGen/X86/vector-trunc-math.ll
parent    053f1eea96eaa8a0a4bb034274fa485655323d32 (diff)
[X86] Use compare with 0 to fill an element with sign bits when sign extending to v2i64 pre-sse4.1
Previously we used an arithmetic shift right by 31, but that requires a copy to preserve the input. So we might as well materialize a zero and compare to it, since the comparison will overwrite the register that contains the zeros. This should be one byte shorter.

llvm-svn: 347181
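For context, a minimal sketch of the two sign-extension idioms, reconstructed from the test diff below (AT&T syntax; register choice is illustrative). Both sequences build the 32-bit sign mask and interleave it above the low elements of %xmm0 to produce sign-extended i64 lanes:

  Before (shift-based; needs a copy because psrad clobbers its operand):
    movdqa    %xmm0, %xmm1        # copy the input to preserve it
    psrad     $31, %xmm1          # each 32-bit element becomes its sign bits
    punpckldq %xmm1, %xmm0        # low two elements sign-extended to i64

  After (compare-based; the compare overwrites the zeroed register):
    pxor      %xmm1, %xmm1        # xmm1 = 0
    pcmpgtd   %xmm0, %xmm1        # xmm1 = (0 > element) ? -1 : 0, i.e. the sign mask
    punpckldq %xmm1, %xmm0        # low two elements sign-extended to i64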
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-trunc-math.ll')
 -rw-r--r--  llvm/test/CodeGen/X86/vector-trunc-math.ll | 67
 1 file changed, 34 insertions(+), 33 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index c8ccab02614..e3b3ee4bb22 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -5569,39 +5569,40 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_self_v4i64_v4i32:
; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psrad $31, %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
-; SSE-NEXT: pmuludq %xmm0, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
-; SSE-NEXT: pmuludq %xmm1, %xmm5
-; SSE-NEXT: paddq %xmm5, %xmm2
-; SSE-NEXT: psllq $32, %xmm2
-; SSE-NEXT: paddq %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,1,3]
-; SSE-NEXT: pmuludq %xmm3, %xmm0
-; SSE-NEXT: pmuludq %xmm6, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,1,3]
-; SSE-NEXT: pmuludq %xmm6, %xmm1
-; SSE-NEXT: paddq %xmm1, %xmm0
-; SSE-NEXT: psllq $32, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
-; SSE-NEXT: paddd %xmm2, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: pxor %xmm8, %xmm8
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: pxor %xmm7, %xmm7
+; SSE-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: pxor %xmm6, %xmm6
+; SSE-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pcmpgtd %xmm1, %xmm5
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm1, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm0, %xmm5
+; SSE-NEXT: paddq %xmm7, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: paddq %xmm5, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm2, %xmm6
+; SSE-NEXT: paddq %xmm3, %xmm6
+; SSE-NEXT: psllq $32, %xmm6
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: paddq %xmm6, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE-NEXT: paddd %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_add_self_v4i64_v4i32: