path: root/llvm/test/CodeGen/X86/avx-arith.ll
author    Simon Pilgrim <llvm-dev@redking.me.uk>  2016-12-21 20:00:10 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>  2016-12-21 20:00:10 +0000
commit    081abbb164cceea0ff5b70d1557f2cf31198f5b9 (patch)
tree      9b712808d6be653cbd6b052d85fa932a9714d3c7 /llvm/test/CodeGen/X86/avx-arith.ll
parent    b0761a0c1ba8ec77d3704d2450d481bc25e60a9d (diff)
[X86][SSE] Improve lowering of vXi64 multiplies
As mentioned on PR30845, we were performing our vXi64 multiplication as:

  AloBlo = pmuludq(a, b);
  AloBhi = pmuludq(a, psrlqi(b, 32));
  AhiBlo = pmuludq(psrlqi(a, 32), b);
  return AloBlo + psllqi(AloBhi, 32) + psllqi(AhiBlo, 32);

when we could avoid one of the upper shifts with:

  AloBlo = pmuludq(a, b);
  AloBhi = pmuludq(a, psrlqi(b, 32));
  AhiBlo = pmuludq(psrlqi(a, 32), b);
  return AloBlo + psllqi(AloBhi + AhiBlo, 32);

This matches the lowering used by gcc/icc.

Differential Revision: https://reviews.llvm.org/D27756

llvm-svn: 290267
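For illustration, the new form maps directly onto SSE2 intrinsics. The sketch below is hypothetical user-level code showing the arithmetic only (the patch itself changes the backend's DAG lowering, not any intrinsics API); mul_v2i64 is an assumed helper name covering one 128-bit half of the v4i64 operation tested below.

  #include <emmintrin.h>  /* SSE2 intrinsics: pmuludq, psrlq, psllq, paddq */

  /* Hypothetical sketch of the improved lowering for one v2i64 half. */
  static __m128i mul_v2i64(__m128i a, __m128i b) {
    __m128i alo_blo = _mm_mul_epu32(a, b);                     /* pmuludq(a, b)             */
    __m128i alo_bhi = _mm_mul_epu32(a, _mm_srli_epi64(b, 32)); /* pmuludq(a, psrlqi(b, 32)) */
    __m128i ahi_blo = _mm_mul_epu32(_mm_srli_epi64(a, 32), b); /* pmuludq(psrlqi(a, 32), b) */
    /* One shared shift of the summed cross terms saves a psllq. */
    __m128i hi = _mm_slli_epi64(_mm_add_epi64(alo_bhi, ahi_blo), 32);
    return _mm_add_epi64(alo_blo, hi);
  }

In the updated CHECK lines below, this pattern appears once per 128-bit lane: vextractf128 splits each v4i64 operand, and each lane now performs a single vpsllq after a vpaddq of the two cross products instead of two independent shifts.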
Diffstat (limited to 'llvm/test/CodeGen/X86/avx-arith.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx-arith.ll | 22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx-arith.ll b/llvm/test/CodeGen/X86/avx-arith.ll
index 66c09e0dfa3..82d890a08cf 100644
--- a/llvm/test/CodeGen/X86/avx-arith.ll
+++ b/llvm/test/CodeGen/X86/avx-arith.ll
@@ -323,24 +323,22 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
-; CHECK-NEXT: vpmuludq %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vpsrlq $32, %xmm3, %xmm4
+; CHECK-NEXT: vpmuludq %xmm2, %xmm4, %xmm4
; CHECK-NEXT: vpsrlq $32, %xmm2, %xmm5
; CHECK-NEXT: vpmuludq %xmm5, %xmm3, %xmm5
-; CHECK-NEXT: vpsllq $32, %xmm5, %xmm5
-; CHECK-NEXT: vpaddq %xmm5, %xmm4, %xmm4
-; CHECK-NEXT: vpsrlq $32, %xmm3, %xmm3
+; CHECK-NEXT: vpaddq %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vpsllq $32, %xmm4, %xmm4
; CHECK-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; CHECK-NEXT: vpsllq $32, %xmm2, %xmm2
-; CHECK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
-; CHECK-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; CHECK-NEXT: vpsrlq $32, %xmm0, %xmm3
+; CHECK-NEXT: vpmuludq %xmm1, %xmm3, %xmm3
; CHECK-NEXT: vpsrlq $32, %xmm1, %xmm4
; CHECK-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
-; CHECK-NEXT: vpsllq $32, %xmm4, %xmm4
-; CHECK-NEXT: vpaddq %xmm4, %xmm3, %xmm3
-; CHECK-NEXT: vpsrlq $32, %xmm0, %xmm0
+; CHECK-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vpsllq $32, %xmm3, %xmm3
; CHECK-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpsllq $32, %xmm0, %xmm0
-; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; CHECK-NEXT: retq
%x = mul <4 x i64> %i, %j