author     Sanjay Patel <spatel@rotateright.com>  2016-06-16 16:58:54 +0000
committer  Sanjay Patel <spatel@rotateright.com>  2016-06-16 16:58:54 +0000
commit     f664f3a5786545e73e2d99ddb3ca1ce1d8049c83 (patch)
tree       43d736ce0b4684ef2d48cd303a60e0a3574415ad /llvm/test/CodeGen/X86
parent     ab3d91b8f1d322782a00eb4200c7e4381eebe105 (diff)
[DAG] Remove redundant FMUL in Newton-Raphson SQRT code
When calculating a square root using Newton-Raphson with two constants, a naive implementation uses five multiplications (four muls to calculate the reciprocal square root and another one to calculate the square root itself). However, after some reassociation and CSE the same result can be obtained with only four multiplications. Unfortunately, there's no reliable way to do such a reassociation in the back-end. So, the patch modifies the NR code itself so that it directly builds optimal code for SQRT and doesn't rely on any further reassociation.

Patch by Nikolai Bozhenov!

Differential Revision: http://reviews.llvm.org/D21127

llvm-svn: 272920
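The arithmetic behind the mul-count claim, as a minimal C sketch (illustrative only: the patch itself changes the DAG-level expansion, not C source, and these helper names are hypothetical). It assumes the two-constant Newton-Raphson refinement with -3.0 and -0.5, which is what the two constant loads (VMOVSSrm) in the new MIR test below appear to be; e is a hardware reciprocal-square-root estimate such as the result of vrsqrtss.

    /* One refinement step, starting from e ~= 1/sqrt(x). */

    /* Naive: refine the reciprocal estimate, then multiply by x.
       Five multiplications; note there is no common subexpression
       for CSE to exploit. (A mul feeding an add can become an FMA.) */
    static float sqrt_naive(float x, float e) {
        float e2 = e * e;             /* mul 1 */
        float f  = x * e2 - 3.0f;     /* mul 2 (+ add)            */
        float r  = (e * -0.5f) * f;   /* muls 3 and 4: rsqrt(x)   */
        return x * r;                 /* mul 5: sqrt = x * rsqrt  */
    }

    /* Reassociated: hoist h = x*e so that x*e*e == h*e and the
       final x*r becomes h*(-0.5f)*f. Four multiplications for the
       same value. */
    static float sqrt_reassoc(float x, float e) {
        float h = x * e;              /* mul 1           */
        float f = h * e - 3.0f;       /* mul 2 (+ add)   */
        return (h * -0.5f) * f;       /* muls 3 and 4    */
    }

Both compute x * e * -0.5 * (x*e*e - 3.0); the reassociated form is what the modified expansion emits directly, since the back-end cannot be relied on to rediscover it afterwards.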
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll  | 52
-rw-r--r--  llvm/test/CodeGen/X86/sqrt-fastmath.ll      | 29
2 files changed, 66 insertions(+), 15 deletions(-)
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
new file mode 100644
index 00000000000..750b4d96e5d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -0,0 +1,52 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2,fma -recip=sqrt:2 -stop-after=expand-isel-pseudos 2>&1 | FileCheck %s
+
+declare float @llvm.sqrt.f32(float) #0
+
+define float @foo(float %f) #0 {
+; CHECK: {{name: *foo}}
+; CHECK: body:
+; CHECK: %0 = COPY %xmm0
+; CHECK: %1 = VRSQRTSSr killed %2, %0
+; CHECK: %3 = VMULSSrr %0, %1
+; CHECK: %4 = VMOVSSrm
+; CHECK: %5 = VFMADDSSr213r %1, killed %3, %4
+; CHECK: %6 = VMOVSSrm
+; CHECK: %7 = VMULSSrr %1, %6
+; CHECK: %8 = VMULSSrr killed %7, killed %5
+; CHECK: %9 = VMULSSrr %0, %8
+; CHECK: %10 = VFMADDSSr213r %8, %9, %4
+; CHECK: %11 = VMULSSrr %9, %6
+; CHECK: %12 = VMULSSrr killed %11, killed %10
+; CHECK: %13 = FsFLD0SS
+; CHECK: %14 = VCMPSSrr %0, killed %13, 0
+; CHECK: %15 = VFsANDNPSrr killed %14, killed %12
+; CHECK: %xmm0 = COPY %15
+; CHECK: RET 0, %xmm0
+ %call = tail call float @llvm.sqrt.f32(float %f) #1
+ ret float %call
+}
+
+define float @rfoo(float %f) #0 {
+; CHECK: {{name: *rfoo}}
+; CHECK: body: |
+; CHECK: %0 = COPY %xmm0
+; CHECK: %1 = VRSQRTSSr killed %2, %0
+; CHECK: %3 = VMULSSrr %0, %1
+; CHECK: %4 = VMOVSSrm
+; CHECK: %5 = VFMADDSSr213r %1, killed %3, %4
+; CHECK: %6 = VMOVSSrm
+; CHECK: %7 = VMULSSrr %1, %6
+; CHECK: %8 = VMULSSrr killed %7, killed %5
+; CHECK: %9 = VMULSSrr %0, %8
+; CHECK: %10 = VFMADDSSr213r %8, killed %9, %4
+; CHECK: %11 = VMULSSrr %8, %6
+; CHECK: %12 = VMULSSrr killed %11, killed %10
+; CHECK: %xmm0 = COPY %12
+; CHECK: RET 0, %xmm0
+ %sqrt = tail call float @llvm.sqrt.f32(float %f)
+ %div = fdiv fast float 1.0, %sqrt
+ ret float %div
+}
+
+attributes #0 = { "unsafe-fp-math"="true" }
+attributes #1 = { nounwind readnone }
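For contrast with @foo, the reciprocal case @rfoo above has no trailing multiply by x, so there is nothing to fold and each step keeps the classic shape; a sketch under the same assumptions as the one after the commit message:

    /* Classic two-constant Newton-Raphson rsqrt step (the @rfoo
       pattern): four multiplications, none removable by
       reassociation since no final multiply by x follows. */
    static float rsqrt_step(float x, float e) {
        float f = x * e * e - 3.0f;   /* muls 1 and 2 (+ add) */
        return (e * -0.5f) * f;       /* muls 3 and 4         */
    }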
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index 386409a674e..1c6b13026a7 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -34,12 +34,11 @@ define float @ff(float %f) #0 {
; ESTIMATE-LABEL: ff:
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
-; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm2
-; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm3
-; ESTIMATE-NEXT: vmulss %xmm3, %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; ESTIMATE-NEXT: vmulss %xmm1, %xmm2, %xmm1
; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
-; ESTIMATE-NEXT: vmulss %xmm0, %xmm2, %xmm2
-; ESTIMATE-NEXT: vmulss %xmm2, %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; ESTIMATE-NEXT: vmulss %xmm1, %xmm2, %xmm1
; ESTIMATE-NEXT: vxorps %xmm2, %xmm2, %xmm2
; ESTIMATE-NEXT: vcmpeqss %xmm2, %xmm0, %xmm0
; ESTIMATE-NEXT: vandnps %xmm1, %xmm0, %xmm0
@@ -78,11 +77,11 @@ define float @reciprocal_square_root(float %x) #0 {
; ESTIMATE-LABEL: reciprocal_square_root:
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
-; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm2
-; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
+; ESTIMATE-NEXT: vmulss %xmm1, %xmm1, %xmm2
; ESTIMATE-NEXT: vmulss %xmm2, %xmm0, %xmm0
+; ESTIMATE-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
+; ESTIMATE-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
+; ESTIMATE-NEXT: vmulss %xmm0, %xmm1, %xmm0
; ESTIMATE-NEXT: retq
%sqrt = tail call float @llvm.sqrt.f32(float %x)
%div = fdiv fast float 1.0, %sqrt
@@ -100,11 +99,11 @@ define <4 x float> @reciprocal_square_root_v4f32(<4 x float> %x) #0 {
; ESTIMATE-LABEL: reciprocal_square_root_v4f32:
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtps %xmm0, %xmm1
-; ESTIMATE-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; ESTIMATE-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; ESTIMATE-NEXT: vmulps %xmm1, %xmm1, %xmm2
+; ESTIMATE-NEXT: vmulps %xmm2, %xmm0, %xmm0
; ESTIMATE-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %xmm1, %xmm1
-; ESTIMATE-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; ESTIMATE-NEXT: vmulps %xmm0, %xmm1, %xmm0
; ESTIMATE-NEXT: retq
%sqrt = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
%div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
@@ -125,11 +124,11 @@ define <8 x float> @reciprocal_square_root_v8f32(<8 x float> %x) #0 {
; ESTIMATE-LABEL: reciprocal_square_root_v8f32:
; ESTIMATE: # BB#0:
; ESTIMATE-NEXT: vrsqrtps %ymm0, %ymm1
-; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm1, %ymm1, %ymm2
+; ESTIMATE-NEXT: vmulps %ymm2, %ymm0, %ymm0
; ESTIMATE-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
-; ESTIMATE-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
; ESTIMATE-NEXT: retq
%sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
%div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt