author     Sanjay Patel <spatel@rotateright.com>    2018-01-19 22:47:49 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2018-01-19 22:47:49 +0000
commit     4127e77e1347f7af05315236b37b8e91ac707938 (patch)
tree       a4300809a3099db1e299f69b4e077d68f036b491
parent     0fb904325a0402308ce02654f2202ebee8d1dd1d (diff)
[x86] add tests for sqrt estimate that should respect denorms; NFC (PR34994)
llvm-svn: 323003
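
The new tests call two routines that are not declared inside the added hunk. Presumably sqrt-fastmath.ll already contains declarations along these lines; they are shown here only for context and are not part of this commit:

; Assumed pre-existing declarations in llvm/test/CodeGen/X86/sqrt-fastmath.ll
; (not added by this hunk; shown for context only).
declare float @__sqrtf_finite(float)
declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)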
-rw-r--r--  llvm/test/CodeGen/X86/sqrt-fastmath.ll | 67
1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index ede954d92d3..d458994a4e8 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -108,6 +108,72 @@ define x86_fp80 @finite_f80_estimate_but_no(x86_fp80 %ld) #1 {
ret x86_fp80 %call
}
+; PR34994 - https://bugs.llvm.org/show_bug.cgi?id=34994
+
+define float @sqrtf_check_denorms(float %x) #3 {
+; SSE-LABEL: sqrtf_check_denorms:
+; SSE: # %bb.0:
+; SSE-NEXT: rsqrtss %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: mulss %xmm1, %xmm2
+; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: mulss %xmm2, %xmm3
+; SSE-NEXT: mulss %xmm1, %xmm2
+; SSE-NEXT: addss {{.*}}(%rip), %xmm2
+; SSE-NEXT: mulss %xmm3, %xmm2
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: cmpeqss %xmm1, %xmm0
+; SSE-NEXT: andnps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrtf_check_denorms:
+; AVX: # %bb.0:
+; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vcmpeqss %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %call = tail call float @__sqrtf_finite(float %x) #2
+ ret float %call
+}
+
+define <4 x float> @sqrt_v4f32_check_denorms(<4 x float> %x) #3 {
+; SSE-LABEL: sqrt_v4f32_check_denorms:
+; SSE: # %bb.0:
+; SSE-NEXT: rsqrtps %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: mulps %xmm1, %xmm2
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
+; SSE-NEXT: mulps %xmm2, %xmm3
+; SSE-NEXT: mulps %xmm1, %xmm2
+; SSE-NEXT: addps {{.*}}(%rip), %xmm2
+; SSE-NEXT: mulps %xmm3, %xmm2
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: cmpneqps %xmm1, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrt_v4f32_check_denorms:
+; AVX: # %bb.0:
+; AVX-NEXT: vrsqrtps %xmm0, %xmm1
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmulps {{.*}}(%rip), %xmm2, %xmm3
+; AVX-NEXT: vmulps %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vaddps {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmulps %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vcmpneqps %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %call = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x) #2
+ ret <4 x float> %call
+}
+
define float @f32_no_estimate(float %x) #0 {
; SSE-LABEL: f32_no_estimate:
; SSE: # %bb.0:
@@ -263,4 +329,5 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!sqrtf,!vec-sqrtf,!divf,!vec-divf" }
attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="sqrt,vec-sqrt" }
attributes #2 = { nounwind readnone }
+attributes #3 = { "unsafe-fp-math"="true" "reciprocal-estimates"="sqrt,vec-sqrt" "denormal-fp-math"="ieee" }
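
For reference, the SSE CHECK sequence in sqrtf_check_denorms corresponds to one Newton-Raphson refinement of the hardware rsqrt estimate, followed by a compare-against-zero mask so that sqrt(0.0) produces 0.0 rather than 0 * inf = NaN. Below is a rough C sketch of that expansion, not the compiler's implementation: the -0.5 constant is visible in the vector test, while -3.0 is the conventional companion constant and is assumed here because the actual constant-pool operands are hidden behind the {{.*}}(%rip) patterns.

#include <xmmintrin.h>

/* Sketch of the scalar expansion encoded by the SSE CHECK lines:
 *   e       = rsqrtss(x)                         -- hardware estimate of 1/sqrt(x)
 *   sqrt(x) ~= (-0.5f * x * e) * (x * e * e - 3.0f)
 * followed by an x == 0 mask so sqrt(0) is 0 instead of 0 * inf = NaN.
 * The -3.0 constant is assumed (standard Newton-Raphson value). */
static float sqrtf_estimate_sketch(float x) {
    __m128 vx   = _mm_set_ss(x);
    __m128 e    = _mm_rsqrt_ss(vx);                    /* rsqrtss              */
    __m128 xe   = _mm_mul_ss(vx, e);                   /* x * e                */
    __m128 t0   = _mm_mul_ss(_mm_set_ss(-0.5f), xe);   /* -0.5 * x * e         */
    __m128 xee  = _mm_mul_ss(xe, e);                   /* x * e * e            */
    __m128 t1   = _mm_add_ss(xee, _mm_set_ss(-3.0f));  /* x * e * e - 3.0      */
    __m128 r    = _mm_mul_ss(t0, t1);                  /* refined estimate     */
    __m128 mask = _mm_cmpeq_ss(vx, _mm_setzero_ps());  /* all-ones if x == 0   */
    r = _mm_andnot_ps(mask, r);                        /* force sqrt(0) -> 0   */
    return _mm_cvtss_f32(r);
}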