path: root/llvm/test/CodeGen/ARM/pow.ll
author    Sanjay Patel <spatel@rotateright.com>  2018-09-05 17:01:56 +0000
committer Sanjay Patel <spatel@rotateright.com>  2018-09-05 17:01:56 +0000
commit    dbf52837fea5b9588f69632d5e067fa3e323de97 (patch)
tree      609aca6333e43ff014770bcce3a60d0287350c80 /llvm/test/CodeGen/ARM/pow.ll
parent    3daf3e707234309889a5c8b484c66ac2be0f7a45 (diff)
[DAGCombiner] try to convert pow(x, 0.25) to sqrt(sqrt(x))
This was proposed as an IR transform in D49306, but it was not clearly justifiable as a canonicalization. Here, we only do the transform when the target tells us that sqrt can be lowered with inline code.

This is the basic case. Some potential enhancements are in the TODO comments:
1. Generalize the transform for other exponents (allow more than 2 sqrt calcs if that's really cheaper).
2. If we have fewer fast-math-flags, generate code to avoid -0.0 and/or INF.
3. Allow the transform when optimizing/minimizing size (might require a target hook to get that right).

Note that by default, x86 converts single-precision sqrt calcs into a sqrt reciprocal estimate with refinement. That codegen is controlled by CPU attributes and can be manually overridden. We have plenty of test coverage for that already, so I didn't bother to include extra testing for it here. AArch64 uses its full-precision ops in all cases (not sure if that's the intended behavior or not, but that should also be covered by existing tests).

Differential Revision: https://reviews.llvm.org/D51630

llvm-svn: 341481
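For reference, a minimal IR-level sketch of the rewrite (the combine itself runs on SelectionDAG nodes, so this is only the conceptual equivalent; the function name is illustrative):

  declare float @llvm.sqrt.f32(float)

  define float @pow_quarter_sketch(float %x) {
    ; pow(x, 0.25) becomes sqrt(sqrt(x)) when nsz+ninf+afn are present.
    ; nsz is needed because sqrt(sqrt(-0.0)) yields -0.0 while
    ; pow(-0.0, 0.25) yields +0.0; ninf is needed because
    ; sqrt(sqrt(-INF)) yields NaN while pow(-INF, 0.25) yields +INF.
    %s1 = call nsz ninf afn float @llvm.sqrt.f32(float %x)
    %r = call nsz ninf afn float @llvm.sqrt.f32(float %s1)
    ret float %r
  }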
Diffstat (limited to 'llvm/test/CodeGen/ARM/pow.ll')
-rw-r--r--  llvm/test/CodeGen/ARM/pow.ll | 92
1 file changed, 92 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/pow.ll b/llvm/test/CodeGen/ARM/pow.ll
new file mode 100644
index 00000000000..2b3df92aab5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/pow.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefixes=ANY,SOFTFLOAT
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabihf -mattr=neon | FileCheck %s --check-prefixes=ANY,HARDFLOAT
+
+declare float @llvm.pow.f32(float, float)
+declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
+
+declare double @llvm.pow.f64(double, double)
+declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
+
+define float @pow_f32_one_fourth_fmf(float %x) nounwind {
+; ANY-LABEL: pow_f32_one_fourth_fmf:
+; SOFTFLOAT: bl powf
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+ %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 2.5e-01)
+ ret float %r
+}
+
+define double @pow_f64_one_fourth_fmf(double %x) nounwind {
+; ANY-LABEL: pow_f64_one_fourth_fmf:
+; SOFTFLOAT: bl pow
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+ %r = call nsz ninf afn double @llvm.pow.f64(double %x, double 2.5e-01)
+ ret double %r
+}
+
+define <4 x float> @pow_v4f32_one_fourth_fmf(<4 x float> %x) nounwind {
+; ANY-LABEL: pow_v4f32_one_fourth_fmf:
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+ %r = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @pow_v2f64_one_fourth_fmf(<2 x double> %x) nounwind {
+; ANY-LABEL: pow_v2f64_one_fourth_fmf:
+; SOFTFLOAT: bl pow
+; SOFTFLOAT: bl pow
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+ %r = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
+ ret <2 x double> %r
+}
+
+define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
+; ANY-LABEL: pow_f32_one_fourth_not_enough_fmf:
+; SOFTFLOAT: bl powf
+; HARDFLOAT: b powf
+ %r = call afn ninf float @llvm.pow.f32(float %x, float 2.5e-01)
+ ret float %r
+}
+
+define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
+; ANY-LABEL: pow_f64_one_fourth_not_enough_fmf:
+; SOFTFLOAT: bl pow
+; HARDFLOAT: b pow
+ %r = call nsz ninf double @llvm.pow.f64(double %x, double 2.5e-01)
+ ret double %r
+}
+
+define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind {
+; ANY-LABEL: pow_v4f32_one_fourth_not_enough_fmf:
+; ANY: bl powf
+; ANY: bl powf
+; ANY: bl powf
+; ANY: bl powf
+ %r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind {
+; ANY-LABEL: pow_v2f64_one_fourth_not_enough_fmf:
+; ANY: bl pow
+; ANY: bl pow
+ %r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
+ ret <2 x double> %r
+}
+
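The test's CHECK lines were autogenerated (see the NOTE at the top of the file), so after a codegen change they can be regenerated rather than hand-edited. A typical invocation, assuming llc was built into build/bin, is:

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/ARM/pow.ll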