-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 41
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rcp-pattern.ll   | 35
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rsq.ll           | 64
3 files changed, 123 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 6f56920cbf8..25ba21edb72 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2464,22 +2464,31 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
-    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) &&
-        CLHS->isExactlyValue(1.0)) {
-      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
-      // the CI documentation has a worst case error of 1 ulp.
-      // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
-      // use it as long as we aren't trying to use denormals.
-
-      // 1.0 / sqrt(x) -> rsq(x)
-      //
-      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
-      // error seems really high at 2^29 ULP.
-      if (RHS.getOpcode() == ISD::FSQRT)
-        return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
-
-      // 1.0 / x -> rcp(x)
-      return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) {
+
+      if (CLHS->isExactlyValue(1.0)) {
+        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
+        // the CI documentation has a worst case error of 1 ulp.
+        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
+        // use it as long as we aren't trying to use denormals.
+
+        // 1.0 / sqrt(x) -> rsq(x)
+        //
+        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+        // error seems really high at 2^29 ULP.
+        if (RHS.getOpcode() == ISD::FSQRT)
+          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
+
+        // 1.0 / x -> rcp(x)
+        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+      }
+
+      // Same as for 1.0, but expand the sign out of the constant.
+      if (CLHS->isExactlyValue(-1.0)) {
+        // -1.0 / x -> rcp (fneg x)
+        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
+        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
+      }
     }
   }
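
With the guard hoisted, the same condition (UnsafeFPMath, or f32 with denormals disabled) now covers both numerator constants. A minimal IR sketch of the three shapes the block above recognizes; the function names, attributes, and instruction mappings here are illustrative only and are not tests added by this change:

; 1.0 / x  -> AMDGPUISD::RCP x  (v_rcp_f32)
define void @sketch_rcp(float addrspace(1)* %out, float %x) #0 {
  %r = fdiv float 1.0, %x
  store float %r, float addrspace(1)* %out, align 4
  ret void
}

; 1.0 / sqrt(x)  -> AMDGPUISD::RSQ x  (v_rsq_f32)
define void @sketch_rsq(float addrspace(1)* %out, float %x) #0 {
  %sqrt = call float @llvm.sqrt.f32(float %x)
  %r = fdiv float 1.0, %sqrt
  store float %r, float addrspace(1)* %out, align 4
  ret void
}

; -1.0 / x (new in this change)  -> AMDGPUISD::RCP (fneg x)  (v_rcp_f32 with a negated source)
define void @sketch_neg_rcp(float addrspace(1)* %out, float %x) #0 {
  %r = fdiv float -1.0, %x
  store float %r, float addrspace(1)* %out, align 4
  ret void
}

declare float @llvm.sqrt.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }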
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 27a88f7b59e..9eb76eb290d 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -76,8 +76,22 @@ define void @rcp_fabs_pat_f32(float addrspace(1)* %out, float %src) #0 {
ret void
}
-; FIXME: fneg folded into constant 1
+; FUNC-LABEL: {{^}}neg_rcp_pat_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[SRC]]
+; GCN: buffer_store_dword [[RCP]]
+
+; EG: RECIP_IEEE
+define void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
+ %rcp = fdiv float -1.0, %src
+ store float %rcp, float addrspace(1)* %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}rcp_fabs_fneg_pat_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -|[[SRC]]|
+; GCN: buffer_store_dword [[RCP]]
define void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
%src.fabs = call float @llvm.fabs.f32(float %src)
%src.fabs.fneg = fsub float -0.0, %src.fabs
@@ -86,8 +100,27 @@ define void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
ret void
}
+; FUNC-LABEL: {{^}}rcp_fabs_fneg_pat_multi_use_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -|[[SRC]]|
+; GCN: buffer_store_dword [[RCP]]
+
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[SRC]], -|[[SRC]]|
+; GCN: buffer_store_dword [[MUL]]
+define void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %src) #0 {
+ %src.fabs = call float @llvm.fabs.f32(float %src)
+ %src.fabs.fneg = fsub float -0.0, %src.fabs
+ %rcp = fdiv float 1.0, %src.fabs.fneg
+ store volatile float %rcp, float addrspace(1)* %out, align 4
+
+ %other = fmul float %src, %src.fabs.fneg
+ store volatile float %other, float addrspace(1)* %out, align 4
+ ret void
+}
+
declare float @llvm.fabs.f32(float) #1
+declare float @llvm.sqrt.f32(float) #1
attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind readnone }
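
The hunk above also adds a declaration of @llvm.sqrt.f32, though no sqrt-based test is visible in this excerpt. As a rough sketch only (the function name and check line are assumptions, not part of the change, and reuse this file's existing attributes), a 1.0 / sqrt(x) pattern that the same lowering turns into the hardware reciprocal square root would look something like:

; FUNC-LABEL: {{^}}rsq_pat_f32_sketch:
; GCN: v_rsq_f32
define void @rsq_pat_f32_sketch(float addrspace(1)* %out, float %src) #0 {
  %sqrt = call float @llvm.sqrt.f32(float %src)
  %rsq = fdiv float 1.0, %sqrt
  store float %rsq, float addrspace(1)* %out, align 4
  ret void
}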
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.ll b/llvm/test/CodeGen/AMDGPU/rsq.ll
index 8192b861b60..699440c3efb 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.ll
@@ -72,3 +72,67 @@ define void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
store float %z, float addrspace(1)* %out.gep
ret void
}
+
+; SI-LABEL: {{^}}neg_rsq_f32:
+; SI-SAFE: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], v{{[0-9]+}}
+; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
+; SI-SAFE: buffer_store_dword [[RSQ]]
+
+; SI-UNSAFE: v_rsq_f32_e32 [[RSQ:v[0-9]+]], v{{[0-9]+}}
+; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
+; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
+define void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float, float addrspace(1)* %in, align 4
+ %sqrt = call float @llvm.sqrt.f32(float %val)
+ %div = fdiv float -1.0, %sqrt
+ store float %div, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_f64:
+; SI-SAFE: v_sqrt_f64_e32
+; SI-SAFE: v_div_scale_f64
+
+; SI-UNSAFE: v_sqrt_f64_e32 [[SQRT:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}
+; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
+; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
+define void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+ %val = load double, double addrspace(1)* %in, align 4
+ %sqrt = call double @llvm.sqrt.f64(double %val)
+ %div = fdiv double -1.0, %sqrt
+ store double %div, double addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_neg_f32:
+; SI-SAFE: v_sqrt_f32_e64 [[SQRT:v[0-9]+]], -v{{[0-9]+}}
+; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
+; SI-SAFE: buffer_store_dword [[RSQ]]
+
+; SI-UNSAFE: v_rsq_f32_e64 [[RSQ:v[0-9]+]], -v{{[0-9]+}}
+; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
+; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
+define void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+ %val = load float, float addrspace(1)* %in, align 4
+ %val.fneg = fsub float -0.0, %val
+ %sqrt = call float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv float -1.0, %sqrt
+ store float %div, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_neg_f64:
+; SI-SAFE: v_sqrt_f64_e64 v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; SI-SAFE: v_div_scale_f64
+
+; SI-UNSAFE: v_sqrt_f64_e64 [[SQRT:v\[[0-9]+:[0-9]+\]]], -v{{\[[0-9]+:[0-9]+\]}}
+; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
+; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
+define void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+ %val = load double, double addrspace(1)* %in, align 4
+ %val.fneg = fsub double -0.0, %val
+ %sqrt = call double @llvm.sqrt.f64(double %val.fneg)
+ %div = fdiv double -1.0, %sqrt
+ store double %div, double addrspace(1)* %out, align 4
+ ret void
+}
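
Why the -1.0 folds above hold up even in the SI-SAFE f32 runs: negating a float only flips its sign bit (the SI-UNSAFE checks show exactly that as a v_xor_b32 with 0x80000000), and -1.0 / x has the same value as 1.0 / (-x), so moving the sign onto the denominator adds no error beyond the rcp approximation itself. A minimal sketch, not part of the commit, of the two equivalent forms that should both reach a v_rcp_f32 with a negated source operand once f32 denormals are disabled:

; Illustrative only; the names and comments below are not from the change.
define void @neg_rcp_equivalence_sketch(float addrspace(1)* %out, float %x) #0 {
  %a = fdiv float -1.0, %x        ; -1.0 / x   -> rcp(fneg x)
  %x.neg = fsub float -0.0, %x    ; fneg written the same way as in the tests above
  %b = fdiv float 1.0, %x.neg     ; 1.0 / (-x) -> rcp(fneg x) after folding the fneg
  store volatile float %a, float addrspace(1)* %out, align 4
  store volatile float %b, float addrspace(1)* %out, align 4
  ret void
}

attributes #0 = { nounwind }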