summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2017-01-07 19:55:12 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2017-01-07 19:55:12 +0000
commita7d219416880db0cae194e2b6339201e0cc0e4f5 (patch)
treeb4fe497852c5b8622db75bb62f016cdf049a79b6
parent917316fc5d910f9af26e592eef69f6d79397ba6d (diff)
downloadbcm5719-llvm-a7d219416880db0cae194e2b6339201e0cc0e4f5.tar.gz
bcm5719-llvm-a7d219416880db0cae194e2b6339201e0cc0e4f5.zip
SimplifyLibCalls: Remove incorrect optimization of fabs
fabs(x * x) is not generally safe to assume x is positive if x is a NaN. This is also less general than it could be, so this will be replaced with a transformation on the intrinsic. llvm-svn: 291359
-rw-r--r--llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp12
-rw-r--r--llvm/test/Transforms/InstCombine/fabs.ll42
-rw-r--r--llvm/test/Transforms/InstCombine/fast-math.ll6
3 files changed, 39 insertions, 21 deletions
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index c8f030f7eb8..11d54bcf4f8 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1189,19 +1189,11 @@ Value *LibCallSimplifier::optimizeExp2(CallInst *CI, IRBuilder<> &B) {
Value *LibCallSimplifier::optimizeFabs(CallInst *CI, IRBuilder<> &B) {
Function *Callee = CI->getCalledFunction();
- Value *Ret = nullptr;
StringRef Name = Callee->getName();
if (Name == "fabs" && hasFloatVersion(Name))
- Ret = optimizeUnaryDoubleFP(CI, B, false);
+ return optimizeUnaryDoubleFP(CI, B, false);
- Value *Op = CI->getArgOperand(0);
- if (Instruction *I = dyn_cast<Instruction>(Op)) {
- // Fold fabs(x * x) -> x * x; any squared FP value must already be positive.
- if (I->getOpcode() == Instruction::FMul)
- if (I->getOperand(0) == I->getOperand(1))
- return Op;
- }
- return Ret;
+ return nullptr;
}
Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
diff --git a/llvm/test/Transforms/InstCombine/fabs.ll b/llvm/test/Transforms/InstCombine/fabs.ll
index 09bea5895aa..6b5f5a94953 100644
--- a/llvm/test/Transforms/InstCombine/fabs.ll
+++ b/llvm/test/Transforms/InstCombine/fabs.ll
@@ -13,7 +13,8 @@ define float @square_fabs_call_f32(float %x) {
; CHECK-LABEL: square_fabs_call_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @fabsf(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
define double @square_fabs_call_f64(double %x) {
@@ -23,7 +24,8 @@ define double @square_fabs_call_f64(double %x) {
; CHECK-LABEL: square_fabs_call_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @fabs(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define fp128 @square_fabs_call_f128(fp128 %x) {
@@ -33,15 +35,18 @@ define fp128 @square_fabs_call_f128(fp128 %x) {
; CHECK-LABEL: square_fabs_call_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @fabsl(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
}
-; Make sure all intrinsic calls are eliminated when the input is known positive.
+; Make sure all intrinsic calls are eliminated when the input is known
+; positive.
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare fp128 @llvm.fabs.f128(fp128)
+; The fabs cannot be eliminated because %x may be a NaN.
define float @square_fabs_intrinsic_f32(float %x) {
%mul = fmul float %x, %x
%fabsf = tail call float @llvm.fabs.f32(float %mul)
@@ -49,7 +54,8 @@ define float @square_fabs_intrinsic_f32(float %x) {
; CHECK-LABEL: square_fabs_intrinsic_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @llvm.fabs.f32(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
define double @square_fabs_intrinsic_f64(double %x) {
@@ -59,7 +65,8 @@ define double @square_fabs_intrinsic_f64(double %x) {
; CHECK-LABEL: square_fabs_intrinsic_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
@@ -69,7 +76,20 @@ define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
; CHECK-LABEL: square_fabs_intrinsic_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @llvm.fabs.f128(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
+}
+
+; TODO: This should be able to eliminate the fabs
+define float @square_nnan_fabs_intrinsic_f32(float %x) {
+ %mul = fmul nnan float %x, %x
+ %fabsf = call float @llvm.fabs.f32(float %mul)
+ ret float %fabsf
+
+; CHECK-LABEL: square_nnan_fabs_intrinsic_f32(
+; CHECK-NEXT: %mul = fmul nnan float %x, %x
+; CHECK-NEXT: %fabsf = call float @llvm.fabs.f32(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
; Shrinking a library call to a smaller type should not be inhibited by nor inhibit the square optimization.
@@ -82,7 +102,10 @@ define float @square_fabs_shrink_call1(float %x) {
ret float %trunc
; CHECK-LABEL: square_fabs_shrink_call1(
-; CHECK-NEXT: %trunc = fmul float %x, %x
+; CHECK-NEXT: %ext = fpext float %x to double
+; CHECK-NEXT: %sq = fmul double %ext, %ext
+; CHECK-NEXT: call double @fabs(double %sq)
+; CHECK-NEXT: %trunc = fptrunc double %fabs to float
; CHECK-NEXT: ret float %trunc
}
@@ -95,7 +118,8 @@ define float @square_fabs_shrink_call2(float %x) {
; CHECK-LABEL: square_fabs_shrink_call2(
; CHECK-NEXT: %sq = fmul float %x, %x
-; CHECK-NEXT: ret float %sq
+; CHECK-NEXT: %fabsf = call float @fabsf(float %sq)
+; CHECK-NEXT: ret float %fabsf
}
; CHECK-LABEL: @fabs_select_constant_negative_positive(
diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll
index 6ccf6e9fa77..84f24ca0bf2 100644
--- a/llvm/test/Transforms/InstCombine/fast-math.ll
+++ b/llvm/test/Transforms/InstCombine/fast-math.ll
@@ -672,7 +672,8 @@ define double @sqrt_intrinsic_arg_4th(double %x) {
; CHECK-LABEL: sqrt_intrinsic_arg_4th(
; CHECK-NEXT: %mul = fmul fast double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define double @sqrt_intrinsic_arg_5th(double %x) {
@@ -684,8 +685,9 @@ define double @sqrt_intrinsic_arg_5th(double %x) {
; CHECK-LABEL: sqrt_intrinsic_arg_5th(
; CHECK-NEXT: %mul = fmul fast double %x, %x
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
; CHECK-NEXT: %sqrt1 = call fast double @llvm.sqrt.f64(double %x)
-; CHECK-NEXT: %1 = fmul fast double %mul, %sqrt1
+; CHECK-NEXT: %1 = fmul fast double %fabs, %sqrt1
; CHECK-NEXT: ret double %1
}
OpenPOWER on IntegriCloud