summary refs log tree commit diff stats
path: root/llvm/test/Transforms/InstCombine/fabs.ll
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/Transforms/InstCombine/fabs.ll')
-rw-r--r-- llvm/test/Transforms/InstCombine/fabs.ll | 75
1 files changed, 45 insertions, 30 deletions
diff --git a/llvm/test/Transforms/InstCombine/fabs.ll b/llvm/test/Transforms/InstCombine/fabs.ll
index aee853ae9ee..a95f7b306b5 100644
--- a/llvm/test/Transforms/InstCombine/fabs.ll
+++ b/llvm/test/Transforms/InstCombine/fabs.ll
@@ -1,6 +1,10 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu < %s -instcombine -S | FileCheck %s
-; Make sure all library calls are eliminated when the input is known positive.
+; Make sure libcalls are replaced with intrinsic calls.
+
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
+declare fp128 @llvm.fabs.f128(fp128)
declare float @fabsf(float)
declare double @fabs(double)
@@ -8,46 +12,46 @@ declare fp128 @fabsl(fp128)
declare float @llvm.fma.f32(float, float, float)
declare float @llvm.fmuladd.f32(float, float, float)
-define float @square_fabs_call_f32(float %x) {
- %mul = fmul float %x, %x
- %fabsf = tail call float @fabsf(float %mul)
+define float @replace_fabs_call_f32(float %x) {
+ %fabsf = tail call float @fabsf(float %x)
ret float %fabsf
-; CHECK-LABEL: square_fabs_call_f32(
-; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: %fabsf = tail call float @fabsf(float %mul)
+; CHECK-LABEL: @replace_fabs_call_f32(
+; CHECK-NEXT: %fabsf = call float @llvm.fabs.f32(float %x)
; CHECK-NEXT: ret float %fabsf
}
-define double @square_fabs_call_f64(double %x) {
- %mul = fmul double %x, %x
- %fabs = tail call double @fabs(double %mul)
+define double @replace_fabs_call_f64(double %x) {
+ %fabs = tail call double @fabs(double %x)
ret double %fabs
-; CHECK-LABEL: square_fabs_call_f64(
-; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: %fabs = tail call double @fabs(double %mul)
+; CHECK-LABEL: @replace_fabs_call_f64(
+; CHECK-NEXT: %fabs = call double @llvm.fabs.f64(double %x)
; CHECK-NEXT: ret double %fabs
}
-define fp128 @square_fabs_call_f128(fp128 %x) {
- %mul = fmul fp128 %x, %x
- %fabsl = tail call fp128 @fabsl(fp128 %mul)
+define fp128 @replace_fabs_call_f128(fp128 %x) {
+ %fabsl = tail call fp128 @fabsl(fp128 %x)
ret fp128 %fabsl
-; CHECK-LABEL: square_fabs_call_f128(
-; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: %fabsl = tail call fp128 @fabsl(fp128 %mul)
+; CHECK-LABEL: @replace_fabs_call_f128(
+; CHECK-NEXT: %fabsl = call fp128 @llvm.fabs.f128(fp128 %x)
; CHECK-NEXT: ret fp128 %fabsl
}
+; Make sure fast math flags are preserved when replacing the libcall.
+define float @fmf_replace_fabs_call_f32(float %x) {
+ %fabsf = tail call nnan float @fabsf(float %x)
+ ret float %fabsf
+
+; CHECK-LABEL: @fmf_replace_fabs_call_f32(
+; CHECK-NEXT: %fabsf = call nnan float @llvm.fabs.f32(float %x)
+; CHECK-NEXT: ret float %fabsf
+}
+
; Make sure all intrinsic calls are eliminated when the input is known
; positive.
-declare float @llvm.fabs.f32(float)
-declare double @llvm.fabs.f64(double)
-declare fp128 @llvm.fabs.f128(fp128)
-
; The fabs cannot be eliminated because %x may be a NaN
define float @square_fabs_intrinsic_f32(float %x) {
%mul = fmul float %x, %x
@@ -102,10 +106,8 @@ define float @square_fabs_shrink_call1(float %x) {
ret float %trunc
; CHECK-LABEL: square_fabs_shrink_call1(
-; CHECK-NEXT: %ext = fpext float %x to double
-; CHECK-NEXT: %sq = fmul double %ext, %ext
-; CHECK-NEXT: call double @fabs(double %sq)
-; CHECK-NEXT: %trunc = fptrunc double %fabs to float
+; CHECK-NEXT: fmul float %x, %x
+; CHECK-NEXT: %trunc = call float @llvm.fabs.f32(float
; CHECK-NEXT: ret float %trunc
}
@@ -118,8 +120,8 @@ define float @square_fabs_shrink_call2(float %x) {
; CHECK-LABEL: square_fabs_shrink_call2(
; CHECK-NEXT: %sq = fmul float %x, %x
-; CHECK-NEXT: %fabsf = call float @fabsf(float %sq)
-; CHECK-NEXT: ret float %fabsf
+; CHECK-NEXT: %trunc = call float @llvm.fabs.f32(float %sq)
+; CHECK-NEXT: ret float %trunc
}
; CHECK-LABEL: @fabs_select_constant_negative_positive(
@@ -214,3 +216,16 @@ define float @square_nnan_fmuladd_fabs_intrinsic_f32(float %x) {
; CHECK-NEXT: %fmuladd = call nnan float @llvm.fmuladd.f32(float %x, float %x, float 1.000000e+00)
; CHECK-NEXT: ret float %fmuladd
}
+
+; Don't introduce a second fpext
+; CHECK-LABEL: @multi_use_fabs_fpext(
+; CHECK: %fpext = fpext float %x to double
+; CHECK-NEXT: %fabs = call double @llvm.fabs.f64(double %fpext)
+; CHECK-NEXT: store volatile double %fpext, double* undef, align 8
+; CHECK-NEXT: ret double %fabs
+define double @multi_use_fabs_fpext(float %x) {
+ %fpext = fpext float %x to double
+ %fabs = call double @llvm.fabs.f64(double %fpext)
+ store volatile double %fpext, double* undef
+ ret double %fabs
+}
OpenPOWER on IntegriCloud