author    Cameron McInally <cameron.mcinally@nyu.edu>    2019-05-22 18:27:43 +0000
committer Cameron McInally <cameron.mcinally@nyu.edu>    2019-05-22 18:27:43 +0000
commit    adea0b6b40e63fdf9449ef0f3f2c89900c3d7da0 (patch)
tree      d2297535cb1f58aa2d484a39dd7be695d7e02eb1
parent    c44cd1e4ed9ca85ccfa808d63960b3357872c130 (diff)
[NFC][InstCombine] Add unary fneg tests to maxnum.ll/minnum.ll
llvm-svn: 361415
-rw-r--r--    llvm/test/Transforms/InstCombine/maxnum.ll    58
-rw-r--r--    llvm/test/Transforms/InstCombine/minnum.ll    71
2 files changed, 129 insertions, 0 deletions
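
For context, the pattern these new tests pin down is the existing neg-neg fold: negating both operands of maxnum/minnum commutes with swapping the intrinsic, i.e. maxnum(-x, -y) == -minnum(x, y) and vice versa. The new functions feed that fold the unary fneg instruction instead of the older "fsub -0.0, x" spelling. A minimal illustrative sketch, distilled from the CHECK lines below rather than taken verbatim from the patch:

; Input: both operands negated with unary fneg.
%negx = fneg float %x
%negy = fneg float %y
%r = call float @llvm.maxnum.f32(float %negx, float %negy)

; Expected output after -instcombine: the intrinsic flips to minnum, and a
; single negation (still emitted as an fsub from -0.0 here) is applied last.
%tmp1 = call float @llvm.minnum.f32(float %x, float %y)
%r = fsub float -0.000000e+00, %tmp1
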
diff --git a/llvm/test/Transforms/InstCombine/maxnum.ll b/llvm/test/Transforms/InstCombine/maxnum.ll
index 29ee37f67ec..eb8188fc211 100644
--- a/llvm/test/Transforms/InstCombine/maxnum.ll
+++ b/llvm/test/Transforms/InstCombine/maxnum.ll
@@ -246,6 +246,18 @@ define float @neg_neg_vec_fmf(float %x, float %y) {
ret float %r
}
+define float @unary_neg_neg_vec_fmf(float %x, float %y) {
+; CHECK-LABEL: @unary_neg_neg_vec_fmf(
+; CHECK-NEXT: [[TMP1:%.*]] = call fast float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = fsub fast float -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: ret float [[R]]
+;
+ %negx = fneg arcp float %x
+ %negy = fneg afn float %y
+ %r = call fast float @llvm.maxnum.f32(float %negx, float %negy)
+ ret float %r
+}
+
; 1 extra use of an intermediate value should still allow the fold,
; but 2 would require more instructions than we started with.
@@ -265,6 +277,21 @@ define float @neg_neg_extra_use_x(float %x, float %y) {
ret float %r
}
+define float @unary_neg_neg_extra_use_x(float %x, float %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_x(
+; CHECK-NEXT: [[NEGX:%.*]] = fneg float [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.minnum.f32(float [[X]], float [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = fsub float -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: call void @use(float [[NEGX]])
+; CHECK-NEXT: ret float [[R]]
+;
+ %negx = fneg float %x
+ %negy = fneg float %y
+ %r = call float @llvm.maxnum.f32(float %negx, float %negy)
+ call void @use(float %negx)
+ ret float %r
+}
+
define float @neg_neg_extra_use_y(float %x, float %y) {
; CHECK-LABEL: @neg_neg_extra_use_y(
; CHECK-NEXT: [[NEGY:%.*]] = fsub float -0.000000e+00, [[Y:%.*]]
@@ -280,6 +307,21 @@ define float @neg_neg_extra_use_y(float %x, float %y) {
ret float %r
}
+define float @unary_neg_neg_extra_use_y(float %x, float %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_y(
+; CHECK-NEXT: [[NEGY:%.*]] = fneg float [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y]])
+; CHECK-NEXT: [[R:%.*]] = fsub float -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: call void @use(float [[NEGY]])
+; CHECK-NEXT: ret float [[R]]
+;
+ %negx = fneg float %x
+ %negy = fneg float %y
+ %r = call float @llvm.maxnum.f32(float %negx, float %negy)
+ call void @use(float %negy)
+ ret float %r
+}
+
define float @neg_neg_extra_use_x_and_y(float %x, float %y) {
; CHECK-LABEL: @neg_neg_extra_use_x_and_y(
; CHECK-NEXT: [[NEGX:%.*]] = fsub float -0.000000e+00, [[X:%.*]]
@@ -297,3 +339,19 @@ define float @neg_neg_extra_use_x_and_y(float %x, float %y) {
ret float %r
}
+define float @unary_neg_neg_extra_use_x_and_y(float %x, float %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_x_and_y(
+; CHECK-NEXT: [[NEGX:%.*]] = fneg float [[X:%.*]]
+; CHECK-NEXT: [[NEGY:%.*]] = fneg float [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = call float @llvm.maxnum.f32(float [[NEGX]], float [[NEGY]])
+; CHECK-NEXT: call void @use(float [[NEGX]])
+; CHECK-NEXT: call void @use(float [[NEGY]])
+; CHECK-NEXT: ret float [[R]]
+;
+ %negx = fneg float %x
+ %negy = fneg float %y
+ %r = call float @llvm.maxnum.f32(float %negx, float %negy)
+ call void @use(float %negx)
+ call void @use(float %negy)
+ ret float %r
+}
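
A quick count makes the extra-use comment above concrete (a back-of-the-envelope tally, not taken from the commit): the unfolded pattern is 3 instructions (fneg, fneg, maxnum call). With one extra use, the used fneg must stay, and the folded form is that fneg plus a minnum call plus one negation of the result: still 3 instructions, so the fold breaks even and fires. With extra uses of both fnegs, the folded form would need 4 instructions (both fnegs, the call, and the result negation) against the original 3, which is why the *_extra_use_x_and_y tests expect the IR to be left alone.
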
diff --git a/llvm/test/Transforms/InstCombine/minnum.ll b/llvm/test/Transforms/InstCombine/minnum.ll
index 5c012db8cd8..9684d426bbb 100644
--- a/llvm/test/Transforms/InstCombine/minnum.ll
+++ b/llvm/test/Transforms/InstCombine/minnum.ll
@@ -256,6 +256,18 @@ define double @neg_neg(double %x, double %y) {
ret double %r
}
+define double @unary_neg_neg(double %x, double %y) {
+; CHECK-LABEL: @unary_neg_neg(
+; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = fsub double -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: ret double [[R]]
+;
+ %negx = fneg double %x
+ %negy = fneg double %y
+ %r = call double @llvm.minnum.f64(double %negx, double %negy)
+ ret double %r
+}
+
; FMF is not required, but it should be propagated from the intrinsic (not the fnegs).
; Also, make sure this works with vectors.
@@ -271,6 +283,18 @@ define <2 x double> @neg_neg_vec_fmf(<2 x double> %x, <2 x double> %y) {
ret <2 x double> %r
}
+define <2 x double> @unary_neg_neg_vec_fmf(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: @unary_neg_neg_vec_fmf(
+; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf <2 x double> @llvm.maxnum.v2f64(<2 x double> [[X:%.*]], <2 x double> [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = fsub nnan ninf <2 x double> <double -0.000000e+00, double -0.000000e+00>, [[TMP1]]
+; CHECK-NEXT: ret <2 x double> [[R]]
+;
+ %negx = fneg reassoc <2 x double> %x
+ %negy = fneg fast <2 x double> %y
+ %r = call nnan ninf <2 x double> @llvm.minnum.v2f64(<2 x double> %negx, <2 x double> %negy)
+ ret <2 x double> %r
+}
+
; 1 extra use of an intermediate value should still allow the fold,
; but 2 would require more instructions than we started with.
@@ -290,6 +314,21 @@ define double @neg_neg_extra_use_x(double %x, double %y) {
ret double %r
}
+define double @unary_neg_neg_extra_use_x(double %x, double %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_x(
+; CHECK-NEXT: [[NEGX:%.*]] = fneg double [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.maxnum.f64(double [[X]], double [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = fsub double -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: call void @use(double [[NEGX]])
+; CHECK-NEXT: ret double [[R]]
+;
+ %negx = fneg double %x
+ %negy = fneg double %y
+ %r = call double @llvm.minnum.f64(double %negx, double %negy)
+ call void @use(double %negx)
+ ret double %r
+}
+
define double @neg_neg_extra_use_y(double %x, double %y) {
; CHECK-LABEL: @neg_neg_extra_use_y(
; CHECK-NEXT: [[NEGY:%.*]] = fsub double -0.000000e+00, [[Y:%.*]]
@@ -305,6 +344,21 @@ define double @neg_neg_extra_use_y(double %x, double %y) {
ret double %r
}
+define double @unary_neg_neg_extra_use_y(double %x, double %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_y(
+; CHECK-NEXT: [[NEGY:%.*]] = fneg double [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.maxnum.f64(double [[X:%.*]], double [[Y]])
+; CHECK-NEXT: [[R:%.*]] = fsub double -0.000000e+00, [[TMP1]]
+; CHECK-NEXT: call void @use(double [[NEGY]])
+; CHECK-NEXT: ret double [[R]]
+;
+ %negx = fneg double %x
+ %negy = fneg double %y
+ %r = call double @llvm.minnum.f64(double %negx, double %negy)
+ call void @use(double %negy)
+ ret double %r
+}
+
define double @neg_neg_extra_use_x_and_y(double %x, double %y) {
; CHECK-LABEL: @neg_neg_extra_use_x_and_y(
; CHECK-NEXT: [[NEGX:%.*]] = fsub double -0.000000e+00, [[X:%.*]]
@@ -322,3 +376,20 @@ define double @neg_neg_extra_use_x_and_y(double %x, double %y) {
ret double %r
}
+define double @unary_neg_neg_extra_use_x_and_y(double %x, double %y) {
+; CHECK-LABEL: @unary_neg_neg_extra_use_x_and_y(
+; CHECK-NEXT: [[NEGX:%.*]] = fneg double [[X:%.*]]
+; CHECK-NEXT: [[NEGY:%.*]] = fneg double [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = call double @llvm.minnum.f64(double [[NEGX]], double [[NEGY]])
+; CHECK-NEXT: call void @use(double [[NEGX]])
+; CHECK-NEXT: call void @use(double [[NEGY]])
+; CHECK-NEXT: ret double [[R]]
+;
+ %negx = fneg double %x
+ %negy = fneg double %y
+ %r = call double @llvm.minnum.f64(double %negx, double %negy)
+ call void @use(double %negx)
+ call void @use(double %negy)
+ ret double %r
+}
+
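
As a usage note (standard LLVM workflow, not part of this commit): tests like these normally run through llvm-lit, or directly with something like

opt -instcombine -S < llvm/test/Transforms/InstCombine/maxnum.ll | FileCheck llvm/test/Transforms/InstCombine/maxnum.ll

and the CHECK lines are typically generated with llvm/utils/update_test_checks.py rather than written by hand.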