author     David Green <david.green@arm.com>    2020-02-04 09:25:01 +0000
committer  Hans Wennborg <hans@chromium.org>    2020-02-05 13:53:24 +0100
commit     8195a96595baca8c0141de2a121dcf3f8c0ea616 (patch)
tree       851788137352a77cc91d8a36932f8708ba59526c
parent     99c6a4ea9201f09e8107bb83675f1e7235456b6d (diff)
[ARM][VecReduce] Force expand vector_reduce_fmin
Under MVE, we do not have any lowering for fminimum, which a vector_reduce_fmin without NoNan will be expanded into. As with the other recent patches, force this to expand in the pre-isel pass. Note that Neon lowering would be OK because the scalar fminimum uses the vector VMIN instruction, but it is probably better to just rely on the scalar operations, which is what is done here.

Also fixes what appears to be the reversal of INF vs -INF in the vector_reduce_fmin widening code.

(cherry picked from commit 362d00e0510ee75750499e2993a782428e377215)
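As a standalone illustration of the neutral-element point above (this sketch is not part of the patch and is not LLVM code): when a vector is widened for an fmin reduction, the padding lanes must hold +Inf, since min(x, +Inf) == x, while fmax padding must hold -Inf; with the values swapped, as before this change, every widened fmin reduction would return -Inf. A minimal C++ sketch:

    // Standalone sketch: a <3 x float> fmin reduction widened to 4 lanes,
    // with the padding lane filled with the reduction's identity (+Inf).
    // std::min stands in for fminnm here and ignores NaN handling, which is
    // exactly the distinction the noNaNs fast-math flag covers in the patch.
    #include <algorithm>
    #include <cstdio>
    #include <limits>

    int main() {
      float Lanes[4] = {2.0f, -5.0f, 7.0f,
                        std::numeric_limits<float>::infinity()}; // neutral lane
      float Min = Lanes[0];
      for (int I = 1; I < 4; ++I)
        Min = std::min(Min, Lanes[I]);
      std::printf("fmin over widened vector = %f\n", Min); // -5.0, padding ignored
      return 0;
    }

Filling the padding lane with -Inf instead would turn the printed result into -Inf regardless of the real input, which is the bug the LegalizeVectorTypes.cpp hunk below corrects.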
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp     |    4
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetTransformInfo.h              |    9
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll  |    2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll         | 2264
4 files changed, 2273 insertions, 6 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 974914d00d0..d809139d380 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -4716,11 +4716,11 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
break;
case ISD::VECREDUCE_FMAX:
NeutralElem = DAG.getConstantFP(
- std::numeric_limits<double>::infinity(), dl, ElemVT);
+ -std::numeric_limits<double>::infinity(), dl, ElemVT);
break;
case ISD::VECREDUCE_FMIN:
NeutralElem = DAG.getConstantFP(
- -std::numeric_limits<double>::infinity(), dl, ElemVT);
+ std::numeric_limits<double>::infinity(), dl, ElemVT);
break;
}
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index b860df62b78..f66083eaf18 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -177,12 +177,15 @@ public:
// We don't have legalization support for ordered FP reductions.
if (!II->getFastMathFlags().allowReassoc())
return true;
- LLVM_FALLTHROUGH;
+ // Can't legalize reductions with soft floats.
+ return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs();
case Intrinsic::experimental_vector_reduce_fmin:
case Intrinsic::experimental_vector_reduce_fmax:
- // Can't legalize reductions with soft floats.
- return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs();
+ // Can't legalize reductions with soft floats, and NoNan will create
+ // fminimum which we do not know how to lower.
+ return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs() ||
+ !II->getFastMathFlags().noNaNs();
default:
// Don't expand anything else, let legalization deal with it.
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
index 72c94aaf2d6..975ba268779 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
@@ -47,7 +47,7 @@ define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
define float @test_v3f32(<3 x float> %a) nounwind {
; CHECK-LABEL: test_v3f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #2139095040
+; CHECK-NEXT: mov w8, #-8388608
; CHECK-NEXT: fmov s1, w8
; CHECK-NEXT: mov v0.s[3], v1.s[0]
; CHECK-NEXT: fmaxnmv s0, v0.4s
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
new file mode 100644
index 00000000000..26541e6ec2b
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
@@ -0,0 +1,2264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FP
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOFP
+
+; FIXME minnum nonan X, +Inf -> X ?
+define arm_aapcs_vfpcc float @fmin_v2f32(<2 x float> %x) {
+; CHECK-LABEL: fmin_v2f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr s4, .LCPI0_0
+; CHECK-NEXT: vminnm.f32 s0, s0, s1
+; CHECK-NEXT: vminnm.f32 s0, s0, s4
+; CHECK-NEXT: vminnm.f32 s0, s0, s4
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long 2139095040 @ float +Inf
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32(<4 x float> %x) {
+; CHECK-LABEL: fmin_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 s4, s0, s1
+; CHECK-NEXT: vminnm.f32 s4, s4, s2
+; CHECK-NEXT: vminnm.f32 s0, s4, s3
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ ret float %z
+}
+
+; FIXME fminnum (vector) -> fminnum (scalar) ?
+define arm_aapcs_vfpcc float @fmin_v8f32(<8 x float> %x) {
+; CHECK-FP-LABEL: fmin_v8f32:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vminnm.f32 s4, s0, s1
+; CHECK-FP-NEXT: vminnm.f32 s4, s4, s2
+; CHECK-FP-NEXT: vminnm.f32 s0, s4, s3
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s10, s8
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s12
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v4f16(<4 x half> %x) {
+; CHECK-LABEL: fmin_v4f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s1
+; CHECK-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-NEXT: vldr.16 s2, .LCPI3_0
+; CHECK-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI3_0:
+; CHECK-NEXT: .short 31744 @ half +Inf
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v8f16(<8 x half> %x) {
+; CHECK-LABEL: fmin_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s3
+; CHECK-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-NEXT: vmovx.f16 s6, s2
+; CHECK-NEXT: vminnm.f16 s4, s4, s2
+; CHECK-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-NEXT: vminnm.f16 s4, s4, s3
+; CHECK-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v16f16(<16 x half> %x) {
+; CHECK-FP-LABEL: fmin_v16f16:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmovx.f16 s4, s0
+; CHECK-FP-NEXT: vmovx.f16 s6, s1
+; CHECK-FP-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-FP-NEXT: vmovx.f16 s0, s3
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmovx.f16 s6, s2
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s2
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s3
+; CHECK-FP-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64(<1 x double> %x) {
+; CHECK-LABEL: fmin_v1f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64(<2 x double> %x) {
+; CHECK-LABEL: fmin_v2f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f64 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64(<4 x double> %x) {
+; CHECK-LABEL: fmin_v4f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d3, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d2, d0
+; CHECK-NEXT: vselgt.f64 d4, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vminnm.f64 d0, d0, d4
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ ret double %z
+}
+
+; FIXME should not be vminnm
+; FIXME better reductions (no vmovs/vdups)
+define arm_aapcs_vfpcc float @fmin_v2f32_nofast(<2 x float> %x) {
+; CHECK-FP-LABEL: fmin_v2f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v2f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT: vdup.32 q1, r0
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_nofast(<4 x float> %x) {
+; CHECK-FP-LABEL: fmin_v4f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_nofast(<8 x float> %x) {
+; CHECK-FP-LABEL: fmin_v8f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s8, s10
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s12, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s2, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v4f16_nofast(<4 x half> %x) {
+; CHECK-FP-LABEL: fmin_v4f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vdup.32 q1, r1
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v8f16_nofast(<8 x half> %x) {
+; CHECK-FP-LABEL: fmin_v8f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v16f16_nofast(<16 x half> %x) {
+; CHECK-FP-LABEL: fmin_v16f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s14
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_nofast(<1 x double> %x) {
+; CHECK-LABEL: fmin_v1f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_nofast(<2 x double> %x) {
+; CHECK-LABEL: fmin_v2f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_nofast(<4 x double> %x) {
+; CHECK-LABEL: fmin_v4f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d3, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d2, d0
+; CHECK-NEXT: vselgt.f64 d4, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vcmp.f64 d4, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d4
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v2f32_acc(<2 x float> %x, float %y) {
+; CHECK-LABEL: fmin_v2f32_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr s6, .LCPI18_0
+; CHECK-NEXT: vminnm.f32 s0, s0, s1
+; CHECK-NEXT: vminnm.f32 s0, s0, s6
+; CHECK-NEXT: vminnm.f32 s0, s0, s6
+; CHECK-NEXT: vminnm.f32 s0, s4, s0
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI18_0:
+; CHECK-NEXT: .long 2139095040 @ float +Inf
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %c = fcmp fast olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_acc(<4 x float> %x, float %y) {
+; CHECK-LABEL: fmin_v4f32_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f32 s6, s0, s1
+; CHECK-NEXT: vminnm.f32 s6, s6, s2
+; CHECK-NEXT: vminnm.f32 s0, s6, s3
+; CHECK-NEXT: vminnm.f32 s0, s4, s0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %c = fcmp fast olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_acc(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v8f32_acc:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vminnm.f32 s4, s0, s1
+; CHECK-FP-NEXT: vminnm.f32 s4, s4, s2
+; CHECK-FP-NEXT: vminnm.f32 s0, s4, s3
+; CHECK-FP-NEXT: vminnm.f32 s0, s8, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_acc:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s12, s10
+; CHECK-NOFP-NEXT: vminnm.f32 s2, s2, s14
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT: vminnm.f32 s0, s8, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %c = fcmp fast olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmin_v4f16_acc(<4 x half> %x, half* %yy) {
+; CHECK-LABEL: fmin_v4f16_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s1
+; CHECK-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-NEXT: vldr.16 s2, .LCPI21_0
+; CHECK-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vminnm.f16 s0, s0, s2
+; CHECK-NEXT: vldr.16 s2, [r0]
+; CHECK-NEXT: vminnm.f16 s0, s2, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI21_0:
+; CHECK-NEXT: .short 31744 @ half +Inf
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %c = fcmp fast olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v8f16_acc(<8 x half> %x, half* %yy) {
+; CHECK-LABEL: fmin_v8f16_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s3
+; CHECK-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-NEXT: vmovx.f16 s6, s2
+; CHECK-NEXT: vminnm.f16 s4, s4, s2
+; CHECK-NEXT: vldr.16 s2, [r0]
+; CHECK-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-NEXT: vminnm.f16 s4, s4, s3
+; CHECK-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-NEXT: vminnm.f16 s0, s2, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %c = fcmp fast olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v16f16_acc(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v16f16_acc:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmovx.f16 s4, s0
+; CHECK-FP-NEXT: vmovx.f16 s6, s1
+; CHECK-FP-NEXT: vminnm.f16 s4, s0, s4
+; CHECK-FP-NEXT: vmovx.f16 s0, s3
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s1
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmovx.f16 s6, s2
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s2
+; CHECK-FP-NEXT: vldr.16 s2, [r0]
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vminnm.f16 s4, s4, s3
+; CHECK-FP-NEXT: vminnm.f16 s0, s4, s0
+; CHECK-FP-NEXT: vminnm.f16 s0, s2, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_acc:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT: vminnm.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %c = fcmp fast olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_acc(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v1f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f64 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %c = fcmp fast olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_acc(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v2f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vminnm.f64 d0, d0, d1
+; CHECK-NEXT: vminnm.f64 d0, d2, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %c = fcmp fast olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_acc(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v4f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d3, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d2, d0
+; CHECK-NEXT: vselgt.f64 d5, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vminnm.f64 d0, d0, d5
+; CHECK-NEXT: vminnm.f64 d0, d4, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %c = fcmp fast olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v2f32_acc_nofast(<2 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v2f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q2, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vcmp.f32 s0, s4
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v2f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT: vdup.32 q2, r0
+; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %c = fcmp olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_acc_nofast(<4 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v4f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d4, d1
+; CHECK-FP-NEXT: vmov.f32 s9, s3
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q2, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vcmp.f32 s0, s4
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f64 d4, d1
+; CHECK-NOFP-NEXT: vmov.f32 s9, s3
+; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s6, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f32 s6, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s6
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %c = fcmp olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_acc_nofast(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v8f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vcmp.f32 s0, s8
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s8, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s10, s12
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s14, s0
+; CHECK-NOFP-NEXT: vselgt.f32 s2, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s14
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %c = fcmp olt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmin_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v4f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s0, s4
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vdup.32 q1, r1
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %c = fcmp olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v8f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s0, s4
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %c = fcmp olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v16f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s0, s4
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s14
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %c = fcmp olt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_acc_nofast(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v1f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d0, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %c = fcmp olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_acc_nofast(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v2f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d1
+; CHECK-NEXT: vcmp.f64 d0, d2
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d2, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %c = fcmp olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_acc_nofast(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v4f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d3, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d2, d0
+; CHECK-NEXT: vselgt.f64 d5, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vcmp.f64 d5, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d5
+; CHECK-NEXT: vcmp.f64 d0, d4
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d4, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %c = fcmp olt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32(<2 x float> %x) {
+; CHECK-LABEL: fmax_v2f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr s4, .LCPI36_0
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI36_0:
+; CHECK-NEXT: .long 4286578688 @ float -Inf
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32(<4 x float> %x) {
+; CHECK-LABEL: fmax_v4f32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 s4, s0, s1
+; CHECK-NEXT: vmaxnm.f32 s4, s4, s2
+; CHECK-NEXT: vmaxnm.f32 s0, s4, s3
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32(<8 x float> %x) {
+; CHECK-FP-LABEL: fmax_v8f32:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmaxnm.f32 s4, s0, s1
+; CHECK-FP-NEXT: vmaxnm.f32 s4, s4, s2
+; CHECK-FP-NEXT: vmaxnm.f32 s0, s4, s3
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s10, s8
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s12
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v4f16(<4 x half> %x) {
+; CHECK-LABEL: fmax_v4f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT: vldr.16 s2, .LCPI39_0
+; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI39_0:
+; CHECK-NEXT: .short 64512 @ half -Inf
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v8f16(<8 x half> %x) {
+; CHECK-LABEL: fmax_v8f16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s3
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT: vmovx.f16 s6, s2
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s2
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s3
+; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v16f16(<16 x half> %x) {
+; CHECK-FP-LABEL: fmax_v16f16:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmovx.f16 s4, s0
+; CHECK-FP-NEXT: vmovx.f16 s6, s1
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-FP-NEXT: vmovx.f16 s0, s3
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmovx.f16 s6, s2
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s2
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s3
+; CHECK-FP-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64(<1 x double> %x) {
+; CHECK-LABEL: fmax_v1f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64(<2 x double> %x) {
+; CHECK-LABEL: fmax_v2f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64(<4 x double> %x) {
+; CHECK-LABEL: fmax_v4f64:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d0, d2
+; CHECK-NEXT: vselgt.f64 d4, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d4
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_nofast(<2 x float> %x) {
+; CHECK-FP-LABEL: fmax_v2f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v2f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT: vdup.32 q1, r0
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_nofast(<4 x float> %x) {
+; CHECK-FP-LABEL: fmax_v4f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_nofast(<8 x float> %x) {
+; CHECK-FP-LABEL: fmax_v8f32_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f32 s8, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s10, s8
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s12
+; CHECK-NOFP-NEXT: vselgt.f32 s2, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v4f16_nofast(<4 x half> %x) {
+; CHECK-FP-LABEL: fmax_v4f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vdup.32 q1, r1
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v8f16_nofast(<8 x half> %x) {
+; CHECK-FP-LABEL: fmax_v8f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v16f16_nofast(<16 x half> %x) {
+; CHECK-FP-LABEL: fmax_v16f16_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s14, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_nofast(<1 x double> %x) {
+; CHECK-LABEL: fmax_v1f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_nofast(<2 x double> %x) {
+; CHECK-LABEL: fmax_v2f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d0, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_nofast(<4 x double> %x) {
+; CHECK-LABEL: fmax_v4f64_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d0, d2
+; CHECK-NEXT: vselgt.f64 d4, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vcmp.f64 d0, d4
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d4
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_acc(<2 x float> %x, float %y) {
+; CHECK-LABEL: fmax_v2f32_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldr s6, .LCPI54_0
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT: vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT: vmaxnm.f32 s0, s4, s0
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI54_0:
+; CHECK-NEXT: .long 4286578688 @ float -Inf
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %c = fcmp fast ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_acc(<4 x float> %x, float %y) {
+; CHECK-LABEL: fmax_v4f32_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f32 s6, s0, s1
+; CHECK-NEXT: vmaxnm.f32 s6, s6, s2
+; CHECK-NEXT: vmaxnm.f32 s0, s6, s3
+; CHECK-NEXT: vmaxnm.f32 s0, s4, s0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %c = fcmp fast ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_acc(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v8f32_acc:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmaxnm.f32 s4, s0, s1
+; CHECK-FP-NEXT: vmaxnm.f32 s4, s4, s2
+; CHECK-FP-NEXT: vmaxnm.f32 s0, s4, s3
+; CHECK-FP-NEXT: vmaxnm.f32 s0, s8, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_acc:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s12, s10
+; CHECK-NOFP-NEXT: vmaxnm.f32 s2, s2, s14
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s8, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %c = fcmp fast ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmax_v4f16_acc(<4 x half> %x, half* %yy) {
+; CHECK-LABEL: fmax_v4f16_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT: vldr.16 s2, .LCPI57_0
+; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT: vldr.16 s2, [r0]
+; CHECK-NEXT: vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI57_0:
+; CHECK-NEXT: .short 64512 @ half -Inf
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %c = fcmp fast ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v8f16_acc(<8 x half> %x, half* %yy) {
+; CHECK-LABEL: fmax_v8f16_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovx.f16 s4, s0
+; CHECK-NEXT: vmovx.f16 s6, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT: vmovx.f16 s0, s3
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT: vmovx.f16 s6, s2
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s2
+; CHECK-NEXT: vldr.16 s2, [r0]
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT: vmaxnm.f16 s4, s4, s3
+; CHECK-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT: vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT: vstr.16 s0, [r0]
+; CHECK-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %c = fcmp fast ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v16f16_acc(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v16f16_acc:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmovx.f16 s4, s0
+; CHECK-FP-NEXT: vmovx.f16 s6, s1
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s0, s4
+; CHECK-FP-NEXT: vmovx.f16 s0, s3
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s1
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmovx.f16 s6, s2
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s2
+; CHECK-FP-NEXT: vldr.16 s2, [r0]
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT: vmaxnm.f16 s4, s4, s3
+; CHECK-FP-NEXT: vmaxnm.f16 s0, s4, s0
+; CHECK-FP-NEXT: vmaxnm.f16 s0, s2, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_acc:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %c = fcmp fast ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_acc(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v1f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f64 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %c = fcmp fast ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_acc(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v2f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
+; CHECK-NEXT: vmaxnm.f64 d0, d2, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %c = fcmp fast ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_acc(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v4f64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d0, d2
+; CHECK-NEXT: vselgt.f64 d5, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vmaxnm.f64 d0, d0, d5
+; CHECK-NEXT: vmaxnm.f64 d0, d4, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %c = fcmp fast ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_acc_nofast(<2 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v2f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q2, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vcmp.f32 s4, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v2f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT: vdup.32 q2, r0
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %c = fcmp ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_acc_nofast(<4 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v4f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d4, d1
+; CHECK-FP-NEXT: vmov.f32 s9, s3
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q2, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT: vcmp.f32 s4, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f64 d4, d1
+; CHECK-NOFP-NEXT: vmov.f32 s9, s3
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT: vselgt.f32 s6, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s6
+; CHECK-NOFP-NEXT: vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %c = fcmp ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_acc_nofast(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v8f32_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r0, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r0
+; CHECK-FP-NEXT: vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT: vcmp.f32 s8, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f32 s0, s8, s0
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f32 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f32 s12, s1, s5
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s12, s10
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s14
+; CHECK-NOFP-NEXT: vselgt.f32 s2, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s14
+; CHECK-NOFP-NEXT: vcmp.f32 s0, s2
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT: vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %c = fcmp ogt float %y, %z
+ %r = select i1 %c, float %y, float %z
+ ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmax_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v4f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s4, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT: vdup.32 q1, r1
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %c = fcmp ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v8f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s4, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmov.f64 d2, d1
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmov.f32 s5, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s3
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %c = fcmp ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v16f16_acc_nofast:
+; CHECK-FP: @ %bb.0: @ %entry
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.f64 d2, d1
+; CHECK-FP-NEXT: vmov.f32 s5, s3
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.32 r1, q0[1]
+; CHECK-FP-NEXT: vdup.32 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT: vdup.16 q1, r1
+; CHECK-FP-NEXT: vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT: vldr.16 s4, [r0]
+; CHECK-FP-NEXT: vcmp.f16 s4, s0
+; CHECK-FP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT: vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT: vstr.16 s0, [r0]
+; CHECK-FP-NEXT: bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_acc_nofast:
+; CHECK-NOFP: @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT: vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT: vcmp.f16 s14, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT: vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT: vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT: vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT: vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT: vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s12
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT: vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT: vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT: vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT: vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT: bx lr
+entry:
+ %y = load half, half* %yy
+ %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %c = fcmp ogt half %y, %z
+ %r = select i1 %c, half %y, half %z
+ store half %r, half* %yy
+ ret void
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_acc_nofast(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v1f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %c = fcmp ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_acc_nofast(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v2f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d0, d1
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d1
+; CHECK-NEXT: vcmp.f64 d2, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d2, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %c = fcmp ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_acc_nofast(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v4f64_acc_nofast:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vcmp.f64 d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f64 d0, d2
+; CHECK-NEXT: vselgt.f64 d5, d1, d3
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d2
+; CHECK-NEXT: vcmp.f64 d0, d5
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d0, d5
+; CHECK-NEXT: vcmp.f64 d4, d0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vselgt.f64 d0, d4, d0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %c = fcmp ogt double %y, %z
+ %r = select i1 %c, double %y, double %z
+ ret double %r
+}
+
+declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double>)
+declare double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double>)
+declare double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double>)
+declare float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float>)
+declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
+declare float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float>)
+declare half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half>)
+declare half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half>)
+declare half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half>)