summary | refs | log | tree | commit | diff | stats
path: root/llvm/test/CodeGen/ARM
diff options
context:
space:
mode:
author: Kristof Beyls <kristof.beyls@arm.com> 2019-10-08 08:25:42 +0000
committer: Kristof Beyls <kristof.beyls@arm.com> 2019-10-08 08:25:42 +0000
commit: 78bfe3ab9475776ae72ca7c9446066f6eb816cc0 (patch)
tree: ed7b721d56d9495847a30e05500b877f49a52128 /llvm/test/CodeGen/ARM
parent: c9ddda84052659698b921e6c3a5bf7df9df599ce (diff)
download: bcm5719-llvm-78bfe3ab9475776ae72ca7c9446066f6eb816cc0.tar.gz
bcm5719-llvm-78bfe3ab9475776ae72ca7c9446066f6eb816cc0.zip
[ARM] Generate vcmp instead of vcmpe
Based on the discussion in http://lists.llvm.org/pipermail/llvm-dev/2019-October/135574.html, the conclusion was reached that the ARM backend should produce vcmp instead of vcmpe instructions by default, i.e. not be producing an Invalid Operation exception when either arguments in a floating point compare are quiet NaNs. In the future, after constrained floating point intrinsics for floating point compare have been introduced, vcmpe instructions probably should be produced for those intrinsics - depending on the exact semantics they'll be defined to have. This patch logically consists of the following parts: - Revert http://llvm.org/viewvc/llvm-project?rev=294945&view=rev and http://llvm.org/viewvc/llvm-project?rev=294968&view=rev, which implemented fine-tuning for when to produce vcmpe (i.e. not do it for equality comparisons). The complexity introduced by those patches isn't needed anymore if we just always produce vcmp instead. Maybe these patches need to be reintroduced once support is needed to map potential LLVM-IR constrained floating point compare intrinsics to the ARM instruction set. - Simply select vcmp, instead of vcmpe, see simple changes in lib/Target/ARM/ARMInstrVFP.td - Adapt lots of tests that tested for vcmpe (instead of vcmp). For all of these tests, the intent of what is tested for isn't related to whether the vcmp should produce an Invalid Operation exception or not. Fixes PR43374. Differential Revision: https://reviews.llvm.org/D68463 llvm-svn: 374025
Diffstat (limited to 'llvm/test/CodeGen/ARM')
-rw-r--r--llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll26
-rw-r--r--llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll4
-rw-r--r--llvm/test/CodeGen/ARM/compare-call.ll2
-rw-r--r--llvm/test/CodeGen/ARM/fcmp-xo.ll12
-rw-r--r--llvm/test/CodeGen/ARM/float-helpers.s40
-rw-r--r--llvm/test/CodeGen/ARM/fp16-instructions.ll64
-rw-r--r--llvm/test/CodeGen/ARM/fp16-promote.ll2
-rw-r--r--llvm/test/CodeGen/ARM/fpcmp.ll10
-rw-r--r--llvm/test/CodeGen/ARM/ifcvt11.ll6
-rw-r--r--llvm/test/CodeGen/ARM/swifterror.ll2
-rw-r--r--llvm/test/CodeGen/ARM/vcmp-crash.ll11
-rw-r--r--llvm/test/CodeGen/ARM/vfp.ll2
-rw-r--r--llvm/test/CodeGen/ARM/vsel-fp16.ll40
-rw-r--r--llvm/test/CodeGen/ARM/vsel.ll80
14 files changed, 145 insertions, 156 deletions
diff --git a/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll b/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
index 10b5ae4e237..9eae0d75e87 100644
--- a/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
+++ b/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
@@ -1317,19 +1317,19 @@ bb15:
}
; CHECK-LABEL: _build_delaunay:
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
-; CHECK: vcmpe
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
+; CHECK: vcmp
declare i32 @puts(i8* nocapture) nounwind
diff --git a/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll b/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
index 4b043362afa..99936cd7eef 100644
--- a/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
@@ -1781,7 +1781,7 @@ define float @debug_info(float %gamma, float %slopeLimit, i1 %or.cond, double %t
; ARM-NEXT: vmov.f32 s0, #1.000000e+00
; ARM-NEXT: vmov.f64 d16, #1.000000e+00
; ARM-NEXT: vadd.f64 d16, d9, d16
-; ARM-NEXT: vcmpe.f32 s16, s0
+; ARM-NEXT: vcmp.f32 s16, s0
; ARM-NEXT: vmrs APSR_nzcv, fpscr
; ARM-NEXT: vmov d17, r0, r1
; ARM-NEXT: vmov.f64 d18, d9
@@ -1828,7 +1828,7 @@ define float @debug_info(float %gamma, float %slopeLimit, i1 %or.cond, double %t
; THUMB-NEXT: vmov.f32 s0, #1.000000e+00
; THUMB-NEXT: vmov.f64 d16, #1.000000e+00
; THUMB-NEXT: vmov.f64 d18, d9
-; THUMB-NEXT: vcmpe.f32 s16, s0
+; THUMB-NEXT: vcmp.f32 s16, s0
; THUMB-NEXT: vadd.f64 d16, d9, d16
; THUMB-NEXT: vmrs APSR_nzcv, fpscr
; THUMB-NEXT: it gt
diff --git a/llvm/test/CodeGen/ARM/compare-call.ll b/llvm/test/CodeGen/ARM/compare-call.ll
index f45ed73adb7..47f20a28b8a 100644
--- a/llvm/test/CodeGen/ARM/compare-call.ll
+++ b/llvm/test/CodeGen/ARM/compare-call.ll
@@ -18,5 +18,5 @@ UnifiedReturnBlock: ; preds = %entry
declare i32 @bar(...)
-; CHECK: vcmpe.f32
+; CHECK: vcmp.f32
diff --git a/llvm/test/CodeGen/ARM/fcmp-xo.ll b/llvm/test/CodeGen/ARM/fcmp-xo.ll
index 8ff3b9017a5..3d5972f0658 100644
--- a/llvm/test/CodeGen/ARM/fcmp-xo.ll
+++ b/llvm/test/CodeGen/ARM/fcmp-xo.ll
@@ -5,7 +5,7 @@
define arm_aapcs_vfpcc float @foo0(float %a0) local_unnamed_addr {
; CHECK-LABEL: foo0:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vcmpe.f32 s0, #0
+; CHECK-NEXT: vcmp.f32 s0, #0
; CHECK-NEXT: vmov.f32 s2, #5.000000e-01
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vmov.f32 s4, #-5.000000e-01
@@ -24,7 +24,7 @@ define arm_aapcs_vfpcc float @float1(float %a0) local_unnamed_addr {
; CHECK-NEXT: vmov.f32 s2, #1.000000e+00
; CHECK-NEXT: vmov.f32 s4, #5.000000e-01
; CHECK-NEXT: vmov.f32 s6, #-5.000000e-01
-; CHECK-NEXT: vcmpe.f32 s2, s0
+; CHECK-NEXT: vcmp.f32 s2, s0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f32 s0, s6, s4
; CHECK-NEXT: bx lr
@@ -46,7 +46,7 @@ define arm_aapcs_vfpcc float @float128(float %a0) local_unnamed_addr {
; VMOVSR-NEXT: vmov.f32 s4, #5.000000e-01
; VMOVSR-NEXT: vmov s2, r0
; VMOVSR-NEXT: vmov.f32 s6, #-5.000000e-01
-; VMOVSR-NEXT: vcmpe.f32 s2, s0
+; VMOVSR-NEXT: vcmp.f32 s2, s0
; VMOVSR-NEXT: vmrs APSR_nzcv, fpscr
; VMOVSR-NEXT: vselgt.f32 s0, s6, s4
; VMOVSR-NEXT: bx lr
@@ -57,7 +57,7 @@ define arm_aapcs_vfpcc float @float128(float %a0) local_unnamed_addr {
; NEON-NEXT: vmov.f32 s2, #5.000000e-01
; NEON-NEXT: vmov d3, r0, r0
; NEON-NEXT: vmov.f32 s4, #-5.000000e-01
-; NEON-NEXT: vcmpe.f32 s6, s0
+; NEON-NEXT: vcmp.f32 s6, s0
; NEON-NEXT: vmrs APSR_nzcv, fpscr
; NEON-NEXT: vselgt.f32 s0, s4, s2
; NEON-NEXT: bx lr
@@ -70,7 +70,7 @@ define arm_aapcs_vfpcc double @double1(double %a0) local_unnamed_addr {
; CHECK-LABEL: double1:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.f64 d18, #1.000000e+00
-; CHECK-NEXT: vcmpe.f64 d18, d0
+; CHECK-NEXT: vcmp.f64 d18, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vmov.f64 d16, #5.000000e-01
; CHECK-NEXT: vmov.f64 d17, #-5.000000e-01
@@ -89,7 +89,7 @@ define arm_aapcs_vfpcc double @double128(double %a0) local_unnamed_addr {
; CHECK-NEXT: movt r0, #16480
; CHECK-NEXT: vmov.f64 d16, #5.000000e-01
; CHECK-NEXT: vmov d18, r1, r0
-; CHECK-NEXT: vcmpe.f64 d18, d0
+; CHECK-NEXT: vcmp.f64 d18, d0
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vmov.f64 d17, #-5.000000e-01
; CHECK-NEXT: vselgt.f64 d0, d17, d16
diff --git a/llvm/test/CodeGen/ARM/float-helpers.s b/llvm/test/CodeGen/ARM/float-helpers.s
index d5388a372b8..1225b4c999f 100644
--- a/llvm/test/CodeGen/ARM/float-helpers.s
+++ b/llvm/test/CodeGen/ARM/float-helpers.s
@@ -174,13 +174,13 @@ define i32 @fcmplt(float %a, float %b) #0 {
; CHECK-SOFTFP: vmov s2, r0
; CHECK-SOFTFP-NEXT: mov r0, #0
; CHECK-SOFTFP-NEXT: vmov s0, r1
-; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0
+; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movmi r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-SP-LABEL: fcmplt:
-; CHECK-HARDFP-SP: vcmpe.f32 s0, s1
+; CHECK-HARDFP-SP: vcmp.f32 s0, s1
; CHECK-HARDFP-SP-NEXT: mov r0, #0
; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-SP-NEXT: movmi r0, #1
@@ -205,13 +205,13 @@ define i32 @fcmple(float %a, float %b) #0 {
; CHECK-SOFTFP: vmov s2, r0
; CHECK-SOFTFP-NEXT: mov r0, #0
; CHECK-SOFTFP-NEXT: vmov s0, r1
-; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0
+; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movls r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-SP-LABEL: fcmple:
-; CHECK-HARDFP-SP: vcmpe.f32 s0, s1
+; CHECK-HARDFP-SP: vcmp.f32 s0, s1
; CHECK-HARDFP-SP-NEXT: mov r0, #0
; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-SP-NEXT: movls r0, #1
@@ -236,13 +236,13 @@ define i32 @fcmpge(float %a, float %b) #0 {
; CHECK-SOFTFP: vmov s2, r0
; CHECK-SOFTFP-NEXT: mov r0, #0
; CHECK-SOFTFP-NEXT: vmov s0, r1
-; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0
+; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movge r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-SP-LABEL: fcmpge:
-; CHECK-HARDFP-SP: vcmpe.f32 s0, s1
+; CHECK-HARDFP-SP: vcmp.f32 s0, s1
; CHECK-HARDFP-SP-NEXT: mov r0, #0
; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-SP-NEXT: movge r0, #1
@@ -267,13 +267,13 @@ define i32 @fcmpgt(float %a, float %b) #0 {
; CHECK-SOFTFP: vmov s2, r0
; CHECK-SOFTFP-NEXT: mov r0, #0
; CHECK-SOFTFP-NEXT: vmov s0, r1
-; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0
+; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movgt r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-SP-LABEL: fcmpgt:
-; CHECK-HARDFP-SP: vcmpe.f32 s0, s1
+; CHECK-HARDFP-SP: vcmp.f32 s0, s1
; CHECK-HARDFP-SP-NEXT: mov r0, #0
; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-SP-NEXT: movgt r0, #1
@@ -298,13 +298,13 @@ define i32 @fcmpun(float %a, float %b) #0 {
; CHECK-SOFTFP: vmov s2, r0
; CHECK-SOFTFP-NEXT: mov r0, #0
; CHECK-SOFTFP-NEXT: vmov s0, r1
-; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0
+; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movvs r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-SP-LABEL: fcmpun:
-; CHECK-HARDFP-SP: vcmpe.f32 s0, s1
+; CHECK-HARDFP-SP: vcmp.f32 s0, s1
; CHECK-HARDFP-SP-NEXT: mov r0, #0
; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-SP-NEXT: movvs r0, #1
@@ -503,13 +503,13 @@ define i32 @dcmplt(double %a, double %b) #0 {
; CHECK-SOFTFP: vmov d16, r2, r3
; CHECK-SOFTFP-NEXT: vmov d17, r0, r1
; CHECK-SOFTFP-NEXT: mov r0, #0
-; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16
+; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movmi r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-DP-LABEL: dcmplt:
-; CHECK-HARDFP-DP: vcmpe.f64 d0, d1
+; CHECK-HARDFP-DP: vcmp.f64 d0, d1
; CHECK-HARDFP-DP-NEXT: mov r0, #0
; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-DP-NEXT: movmi r0, #1
@@ -545,13 +545,13 @@ define i32 @dcmple(double %a, double %b) #0 {
; CHECK-SOFTFP: vmov d16, r2, r3
; CHECK-SOFTFP-NEXT: vmov d17, r0, r1
; CHECK-SOFTFP-NEXT: mov r0, #0
-; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16
+; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movls r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-DP-LABEL: dcmple:
-; CHECK-HARDFP-DP: vcmpe.f64 d0, d1
+; CHECK-HARDFP-DP: vcmp.f64 d0, d1
; CHECK-HARDFP-DP-NEXT: mov r0, #0
; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-DP-NEXT: movls r0, #1
@@ -587,13 +587,13 @@ define i32 @dcmpge(double %a, double %b) #0 {
; CHECK-SOFTFP: vmov d16, r2, r3
; CHECK-SOFTFP-NEXT: vmov d17, r0, r1
; CHECK-SOFTFP-NEXT: mov r0, #0
-; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16
+; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movge r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-DP-LABEL: dcmpge:
-; CHECK-HARDFP-DP: vcmpe.f64 d0, d1
+; CHECK-HARDFP-DP: vcmp.f64 d0, d1
; CHECK-HARDFP-DP-NEXT: mov r0, #0
; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-DP-NEXT: movge r0, #1
@@ -629,13 +629,13 @@ define i32 @dcmpgt(double %a, double %b) #0 {
; CHECK-SOFTFP: vmov d16, r2, r3
; CHECK-SOFTFP-NEXT: vmov d17, r0, r1
; CHECK-SOFTFP-NEXT: mov r0, #0
-; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16
+; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movgt r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-DP-LABEL: dcmpgt:
-; CHECK-HARDFP-DP: vcmpe.f64 d0, d1
+; CHECK-HARDFP-DP: vcmp.f64 d0, d1
; CHECK-HARDFP-DP-NEXT: mov r0, #0
; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-DP-NEXT: movgt r0, #1
@@ -671,13 +671,13 @@ define i32 @dcmpun(double %a, double %b) #0 {
; CHECK-SOFTFP: vmov d16, r2, r3
; CHECK-SOFTFP-NEXT: vmov d17, r0, r1
; CHECK-SOFTFP-NEXT: mov r0, #0
-; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16
+; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16
; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-NEXT: movvs r0, #1
; CHECK-SOFTFP-NEXT: mov pc, lr
;
; CHECK-HARDFP-DP-LABEL: dcmpun:
-; CHECK-HARDFP-DP: vcmpe.f64 d0, d1
+; CHECK-HARDFP-DP: vcmp.f64 d0, d1
; CHECK-HARDFP-DP-NEXT: mov r0, #0
; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-DP-NEXT: movvs r0, #1
diff --git a/llvm/test/CodeGen/ARM/fp16-instructions.ll b/llvm/test/CodeGen/ARM/fp16-instructions.ll
index a8fc532070e..260dd12b3e2 100644
--- a/llvm/test/CodeGen/ARM/fp16-instructions.ll
+++ b/llvm/test/CodeGen/ARM/fp16-instructions.ll
@@ -164,9 +164,9 @@ entry:
; CHECK-LABEL: VCMPE1:
; CHECK-SOFT: bl __aeabi_fcmplt
-; CHECK-SOFTFP-FP16: vcmpe.f32 s0, #0
-; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s0, #0
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, #0
+; CHECK-SOFTFP-FP16: vcmp.f32 s0, #0
+; CHECK-SOFTFP-FULLFP16: vcmp.f16 s0, #0
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, #0
}
define i32 @VCMPE2(float %F.coerce, float %G.coerce) {
@@ -184,9 +184,9 @@ entry:
; CHECK-LABEL: VCMPE2:
; CHECK-SOFT: bl __aeabi_fcmplt
-; CHECK-SOFTFP-FP16: vcmpe.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}}
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}}
+; CHECK-SOFTFP-FP16: vcmp.f32 s{{.}}, s{{.}}
+; CHECK-SOFTFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}}
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}}
}
; Test lowering of BR_CC
@@ -212,10 +212,10 @@ for.end:
; CHECK-SOFT: cmp r0, #{{0|1}}
; CHECK-SOFTFP-FP16: vcvtb.f32.f16 [[S2:s[0-9]]], [[S2]]
-; CHECK-SOFTFP-FP16: vcmpe.f32 [[S2]], s0
+; CHECK-SOFTFP-FP16: vcmp.f32 [[S2]], s0
; CHECK-SOFTFP-FP16: vmrs APSR_nzcv, fpscr
-; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}}
+; CHECK-SOFTFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}}
; CHECK-SOFTFP-FULLFP16: vmrs APSR_nzcv, fpscr
}
@@ -727,15 +727,15 @@ define half @select_cc_ge1(half* %a0) {
; CHECK-LABEL: select_cc_ge1:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovge.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it ge
; CHECK-SOFTFP-FP16-T32-NEXT: vmovge.f32 s{{.}}, s{{.}}
@@ -749,15 +749,15 @@ define half @select_cc_ge2(half* %a0) {
; CHECK-LABEL: select_cc_ge2:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovls.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it ls
; CHECK-SOFTFP-FP16-T32-NEXT: vmovls.f32 s{{.}}, s{{.}}
@@ -771,15 +771,15 @@ define half @select_cc_ge3(half* %a0) {
; CHECK-LABEL: select_cc_ge3:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovhi.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it hi
; CHECK-SOFTFP-FP16-T32-NEXT: vmovhi.f32 s{{.}}, s{{.}}
@@ -793,15 +793,15 @@ define half @select_cc_ge4(half* %a0) {
; CHECK-LABEL: select_cc_ge4:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovlt.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it lt
; CHECK-SOFTFP-FP16-T32-NEXT: vmovlt.f32 s{{.}}, s{{.}}
@@ -816,15 +816,15 @@ define half @select_cc_gt1(half* %a0) {
; CHECK-LABEL: select_cc_gt1:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovgt.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it gt
; CHECK-SOFTFP-FP16-T32-NEXT: vmovgt.f32 s{{.}}, s{{.}}
@@ -838,15 +838,15 @@ define half @select_cc_gt2(half* %a0) {
; CHECK-LABEL: select_cc_gt2:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovpl.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it pl
; CHECK-SOFTFP-FP16-T32-NEXT: vmovpl.f32 s{{.}}, s{{.}}
@@ -860,15 +860,15 @@ define half @select_cc_gt3(half* %a0) {
; CHECK-LABEL: select_cc_gt3:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovle.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it le
; CHECK-SOFTFP-FP16-T32-NEXT: vmovle.f32 s{{.}}, s{{.}}
@@ -882,15 +882,15 @@ define half @select_cc_gt4(half* %a0) {
; CHECK-LABEL: select_cc_gt4:
-; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6
+; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6
; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-A32-NEXT: vmovmi.f32 s{{.}}, s{{.}}
-; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0
; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32-NEXT: it mi
; CHECK-SOFTFP-FP16-T32-NEXT: vmovmi.f32 s{{.}}, s{{.}}
diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll
index f382144cf95..183653036f3 100644
--- a/llvm/test/CodeGen/ARM/fp16-promote.ll
+++ b/llvm/test/CodeGen/ARM/fp16-promote.ll
@@ -202,7 +202,7 @@ define i1 @test_fcmp_ueq(half* %p, half* %q) #0 {
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmpe.f32
+; CHECK-VFP: vcmp.f32
; CHECK-NOVFP: bl __aeabi_fcmplt
; CHECK-FP16: vmrs APSR_nzcv, fpscr
; CHECK-VFP: strmi
diff --git a/llvm/test/CodeGen/ARM/fpcmp.ll b/llvm/test/CodeGen/ARM/fpcmp.ll
index 67326e00016..b8fc21f8146 100644
--- a/llvm/test/CodeGen/ARM/fpcmp.ll
+++ b/llvm/test/CodeGen/ARM/fpcmp.ll
@@ -2,7 +2,7 @@
define i32 @f1(float %a) {
;CHECK-LABEL: f1:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: movmi
entry:
%tmp = fcmp olt float %a, 1.000000e+00 ; <i1> [#uses=1]
@@ -22,7 +22,7 @@ entry:
define i32 @f3(float %a) {
;CHECK-LABEL: f3:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: movgt
entry:
%tmp = fcmp ogt float %a, 1.000000e+00 ; <i1> [#uses=1]
@@ -32,7 +32,7 @@ entry:
define i32 @f4(float %a) {
;CHECK-LABEL: f4:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: movge
entry:
%tmp = fcmp oge float %a, 1.000000e+00 ; <i1> [#uses=1]
@@ -42,7 +42,7 @@ entry:
define i32 @f5(float %a) {
;CHECK-LABEL: f5:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
;CHECK: movls
entry:
%tmp = fcmp ole float %a, 1.000000e+00 ; <i1> [#uses=1]
@@ -62,7 +62,7 @@ entry:
define i32 @g1(double %a) {
;CHECK-LABEL: g1:
-;CHECK: vcmpe.f64
+;CHECK: vcmp.f64
;CHECK: movmi
entry:
%tmp = fcmp olt double %a, 1.000000e+00 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/ARM/ifcvt11.ll b/llvm/test/CodeGen/ARM/ifcvt11.ll
index eae41e21c61..7d577065a6d 100644
--- a/llvm/test/CodeGen/ARM/ifcvt11.ll
+++ b/llvm/test/CodeGen/ARM/ifcvt11.ll
@@ -17,7 +17,7 @@ bb.nph: ; preds = %entry
br label %bb
bb: ; preds = %bb4, %bb.nph
-; CHECK: vcmpe.f64
+; CHECK: vcmp.f64
; CHECK: vmrs APSR_nzcv, fpscr
%r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ]
%n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
@@ -30,9 +30,9 @@ bb: ; preds = %bb4, %bb.nph
bb1: ; preds = %bb
; CHECK-NOT: it
-; CHECK-NOT: vcmpemi
+; CHECK-NOT: vcmpmi
; CHECK-NOT: vmrsmi
-; CHECK: vcmpe.f64
+; CHECK: vcmp.f64
; CHECK: vmrs APSR_nzcv, fpscr
%scevgep12 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 2
%6 = load double, double* %scevgep12, align 4
diff --git a/llvm/test/CodeGen/ARM/swifterror.ll b/llvm/test/CodeGen/ARM/swifterror.ll
index 6424754a982..d96bc0249b4 100644
--- a/llvm/test/CodeGen/ARM/swifterror.ll
+++ b/llvm/test/CodeGen/ARM/swifterror.ll
@@ -194,7 +194,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
; CHECK-O0: strb [[ID2]], [{{.*}}[[ID]], #8]
; spill r0
; CHECK-O0: str r0, [sp{{.*}}]
-; CHECK-O0: vcmpe
+; CHECK-O0: vcmp
; CHECK-O0: ble
; reload from stack
; CHECK-O0: ldr r8
diff --git a/llvm/test/CodeGen/ARM/vcmp-crash.ll b/llvm/test/CodeGen/ARM/vcmp-crash.ll
deleted file mode 100644
index 2d3262be584..00000000000
--- a/llvm/test/CodeGen/ARM/vcmp-crash.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc -mcpu=cortex-m4 < %s | FileCheck %s
-
-target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
-target triple = "thumbv7em-none--eabi"
-
-; CHECK: vcmp.f32
-define double @f(double %a, double %b, double %c, float %d) {
- %1 = fcmp oeq float %d, 0.0
- %2 = select i1 %1, double %a, double %c
- ret double %2
-}
diff --git a/llvm/test/CodeGen/ARM/vfp.ll b/llvm/test/CodeGen/ARM/vfp.ll
index 8fa5113d8a3..c18855abd87 100644
--- a/llvm/test/CodeGen/ARM/vfp.ll
+++ b/llvm/test/CodeGen/ARM/vfp.ll
@@ -142,7 +142,7 @@ define void @test_cmpfp0(float* %glob, i32 %X) {
;CHECK-LABEL: test_cmpfp0:
entry:
%tmp = load float, float* %glob ; <float> [#uses=1]
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
%tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
br i1 %tmp.upgrd.3, label %cond_true, label %cond_false
diff --git a/llvm/test/CodeGen/ARM/vsel-fp16.ll b/llvm/test/CodeGen/ARM/vsel-fp16.ll
index 9ccc6f42728..fda1fcb5f87 100644
--- a/llvm/test/CodeGen/ARM/vsel-fp16.ll
+++ b/llvm/test/CodeGen/ARM/vsel-fp16.ll
@@ -106,7 +106,7 @@ define void @test_vsel32ogt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -130,7 +130,7 @@ define void @test_vsel32oge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -178,7 +178,7 @@ define void @test_vsel32ugt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
@@ -202,7 +202,7 @@ define void @test_vsel32uge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
@@ -226,7 +226,7 @@ define void @test_vsel32olt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -250,7 +250,7 @@ define void @test_vsel32ult(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s2, s0
@@ -274,7 +274,7 @@ define void @test_vsel32ole(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -298,7 +298,7 @@ define void @test_vsel32ule(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s2, s0
@@ -322,7 +322,7 @@ define void @test_vsel32ord(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselvs.f16 s0, s2, s0
@@ -370,7 +370,7 @@ define void @test_vsel32uno(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half*
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselvs.f16 s0, s0, s2
@@ -395,7 +395,7 @@ define void @test_vsel32ogt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -419,7 +419,7 @@ define void @test_vsel32oge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -467,7 +467,7 @@ define void @test_vsel32ugt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -491,7 +491,7 @@ define void @test_vsel32uge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -515,7 +515,7 @@ define void @test_vsel32olt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -539,7 +539,7 @@ define void @test_vsel32ult_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselgt.f16 s0, s0, s2
@@ -563,7 +563,7 @@ define void @test_vsel32ole_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -587,7 +587,7 @@ define void @test_vsel32ule_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s6, s4
+; CHECK-NEXT: vcmp.f16 s6, s4
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselge.f16 s0, s0, s2
@@ -611,7 +611,7 @@ define void @test_vsel32ord_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselvs.f16 s0, s2, s0
@@ -659,7 +659,7 @@ define void @test_vsel32uno_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h
; CHECK-NEXT: vldr.16 s4, [r0]
; CHECK-NEXT: vldr.16 s6, [r1]
; CHECK-NEXT: movw r0, :lower16:varhalf
-; CHECK-NEXT: vcmpe.f16 s4, s6
+; CHECK-NEXT: vcmp.f16 s4, s6
; CHECK-NEXT: movt r0, :upper16:varhalf
; CHECK-NEXT: vmrs APSR_nzcv, fpscr
; CHECK-NEXT: vselvs.f16 s0, s0, s2
diff --git a/llvm/test/CodeGen/ARM/vsel.ll b/llvm/test/CodeGen/ARM/vsel.ll
index 9408424e3a6..33d16ad45e2 100644
--- a/llvm/test/CodeGen/ARM/vsel.ll
+++ b/llvm/test/CodeGen/ARM/vsel.ll
@@ -96,7 +96,7 @@ define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ogt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -105,7 +105,7 @@ define void @test_vsel64ogt(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ogt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -114,7 +114,7 @@ define void @test_vsel32oge(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp oge float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -123,7 +123,7 @@ define void @test_vsel64oge(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp oge float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -150,7 +150,7 @@ define void @test_vsel32ugt(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ugt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f32 s0, s3, s2
ret void
}
@@ -159,7 +159,7 @@ define void @test_vsel64ugt(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ugt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f64 d16, d2, d1
ret void
}
@@ -168,7 +168,7 @@ define void @test_vsel32uge(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp uge float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f32 s0, s3, s2
ret void
}
@@ -177,7 +177,7 @@ define void @test_vsel64uge(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp uge float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f64 d16, d2, d1
ret void
}
@@ -186,7 +186,7 @@ define void @test_vsel32olt(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp olt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -195,7 +195,7 @@ define void @test_vsel64olt(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp olt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -204,7 +204,7 @@ define void @test_vsel32ult(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ult float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f32 s0, s3, s2
ret void
}
@@ -213,7 +213,7 @@ define void @test_vsel64ult(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ult float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f64 d16, d2, d1
ret void
}
@@ -222,7 +222,7 @@ define void @test_vsel32ole(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ole float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -231,7 +231,7 @@ define void @test_vsel64ole(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ole float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -240,7 +240,7 @@ define void @test_vsel32ule(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ule float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f32 s0, s3, s2
ret void
}
@@ -249,7 +249,7 @@ define void @test_vsel64ule(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ule float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f64 d16, d2, d1
ret void
}
@@ -258,7 +258,7 @@ define void @test_vsel32ord(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp ord float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f32 s0, s3, s2
ret void
}
@@ -267,7 +267,7 @@ define void @test_vsel64ord(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp ord float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f64 d16, d2, d1
ret void
}
@@ -294,7 +294,7 @@ define void @test_vsel32uno(float %lhs32, float %rhs32, float %a, float %b) {
%tst1 = fcmp uno float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f32 s0, s2, s3
ret void
}
@@ -303,7 +303,7 @@ define void @test_vsel64uno(float %lhs32, float %rhs32, double %a, double %b) {
%tst1 = fcmp uno float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f64 d16, d1, d2
ret void
}
@@ -313,7 +313,7 @@ define void @test_vsel32ogt_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ogt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -322,7 +322,7 @@ define void @test_vsel64ogt_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ogt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -331,7 +331,7 @@ define void @test_vsel32oge_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan oge float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -340,7 +340,7 @@ define void @test_vsel64oge_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan oge float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -367,7 +367,7 @@ define void @test_vsel32ugt_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ugt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -376,7 +376,7 @@ define void @test_vsel64ugt_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ugt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -385,7 +385,7 @@ define void @test_vsel32uge_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan uge float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -394,7 +394,7 @@ define void @test_vsel64uge_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan uge float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -403,7 +403,7 @@ define void @test_vsel32olt_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan olt float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -412,7 +412,7 @@ define void @test_vsel64olt_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan olt float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -421,7 +421,7 @@ define void @test_vsel32ult_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ult float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f32 s0, s2, s3
ret void
}
@@ -430,7 +430,7 @@ define void @test_vsel64ult_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ult float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselgt.f64 d16, d1, d2
ret void
}
@@ -439,7 +439,7 @@ define void @test_vsel32ole_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ole float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -448,7 +448,7 @@ define void @test_vsel64ole_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ole float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -457,7 +457,7 @@ define void @test_vsel32ule_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ule float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f32 s0, s2, s3
ret void
}
@@ -466,7 +466,7 @@ define void @test_vsel64ule_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ule float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s1, s0
+; CHECK: vcmp.f32 s1, s0
; CHECK: vselge.f64 d16, d1, d2
ret void
}
@@ -475,7 +475,7 @@ define void @test_vsel32ord_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan ord float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f32 s0, s3, s2
ret void
}
@@ -484,7 +484,7 @@ define void @test_vsel64ord_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan ord float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f64 d16, d2, d1
ret void
}
@@ -511,7 +511,7 @@ define void @test_vsel32uno_nnan(float %lhs32, float %rhs32, float %a, float %b)
%tst1 = fcmp nnan uno float %lhs32, %rhs32
%val1 = select i1 %tst1, float %a, float %b
store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f32 s0, s2, s3
ret void
}
@@ -520,7 +520,7 @@ define void @test_vsel64uno_nnan(float %lhs32, float %rhs32, double %a, double %
%tst1 = fcmp nnan uno float %lhs32, %rhs32
%val1 = select i1 %tst1, double %a, double %b
store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
; CHECK: vselvs.f64 d16, d1, d2
ret void
}
OpenPOWER on IntegriCloud