Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-select-fcmp.ll')
| -rw-r--r-- | llvm/test/CodeGen/RISCV/double-select-fcmp.ll | 215 |
1 file changed, 215 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index fa10ee25724..4503c2365c8 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @select_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_false:
@@ -8,6 +10,11 @@ define double @select_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: mv a1, a3
 ; RV32IFD-NEXT: mv a0, a2
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_false:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: mv a0, a1
+; RV64IFD-NEXT: ret
 %1 = fcmp false double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -33,6 +40,18 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_oeq:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft1
+; RV64IFD-NEXT: bnez a0, .LBB1_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB1_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp oeq double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -58,6 +77,18 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ogt:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: flt.d a0, ft1, ft0
+; RV64IFD-NEXT: bnez a0, .LBB2_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB2_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ogt double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -83,6 +114,18 @@ define double @select_fcmp_oge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_oge:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fle.d a0, ft1, ft0
+; RV64IFD-NEXT: bnez a0, .LBB3_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB3_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp oge double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -108,6 +151,18 @@ define double @select_fcmp_olt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_olt:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: flt.d a0, ft0, ft1
+; RV64IFD-NEXT: bnez a0, .LBB4_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB4_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp olt double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -133,6 +188,18 @@ define double @select_fcmp_ole(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ole:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fle.d a0, ft0, ft1
+; RV64IFD-NEXT: bnez a0, .LBB5_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB5_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ole double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -166,6 +233,25 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_one:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: feq.d a1, ft0, ft0
+; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: feq.d a1, ft0, ft1
+; RV64IFD-NEXT: not a1, a1
+; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: bnez a0, .LBB6_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB6_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp one double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -195,6 +281,22 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ord:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: feq.d a1, ft0, ft0
+; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB7_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB7_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ord double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -225,6 +327,23 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ueq:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: feq.d a1, ft0, ft0
+; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: feq.d a1, ft0, ft1
+; RV64IFD-NEXT: or a0, a1, a0
+; RV64IFD-NEXT: bnez a0, .LBB8_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB8_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ueq double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -251,6 +370,19 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ugt:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fle.d a0, ft0, ft1
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB9_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB9_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ugt double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -277,6 +409,19 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_uge:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: flt.d a0, ft0, ft1
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB10_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB10_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp uge double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -303,6 +448,19 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ult:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fle.d a0, ft1, ft0
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB11_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB11_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ult double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -329,6 +487,19 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_ule:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: flt.d a0, ft1, ft0
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB12_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB12_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp ule double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -355,6 +526,19 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_une:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft1
+; RV64IFD-NEXT: xori a0, a0, 1
+; RV64IFD-NEXT: bnez a0, .LBB13_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB13_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp une double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -384,6 +568,21 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-NEXT: lw a1, 12(sp)
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_uno:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: feq.d a1, ft0, ft0
+; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: bnez a0, .LBB14_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: fmv.d ft0, ft1
+; RV64IFD-NEXT: .LBB14_2:
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
 %1 = fcmp uno double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -393,6 +592,10 @@ define double @select_fcmp_true(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_true:
 ; RV32IFD: # %bb.0:
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: select_fcmp_true:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: ret
 %1 = fcmp true double %a, %b
 %2 = select i1 %1, double %a, double %b
 ret double %2
@@ -417,6 +620,18 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
 ; RV32IFD-NEXT: mv a0, a4
 ; RV32IFD-NEXT: addi sp, sp, 16
 ; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: i32_select_fcmp_oeq:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: feq.d a0, ft1, ft0
+; RV64IFD-NEXT: bnez a0, .LBB16_2
+; RV64IFD-NEXT: # %bb.1:
+; RV64IFD-NEXT: mv a2, a3
+; RV64IFD-NEXT: .LBB16_2:
+; RV64IFD-NEXT: mv a0, a2
+; RV64IFD-NEXT: ret
 %1 = fcmp oeq double %a, %b
 %2 = select i1 %1, i32 %c, i32 %d
 ret i32 %2
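
For reference, every function in this test exercises the same shape of IR: a bare fcmp feeding a select, with one function per predicate. Below is a minimal sketch of such a test case; the function name is illustrative (not taken from the diff), and in practice the CHECK lines would be regenerated with utils/update_llc_test_checks.py, as the NOTE line above states, rather than written by hand.

; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IFD %s

define double @select_fcmp_olt_sketch(double %a, double %b) nounwind {
  ; Ordered less-than compare; per the checks above, on RV64IFD this lowers
  ; to flt.d followed by a bnez-guarded move between FP registers.
  %1 = fcmp olt double %a, %b
  ; Select %a when the compare holds, otherwise %b.
  %2 = select i1 %1, double %a, double %b
  ret double %2
}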

