| field | value |
|---|---|
| author | Alex Bradbury <asb@lowrisc.org>, 2019-02-01 03:53:30 +0000 |
| committer | Alex Bradbury <asb@lowrisc.org>, 2019-02-01 03:53:30 +0000 |
| commit | 7539fa2c2d1fe54fbb7f9e01ea07ebcec012a144 |
| tree | f0d5928b5c81073d838604a8dbaee42672a1d934 /llvm/test/CodeGen/RISCV/double-arith.ll |
| parent | 32b77383ecf67774cd311c23fe5f112202729592 |
[RISCV] Implement RV64D codegen
This patch:
* Adds the necessary RV64D codegen patterns
* Modifies CC_RISCV so that it correctly handles f64 types under the soft-float ABI (see the sketch after this list)
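With the soft-float ABI on RV64, an f64 argument arrives in a single 64-bit GPR and must be moved into an FPR with fmv.d.x before any D-extension instruction can use it, as the updated tests below show. A minimal sketch (hypothetical function name, mirroring the fadd_d test in the diff):

```llvm
; Sketch only; compile with:
;   llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs
; With the soft-float ABI, %a and %b arrive in GPRs a0/a1, are moved into
; FPRs via fmv.d.x, and the result is returned through a0 via fmv.x.d.
define double @example_fadd(double %a, double %b) nounwind {
  %1 = fadd double %a, %b
  ret double %1
}
```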
Note that in general there is no reason to try to select fcvt.w[u].d rather than fcvt.l[u].d for i32 conversions because fptosi/fptoui produce poison if the input won't fit into the target type.
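For example (a sketch, not a test from this patch): in the conversion below, selecting the 64-bit fcvt.l.d is acceptable even though the result is i32, because any input too large for i32 makes the fptosi result poison anyway.

```llvm
; Sketch only, hypothetical function name. On riscv64 with -mattr=+d this
; conversion can use fcvt.l.d: inputs that don't fit in i32 produce poison,
; so the wider fcvt.l[u].d result is as acceptable as fcvt.w[u].d.
define i32 @example_fptosi(double %a) nounwind {
  %1 = fptosi double %a to i32
  ret i32 %1
}
```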
Differential Revision: https://reviews.llvm.org/D53237
llvm-svn: 352833
Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-arith.ll')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/RISCV/double-arith.ll | 167 |

1 file changed, 167 insertions(+), 0 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index a5243ea18ab..2b696210ae1 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 ; These tests are each targeted at a particular RISC-V FPU instruction. Most
 ; other files in this folder exercise LLVM IR instructions that don't directly
@@ -22,6 +24,14 @@ define double @fadd_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b
   ret double %1
 }
@@ -42,6 +52,14 @@ define double @fsub_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double %a, %b
   ret double %1
 }
@@ -62,6 +80,14 @@ define double @fmul_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmul_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fmul double %a, %b
   ret double %1
 }
@@ -82,6 +108,14 @@ define double @fdiv_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fdiv_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fdiv double %a, %b
   ret double %1
 }
@@ -101,6 +135,13 @@ define double @fsqrt_d(double %a) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsqrt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fsqrt.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sqrt.f64(double %a)
   ret double %1
 }
@@ -123,6 +164,14 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnj_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
   ret double %1
 }
@@ -141,6 +190,14 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft0, ft1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fneg_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft0
+; RV64IFD-NEXT:    fneg.d ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %a
   %2 = fneg double %1
   %3 = fcmp oeq double %1, %2
@@ -149,6 +206,9 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 }
 
 define double @fsgnjn_d(double %a, double %b) nounwind {
+; TODO: fsgnjn.s isn't selected on RV64 because DAGCombiner::visitBITCAST will
+; convert (bitconvert (fneg x)) to a xor.
+;
 ; RV32IFD-LABEL: fsgnjn_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -164,6 +224,17 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnjn_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    slli a2, a2, 63
+; RV64IFD-NEXT:    xor a1, a1, a2
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double -0.0, %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)
   ret double %2
@@ -191,6 +262,16 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fabs_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fabs.d ft1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b
   %2 = call double @llvm.fabs.f64(double %1)
   %3 = fadd double %2, %1
@@ -215,6 +296,14 @@ define double @fmin_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmin_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.minnum.f64(double %a, double %b)
   ret double %1
 }
@@ -237,6 +326,14 @@ define double @fmax_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmax_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.maxnum.f64(double %a, double %b)
   ret double %1
 }
@@ -254,6 +351,13 @@ define i32 @feq_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: feq_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -272,6 +376,13 @@ define i32 @flt_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: flt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -290,6 +401,13 @@ define i32 @fle_d(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fle_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -316,6 +434,15 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
   ret double %1
 }
@@ -343,6 +470,19 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    lui a2, %hi(.LCPI15_0)
+; RV64IFD-NEXT:    addi a2, a2, %lo(.LCPI15_0)
+; RV64IFD-NEXT:    fld ft1, 0(a2)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmsub.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %c_ = fadd double 0.0, %c ; avoid negation using xor
   %negc = fsub double -0.0, %c_
   %1 = call double @llvm.fma.f64(double %a, double %b, double %negc)
@@ -373,6 +513,20 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fnmadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    lui a2, %hi(.LCPI16_0)
+; RV64IFD-NEXT:    addi a2, a2, %lo(.LCPI16_0)
+; RV64IFD-NEXT:    fld ft1, 0(a2)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fadd.d ft1, ft2, ft1
+; RV64IFD-NEXT:    fmv.d.x ft2, a1
+; RV64IFD-NEXT:    fnmadd.d ft0, ft1, ft2, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
   %c_ = fadd double 0.0, %c
   %nega = fsub double -0.0, %a_
@@ -404,6 +558,19 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fnmsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(.LCPI17_0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft1, a2
+; RV64IFD-NEXT:    fmv.d.x ft2, a1
+; RV64IFD-NEXT:    fnmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
   %nega = fsub double -0.0, %a_
   %1 = call double @llvm.fma.f64(double %nega, double %b, double %c)
```
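The RV64IFD check lines above follow the autogenerated format noted at the top of the test. If the expected output changes, the assertions can be regenerated with the script the NOTE line names; a typical invocation (paths relative to an LLVM checkout, and the --llc-binary flag assumed from current versions of the script) looks like:

```
$ llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
    llvm/test/CodeGen/RISCV/double-arith.ll
```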

