|   |   |   |
|---|---|---|
| author | Alex Bradbury <asb@lowrisc.org> | 2018-04-12 05:42:42 +0000 |
| committer | Alex Bradbury <asb@lowrisc.org> | 2018-04-12 05:42:42 +0000 |
| commit | 5d0dfa5e0e758fb075b70c4686e46abf45f3c3ba (patch) | |
| tree | 9159564127fd554c740c3a982bd9b18a0dc8da01 /llvm/test/CodeGen | |
| parent | dfaa021e990238902823882858b873d96eacb4da (diff) | |
| download | bcm5719-llvm-5d0dfa5e0e758fb075b70c4686e46abf45f3c3ba.tar.gz, bcm5719-llvm-5d0dfa5e0e758fb075b70c4686e46abf45f3c3ba.zip | |
[RISCV] Add codegen support for RV32D floating point arithmetic operations
llvm-svn: 329874
Diffstat (limited to 'llvm/test/CodeGen')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/RISCV/double-arith.ll | 256 |

1 file changed, 256 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index d0e4d786a2f..a9bdf68e94d 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -21,3 +21,259 @@ define double @fadd_d(double %a, double %b) nounwind {
   %1 = fadd double %a, %b
   ret double %1
 }
+
+define double @fsub_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsub_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fsub.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fsub double %a, %b
+  ret double %1
+}
+
+define double @fmul_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmul_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fmul.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fmul double %a, %b
+  ret double %1
+}
+
+define double @fdiv_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fdiv_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fdiv.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fdiv double %a, %b
+  ret double %1
+}
+
+declare double @llvm.sqrt.f32(double)
+
+define double @fsqrt_d(double %a) nounwind {
+; RV32IFD-LABEL: fsqrt_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: fsqrt.d ft0, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = call double @llvm.sqrt.f32(double %a)
+  ret double %1
+}
+
+declare double @llvm.copysign.f32(double, double)
+
+define double @fsgnj_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsgnj_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fsgnj.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = call double @llvm.copysign.f32(double %a, double %b)
+  ret double %1
+}
+
+define double @fneg_d(double %a) nounwind {
+; RV32IFD-LABEL: fneg_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: fneg.d ft0, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fsub double -0.0, %a
+  ret double %1
+}
+
+define double @fsgnjn_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsgnjn_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fsgnjn.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fsub double -0.0, %b
+  %2 = call double @llvm.copysign.f32(double %a, double %1)
+  ret double %2
+}
+
+declare double @llvm.fabs.f32(double)
+
+define double @fabs_d(double %a) nounwind {
+; RV32IFD-LABEL: fabs_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: fabs.d ft0, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = call double @llvm.fabs.f32(double %a)
+  ret double %1
+}
+
+declare double @llvm.minnum.f32(double, double)
+
+define double @fmin_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmin_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fmin.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = call double @llvm.minnum.f32(double %a, double %b)
+  ret double %1
+}
+
+declare double @llvm.maxnum.f32(double, double)
+
+define double @fmax_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmax_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fmax.d ft0, ft1, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = call double @llvm.maxnum.f32(double %a, double %b)
+  ret double %1
+}
+
+define i32 @feq_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: feq_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: feq.d a0, ft1, ft0
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fcmp oeq double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @flt_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: flt_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: flt.d a0, ft1, ft0
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fcmp olt double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fle_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fle_d:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fle.d a0, ft1, ft0
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+  %1 = fcmp ole double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
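
Note: the hunk above starts at line 21 of double-arith.ll, so the test's RUN lines are not part of this diff. As a rough sketch of how such a test is driven (an assumption based on the RV32IFD check prefix, not text taken from this commit; the exact flags may differ in the real file), the header typically looks like:

```llvm
; Hypothetical RUN lines for an RV32IFD codegen test; assumed, not copied from the commit.
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IFD %s
```

The CHECK lines follow the style emitted by llvm/utils/update_llc_test_checks.py, so they can generally be regenerated against a rebuilt llc rather than edited by hand.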

