path: root/llvm/test/CodeGen/RISCV/double-mem.ll
author    Alex Bradbury <asb@lowrisc.org>    2019-02-01 03:53:30 +0000
committer Alex Bradbury <asb@lowrisc.org>    2019-02-01 03:53:30 +0000
commit    7539fa2c2d1fe54fbb7f9e01ea07ebcec012a144 (patch)
tree      f0d5928b5c81073d838604a8dbaee42672a1d934 /llvm/test/CodeGen/RISCV/double-mem.ll
parent    32b77383ecf67774cd311c23fe5f112202729592 (diff)
[RISCV] Implement RV64D codegen
This patch:
* Adds necessary RV64D codegen patterns
* Modifies CC_RISCV so it will properly handle f64 types (with soft float ABI)

Note that in general there is no reason to try to select fcvt.w[u].d rather than fcvt.l[u].d for i32 conversions, because fptosi/fptoui produce poison if the input won't fit into the target type.

Differential Revision: https://reviews.llvm.org/D53237

llvm-svn: 352833
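As an illustrative sketch (not part of this commit's test changes), the i32 conversion case the note refers to looks like the IR below; on riscv64 with +d it can legitimately lower to fcvt.l.d, since an out-of-range input already makes the fptosi result poison:

; Hypothetical example, not taken from double-mem.ll.
define i32 @double_to_i32(double %x) nounwind {
  ; Poison if %x does not fit in i32, so selecting the 64-bit
  ; fcvt.l.d is as valid as the 32-bit fcvt.w.d here.
  %1 = fptosi double %x to i32
  ret i32 %1
}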
Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-mem.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/double-mem.ll | 83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 5b20447ed73..a7a93de8a50 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IFD %s
define double @fld(double *%a) nounwind {
; RV32IFD-LABEL: fld:
@@ -14,6 +16,14 @@ define double @fld(double *%a) nounwind {
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fld:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fld ft0, 24(a0)
+; RV64IFD-NEXT: fld ft1, 0(a0)
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
%1 = load double, double* %a
%2 = getelementptr double, double* %a, i32 3
%3 = load double, double* %2
@@ -38,6 +48,15 @@ define void @fsd(double *%a, double %b, double %c) nounwind {
; RV32IFD-NEXT: fsd ft0, 0(a0)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fsd:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a2
+; RV64IFD-NEXT: fmv.d.x ft1, a1
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fsd ft0, 64(a0)
+; RV64IFD-NEXT: fsd ft0, 0(a0)
+; RV64IFD-NEXT: ret
; Use %b and %c in an FP op to ensure floating point registers are used, even
; for the soft float ABI
%1 = fadd double %b, %c
@@ -72,6 +91,20 @@ define double @fld_fsd_global(double %a, double %b) nounwind {
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fld_fsd_global:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: lui a0, %hi(G)
+; RV64IFD-NEXT: fld ft1, %lo(G)(a0)
+; RV64IFD-NEXT: fsd ft0, %lo(G)(a0)
+; RV64IFD-NEXT: addi a0, a0, %lo(G)
+; RV64IFD-NEXT: fld ft1, 72(a0)
+; RV64IFD-NEXT: fsd ft0, 72(a0)
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
; Use %a and %b in an FP op to ensure floating point registers are used, even
; for the soft float ABI
%1 = fadd double %a, %b
@@ -100,6 +133,18 @@ define double @fld_fsd_constant(double %a) nounwind {
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fld_fsd_constant:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: lui a0, 56
+; RV64IFD-NEXT: addiw a0, a0, -1353
+; RV64IFD-NEXT: slli a0, a0, 14
+; RV64IFD-NEXT: fld ft1, -273(a0)
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT: fsd ft0, -273(a0)
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ret
%1 = inttoptr i32 3735928559 to double*
%2 = load volatile double, double* %1
%3 = fadd double %a, %2
@@ -133,6 +178,23 @@ define double @fld_stack(double %a) nounwind {
; RV32IFD-NEXT: lw ra, 28(sp)
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fld_stack:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -32
+; RV64IFD-NEXT: sd ra, 24(sp)
+; RV64IFD-NEXT: sd s1, 16(sp)
+; RV64IFD-NEXT: mv s1, a0
+; RV64IFD-NEXT: addi a0, sp, 8
+; RV64IFD-NEXT: call notdead
+; RV64IFD-NEXT: fmv.d.x ft0, s1
+; RV64IFD-NEXT: fld ft1, 8(sp)
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ld s1, 16(sp)
+; RV64IFD-NEXT: ld ra, 24(sp)
+; RV64IFD-NEXT: addi sp, sp, 32
+; RV64IFD-NEXT: ret
%1 = alloca double, align 8
%2 = bitcast double* %1 to i8*
call void @notdead(i8* %2)
@@ -159,6 +221,20 @@ define void @fsd_stack(double %a, double %b) nounwind {
; RV32IFD-NEXT: lw ra, 28(sp)
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fsd_stack:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp)
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fsd ft0, 0(sp)
+; RV64IFD-NEXT: mv a0, sp
+; RV64IFD-NEXT: call notdead
+; RV64IFD-NEXT: ld ra, 8(sp)
+; RV64IFD-NEXT: addi sp, sp, 16
+; RV64IFD-NEXT: ret
%1 = fadd double %a, %b ; force store from FPR64
%2 = alloca double, align 8
store double %1, double* %2
@@ -179,6 +255,13 @@ define void @fsd_trunc(float* %a, double %b) nounwind noinline optnone {
; RV32IFD-NEXT: fsw ft0, 0(a0)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
+;
+; RV64IFD-LABEL: fsd_trunc:
+; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fcvt.s.d ft0, ft0
+; RV64IFD-NEXT: fsw ft0, 0(a0)
+; RV64IFD-NEXT: ret
%1 = fptrunc double %b to float
store float %1, float* %a, align 4
ret void
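The calling-convention change is visible throughout the checks above: with the RV64 soft float ABI each f64 argument arrives in a single GPR, so codegen bridges into the FPRs with fmv.d.x and back out with fmv.x.d. A minimal sketch of that shape (illustrative, assuming the same soft float ABI as the tests above; not an autogenerated check):

; Hypothetical function, not part of this test file.
define double @fadd_d(double %a, double %b) nounwind {
  %1 = fadd double %a, %b
  ret double %1
}
; Expected RV64IFD shape, mirroring the fsd/fld_fsd_global patterns above:
;   fmv.d.x ft0, a1      ; move f64 argument out of its integer register
;   fmv.d.x ft1, a0
;   fadd.d  ft0, ft1, ft0
;   fmv.x.d a0, ft0      ; move the f64 result back to an integer register
;   ret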