author    Alex Bradbury <asb@lowrisc.org>    2018-03-20 13:26:12 +0000
committer Alex Bradbury <asb@lowrisc.org>    2018-03-20 13:26:12 +0000
commit    80c8eb769649c6178ff561812d441640c4eaac95 (patch)
tree      c28f0a535d891930758b902c218c918539a473f8 /llvm/test
parent    7b3d162fba795b6db258ab835b43953e59629478 (diff)
[RISCV] Add codegen for RV32F floating point load/store
As part of this, add support for load/store from the constant pool. This is
used to materialise f32 constants.

llvm-svn: 327979
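For reference, the constant-pool materialisation pattern the new tests exercise
looks like this (a minimal sketch distilled from the float_imm_op check lines
below; .LCPI1_0 is simply whatever label the compiler emits for the pool entry):

    lui   a0, %hi(.LCPI1_0)      ; upper 20 bits of the pool entry's address
    addi  a0, a0, %lo(.LCPI1_0)  ; lower 12 bits (the test notes this addi could
                                 ; later be folded into the flw)
    flw   ft1, 0(a0)             ; load the f32 constant into an FP register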
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/RISCV/float-imm.ll |  27
-rw-r--r--  llvm/test/CodeGen/RISCV/float-mem.ll |  84
2 files changed, 111 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
new file mode 100644
index 00000000000..a8d032571e1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IF %s
+
+define float @float_imm() nounwind {
+; RV32IF-LABEL: float_imm:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: lui a0, 263313
+; RV32IF-NEXT: addi a0, a0, -37
+; RV32IF-NEXT: ret
+ ret float 3.14159274101257324218750
+}
+
+define float @float_imm_op(float %a) nounwind {
+; TODO: addi should be folded into the flw
+; RV32IF-LABEL: float_imm_op:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a0
+; RV32IF-NEXT: lui a0, %hi(.LCPI1_0)
+; RV32IF-NEXT: addi a0, a0, %lo(.LCPI1_0)
+; RV32IF-NEXT: flw ft1, 0(a0)
+; RV32IF-NEXT: fadd.s ft0, ft0, ft1
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+ %1 = fadd float %a, 1.0
+ ret float %1
+}
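As a quick cross-check of float_imm above (a worked recomputation, not part of
the patch): under the soft float ABI the constant is returned in a0, so it is
materialised directly as its IEEE-754 bit pattern with an integer lui/addi pair:

    lui  a0, 263313      ; a0 = 263313 << 12 = 0x40491000
    addi a0, a0, -37     ; a0 = 0x40491000 - 37 = 0x40490FDB

0x40490FDB is the single-precision encoding of 3.14159274101257324218750 (pi
rounded to float), so no FP instruction or constant pool access is needed here.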
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
new file mode 100644
index 00000000000..2d9f8c8f410
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IF %s
+
+define float @flw(float *%a) nounwind {
+; RV32IF-LABEL: flw:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: flw ft0, 12(a0)
+; RV32IF-NEXT: flw ft1, 0(a0)
+; RV32IF-NEXT: fadd.s ft0, ft1, ft0
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+ %1 = load float, float* %a
+ %2 = getelementptr float, float* %a, i32 3
+ %3 = load float, float* %2
+; Use both loaded values in an FP op to ensure an flw is used, even for the
+; soft float ABI
+ %4 = fadd float %1, %3
+ ret float %4
+}
+
+define void @fsw(float *%a, float %b, float %c) nounwind {
+; Use %b and %c in an FP op to ensure floating point registers are used, even
+; for the soft float ABI
+; RV32IF-LABEL: fsw:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a2
+; RV32IF-NEXT: fmv.w.x ft1, a1
+; RV32IF-NEXT: fadd.s ft0, ft1, ft0
+; RV32IF-NEXT: fsw ft0, 32(a0)
+; RV32IF-NEXT: fsw ft0, 0(a0)
+; RV32IF-NEXT: ret
+ %1 = fadd float %b, %c
+ store float %1, float* %a
+ %2 = getelementptr float, float* %a, i32 8
+ store float %1, float* %2
+ ret void
+}
+
+; Check load and store to a global
+@G = global float 0.0
+
+define float @flw_fsw_global(float %a, float %b) nounwind {
+; Use %a and %b in an FP op to ensure floating point registers are used, even
+; for the soft float ABI
+; RV32IF-LABEL: flw_fsw_global:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a1
+; RV32IF-NEXT: fmv.w.x ft1, a0
+; RV32IF-NEXT: fadd.s ft0, ft1, ft0
+; RV32IF-NEXT: lui a0, %hi(G)
+; RV32IF-NEXT: flw ft1, %lo(G)(a0)
+; RV32IF-NEXT: fsw ft0, %lo(G)(a0)
+; RV32IF-NEXT: lui a0, %hi(G+36)
+; RV32IF-NEXT: flw ft1, %lo(G+36)(a0)
+; RV32IF-NEXT: fsw ft0, %lo(G+36)(a0)
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+ %1 = fadd float %a, %b
+ %2 = load volatile float, float* @G
+ store float %1, float* @G
+ %3 = getelementptr float, float* @G, i32 9
+ %4 = load volatile float, float* %3
+ store float %1, float* %3
+ ret float %1
+}
+
+; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
+define float @flw_fsw_constant(float %a) nounwind {
+; RV32IF-LABEL: flw_fsw_constant:
+; RV32IF: # %bb.0:
+; RV32IF-NEXT: fmv.w.x ft0, a0
+; RV32IF-NEXT: lui a0, 912092
+; RV32IF-NEXT: flw ft1, -273(a0)
+; RV32IF-NEXT: fadd.s ft0, ft0, ft1
+; RV32IF-NEXT: fsw ft0, -273(a0)
+; RV32IF-NEXT: fmv.x.w a0, ft0
+; RV32IF-NEXT: ret
+ %1 = inttoptr i32 3735928559 to float*
+ %2 = load volatile float, float* %1
+ %3 = fadd float %a, %2
+ store float %3, float* %1
+ ret float %3
+}
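For reference, the address arithmetic behind flw_fsw_constant (a worked
recomputation of the hi/lo split described in the comment above, not part of
the patch):

    3735928559 = 0xDEADBEEF
    low 12 bits  = 0xEEF = 3823, which sign-extends to 3823 - 4096 = -273
    high 20 bits = 0xDEADB; bit 11 of the low part is set, so 1 is added,
                   giving 0xDEADC = 912092
    check: (912092 << 12) - 273 = 0xDEADC000 - 273 = 0xDEADBEEF

This is why the checks expect lui a0, 912092 paired with a -273 offset on the
flw and fsw.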