Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/RISCV/mem.ll       25 +++++++++++++++++++++++++
-rw-r--r--  llvm/test/CodeGen/RISCV/wide-mem.ll  18 ++++++++++++++++++
2 files changed, 43 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index ef3c376ca87..b06382f8742 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -159,6 +159,31 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
   ret i16 %7
 }
 
+; Check load and store to a global
+@G = global i32 0
+
+define i32 @lw_sw_global(i32 %a) nounwind {
+; TODO: the addi should be folded in to the lw/sw operations
+; RV32I-LABEL: lw_sw_global:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, %hi(G)
+; RV32I-NEXT: addi a2, a1, %lo(G)
+; RV32I-NEXT: lw a1, 0(a2)
+; RV32I-NEXT: sw a0, 0(a2)
+; RV32I-NEXT: lui a2, %hi(G+36)
+; RV32I-NEXT: addi a2, a2, %lo(G+36)
+; RV32I-NEXT: lw a3, 0(a2)
+; RV32I-NEXT: sw a0, 0(a2)
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = load volatile i32, i32* @G
+  store i32 %a, i32* @G
+  %2 = getelementptr i32, i32* @G, i32 9
+  %3 = load volatile i32, i32* %2
+  store i32 %a, i32* %2
+  ret i32 %1
+}
+
 ; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
 define i32 @lw_sw_constant(i32 %a) nounwind {
 ; TODO: the addi should be folded in to the lw/sw
diff --git a/llvm/test/CodeGen/RISCV/wide-mem.ll b/llvm/test/CodeGen/RISCV/wide-mem.ll
index 917b2147b95..18ab52aaf13 100644
--- a/llvm/test/CodeGen/RISCV/wide-mem.ll
+++ b/llvm/test/CodeGen/RISCV/wide-mem.ll
@@ -14,3 +14,21 @@ define i64 @load_i64(i64 *%a) nounwind {
   %1 = load i64, i64* %a
   ret i64 %1
 }
+
+@val64 = local_unnamed_addr global i64 2863311530, align 8
+
+; TODO: codegen on this should be improved. It shouldn't be necessary to
+; generate two addi
+define i64 @load_i64_global() nounwind {
+; RV32I-LABEL: load_i64_global:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a0, %hi(val64)
+; RV32I-NEXT: addi a0, a0, %lo(val64)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: lui a1, %hi(val64+4)
+; RV32I-NEXT: addi a1, a1, %lo(val64+4)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = load i64, i64* @val64
+  ret i64 %1
+}
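Note: the TODO comments in both tests describe the same missed optimisation. On RISC-V, the low 12 bits of a symbol's address (%lo) can be folded into the immediate offset of a lw/sw instead of being materialised with a separate addi. A rough sketch of the folded form the mem.ll TODO asks for, keeping the shape of the current check lines (register choices here are illustrative assumptions, not committed output):

    lw_sw_global:
            lui   a2, %hi(G)
            lw    a1, %lo(G)(a2)        # %lo(G) folded into the load offset
            sw    a0, %lo(G)(a2)        # ...and into the store offset
            lui   a2, %hi(G+36)
            lw    a3, %lo(G+36)(a2)
            sw    a0, %lo(G+36)(a2)
            addi  a0, a1, 0
            jalr  zero, ra, 0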

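For load_i64_global in wide-mem.ll, the improvement goes further: because val64 has align 8, val64+4 resolves to the same %hi value as val64, so a single lui can serve both word loads and neither addi is needed. A hedged sketch of that shape (again illustrative, not actual check lines):

    load_i64_global:
            lui   a1, %hi(val64)
            lw    a0, %lo(val64)(a1)    # low word
            lw    a1, %lo(val64+4)(a1)  # high word reuses the same %hi base
            jalr  zero, ra, 0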
