| author | Alex Bradbury <asb@lowrisc.org> | 2019-01-11 19:46:48 +0000 |
|---|---|---|
| committer | Alex Bradbury <asb@lowrisc.org> | 2019-01-11 19:46:48 +0000 |
| commit | eea0b07028c41f7979d4ddde12efd97db3139c62 (patch) | |
| tree | aa3ea7f154c635df9b49a95a1b31b9ef1b5d2ffe /llvm/test/CodeGen/RISCV/atomic-load-store.ll | |
| parent | 946fe976fded7fb2784e3662e905d63743cf4aec (diff) | |
| download | bcm5719-llvm-eea0b07028c41f7979d4ddde12efd97db3139c62.tar.gz bcm5719-llvm-eea0b07028c41f7979d4ddde12efd97db3139c62.zip | |
[RISCV][NFC] Add CHECK lines for atomic operations on RV64I
As for RV32I, we include these for completeness. Committing now to make it
easier to review the RV64A patch.
llvm-svn: 350962
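Because the new RUN line targets riscv64 without the A extension, every atomic load/store in this test lowers to a libatomic call, with the memory-order constant passed in a1 (loads) or a2 (stores). A minimal C sketch of that mapping (hypothetical example; the function names are illustrative and not part of this patch):

```c
#include <stdint.h>

/* Lowers to "call __atomic_load_1" with the ordering in a1:
   __ATOMIC_ACQUIRE == 2 (relaxed/monotonic == 0, seq_cst == 5),
   matching the constants in the RV64I CHECK lines below. */
uint8_t load_acquire(uint8_t *p) {
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}

/* Lowers to "call __atomic_store_1"; the value travels in a1 and
   the ordering in a2 (__ATOMIC_RELEASE == 3, __ATOMIC_SEQ_CST == 5). */
void store_release(uint8_t *p, uint8_t v) {
  __atomic_store_n(p, v, __ATOMIC_RELEASE);
}
```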
Diffstat (limited to 'llvm/test/CodeGen/RISCV/atomic-load-store.ll')
| -rw-r--r-- | llvm/test/CodeGen/RISCV/atomic-load-store.ll | 322 |
1 files changed, 322 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index ba2b594c1c7..2df3a9fb319 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -3,6 +3,8 @@
 ; RUN: | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
 ; RUN: | FileCheck -check-prefix=RV32IA %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
 
 define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I-LABEL: atomic_load_i8_unordered:
@@ -19,6 +21,16 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lb a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i8_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i8, i8* %a unordered, align 1
 ret i8 %1
 }
@@ -38,6 +50,16 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lb a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i8_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i8, i8* %a monotonic, align 1
 ret i8 %1
 }
@@ -58,6 +80,16 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
 ; RV32IA-NEXT: lb a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i8_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 2
+; RV64I-NEXT: call __atomic_load_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i8, i8* %a acquire, align 1
 ret i8 %1
 }
@@ -79,6 +111,16 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
 ; RV32IA-NEXT: lb a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i8_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __atomic_load_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i8, i8* %a seq_cst, align 1
 ret i8 %1
 }
@@ -98,6 +140,16 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lh a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i16_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i16, i16* %a unordered, align 2
 ret i16 %1
 }
@@ -117,6 +169,16 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lh a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i16_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i16, i16* %a monotonic, align 2
 ret i16 %1
 }
@@ -137,6 +199,16 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
 ; RV32IA-NEXT: lh a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i16_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 2
+; RV64I-NEXT: call __atomic_load_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i16, i16* %a acquire, align 2
 ret i16 %1
 }
@@ -158,6 +230,16 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
 ; RV32IA-NEXT: lh a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i16_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __atomic_load_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i16, i16* %a seq_cst, align 2
 ret i16 %1
 }
@@ -177,6 +259,16 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lw a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i32_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i32, i32* %a unordered, align 4
 ret i32 %1
 }
@@ -196,6 +288,16 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: lw a0, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i32_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i32, i32* %a monotonic, align 4
 ret i32 %1
 }
@@ -216,6 +318,16 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
 ; RV32IA-NEXT: lw a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i32_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 2
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i32, i32* %a acquire, align 4
 ret i32 %1
 }
@@ -237,6 +349,16 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
 ; RV32IA-NEXT: lw a0, 0(a0)
 ; RV32IA-NEXT: fence r, rw
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i32_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __atomic_load_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i32, i32* %a seq_cst, align 4
 ret i32 %1
 }
@@ -261,6 +383,16 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i64_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i64, i64* %a unordered, align 8
 ret i64 %1
 }
@@ -285,6 +417,16 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i64_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i64, i64* %a monotonic, align 8
 ret i64 %1
 }
@@ -309,6 +451,16 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i64_acquire:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 2
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i64, i64* %a acquire, align 8
 ret i64 %1
 }
@@ -333,6 +485,16 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_load_i64_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __atomic_load_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 %1 = load atomic i64, i64* %a seq_cst, align 8
 ret i64 %1
 }
@@ -352,6 +514,16 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sb a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i8_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i8 %b, i8* %a unordered, align 1
 ret void
 }
@@ -371,6 +543,16 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sb a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i8_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i8 %b, i8* %a monotonic, align 1
 ret void
 }
@@ -391,6 +573,16 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sb a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i8_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 3
+; RV64I-NEXT: call __atomic_store_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i8 %b, i8* %a release, align 1
 ret void
 }
@@ -411,6 +603,16 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sb a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i8_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 5
+; RV64I-NEXT: call __atomic_store_1
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i8 %b, i8* %a seq_cst, align 1
 ret void
 }
@@ -430,6 +632,16 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sh a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i16_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i16 %b, i16* %a unordered, align 2
 ret void
 }
@@ -449,6 +661,16 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sh a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i16_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i16 %b, i16* %a monotonic, align 2
 ret void
 }
@@ -469,6 +691,16 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sh a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i16_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 3
+; RV64I-NEXT: call __atomic_store_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i16 %b, i16* %a release, align 2
 ret void
 }
@@ -489,6 +721,16 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sh a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i16_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 5
+; RV64I-NEXT: call __atomic_store_2
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i16 %b, i16* %a seq_cst, align 2
 ret void
 }
@@ -508,6 +750,16 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sw a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i32_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i32 %b, i32* %a unordered, align 4
 ret void
 }
@@ -527,6 +779,16 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32IA: # %bb.0:
 ; RV32IA-NEXT: sw a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i32_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i32 %b, i32* %a monotonic, align 4
 ret void
 }
@@ -547,6 +809,16 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sw a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i32_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 3
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i32 %b, i32* %a release, align 4
 ret void
 }
@@ -567,6 +839,16 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32IA-NEXT: fence rw, w
 ; RV32IA-NEXT: sw a1, 0(a0)
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i32_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 5
+; RV64I-NEXT: call __atomic_store_4
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i32 %b, i32* %a seq_cst, align 4
 ret void
 }
@@ -591,6 +873,16 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i64_unordered:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i64 %b, i64* %a unordered, align 8
 ret void
 }
@@ -615,6 +907,16 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i64_monotonic:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a2, zero
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i64 %b, i64* %a monotonic, align 8
 ret void
 }
@@ -639,6 +941,16 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i64_release:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 3
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i64 %b, i64* %a release, align 8
 ret void
 }
@@ -663,6 +975,16 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT: lw ra, 12(sp)
 ; RV32IA-NEXT: addi sp, sp, 16
 ; RV32IA-NEXT: ret
+;
+; RV64I-LABEL: atomic_store_i64_seq_cst:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a2, zero, 5
+; RV64I-NEXT: call __atomic_store_8
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
 store atomic i64 %b, i64* %a seq_cst, align 8
 ret void
 }
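Note: CHECK lines in this file follow the format produced by llvm/utils/update_llc_test_checks.py, the script normally used to (re)generate assertions for llc tests; rerunning it once the follow-up RV64A patch enables the A extension for riscv64 should replace these __atomic_* libcalls with native atomic load/store sequences.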

