Diffstat (limited to 'llvm/test/CodeGen/RISCV/atomic-rmw.ll')
-rw-r--r-- | llvm/test/CodeGen/RISCV/atomic-rmw.ll | 3017
1 file changed, 3017 insertions, 0 deletions
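For context, the RV64IA check lines added below are the expansions llc produces for sub-word atomicrmw when only the A extension is available: i8/i16 operations are lowered to a word-aligned lr.w/sc.w loop that shifts and masks the narrow value at its bit offset, while and/or/xor can use amoand.w/amoor.w/amoxor.w directly. A minimal standalone reduction is sketched here as a hedged aid to reading the diff; the file name xchg-i8.ll and the loosened check patterns are illustrative and not part of the patch, but the RUN invocation and the IR match the test below (typed pointers, as in the tree this patch targets).

; Hypothetical reduced file xchg-i8.ll, checked with the RUN line this patch adds:
;   llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < xchg-i8.ll \
;     | FileCheck -check-prefix=RV64IA xchg-i8.ll
; The A extension has no byte-sized AMO, so the i8 exchange becomes a masked
; lr.w/sc.w loop on the containing aligned word; these checks only pin the
; load-reserved/store-conditional pair rather than the full -NEXT sequence.
define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) {
; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64IA: lr.w a{{[0-9]+}}, (a0)
; RV64IA: sc.w a{{[0-9]+}}, a{{[0-9]+}}, (a0)
  %1 = atomicrmw xchg i8* %a, i8 %b monotonic
  ret i8 %1
}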
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll index 256a669c1e8..1bd5e9e7f5f 100644 --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -5,6 +5,8 @@ ; RUN: | FileCheck -check-prefix=RV32IA %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IA %s define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) { ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic: @@ -47,6 +49,27 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB0_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b monotonic ret i8 %1 } @@ -92,6 +115,27 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB1_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acquire ret i8 %1 } @@ -137,6 +181,27 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB2_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b release ret i8 %1 } @@ -182,6 +247,27 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; 
RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB3_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acq_rel ret i8 %1 } @@ -227,6 +313,27 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB4_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b seq_cst ret i8 %1 } @@ -272,6 +379,27 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB5_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b monotonic ret i8 %1 } @@ -317,6 +445,27 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB6_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acquire ret i8 %1 } @@ -362,6 +511,27 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: 
.LBB7_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB7_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b release ret i8 %1 } @@ -407,6 +577,27 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB8_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acq_rel ret i8 %1 } @@ -452,6 +643,27 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB9_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b seq_cst ret i8 %1 } @@ -497,6 +709,27 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB10_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b monotonic ret i8 %1 } @@ -542,6 +775,27 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: 
sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB11_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acquire ret i8 %1 } @@ -587,6 +841,27 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB12_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b release ret i8 %1 } @@ -632,6 +907,27 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB13_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acq_rel ret i8 %1 } @@ -677,6 +973,27 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB14_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b seq_cst ret i8 %1 } @@ -716,6 +1033,21 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b monotonic ret i8 %1 } @@ -755,6 
+1087,21 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acquire ret i8 %1 } @@ -794,6 +1141,21 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b release ret i8 %1 } @@ -833,6 +1195,21 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acq_rel ret i8 %1 } @@ -872,6 +1249,21 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sll a3, a3, a2 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: or a1, a3, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b seq_cst ret i8 %1 } @@ -918,6 +1310,28 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB20_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b monotonic ret i8 %1 } @@ -964,6 +1378,28 @@ define i8 
@atomicrmw_nand_i8_acquire(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB21_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acquire ret i8 %1 } @@ -1010,6 +1446,28 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB22_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b release ret i8 %1 } @@ -1056,6 +1514,28 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB23_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1102,6 +1582,28 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a3, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB24_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1137,6 
+1639,17 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b monotonic ret i8 %1 } @@ -1172,6 +1685,17 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acquire ret i8 %1 } @@ -1207,6 +1731,17 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b release ret i8 %1 } @@ -1242,6 +1777,17 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1277,6 +1823,17 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1312,6 +1869,17 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b monotonic ret i8 %1 } @@ -1347,6 +1915,17 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; 
RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acquire ret i8 %1 } @@ -1382,6 +1961,17 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b release ret i8 %1 } @@ -1417,6 +2007,17 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acq_rel ret i8 %1 } @@ -1452,6 +2053,17 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b seq_cst ret i8 %1 } @@ -1572,6 +2184,36 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB35_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB35_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b monotonic ret i8 %1 } @@ -1698,6 +2340,36 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: 
Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB36_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB36_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acquire ret i8 %1 } @@ -1824,6 +2496,36 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB37_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB37_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b release ret i8 %1 } @@ -1956,6 +2658,36 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB38_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB38_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acq_rel ret i8 %1 } @@ -2082,6 +2814,36 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; 
RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB39_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB39_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b seq_cst ret i8 %1 } @@ -2202,6 +2964,36 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB40_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB40_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b monotonic ret i8 %1 } @@ -2328,6 +3120,36 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB41_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB41_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acquire ret i8 %1 } @@ -2454,6 +3276,36 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: 
slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB42_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB42_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b release ret i8 %1 } @@ -2586,6 +3438,36 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB43_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB43_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acq_rel ret i8 %1 } @@ -2712,6 +3594,36 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 56 +; RV64IA-NEXT: srai a1, a1, 56 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB44_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB44_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB44_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b seq_cst ret i8 %1 } @@ -2823,6 +3735,31 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, 
a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB45_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB45_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b monotonic ret i8 %1 } @@ -2940,6 +3877,31 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB46_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB46_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acquire ret i8 %1 } @@ -3057,6 +4019,31 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB47_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB47_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b release ret i8 %1 } @@ -3180,6 +4167,31 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB48_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; 
RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB48_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acq_rel ret i8 %1 } @@ -3297,6 +4309,31 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a3, a1, .LBB49_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB49_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b seq_cst ret i8 %1 } @@ -3408,6 +4445,31 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB50_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB50_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b monotonic ret i8 %1 } @@ -3525,6 +4587,31 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB51_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB51_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acquire ret i8 %1 } @@ -3642,6 +4729,31 @@ define i8 @atomicrmw_umin_i8_release(i8 
*%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB52_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB52_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b release ret i8 %1 } @@ -3765,6 +4877,31 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB53_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB53_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acq_rel ret i8 %1 } @@ -3882,6 +5019,31 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: addi a3, zero, 255 +; RV64IA-NEXT: sllw a6, a3, a2 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a3, .LBB54_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB54_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b seq_cst ret i8 %1 } @@ -3928,6 +5090,28 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 
+; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB55_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b monotonic ret i16 %1 } @@ -3974,6 +5158,28 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB56_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acquire ret i16 %1 } @@ -4020,6 +5226,28 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB57_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b release ret i16 %1 } @@ -4066,6 +5294,28 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB58_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4112,6 +5362,28 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 
+; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, zero, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB59_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4158,6 +5430,28 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB60_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b monotonic ret i16 %1 } @@ -4204,6 +5498,28 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB61_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acquire ret i16 %1 } @@ -4250,6 +5566,28 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB62_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b release ret i16 %1 } @@ -4296,6 +5634,28 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; 
RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB63_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4342,6 +5702,28 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB64_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4388,6 +5770,28 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB65_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b monotonic ret i16 %1 } @@ -4434,6 +5838,28 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB66_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acquire ret i16 %1 } @@ -4480,6 +5906,28 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; 
RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB67_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b release ret i16 %1 } @@ -4526,6 +5974,28 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB68_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4572,6 +6042,28 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB69_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4612,6 +6104,22 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b monotonic ret i16 %1 } @@ -4652,6 +6160,22 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, 
a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acquire ret i16 %1 } @@ -4692,6 +6216,22 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b release ret i16 %1 } @@ -4732,6 +6272,22 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acq_rel ret i16 %1 } @@ -4772,6 +6328,22 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sll a1, a1, a3 +; RV64IA-NEXT: sll a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 +; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b seq_cst ret i16 %1 } @@ -4819,6 +6391,29 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB75_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b monotonic ret i16 %1 } @@ -4866,6 +6461,29 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; 
RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB76_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acquire ret i16 %1 } @@ -4913,6 +6531,29 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB77_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b release ret i16 %1 } @@ -4960,6 +6601,29 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB78_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5007,6 +6671,29 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 +; RV64IA-NEXT: not a5, a5 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB79_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5044,6 +6731,19 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_monotonic: +; RV64IA: # %bb.0: +; 
RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b monotonic ret i16 %1 } @@ -5081,6 +6781,19 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acquire ret i16 %1 } @@ -5118,6 +6831,19 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b release ret i16 %1 } @@ -5155,6 +6881,19 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5192,6 +6931,19 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5229,6 +6981,19 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b monotonic ret i16 %1 } @@ -5266,6 +7031,19 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_acquire: +; RV64IA: # 
%bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acquire ret i16 %1 } @@ -5303,6 +7081,19 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b release ret i16 %1 } @@ -5340,6 +7131,19 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acq_rel ret i16 %1 } @@ -5377,6 +7181,19 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a2, a0, 3 +; RV64IA-NEXT: slli a2, a2, 3 +; RV64IA-NEXT: sll a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b seq_cst ret i16 %1 } @@ -5498,6 +7315,37 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB90_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB90_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b monotonic ret i16 %1 } @@ -5625,6 +7473,37 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 
64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB91_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB91_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acquire ret i16 %1 } @@ -5752,6 +7631,37 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB92_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB92_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b release ret i16 %1 } @@ -5885,6 +7795,37 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB93_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB93_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* 
%a, i16 %b acq_rel ret i16 %1 } @@ -6012,6 +7953,37 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB94_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB94_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b seq_cst ret i16 %1 } @@ -6133,6 +8105,37 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB95_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB95_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b monotonic ret i16 %1 } @@ -6260,6 +8263,37 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB96_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB96_3: # in Loop: 
Header=BB96_1 Depth=1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB96_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acquire ret i16 %1 } @@ -6387,6 +8421,37 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB97_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB97_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b release ret i16 %1 } @@ -6520,6 +8585,37 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB98_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB98_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acq_rel ret i16 %1 } @@ -6647,6 +8743,37 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 +; RV64IA-NEXT: slli a1, a1, 48 +; RV64IA-NEXT: srai a1, a1, 48 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB99_3 +; 
RV64IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 +; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB99_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a5, a2 +; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b seq_cst ret i16 %1 } @@ -6767,6 +8894,32 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB100_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB100_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB100_3: # in Loop: Header=BB100_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB100_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b monotonic ret i16 %1 } @@ -6893,6 +9046,32 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB101_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB101_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB101_3: # in Loop: Header=BB101_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB101_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acquire ret i16 %1 } @@ -7019,6 +9198,32 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB102_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB102_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB102_3: # in Loop: Header=BB102_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB102_1 
+; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b release ret i16 %1 } @@ -7151,6 +9356,32 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 72(sp) ; RV64I-NEXT: addi sp, sp, 80 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB103_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB103_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB103_3: # in Loop: Header=BB103_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB103_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acq_rel ret i16 %1 } @@ -7277,6 +9508,32 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a2, a1, .LBB104_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB104_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB104_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b seq_cst ret i16 %1 } @@ -7397,6 +9654,32 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB105_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB105_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB105_3: # in Loop: Header=BB105_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB105_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b monotonic ret i16 %1 } @@ -7523,6 +9806,32 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: 
addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB106_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB106_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB106_3: # in Loop: Header=BB106_1 Depth=1 +; RV64IA-NEXT: sc.w a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB106_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acquire ret i16 %1 } @@ -7649,6 +9958,32 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB107_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB107_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB107_3: # in Loop: Header=BB107_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB107_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b release ret i16 %1 } @@ -7781,6 +10116,32 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 72(sp) ; RV64I-NEXT: addi sp, sp, 80 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB108_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB108_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB108_3: # in Loop: Header=BB108_1 Depth=1 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB108_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acq_rel ret i16 %1 } @@ -7907,6 +10268,32 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: andi a3, a0, 3 +; RV64IA-NEXT: slli a3, a3, 3 +; RV64IA-NEXT: sllw 
a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 +; RV64IA-NEXT: bgeu a1, a2, .LBB109_3 +; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB109_1 Depth=1 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB109_1 +; RV64IA-NEXT: # %bb.4: +; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b seq_cst ret i16 %1 } @@ -7936,6 +10323,11 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b monotonic ret i32 %1 } @@ -7965,6 +10357,11 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acquire ret i32 %1 } @@ -7994,6 +10391,11 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b release ret i32 %1 } @@ -8023,6 +10425,11 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8052,6 +10459,11 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xchg_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8081,6 +10493,11 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b monotonic ret i32 %1 } @@ -8110,6 +10527,11 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b acquire ret i32 %1 } @@ -8139,6 +10561,11 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b release ret i32 %1 } @@ -8168,6 +10595,11 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind 
{ ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8197,6 +10629,11 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_add_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw add i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8227,6 +10664,12 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b monotonic ret i32 %1 } @@ -8257,6 +10700,12 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acquire ret i32 %1 } @@ -8287,6 +10736,12 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b release ret i32 %1 } @@ -8317,6 +10772,12 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8347,6 +10808,12 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_sub_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: neg a1, a1 +; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8376,6 +10843,11 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b monotonic ret i32 %1 } @@ -8405,6 +10877,11 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b acquire ret i32 %1 } @@ -8434,6 +10911,11 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b release ret i32 %1 } @@ -8463,6 +10945,11 @@ define i32 
@atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8492,6 +10979,11 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_and_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw and i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8528,6 +11020,18 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB130_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB130_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b monotonic ret i32 %1 } @@ -8564,6 +11068,18 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB131_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB131_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acquire ret i32 %1 } @@ -8600,6 +11116,18 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB132_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB132_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b release ret i32 %1 } @@ -8636,6 +11164,18 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB133_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aq a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB133_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8672,6 +11212,18 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_nand_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: .LBB134_1: # =>This Inner Loop Header: Depth=1 +; RV64IA-NEXT: lr.w.aqrl a2, (a0) +; RV64IA-NEXT: and a3, a2, a1 +; RV64IA-NEXT: not a3, a3 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB134_1 +; RV64IA-NEXT: # %bb.2: +; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b seq_cst ret i32 
%1 } @@ -8701,6 +11253,11 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b monotonic ret i32 %1 } @@ -8730,6 +11287,11 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b acquire ret i32 %1 } @@ -8759,6 +11321,11 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b release ret i32 %1 } @@ -8788,6 +11355,11 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8817,6 +11389,11 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_or_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw or i32* %a, i32 %b seq_cst ret i32 %1 } @@ -8846,6 +11423,11 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b monotonic ret i32 %1 } @@ -8875,6 +11457,11 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b acquire ret i32 %1 } @@ -8904,6 +11491,11 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b release ret i32 %1 } @@ -8933,6 +11525,11 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b acq_rel ret i32 %1 } @@ -8962,6 +11559,11 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_xor_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw xor i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9048,6 +11650,11 @@ define i32 
@atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b monotonic ret i32 %1 } @@ -9140,6 +11747,11 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b acquire ret i32 %1 } @@ -9232,6 +11844,11 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b release ret i32 %1 } @@ -9330,6 +11947,11 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b acq_rel ret i32 %1 } @@ -9422,6 +12044,11 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_max_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomax.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw max i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9508,6 +12135,11 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 40(sp) ; RV64I-NEXT: addi sp, sp, 48 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_monotonic: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b monotonic ret i32 %1 } @@ -9600,6 +12232,11 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_acquire: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aq a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b acquire ret i32 %1 } @@ -9692,6 +12329,11 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_release: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.rl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b release ret i32 %1 } @@ -9790,6 +12432,11 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_acq_rel: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b acq_rel ret i32 %1 } @@ -9882,6 +12529,11 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: ld ra, 56(sp) ; RV64I-NEXT: addi sp, sp, 64 ; RV64I-NEXT: ret +; +; RV64IA-LABEL: atomicrmw_min_i32_seq_cst: +; RV64IA: # %bb.0: +; RV64IA-NEXT: amomin.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: ret %1 = atomicrmw min i32* %a, i32 %b seq_cst ret i32 %1 } @@ -9968,6 +12620,11 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, 
@@ -9968,6 +12620,11 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b monotonic
ret i32 %1
}
@@ -10060,6 +12717,11 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b acquire
ret i32 %1
}
@@ -10152,6 +12814,11 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b release
ret i32 %1
}
@@ -10250,6 +12917,11 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b acq_rel
ret i32 %1
}
@@ -10342,6 +13014,11 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i32* %a, i32 %b seq_cst
ret i32 %1
}
@@ -10428,6 +13105,11 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b monotonic
ret i32 %1
}
@@ -10520,6 +13202,11 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b acquire
ret i32 %1
}
@@ -10612,6 +13299,11 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b release
ret i32 %1
}
@@ -10710,6 +13402,11 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b acq_rel
ret i32 %1
}
@@ -10802,6 +13499,11 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.w.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i32* %a, i32 %b seq_cst
ret i32 %1
}
@@ -10836,6 +13538,11 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -10870,6 +13577,11 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b acquire
ret i64 %1
}
@@ -10904,6 +13616,11 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b release
ret i64 %1
}
@@ -10938,6 +13655,11 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -10972,6 +13694,11 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xchg i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -11006,6 +13733,11 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11040,6 +13772,11 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11074,6 +13811,11 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b release
ret i64 %1
}
@@ -11108,6 +13850,11 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11142,6 +13889,11 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_add_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw add i64* %a, i64 %b seq_cst
ret i64 %1
}
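The hunks that follow cover atomicrmw sub. The A extension has no amosub, so the expected RV64IA code negates the operand and reuses amoadd.d, keeping the same ordering suffix. A minimal sketch with illustrative names that are not part of the patch:

; Illustrative sketch only; expected lowering inferred from the sub hunks below.
define i64 @sub_i64_example(i64* %p, i64 %v) {
; RV64IA-LABEL: sub_i64_example:
; RV64IA: neg a1, a1
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
; RV64IA-NEXT: ret
  %old = atomicrmw sub i64* %p, i64 %v monotonic
  ret i64 %old
}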
@@ -11176,6 +13928,12 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11210,6 +13968,12 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11244,6 +14008,12 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b release
ret i64 %1
}
@@ -11278,6 +14048,12 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11312,6 +14088,12 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: neg a1, a1
+; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw sub i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -11346,6 +14128,11 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11380,6 +14167,11 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11414,6 +14206,11 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b release
ret i64 %1
}
@@ -11448,6 +14245,11 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11482,6 +14284,11 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_and_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoand.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw and i64* %a, i64 %b seq_cst
ret i64 %1
}
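The nand hunks that follow differ from the rest of the patch: there is no AMO instruction for nand, so the expected RV64IA code is an lr.d/sc.d retry loop computing and followed by not, with the acquire/release annotations placed on the lr.d and sc.d as the ordering requires. A sketch with illustrative names (the .LBB label number is whatever the compiler assigns):

; Illustrative sketch only; expected lowering inferred from the nand hunks below.
define i64 @nand_i64_example(i64* %p, i64 %v) {
; RV64IA-LABEL: nand_i64_example:
; RV64IA: lr.d a2, (a0)
; RV64IA-NEXT: and a3, a2, a1
; RV64IA-NEXT: not a3, a3
; RV64IA-NEXT: sc.d a3, a3, (a0)
; RV64IA-NEXT: bnez a3, .LBB
  %old = atomicrmw nand i64* %p, i64 %v monotonic
  ret i64 %old
}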
@@ -11516,6 +14323,18 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB185_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB185_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11550,6 +14369,18 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB186_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB186_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11584,6 +14415,18 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB187_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB187_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b release
ret i64 %1
}
@@ -11618,6 +14461,18 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB188_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aq a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.rl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB188_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11652,6 +14507,18 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: .LBB189_1: # =>This Inner Loop Header: Depth=1
+; RV64IA-NEXT: lr.d.aqrl a2, (a0)
+; RV64IA-NEXT: and a3, a2, a1
+; RV64IA-NEXT: not a3, a3
+; RV64IA-NEXT: sc.d.aqrl a3, a3, (a0)
+; RV64IA-NEXT: bnez a3, .LBB189_1
+; RV64IA-NEXT: # %bb.2:
+; RV64IA-NEXT: mv a0, a2
+; RV64IA-NEXT: ret
%1 = atomicrmw nand i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -11686,6 +14553,11 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11720,6 +14592,11 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11754,6 +14631,11 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b release
ret i64 %1
}
@@ -11788,6 +14670,11 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11822,6 +14709,11 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_or_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw or i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -11856,6 +14748,11 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -11890,6 +14787,11 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b acquire
ret i64 %1
}
@@ -11924,6 +14826,11 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b release
ret i64 %1
}
@@ -11958,6 +14865,11 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -11992,6 +14904,11 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp)
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amoxor.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw xor i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -12148,6 +15065,11 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -12313,6 +15235,11 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b acquire
ret i64 %1
}
@@ -12478,6 +15405,11 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b release
ret i64 %1
}
@@ -12652,6 +15584,11 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -12817,6 +15754,11 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomax.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw max i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -12975,6 +15917,11 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -13142,6 +16089,11 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b acquire
ret i64 %1
}
@@ -13309,6 +16261,11 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b release
ret i64 %1
}
@@ -13485,6 +16442,11 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -13652,6 +16614,11 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomin.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw min i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -13808,6 +16775,11 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -13973,6 +16945,11 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b acquire
ret i64 %1
}
@@ -14138,6 +17115,11 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b release
ret i64 %1
}
@@ -14312,6 +17294,11 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -14477,6 +17464,11 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amomaxu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umax i64* %a, i64 %b seq_cst
ret i64 %1
}
@@ -14635,6 +17627,11 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b monotonic
ret i64 %1
}
@@ -14802,6 +17799,11 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_acquire:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aq a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b acquire
ret i64 %1
}
@@ -14969,6 +17971,11 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_release:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.rl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b release
ret i64 %1
}
@@ -15145,6 +18152,11 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 56(sp)
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b acq_rel
ret i64 %1
}
@@ -15312,6 +18324,11 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; RV64I-NEXT: ld ra, 40(sp)
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
+;
+; RV64IA-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64IA: # %bb.0:
+; RV64IA-NEXT: amominu.d.aqrl a0, a1, (a0)
+; RV64IA-NEXT: ret
%1 = atomicrmw umin i64* %a, i64 %b seq_cst
ret i64 %1
}
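Across all of these hunks the ordering mapping is uniform: monotonic uses the bare AMO, acquire adds .aq, release adds .rl, and both acq_rel and seq_cst use the .aqrl form. A final reduced sketch, again with illustrative names that are not part of the patch:

; Illustrative sketch only; expected lowering inferred from the seq_cst hunks above.
define i64 @umin_i64_seq_cst_example(i64* %p, i64 %v) nounwind {
; RV64IA-LABEL: umin_i64_seq_cst_example:
; RV64IA: amominu.d.aqrl a0, a1, (a0)
; RV64IA-NEXT: ret
  %old = atomicrmw umin i64* %p, i64 %v seq_cst
  ret i64 %old
}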