Diffstat (limited to 'llvm/test/CodeGen/RISCV/urem-lkk.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/urem-lkk.ll  68
1 file changed, 7 insertions, 61 deletions
diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 374ce07b2ac..5286ad50786 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -8,19 +8,15 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefixes=CHECK,RV64IM %s
 
-define i32 @fold_urem_positive_odd(i32 %x) {
+define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_odd:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp)
-; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: fold_urem_positive_odd:
@@ -35,23 +31,18 @@ define i32 @fold_urem_positive_odd(i32 %x) {
 ; RV32IM-NEXT:    addi a2, zero, 95
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
-; RV32IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_urem_positive_odd:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp)
-; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: fold_urem_positive_odd:
@@ -74,26 +65,21 @@ define i32 @fold_urem_positive_odd(i32 %x) {
 ; RV64IM-NEXT:    addi a2, zero, 95
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a1
-; RV64IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 95
   ret i32 %1
 }
 
-define i32 @fold_urem_positive_even(i32 %x) {
+define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32I-LABEL: fold_urem_positive_even:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    addi a1, zero, 1060
 ; RV32I-NEXT:    call __umodsi3
 ; RV32I-NEXT:    lw ra, 12(sp)
-; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: fold_urem_positive_even:
@@ -105,23 +91,18 @@ define i32 @fold_urem_positive_even(i32 %x) {
 ; RV32IM-NEXT:    addi a2, zero, 1060
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
-; RV32IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_urem_positive_even:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 1060
 ; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp)
-; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: fold_urem_positive_even:
@@ -141,7 +122,6 @@ define i32 @fold_urem_positive_even(i32 %x) {
 ; RV64IM-NEXT:    addi a2, zero, 1060
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a1
-; RV64IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 1060
   ret i32 %1
@@ -149,17 +129,13 @@ define i32 @fold_urem_positive_even(i32 %x) {
 }
 
 ; Don't fold if we can combine urem with udiv.
-define i32 @combine_urem_udiv(i32 %x) {
+define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV32I-LABEL: combine_urem_udiv:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
 ; RV32I-NEXT:    sw s0, 8(sp)
 ; RV32I-NEXT:    sw s1, 4(sp)
-; RV32I-NEXT:    .cfi_offset ra, -4
-; RV32I-NEXT:    .cfi_offset s0, -8
-; RV32I-NEXT:    .cfi_offset s1, -12
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    addi a1, zero, 95
 ; RV32I-NEXT:    call __umodsi3
@@ -171,11 +147,7 @@ define i32 @combine_urem_udiv(i32 %x) {
 ; RV32I-NEXT:    lw s1, 4(sp)
 ; RV32I-NEXT:    lw s0, 8(sp)
 ; RV32I-NEXT:    lw ra, 12(sp)
-; RV32I-NEXT:    .cfi_restore ra
-; RV32I-NEXT:    .cfi_restore s0
-; RV32I-NEXT:    .cfi_restore s1
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: combine_urem_udiv:
@@ -191,19 +163,14 @@ define i32 @combine_urem_udiv(i32 %x) {
 ; RV32IM-NEXT:    mul a2, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a2
 ; RV32IM-NEXT:    add a0, a0, a1
-; RV32IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: combine_urem_udiv:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
-; RV64I-NEXT:    .cfi_def_cfa_offset 32
 ; RV64I-NEXT:    sd ra, 24(sp)
 ; RV64I-NEXT:    sd s0, 16(sp)
 ; RV64I-NEXT:    sd s1, 8(sp)
-; RV64I-NEXT:    .cfi_offset ra, -8
-; RV64I-NEXT:    .cfi_offset s0, -16
-; RV64I-NEXT:    .cfi_offset s1, -24
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli s0, a0, 32
 ; RV64I-NEXT:    addi a1, zero, 95
@@ -217,11 +184,7 @@ define i32 @combine_urem_udiv(i32 %x) {
 ; RV64I-NEXT:    ld s1, 8(sp)
 ; RV64I-NEXT:    ld s0, 16(sp)
 ; RV64I-NEXT:    ld ra, 24(sp)
-; RV64I-NEXT:    .cfi_restore ra
-; RV64I-NEXT:    .cfi_restore s0
-; RV64I-NEXT:    .cfi_restore s1
 ; RV64I-NEXT:    addi sp, sp, 32
-; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: combine_urem_udiv:
@@ -245,7 +208,6 @@ define i32 @combine_urem_udiv(i32 %x) {
 ; RV64IM-NEXT:    mul a2, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a2
 ; RV64IM-NEXT:    add a0, a0, a1
-; RV64IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 95
   %2 = udiv i32 %x, 95
@@ -254,81 +216,66 @@ define i32 @combine_urem_udiv(i32 %x) {
 }
 
 ; Don't fold for divisors that are a power of two.
-define i32 @dont_fold_urem_power_of_two(i32 %x) {
+define i32 @dont_fold_urem_power_of_two(i32 %x) nounwind {
 ; CHECK-LABEL: dont_fold_urem_power_of_two:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 63
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %1 = urem i32 %x, 64
   ret i32 %1
 }
 
 ; Don't fold if the divisor is one.
-define i32 @dont_fold_urem_one(i32 %x) {
+define i32 @dont_fold_urem_one(i32 %x) nounwind {
 ; CHECK-LABEL: dont_fold_urem_one:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mv a0, zero
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %1 = urem i32 %x, 1
   ret i32 %1
 }
 
 ; Don't fold if the divisor is 2^32.
-define i32 @dont_fold_urem_i32_umax(i32 %x) {
+define i32 @dont_fold_urem_i32_umax(i32 %x) nounwind {
 ; CHECK-LABEL: dont_fold_urem_i32_umax:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %1 = urem i32 %x, 4294967296
   ret i32 %1
 }
 
 ; Don't fold i64 urem
-define i64 @dont_fold_urem_i64(i64 %x) {
+define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32I-LABEL: dont_fold_urem_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
-; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    addi a2, zero, 98
 ; RV32I-NEXT:    mv a3, zero
 ; RV32I-NEXT:    call __umoddi3
 ; RV32I-NEXT:    lw ra, 12(sp)
-; RV32I-NEXT:    .cfi_restore ra
 ; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: dont_fold_urem_i64:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
-; RV32IM-NEXT:    .cfi_def_cfa_offset 16
 ; RV32IM-NEXT:    sw ra, 12(sp)
-; RV32IM-NEXT:    .cfi_offset ra, -4
 ; RV32IM-NEXT:    addi a2, zero, 98
 ; RV32IM-NEXT:    mv a3, zero
 ; RV32IM-NEXT:    call __umoddi3
 ; RV32IM-NEXT:    lw ra, 12(sp)
-; RV32IM-NEXT:    .cfi_restore ra
 ; RV32IM-NEXT:    addi sp, sp, 16
-; RV32IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: dont_fold_urem_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp)
-; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    addi a1, zero, 98
 ; RV64I-NEXT:    call __umoddi3
 ; RV64I-NEXT:    ld ra, 8(sp)
-; RV64I-NEXT:    .cfi_restore ra
 ; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: dont_fold_urem_i64:
@@ -347,7 +294,6 @@ define i64 @dont_fold_urem_i64(i64 %x) {
 ; RV64IM-NEXT:    addi a2, zero, 98
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a1
-; RV64IM-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IM-NEXT:    ret
   %1 = urem i64 %x, 98
   ret i64 %1
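Two short sketches follow for readers who want to see, outside the FileCheck noise, what this diff exercises. Both are illustrative .ll snippets written for this summary, not files from the LLVM tree; the function names (@spills_ra, @spills_ra_nounwind, @urem95_expanded) and the derived constant below are assumptions, not quotes from the test.

First, why every .cfi_* check line disappears: llc only emits call-frame information for functions that may need to unwind, so marking each test function nounwind (as the diff does) drops the .cfi_def_cfa_offset/.cfi_offset/.cfi_restore directives from the generated RISC-V assembly, exactly as the deleted check lines above show. A minimal repro, assuming -mtriple=riscv32:

; With llc -mtriple=riscv32, @spills_ra (which saves ra and may unwind
; through the call) gets .cfi_* directives; @spills_ra_nounwind gets none.
declare i32 @callee(i32)

define i32 @spills_ra(i32 %x) {
  %r = call i32 @callee(i32 %x)
  ret i32 %r
}

define i32 @spills_ra_nounwind(i32 %x) nounwind {
  %r = call i32 @callee(i32 %x)
  ret i32 %r
}

Second, the "fold" the test names: with the M extension, urem by a constant such as 95 is lowered to a multiply-high by a magic constant instead of a __umodsi3 libcall. The RV32IM body is elided context in these hunks, so the constant here is derived (2^32 + 1491936009 = ceil(2^39 / 95)) rather than copied from the test; treat this as a hand-expanded sketch of the mulhu/sub/srli/add/srli/mul/sub pattern the visible check lines tail into:

; Hand-expanded equivalent of "urem i32 %x, 95":
; q = floor(x * (2^32 + 1491936009) / 2^39) = x / 95, r = x - 95 * q.
define i32 @urem95_expanded(i32 %x) nounwind {
  %xe = zext i32 %x to i64
  %p  = mul i64 %xe, 1491936009     ; low 32 bits of the magic constant
  %hi = lshr i64 %p, 32             ; mulhu x, magic
  %q0 = trunc i64 %hi to i32
  %d  = sub i32 %x, %q0             ; fix up the implicit 2^32 term
  %d1 = lshr i32 %d, 1
  %q1 = add i32 %d1, %q0            ; q1 = x * (2^32 + magic) >> 33
  %q  = lshr i32 %q1, 6             ; total shift of 39: q = x / 95
  %m  = mul i32 %q, 95
  %r  = sub i32 %x, %m              ; r = x % 95
  ret i32 %r
}

Spot check: for %x = 12345, %q0 = 4288, %q1 = 8316, %q = 129, and the result is 12345 - 129 * 95 = 90, which matches 12345 urem 95. The power-of-two and divide-by-one cases need no such expansion, which is why their CHECK bodies are just andi a0, a0, 63 and mv a0, zero.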

