Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll | 30
-rw-r--r-- | llvm/test/CodeGen/RISCV/alu32.ll | 4
-rw-r--r-- | llvm/test/CodeGen/RISCV/blockaddress.ll | 28
-rw-r--r-- | llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll | 547
-rw-r--r-- | llvm/test/CodeGen/RISCV/div.ll | 134
-rw-r--r-- | llvm/test/CodeGen/RISCV/i32-icmp.ll | 114
-rw-r--r-- | llvm/test/CodeGen/RISCV/indirectbr.ll | 36
-rw-r--r-- | llvm/test/CodeGen/RISCV/jumptable.ll | 63
-rw-r--r-- | llvm/test/CodeGen/RISCV/mul.ll | 81
-rw-r--r-- | llvm/test/CodeGen/RISCV/rem.ll | 29
-rw-r--r-- | llvm/test/CodeGen/RISCV/rotl-rotr.ll | 38
-rw-r--r-- | llvm/test/CodeGen/RISCV/sext-zext-trunc.ll | 296
-rw-r--r-- | llvm/test/CodeGen/RISCV/shifts.ll | 45
13 files changed, 1445 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll b/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll
new file mode 100644
index 00000000000..50de47d7c1f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+; Ensure that the ISDOpcodes ADDC, ADDE, SUBC, SUBE are handled correctly
+
+define i64 @addc_adde(i64 %a, i64 %b) {
+; RV32I-LABEL: addc_adde:
+; RV32I: # BB#0:
+; RV32I-NEXT: add a1, a1, a3
+; RV32I-NEXT: add a2, a0, a2
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: add a1, a1, a0
+; RV32I-NEXT: addi a0, a2, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = add i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @subc_sube(i64 %a, i64 %b) {
+; RV32I-LABEL: subc_sube:
+; RV32I: # BB#0:
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sltu a3, a0, a2
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sub i64 %a, %b
+  ret i64 %1
+}
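[Editor's note] The addc_adde and subc_sube checks above encode the usual carry expansion for 64-bit arithmetic on a 32-bit target: the sltu against one addend recovers the carry out of the low word. A minimal IR-level sketch of the same computation, assuming hand-split i32 halves (the function and value names are illustrative, not part of the patch):

define i64 @add64_sketch(i32 %alo, i32 %ahi, i32 %blo, i32 %bhi) {
  %slo = add i32 %alo, %blo
  ; carry out of the low word: the 32-bit sum wrapped iff it is below an addend
  %c = icmp ult i32 %slo, %alo
  %cext = zext i1 %c to i32
  %shi.nc = add i32 %ahi, %bhi
  %shi = add i32 %shi.nc, %cext
  ; reassemble the i64 result from the two halves
  %lo = zext i32 %slo to i64
  %hi = zext i32 %shi to i64
  %hishift = shl i64 %hi, 32
  %res = or i64 %hishift, %lo
  ret i64 %res
}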
diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll
index 1dbcd2fd45d..9aa6058c2a0 100644
--- a/llvm/test/CodeGen/RISCV/alu32.ll
+++ b/llvm/test/CodeGen/RISCV/alu32.ll
@@ -2,6 +2,10 @@
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
 
+; These tests are each targeted at a particular RISC-V ALU instruction. Other
+; files in this folder exercise LLVM IR instructions that don't directly match
+; a RISC-V instruction.
+
 ; Register-immediate instructions
 
 define i32 @addi(i32 %a) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/blockaddress.ll b/llvm/test/CodeGen/RISCV/blockaddress.ll
new file mode 100644
index 00000000000..f51598ff5a7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/blockaddress.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+@addr = global i8* null
+
+define void @test_blockaddress() nounwind {
+; RV32I-LABEL: test_blockaddress:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 0(s0)
+; RV32I-NEXT: lui a0, %hi(addr)
+; RV32I-NEXT: addi a0, a0, %lo(addr)
+; RV32I-NEXT: lui a1, %hi(.Ltmp0)
+; RV32I-NEXT: addi a1, a1, %lo(.Ltmp0)
+; RV32I-NEXT: sw a1, 0(a0)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: jalr zero, a0, 0
+; RV32I-NEXT: .Ltmp0: # Block address taken
+; RV32I-NEXT: .LBB0_1: # %block
+; RV32I-NEXT: lw ra, 0(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
+  %val = load volatile i8*, i8** @addr
+  indirectbr i8* %val, [label %block]
+
+block:
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
new file mode 100644
index 00000000000..f10f7799fbb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -0,0 +1,547 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
+declare i8 @llvm.cttz.i8(i8, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i32 @llvm.ctpop.i32(i32)
+
+define i16 @test_bswap_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bswap_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, 4080
+; RV32I-NEXT: addi a1, a1, 0
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bswap_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bswap_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi a1, a1, -256
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: srli a2, a0, 24
+; RV32I-NEXT: or a1, a1, a2
+; RV32I-NEXT: lui a2, 4080
+; RV32I-NEXT: addi a2, a2, 0
+; RV32I-NEXT: slli a3, a0, 8
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bswap_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bswap_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a3, a2, -256
+; RV32I-NEXT: srli a2, a1, 8
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: srli a4, a1, 24
+; RV32I-NEXT: or a2, a2, a4
+; RV32I-NEXT: lui a4, 4080
+; RV32I-NEXT: addi a4, a4, 0
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: and a5, a5, a4
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: or a1, a1, a5
+; RV32I-NEXT: or a2, a1, a2
+; RV32I-NEXT: srli a1, a0, 8
+; RV32I-NEXT: and a1, a1, a3
+; RV32I-NEXT: srli a3, a0, 24
+; RV32I-NEXT: or a1, a1, a3
+; RV32I-NEXT: slli a3, a0, 8
+; RV32I-NEXT: and a3, a3, a4
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: or a0, a0, a3
+; RV32I-NEXT: or a1, a0, a1
+; RV32I-NEXT: addi a0, a2, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: addi a0, zero, 8
+; RV32I-NEXT: andi a2, a1, 255
+; RV32I-NEXT: addi a3, zero, 0
+; RV32I-NEXT: beq a2, a3, .LBB3_2
+; RV32I-NEXT: jal zero, .LBB3_1
+; RV32I-NEXT: .LBB3_1: # %cond.false
+; RV32I-NEXT: addi a0, a1, -1
+; RV32I-NEXT: xori a1, a1, -1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: .LBB3_2: # %cond.end
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: addi a0, zero, 16
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a2, a1, a2
+; RV32I-NEXT: addi a3, zero, 0
+; RV32I-NEXT: beq a2, a3, .LBB4_2
+; RV32I-NEXT: jal zero, .LBB4_1
+; RV32I-NEXT: .LBB4_1: # %cond.false
+; RV32I-NEXT: addi a0, a1, -1
+; RV32I-NEXT: xori a1, a1, -1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: .LBB4_2: # %cond.end
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: addi a0, zero, 32
+; RV32I-NEXT: addi a2, zero, 0
+; RV32I-NEXT: beq a1, a2, .LBB5_2
+; RV32I-NEXT: jal zero, .LBB5_1
+; RV32I-NEXT: .LBB5_1: # %cond.false
+; RV32I-NEXT: addi a0, a1, -1
+; RV32I-NEXT: xori a1, a1, -1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: .LBB5_2: # %cond.end
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i32 @test_ctlz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctlz_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: addi a0, zero, 32
+; RV32I-NEXT: addi a2, zero, 0
+; RV32I-NEXT: beq a1, a2, .LBB6_2
+; RV32I-NEXT: jal zero, .LBB6_1
+; RV32I-NEXT: .LBB6_1: # %cond.false
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: srli a1, a0, 2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 8
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: srli a1, a0, 16
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: xori a0, a0, -1
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: .LBB6_2: # %cond.end
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 28(s0)
+; RV32I-NEXT: sw s1, 24(s0)
+; RV32I-NEXT: sw s2, 20(s0)
+; RV32I-NEXT: sw s3, 16(s0)
+; RV32I-NEXT: sw s4, 12(s0)
+; RV32I-NEXT: sw s5, 8(s0)
+; RV32I-NEXT: sw s6, 4(s0)
+; RV32I-NEXT: sw s7, 0(s0)
+; RV32I-NEXT: addi s1, a1, 0
+; RV32I-NEXT: addi s2, a0, 0
+; RV32I-NEXT: addi a0, s2, -1
+; RV32I-NEXT: xori a1, s2, -1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi s4, a1, 1365
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi s5, a1, 819
+; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi s3, a1, 257
+; RV32I-NEXT: lui a1, %hi(__mulsi3)
+; RV32I-NEXT: addi s6, a1, %lo(__mulsi3)
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi s7, a1, -241
+; RV32I-NEXT: and a0, a0, s7
+; RV32I-NEXT: addi a1, s3, 0
+; RV32I-NEXT: jalr ra, s6, 0
+; RV32I-NEXT: addi a1, s1, -1
+; RV32I-NEXT: xori a2, s1, -1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: and a2, a2, s4
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: and a2, a1, s5
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, s5
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, s7
+; RV32I-NEXT: srli s1, a0, 24
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: addi a1, s3, 0
+; RV32I-NEXT: jalr ra, s6, 0
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: bne s2, a1, .LBB7_2
+; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi s1, a0, 32
+; RV32I-NEXT: .LBB7_2:
+; RV32I-NEXT: addi a0, s1, 0
+; RV32I-NEXT: lw s7, 0(s0)
+; RV32I-NEXT: lw s6, 4(s0)
+; RV32I-NEXT: lw s5, 8(s0)
+; RV32I-NEXT: lw s4, 12(s0)
+; RV32I-NEXT: lw s3, 16(s0)
+; RV32I-NEXT: lw s2, 20(s0)
+; RV32I-NEXT: lw s1, 24(s0)
+; RV32I-NEXT: lw ra, 28(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8_zero_undef:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: xori a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16_zero_undef:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: xori a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32_zero_undef:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a1, a0, -1
+; RV32I-NEXT: xori a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64_zero_undef:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 28(s0)
+; RV32I-NEXT: sw s1, 24(s0)
+; RV32I-NEXT: sw s2, 20(s0)
+; RV32I-NEXT: sw s3, 16(s0)
+; RV32I-NEXT: sw s4, 12(s0)
+; RV32I-NEXT: sw s5, 8(s0)
+; RV32I-NEXT: sw s6, 4(s0)
+; RV32I-NEXT: sw s7, 0(s0)
+; RV32I-NEXT: addi s1, a1, 0
+; RV32I-NEXT: addi s2, a0, 0
+; RV32I-NEXT: addi a0, s2, -1
+; RV32I-NEXT: xori a1, s2, -1
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi s4, a1, 1365
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi s5, a1, 819
+; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi s3, a1, 257
+; RV32I-NEXT: lui a1, %hi(__mulsi3)
+; RV32I-NEXT: addi s6, a1, %lo(__mulsi3)
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi s7, a1, -241
+; RV32I-NEXT: and a0, a0, s7
+; RV32I-NEXT: addi a1, s3, 0
+; RV32I-NEXT: jalr ra, s6, 0
+; RV32I-NEXT: addi a1, s1, -1
+; RV32I-NEXT: xori a2, s1, -1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: and a2, a2, s4
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: and a2, a1, s5
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, s5
+; RV32I-NEXT: add a1, a2, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, s7
+; RV32I-NEXT: srli s1, a0, 24
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: addi a1, s3, 0
+; RV32I-NEXT: jalr ra, s6, 0
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: bne s2, a1, .LBB11_2
+; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi s1, a0, 32
+; RV32I-NEXT: .LBB11_2:
+; RV32I-NEXT: addi a0, s1, 0
+; RV32I-NEXT: lw s7, 0(s0)
+; RV32I-NEXT: lw s6, 4(s0)
+; RV32I-NEXT: lw s5, 8(s0)
+; RV32I-NEXT: lw s4, 12(s0)
+; RV32I-NEXT: lw s3, 16(s0)
+; RV32I-NEXT: lw s2, 20(s0)
+; RV32I-NEXT: lw s1, 24(s0)
+; RV32I-NEXT: lw ra, 28(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
+  ret i64 %tmp
+}
+
+define i32 @test_ctpop_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctpop_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, 349525
+; RV32I-NEXT: addi a1, a1, 1365
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a1, a0, 4
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lui a1, 61681
+; RV32I-NEXT: addi a1, a1, -241
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: lui a1, 4112
+; RV32I-NEXT: addi a1, a1, 257
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  ret i32 %1
+}
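[Editor's note] The cttz checks above rely on the standard expansion cttz(x) = ctpop(~x & (x - 1)), followed by a bit-twiddling population count whose masks are materialized with lui/addi pairs: 349525/1365 is 0x55555555, 209715/819 is 0x33333333, 61681/-241 is 0x0F0F0F0F, and 4112/257 is 0x01010101, the multiplier that sums the byte counts into the top byte before the final srli by 24. A sketch of the identity in IR, assuming a non-zero input (the guarded beq path above handles zero):

define i32 @cttz32_sketch(i32 %x) {
  ; ~x & (x - 1) sets exactly the bits below the lowest set bit of x
  %m1 = add i32 %x, -1
  %nx = xor i32 %x, -1
  %low = and i32 %nx, %m1
  %r = call i32 @llvm.ctpop.i32(i32 %low)
  ret i32 %r
}

declare i32 @llvm.ctpop.i32(i32)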
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
new file mode 100644
index 00000000000..4c0f5de0358
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @udiv(i32 %a, i32 %b) {
+; RV32I-LABEL: udiv:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__udivsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__udivsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @udiv_constant(i32 %a) {
+; RV32I-LABEL: udiv_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(__udivsi3)
+; RV32I-NEXT: addi a2, a1, %lo(__udivsi3)
+; RV32I-NEXT: addi a1, zero, 5
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = udiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @udiv_pow2(i32 %a) {
+; RV32I-LABEL: udiv_pow2:
+; RV32I: # BB#0:
+; RV32I-NEXT: srli a0, a0, 3
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = udiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @udiv64(i64 %a, i64 %b) {
+; RV32I-LABEL: udiv64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a4, %hi(__udivdi3)
+; RV32I-NEXT: addi a4, a4, %lo(__udivdi3)
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = udiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @udiv64_constant(i64 %a) {
+; RV32I-LABEL: udiv64_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__udivdi3)
+; RV32I-NEXT: addi a4, a2, %lo(__udivdi3)
+; RV32I-NEXT: addi a2, zero, 5
+; RV32I-NEXT: addi a3, zero, 0
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = udiv i64 %a, 5
+  ret i64 %1
+}
+
+define i32 @sdiv(i32 %a, i32 %b) {
+; RV32I-LABEL: sdiv:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__divsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__divsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @sdiv_constant(i32 %a) {
+; RV32I-LABEL: sdiv_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(__divsi3)
+; RV32I-NEXT: addi a2, a1, %lo(__divsi3)
+; RV32I-NEXT: addi a1, zero, 5
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sdiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @sdiv_pow2(i32 %a) {
+; RV32I-LABEL: sdiv_pow2:
+; RV32I: # BB#0:
+; RV32I-NEXT: srai a1, a0, 31
+; RV32I-NEXT: srli a1, a1, 29
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 3
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sdiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @sdiv64(i64 %a, i64 %b) {
+; RV32I-LABEL: sdiv64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a4, %hi(__divdi3)
+; RV32I-NEXT: addi a4, a4, %lo(__divdi3)
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sdiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sdiv64_constant(i64 %a) {
+; RV32I-LABEL: sdiv64_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__divdi3)
+; RV32I-NEXT: addi a4, a2, %lo(__divdi3)
+; RV32I-NEXT: addi a2, zero, 5
+; RV32I-NEXT: addi a3, zero, 0
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sdiv i64 %a, 5
+  ret i64 %1
+}
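[Editor's note] sdiv_pow2 above shows the usual rounding fix for signed division by a power of two: a negative dividend must be biased by 2^k - 1 so that the arithmetic shift rounds toward zero, and the srai/srli pair materializes that bias branchlessly (0 for non-negative inputs, 7 for negative ones). The equivalent IR, as a sketch (function name is illustrative):

define i32 @sdiv8_sketch(i32 %a) {
  %sign = ashr i32 %a, 31    ; 0 if %a >= 0, -1 otherwise
  %bias = lshr i32 %sign, 29 ; 0 or 7 (= 2^3 - 1)
  %adj = add i32 %a, %bias
  %q = ashr i32 %adj, 3
  ret i32 %q
}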
diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
new file mode 100644
index 00000000000..4d86ced2584
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; TODO: check that the generated instructions for the equivalents of seqz,
+; snez, sltz, sgtz map to something simple
+
+define i32 @icmp_eq(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_eq:
+; RV32I: # BB#0:
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: sltiu a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp eq i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ne(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ne:
+; RV32I: # BB#0:
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: sltu a0, zero, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp ne i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ugt:
+; RV32I: # BB#0:
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp ugt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_uge:
+; RV32I: # BB#0:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp uge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ult(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ult:
+; RV32I: # BB#0:
+; RV32I-NEXT: sltu a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp ult i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_ule:
+; RV32I: # BB#0:
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp ule i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sgt:
+; RV32I: # BB#0:
+; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp sgt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sge:
+; RV32I: # BB#0:
+; RV32I-NEXT: slt a0, a0, a1
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp sge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_slt:
+; RV32I: # BB#0:
+; RV32I-NEXT: slt a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp slt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sle(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: icmp_sle:
+; RV32I: # BB#0:
+; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = icmp sle i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; TODO: check variants with an immediate?
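[Editor's note] As the checks above show, base RISC-V provides only slt/sltu, so uge, ule, sge, and sle are produced by computing the opposite comparison and inverting it with xori 1. A sketch of the uge case at the IR level (function name is illustrative):

define i32 @icmp_uge_sketch(i32 %a, i32 %b) {
  %lt = icmp ult i32 %a, %b
  %lt32 = zext i1 %lt to i32
  ; a >=u b  ==  !(a <u b)
  %r = xor i32 %lt32, 1
  ret i32 %r
}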
diff --git a/llvm/test/CodeGen/RISCV/indirectbr.ll b/llvm/test/CodeGen/RISCV/indirectbr.ll
new file mode 100644
index 00000000000..0a51e3d0b2e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/indirectbr.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @indirectbr(i8* %target) nounwind {
+; RV32I-LABEL: indirectbr:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 0(s0)
+; RV32I-NEXT: jalr zero, a0, 0
+; RV32I-NEXT: .LBB0_1: # %ret
+; RV32I-NEXT: addi a0, zero, 0
+; RV32I-NEXT: lw ra, 0(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  indirectbr i8* %target, [label %test_label]
+test_label:
+  br label %ret
+ret:
+  ret i32 0
+}
+
+define i32 @indirectbr_with_offset(i8* %a) nounwind {
+; RV32I-LABEL: indirectbr_with_offset:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 0(s0)
+; RV32I-NEXT: jalr zero, a0, 1380
+; RV32I-NEXT: .LBB1_1: # %ret
+; RV32I-NEXT: addi a0, zero, 0
+; RV32I-NEXT: lw ra, 0(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %target = getelementptr inbounds i8, i8* %a, i32 1380
+  indirectbr i8* %target, [label %test_label]
+test_label:
+  br label %ret
+ret:
+  ret i32 0
+}
diff --git a/llvm/test/CodeGen/RISCV/jumptable.ll b/llvm/test/CodeGen/RISCV/jumptable.ll
new file mode 100644
index 00000000000..98144c7c1e6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/jumptable.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define void @jt(i32 %in, i32* %out) {
+; RV32I-LABEL: jt:
+; RV32I: # BB#0: # %entry
+; RV32I-NEXT: addi a2, zero, 2
+; RV32I-NEXT: blt a2, a0, .LBB0_3
+; RV32I-NEXT: jal zero, .LBB0_1
+; RV32I-NEXT: .LBB0_1: # %entry
+; RV32I-NEXT: addi a3, zero, 1
+; RV32I-NEXT: beq a0, a3, .LBB0_5
+; RV32I-NEXT: jal zero, .LBB0_2
+; RV32I-NEXT: .LBB0_2: # %entry
+; RV32I-NEXT: beq a0, a2, .LBB0_6
+; RV32I-NEXT: jal zero, .LBB0_9
+; RV32I-NEXT: .LBB0_6: # %bb2
+; RV32I-NEXT: addi a0, zero, 3
+; RV32I-NEXT: sw a0, 0(a1)
+; RV32I-NEXT: jal zero, .LBB0_9
+; RV32I-NEXT: .LBB0_3: # %entry
+; RV32I-NEXT: addi a3, zero, 3
+; RV32I-NEXT: beq a0, a3, .LBB0_7
+; RV32I-NEXT: jal zero, .LBB0_4
+; RV32I-NEXT: .LBB0_4: # %entry
+; RV32I-NEXT: addi a2, zero, 4
+; RV32I-NEXT: beq a0, a2, .LBB0_8
+; RV32I-NEXT: jal zero, .LBB0_9
+; RV32I-NEXT: .LBB0_8: # %bb4
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: sw a0, 0(a1)
+; RV32I-NEXT: .LBB0_9: # %exit
+; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: .LBB0_5: # %bb1
+; RV32I-NEXT: addi a0, zero, 4
+; RV32I-NEXT: sw a0, 0(a1)
+; RV32I-NEXT: jal zero, .LBB0_9
+; RV32I-NEXT: .LBB0_7: # %bb3
+; RV32I-NEXT: sw a2, 0(a1)
+; RV32I-NEXT: jal zero, .LBB0_9
+entry:
+  switch i32 %in, label %exit [
+    i32 1, label %bb1
+    i32 2, label %bb2
+    i32 3, label %bb3
+    i32 4, label %bb4
+  ]
+bb1:
+  store i32 4, i32* %out
+  br label %exit
+bb2:
+  store i32 3, i32* %out
+  br label %exit
+bb3:
+  store i32 2, i32* %out
+  br label %exit
+bb4:
+  store i32 1, i32* %out
+  br label %exit
+exit:
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
new file mode 100644
index 00000000000..41653256deb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @square(i32 %a) {
+; RV32I-LABEL: square:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i32 %a, %a
+  ret i32 %1
+}
+
+define i32 @mul(i32 %a, i32 %b) {
+; RV32I-LABEL: mul:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @mul_constant(i32 %a) {
+; RV32I-LABEL: mul_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(__mulsi3)
+; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
+; RV32I-NEXT: addi a1, zero, 5
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @mul_pow2(i32 %a) {
+; RV32I-LABEL: mul_pow2:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @mul64(i64 %a, i64 %b) {
+; RV32I-LABEL: mul64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a4, %hi(__muldi3)
+; RV32I-NEXT: addi a4, a4, %lo(__muldi3)
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @mul64_constant(i64 %a) {
+; RV32I-LABEL: mul64_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__muldi3)
+; RV32I-NEXT: addi a4, a2, %lo(__muldi3)
+; RV32I-NEXT: addi a2, zero, 5
+; RV32I-NEXT: addi a3, zero, 0
+; RV32I-NEXT: jalr ra, a4, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = mul i64 %a, 5
+  ret i64 %1
}
diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
new file mode 100644
index 00000000000..80f79817b74
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+define i32 @urem(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: urem:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__umodsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__umodsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @srem(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: srem:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a2, %hi(__modsi3)
+; RV32I-NEXT: addi a2, a2, %lo(__modsi3)
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
new file mode 100644
index 00000000000..bf0689feafa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; These IR sequences will generate ISD::ROTL and ISD::ROTR nodes, which the
+; RISC-V backend must be able to select
+
+define i32 @rotl(i32 %x, i32 %y) {
+; RV32I-LABEL: rotl:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a2, zero, 32
+; RV32I-NEXT: sub a2, a2, a1
+; RV32I-NEXT: sll a1, a0, a1
+; RV32I-NEXT: srl a0, a0, a2
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %z = sub i32 32, %y
+  %b = shl i32 %x, %y
+  %c = lshr i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotr(i32 %x, i32 %y) {
+; RV32I-LABEL: rotr:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a2, zero, 32
+; RV32I-NEXT: sub a2, a2, a1
+; RV32I-NEXT: srl a1, a0, a1
+; RV32I-NEXT: sll a0, a0, a2
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %z = sub i32 32, %y
+  %b = lshr i32 %x, %y
+  %c = shl i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
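[Editor's note] The rotl/rotr tests above use a variable rotate amount; the same shl/lshr/or idiom with a constant amount should likewise be combined into a rotate node, which the backend then selects back to a shift pair since base RV32I has no rotate instruction. A hedged sketch of the constant-amount form (not part of the patch):

define i32 @rotl8_sketch(i32 %x) {
  ; rotate left by 8: the two shifted halves cover all 32 bits
  %hi = shl i32 %x, 8
  %lo = lshr i32 %x, 24
  %r = or i32 %hi, %lo
  ret i32 %r
}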
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
new file mode 100644
index 00000000000..1855d55b4df
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -0,0 +1,296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; FIXME: an unnecessary register is allocated just to store 0. X0 should be
+; used instead
+
+define i8 @sext_i1_to_i8(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @sext_i1_to_i16(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i1_to_i32(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i1_to_i64(i1 %a) {
+; TODO: the addi that stores 0 in t1 is unnecessary
+; RV32I-LABEL: sext_i1_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @sext_i8_to_i16(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i8_to_i32(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i8_to_i64(i8 %a) {
+; RV32I-LABEL: sext_i8_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a1, a0, 24
+; RV32I-NEXT: srai a0, a1, 24
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @sext_i16_to_i32(i16 %a) {
+; RV32I-LABEL: sext_i16_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i16_to_i64(i16 %a) {
+; RV32I-LABEL: sext_i16_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srai a0, a1, 16
+; RV32I-NEXT: srai a1, a1, 31
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @sext_i32_to_i64(i32 %a) {
+; RV32I-LABEL: sext_i32_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: srai a1, a0, 31
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = sext i32 %a to i64
+  ret i64 %1
+}
+
+define i8 @zext_i1_to_i8(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @zext_i1_to_i16(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i1_to_i32(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i1_to_i64(i1 %a) {
+; RV32I-LABEL: zext_i1_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @zext_i8_to_i16(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i8_to_i32(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i8_to_i64(i8 %a) {
+; RV32I-LABEL: zext_i8_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @zext_i16_to_i32(i16 %a) {
+; RV32I-LABEL: zext_i16_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i16_to_i64(i16 %a) {
+; RV32I-LABEL: zext_i16_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @zext_i32_to_i64(i32 %a) {
+; RV32I-LABEL: zext_i32_to_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = zext i32 %a to i64
+  ret i64 %1
+}
+
+; TODO: should the trunc tests explicitly ensure no code is generated before
+; jalr?
+
+define i1 @trunc_i8_to_i1(i8 %a) {
+; RV32I-LABEL: trunc_i8_to_i1:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i8 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i16_to_i1(i16 %a) {
+; RV32I-LABEL: trunc_i16_to_i1:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i16 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i32_to_i1(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i1:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i32 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i64_to_i1(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i1:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i64 %a to i1
+  ret i1 %1
+}
+
+define i8 @trunc_i16_to_i8(i16 %a) {
+; RV32I-LABEL: trunc_i16_to_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i16 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i32_to_i8(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i32 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i64_to_i8(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i8:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i64 %a to i8
+  ret i8 %1
+}
+
+define i16 @trunc_i32_to_i16(i32 %a) {
+; RV32I-LABEL: trunc_i32_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i32 %a to i16
+  ret i16 %1
+}
+
+define i16 @trunc_i64_to_i16(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i64 %a to i16
+  ret i16 %1
+}
+
+define i32 @trunc_i64_to_i32(i64 %a) {
+; RV32I-LABEL: trunc_i64_to_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = trunc i64 %a to i32
+  ret i32 %1
+}
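[Editor's note] The TODOs above note that sext from i1 is expanded as a negation of the zero-extended bit, and that materializing the zero in a separate register with addi is wasteful when x0 could feed the sub directly. The expansion itself, sketched in IR (function name is illustrative):

define i32 @sext_i1_sketch(i1 %a) {
  %z = zext i1 %a to i32
  ; 0 - 1 = -1 (all ones), 0 - 0 = 0
  %r = sub i32 0, %z
  ret i32 %r
}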
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
new file mode 100644
index 00000000000..d773a6ad62a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+
+; Basic shift support is tested as part of alu32.ll. This file ensures that
+; shifts which may not be supported natively are lowered properly.
+
+define i64 @lshr64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: lshr64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a3, %hi(__lshrdi3)
+; RV32I-NEXT: addi a3, a3, %lo(__lshrdi3)
+; RV32I-NEXT: jalr ra, a3, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = lshr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @ashr64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: ashr64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a3, %hi(__ashrdi3)
+; RV32I-NEXT: addi a3, a3, %lo(__ashrdi3)
+; RV32I-NEXT: jalr ra, a3, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = ashr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @shl64(i64 %a, i64 %b) nounwind {
+; RV32I-LABEL: shl64:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a3, %hi(__ashldi3)
+; RV32I-NEXT: addi a3, a3, %lo(__ashldi3)
+; RV32I-NEXT: jalr ra, a3, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+  %1 = shl i64 %a, %b
+  ret i64 %1
+}
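[Editor's note] All three 64-bit shifts above go through compiler-rt libcalls because a variable amount can cross the 32-bit word boundary. With a constant amount one would expect the shift to be expanded inline to an slli/srli/or sequence rather than a call; a sketch of an input that should exercise that path (an assumption, not covered by this patch):

define i64 @shl64_const_sketch(i64 %a) {
  ; constant shift amount: legalization can split this across the two words
  %1 = shl i64 %a, 5
  ret i64 %1
}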