author     Alexei Starovoitov <alexei.starovoitov@gmail.com>   2015-01-24 17:51:26 +0000
committer  Alexei Starovoitov <alexei.starovoitov@gmail.com>   2015-01-24 17:51:26 +0000
commit     e4c8c807bb609daa9be3fb9977703355b119fe8c (patch)
tree       af604a0de6c3d8b3cd068fa4364fe54bad7122f1 /llvm/test/CodeGen
parent     9fa8c9984c78dccf82cbfda8d261f0c43f9018c5 (diff)
BPF backend
Summary:

V8->V9:
- cleanup tests

V7->V8:
- addressed feedback from David:
  - switched to range-based 'for' loops
  - fixed formatting of tests

V6->V7:
- rebased and adjusted AsmPrinter args
- CamelCased .td, fixed formatting, cleaned up names, removed unused patterns
- diffstat: 3 files changed, 203 insertions(+), 227 deletions(-)

V5->V6:
- addressed feedback from Chandler:
  - reinstated full verbose standard banner in all files
  - fixed variables that were not in CamelCase
  - fixed names of #ifdef in header files
  - removed redundant braces in if/else chains with single statements
  - fixed comments
  - removed trailing empty line
  - dropped debug annotations from tests
- diffstat of these changes: 46 files changed, 456 insertions(+), 469 deletions(-)

V4->V5:
- fix setLoadExtAction() interface
- clang-formatted all where it made sense

V3->V4:
- added CODE_OWNERS entry for BPF backend

V2->V3:
- fix metadata in tests

V1->V2:
- addressed feedback from Tom and Matt
- removed top level change to configure (now everything via 'experimental-backend')
- reworked error reporting via DiagnosticInfo (similar to R600)
- added a few more tests
- added cmake build
- added Triple::bpf
- tested on linux and darwin

V1 cover letter:
---------------------
Recently Linux gained a "universal in-kernel virtual machine" called eBPF, or extended BPF. The name comes from "Berkeley Packet Filter", since the new instruction set is based on it. This patch adds a new backend that emits the extended BPF instruction set.

The concept and development are covered by the following articles:
http://lwn.net/Articles/599755/
http://lwn.net/Articles/575531/
http://lwn.net/Articles/603983/
http://lwn.net/Articles/606089/
http://lwn.net/Articles/612878/

One of the use cases: a dtrace/systemtap alternative.

bpf syscall manpage:
https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=b4fc1a460f3017e958e6a8ea560ea0afd91bf6fe

Instruction set description and differences vs classic BPF:
http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/networking/filter.txt

Short summary of the instruction set:
- 64-bit registers:
  R0      - return value from an in-kernel function, and exit value for the BPF program
  R1 - R5 - arguments from the BPF program to an in-kernel function
  R6 - R9 - callee-saved registers that an in-kernel function will preserve
  R10     - read-only frame pointer to access the stack
- two-operand instructions like +, -, *, mov, load/store
- implicit prologue/epilogue (invisible stack pointer)
- no floating point, no simd

Short history of extended BPF in the kernel: interpreter in 3.15, x64 JIT in 3.16, arm64 JIT, verifier, and bpf syscall in 3.18, with more to come in the future.

It's a very small and simple backend. There is no support for global variables, arbitrary function calls, floating point, varargs, exceptions, indirect jumps, arbitrary pointer arithmetic, alloca, etc. From the C front-end point of view it is very restricted. This is done on purpose, since the kernel rejects all programs that it cannot prove safe. It rejects programs with loops and with memory accesses via arbitrary pointers. When the kernel accepts a program, it is guaranteed that the program will terminate and will not crash the kernel.

This patch implements all the 'must have' bits. There are several things on the TODO list, so this is not the end of development. Most of the code is boilerplate copy-pasted from other backends. The only odd things are the lack of < and <= instructions, the specialized load_byte intrinsics, and 'compare and goto' as a single instruction.

The current instruction set is fixed, but more instructions can be added in the future.

Signed-off-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>

Subscribers: majnemer, chandlerc, echristo, joerg, pete, rengolin, kristof.beyls, arsenm, t.p.northover, tstellarAMD, aemerson, llvm-commits

Differential Revision: http://reviews.llvm.org/D6494

llvm-svn: 227008
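All of the tests added below follow the same pattern: an LLVM IR function plus a RUN line that feeds it to llc with the new -march=bpf target and checks the emitted BPF assembly with FileCheck. A minimal sketch in that style is shown here for orientation only; the function name 'sum' is hypothetical, and the CHECK lines simply mirror the 'add r2, r1' and 'ret' expectations that already appear in sanity.ll and alu8.ll in this patch, so the authoritative register-allocation expectations are the tests themselves, not this sketch.

; RUN: llc < %s -march=bpf | FileCheck %s

; Per the calling convention described above, r1 and r2 hold the first two
; arguments and r0 carries the return value.
define i64 @sum(i64 %a, i64 %b) {
  %1 = add i64 %b, %a
  ret i64 %1
; CHECK-LABEL: sum:
; CHECK: add r2, r1
; CHECK: ret
}

Tests that exercise deliberately unsupported features (byval.ll, many_args1.ll, many_args2.ll, struct_ret1.ll, struct_ret2.ll, vararg1.ll) instead run under 'not llc' and check for the backend's diagnostic, matching the point above that the kernel verifier would reject such programs anyway.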
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/BPF/alu8.ll          46
-rw-r--r--  llvm/test/CodeGen/BPF/atomics.ll       20
-rw-r--r--  llvm/test/CodeGen/BPF/basictest.ll     28
-rw-r--r--  llvm/test/CodeGen/BPF/byval.ll         27
-rw-r--r--  llvm/test/CodeGen/BPF/cc_args.ll       96
-rw-r--r--  llvm/test/CodeGen/BPF/cc_ret.ll        48
-rw-r--r--  llvm/test/CodeGen/BPF/cmp.ll          119
-rw-r--r--  llvm/test/CodeGen/BPF/ex1.ll           46
-rw-r--r--  llvm/test/CodeGen/BPF/intrinsics.ll    50
-rw-r--r--  llvm/test/CodeGen/BPF/load.ll          43
-rw-r--r--  llvm/test/CodeGen/BPF/loops.ll        111
-rw-r--r--  llvm/test/CodeGen/BPF/many_args1.ll    12
-rw-r--r--  llvm/test/CodeGen/BPF/many_args2.ll    15
-rw-r--r--  llvm/test/CodeGen/BPF/sanity.ll       117
-rw-r--r--  llvm/test/CodeGen/BPF/setcc.ll         99
-rw-r--r--  llvm/test/CodeGen/BPF/shifts.ll       101
-rw-r--r--  llvm/test/CodeGen/BPF/sockex2.ll      326
-rw-r--r--  llvm/test/CodeGen/BPF/struct_ret1.ll   17
-rw-r--r--  llvm/test/CodeGen/BPF/struct_ret2.ll   12
-rw-r--r--  llvm/test/CodeGen/BPF/vararg1.ll        9
20 files changed, 1342 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/BPF/alu8.ll b/llvm/test/CodeGen/BPF/alu8.ll
new file mode 100644
index 00000000000..0233225f81b
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/alu8.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=bpf -show-mc-encoding < %s | FileCheck %s
+; test little endian only for now
+
+define i8 @mov(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: mov:
+; CHECK: mov r0, r2 # encoding: [0xbf,0x20,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: ret # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+ ret i8 %b
+}
+
+define i8 @add(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: add:
+; CHECK: add r1, r2 # encoding: [0x0f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: mov r0, r1 # encoding: [0xbf,0x10,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = add i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @and(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: and:
+; CHECK: and r1, r2 # encoding: [0x5f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = and i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @bis(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: bis:
+; CHECK: or r1, r2 # encoding: [0x4f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = or i8 %a, %b
+ ret i8 %1
+}
+
+define i8 @xorand(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: xorand:
+; CHECK: xori r2, -1 # encoding: [0xa7,0x02,0x00,0x00,0xff,0xff,0xff,0xff]
+ %1 = xor i8 %b, -1
+ %2 = and i8 %a, %1
+ ret i8 %2
+}
+
+define i8 @xor(i8 %a, i8 %b) nounwind {
+; CHECK-LABEL: xor:
+; CHECK: xor r1, r2 # encoding: [0xaf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %1 = xor i8 %a, %b
+ ret i8 %1
+}
diff --git a/llvm/test/CodeGen/BPF/atomics.ll b/llvm/test/CodeGen/BPF/atomics.ll
new file mode 100644
index 00000000000..2f9730dddde
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/atomics.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+; CHECK-LABEL: test_load_add_32
+; CHECK: xadd32
+; CHECK: encoding: [0xc3
+define void @test_load_add_32(i32* %p, i32 zeroext %v) {
+entry:
+ atomicrmw add i32* %p, i32 %v seq_cst
+ ret void
+}
+
+; CHECK-LABEL: test_load_add_64
+; CHECK: xadd64
+; CHECK: encoding: [0xdb
+define void @test_load_add_64(i64* %p, i64 zeroext %v) {
+entry:
+ atomicrmw add i64* %p, i64 %v seq_cst
+ ret void
+}
diff --git a/llvm/test/CodeGen/BPF/basictest.ll b/llvm/test/CodeGen/BPF/basictest.ll
new file mode 100644
index 00000000000..0cbfff83442
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/basictest.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define i32 @test0(i32 %X) {
+ %tmp.1 = add i32 %X, 1
+ ret i32 %tmp.1
+; CHECK-LABEL: test0:
+; CHECK: addi r1, 1
+}
+
+; CHECK-LABEL: store_imm:
+; CHECK: stw 0(r1), r0
+; CHECK: stw 4(r2), r0
+define i32 @store_imm(i32* %a, i32* %b) {
+entry:
+ store i32 0, i32* %a, align 4
+ %0 = getelementptr inbounds i32* %b, i32 1
+ store i32 0, i32* %0, align 4
+ ret i32 0
+}
+
+@G = external global i8
+define zeroext i8 @loadG() {
+ %tmp = load i8* @G
+ ret i8 %tmp
+; CHECK-LABEL: loadG:
+; CHECK: ld_64 r1
+; CHECK: ldb r0, 0(r1)
+}
diff --git a/llvm/test/CodeGen/BPF/byval.ll b/llvm/test/CodeGen/BPF/byval.ll
new file mode 100644
index 00000000000..065604b29e9
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/byval.ll
@@ -0,0 +1,27 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: by value not supported
+
+%struct.S = type { [10 x i32] }
+
+; Function Attrs: nounwind uwtable
+define void @bar(i32 %a) #0 {
+entry:
+ %.compoundliteral = alloca %struct.S, align 8
+ %arrayinit.begin = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 0
+ store i32 1, i32* %arrayinit.begin, align 8
+ %arrayinit.element = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 1
+ store i32 2, i32* %arrayinit.element, align 4
+ %arrayinit.element2 = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 2
+ store i32 3, i32* %arrayinit.element2, align 8
+ %arrayinit.start = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 3
+ %scevgep4 = bitcast i32* %arrayinit.start to i8*
+ call void @llvm.memset.p0i8.i64(i8* %scevgep4, i8 0, i64 28, i32 4, i1 false)
+ call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3
+ ret void
+}
+
+declare void @foo(i32, %struct.S* byval align 8) #1
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3
diff --git a/llvm/test/CodeGen/BPF/cc_args.ll b/llvm/test/CodeGen/BPF/cc_args.ll
new file mode 100644
index 00000000000..5085fe5684e
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cc_args.ll
@@ -0,0 +1,96 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: mov r1, 123 # encoding: [0xb7,0x01,0x00,0x00,0x7b,0x00,0x00,0x00]
+; CHECK: call f_i16
+ call void @f_i16(i16 123)
+
+; CHECK: mov r1, 12345678 # encoding: [0xb7,0x01,0x00,0x00,0x4e,0x61,0xbc,0x00]
+; CHECK: call f_i32
+ call void @f_i32(i32 12345678)
+
+; CHECK: ld_64 r1, 72623859790382856 # encoding: [0x18,0x01,0x00,0x00,0x08,0x07,0x06,0x05,0x00,0x00,0x00,0x00,0x04,0x03,0x02,0x01]
+; CHECK: call f_i64
+ call void @f_i64(i64 72623859790382856)
+
+; CHECK: mov r1, 1234
+; CHECK: mov r2, 5678
+; CHECK: call f_i32_i32
+ call void @f_i32_i32(i32 1234, i32 5678)
+
+; CHECK: mov r1, 2
+; CHECK: mov r2, 3
+; CHECK: mov r3, 4
+; CHECK: call f_i16_i32_i16
+ call void @f_i16_i32_i16(i16 2, i32 3, i16 4)
+
+; CHECK: mov r1, 5
+; CHECK: ld_64 r2, 7262385979038285
+; CHECK: mov r3, 6
+; CHECK: call f_i16_i64_i16
+ call void @f_i16_i64_i16(i16 5, i64 7262385979038285, i16 6)
+
+ ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 4
+
+define void @f_i16(i16 %a) #0 {
+; CHECK: f_i16:
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i16 %a, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i32(i32 %a) #0 {
+; CHECK: f_i32:
+; CHECK: sth 0(r2), r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: sth 2(r2), r1 # encoding: [0x6b,0x12,0x02,0x00,0x00,0x00,0x00,0x00]
+ store volatile i32 %a, i32* @g_i32, align 2
+ ret void
+}
+
+define void @f_i64(i64 %a) #0 {
+; CHECK: f_i64:
+; CHECK: stw 0(r2), r1
+; CHECK: stw 4(r2), r1 # encoding: [0x63,0x12,0x04,0x00,0x00,0x00,0x00,0x00]
+ store volatile i64 %a, i64* @g_i64, align 2
+ ret void
+}
+
+define void @f_i32_i32(i32 %a, i32 %b) #0 {
+; CHECK: f_i32_i32:
+; CHECK: stw 0(r3), r1
+ store volatile i32 %a, i32* @g_i32, align 4
+; CHECK: stw 0(r3), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+ ret void
+}
+
+define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
+; CHECK: f_i16_i32_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: stw 0(r1), r2
+ store volatile i32 %b, i32* @g_i32, align 4
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
+
+define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
+; CHECK: f_i16_i64_i16:
+; CHECK: sth 0(r4), r1
+ store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: std 0(r1), r2 # encoding: [0x7b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ store volatile i64 %b, i64* @g_i64, align 8
+; CHECK: sth 0(r4), r3
+ store volatile i16 %c, i16* @g_i16, align 2
+ ret void
+}
diff --git a/llvm/test/CodeGen/BPF/cc_ret.ll b/llvm/test/CodeGen/BPF/cc_ret.ll
new file mode 100644
index 00000000000..e32b17bcc61
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cc_ret.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: call f_i16
+; CHECK: sth 0(r1), r0
+ %0 = call i16 @f_i16()
+ store volatile i16 %0, i16* @g_i16
+
+; CHECK: call f_i32
+; CHECK: stw 0(r1), r0
+ %1 = call i32 @f_i32()
+ store volatile i32 %1, i32* @g_i32
+
+; CHECK: call f_i64
+; CHECK: std 0(r1), r0
+ %2 = call i64 @f_i64()
+ store volatile i64 %2, i64* @g_i64
+
+ ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 2
+
+define i16 @f_i16() #0 {
+; CHECK: f_i16:
+; CHECK: mov r0, 1
+; CHECK: ret
+ ret i16 1
+}
+
+define i32 @f_i32() #0 {
+; CHECK: f_i32:
+; CHECK: mov r0, 16909060
+; CHECK: ret
+ ret i32 16909060
+}
+
+define i64 @f_i64() #0 {
+; CHECK: f_i64:
+; CHECK: ld_64 r0, 72623859790382856
+; CHECK: ret
+ ret i64 72623859790382856
+}
diff --git a/llvm/test/CodeGen/BPF/cmp.ll b/llvm/test/CodeGen/BPF/cmp.ll
new file mode 100644
index 00000000000..b353f90ab56
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cmp.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp1(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ br i1 %1, label %2, label %4
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL:foo_cmp1:
+; CHECK: jsge r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp2(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ br i1 %1, label %4, label %2
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL:foo_cmp2:
+; CHECK: jsgt r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp3(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ br i1 %1, label %2, label %4
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL:foo_cmp3:
+; CHECK: jsge r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp4(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ br i1 %1, label %4, label %2
+
+; <label>:2 ; preds = %0
+ %3 = mul i8 %b, %a
+ br label %6
+
+; <label>:4 ; preds = %0
+ %5 = shl i8 %b, 3
+ br label %6
+
+; <label>:6 ; preds = %4, %2
+ %.0 = phi i8 [ %3, %2 ], [ %5, %4 ]
+ ret i8 %.0
+; CHECK-LABEL:foo_cmp4:
+; CHECK: jsgt r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @min(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL:min:
+; CHECK: jsgt r2, r1
+; CHECK: mov r1, r2
+; CHECK: mov r0, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define zeroext i8 @minu(i8 zeroext %a, i8 zeroext %b) #0 {
+ %1 = icmp ult i8 %a, 100
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL:minu:
+; CHECK: jgt r3, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @max(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp sgt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL:max:
+; CHECK: jsgt r1, r2
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @meq(i8 signext %a, i8 signext %b, i8 signext %c) #0 {
+ %1 = icmp eq i8 %a, %b
+ %c.a = select i1 %1, i8 %c, i8 %a
+ ret i8 %c.a
+; CHECK-LABEL:meq:
+; CHECK: jeq r1, r2
+}
diff --git a/llvm/test/CodeGen/BPF/ex1.ll b/llvm/test/CodeGen/BPF/ex1.ll
new file mode 100644
index 00000000000..5fc1200371e
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/ex1.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+%struct.bpf_context = type { i64, i64, i64, i64, i64, i64, i64 }
+%struct.sk_buff = type { i64, i64, i64, i64, i64, i64, i64 }
+%struct.net_device = type { i64, i64, i64, i64, i64, i64, i64 }
+
+@bpf_prog1.devname = private unnamed_addr constant [3 x i8] c"lo\00", align 1
+@bpf_prog1.fmt = private unnamed_addr constant [15 x i8] c"skb %x dev %x\0A\00", align 1
+
+; Function Attrs: nounwind uwtable
+define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/net/netif_receive_skb" {
+ %devname = alloca [3 x i8], align 1
+ %fmt = alloca [15 x i8], align 1
+ %1 = getelementptr inbounds [3 x i8]* %devname, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false)
+ %2 = getelementptr inbounds %struct.bpf_context* %ctx, i64 0, i32 0
+ %3 = load i64* %2, align 8
+ %4 = inttoptr i64 %3 to %struct.sk_buff*
+ %5 = getelementptr inbounds %struct.sk_buff* %4, i64 0, i32 2
+ %6 = bitcast i64* %5 to i8*
+ %7 = call i8* inttoptr (i64 4 to i8* (i8*)*)(i8* %6) #1
+ %8 = call i32 inttoptr (i64 9 to i32 (i8*, i8*, i32)*)(i8* %7, i8* %1, i32 2) #1
+ %9 = icmp eq i32 %8, 0
+ br i1 %9, label %10, label %13
+
+; <label>:10 ; preds = %0
+ %11 = getelementptr inbounds [15 x i8]* %fmt, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i32 1, i1 false)
+ %12 = call i32 (i8*, i32, ...)* inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1
+; CHECK-LABEL: bpf_prog1:
+; CHECK: call 4
+; CHECK: call 9
+; CHECK: jnei r0, 0
+; CHECK: mov r1, 622884453
+; CHECK: ld_64 r1, 7214898703899978611
+; CHECK: call 11
+; CHECK: mov r0, 0
+; CHECK: ret
+ br label %13
+
+; <label>:13 ; preds = %10, %0
+ ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #1
diff --git a/llvm/test/CodeGen/BPF/intrinsics.ll b/llvm/test/CodeGen/BPF/intrinsics.ll
new file mode 100644
index 00000000000..9a078fb3d15
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/intrinsics.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_b(i64 %foo, i64* nocapture %bar, i8* %ctx, i8* %ctx2) #0 {
+ %1 = tail call i64 @llvm.bpf.load.byte(i8* %ctx, i64 123) #2
+ %2 = add i64 %1, %foo
+ %3 = load volatile i64* %bar, align 8
+ %4 = add i64 %2, %3
+ %5 = tail call i64 @llvm.bpf.load.byte(i8* %ctx2, i64 %foo) #2
+ %6 = add i64 %4, %5
+ %7 = load volatile i64* %bar, align 8
+ %8 = add i64 %6, %7
+ %9 = trunc i64 %8 to i32
+ ret i32 %9
+; CHECK-LABEL: ld_b:
+; CHECK: ldabs_b r0, r6.data + 123
+; CHECK: ldind_b r0, r6.data
+}
+
+declare i64 @llvm.bpf.load.byte(i8*, i64) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_h(i8* %ctx, i8* %ctx2, i32 %foo) #0 {
+ %1 = tail call i64 @llvm.bpf.load.half(i8* %ctx, i64 123) #2
+ %2 = sext i32 %foo to i64
+ %3 = tail call i64 @llvm.bpf.load.half(i8* %ctx2, i64 %2) #2
+ %4 = add i64 %3, %1
+ %5 = trunc i64 %4 to i32
+ ret i32 %5
+; CHECK-LABEL: ld_h:
+; CHECK: ldind_h r0, r6.data
+; CHECK: ldabs_h r0, r6.data + 123
+}
+
+declare i64 @llvm.bpf.load.half(i8*, i64) #1
+
+; Function Attrs: nounwind uwtable
+define i32 @ld_w(i8* %ctx, i8* %ctx2, i32 %foo) #0 {
+ %1 = tail call i64 @llvm.bpf.load.word(i8* %ctx, i64 123) #2
+ %2 = sext i32 %foo to i64
+ %3 = tail call i64 @llvm.bpf.load.word(i8* %ctx2, i64 %2) #2
+ %4 = add i64 %3, %1
+ %5 = trunc i64 %4 to i32
+ ret i32 %5
+; CHECK-LABEL: ld_w:
+; CHECK: ldind_w r0, r6.data
+; CHECK: ldabs_w r0, r6.data + 123
+}
+
+declare i64 @llvm.bpf.load.word(i8*, i64) #1
diff --git a/llvm/test/CodeGen/BPF/load.ll b/llvm/test/CodeGen/BPF/load.ll
new file mode 100644
index 00000000000..b0974350bab
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/load.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define i16 @am1(i16* %a) nounwind {
+ %1 = load i16* %a
+ ret i16 %1
+}
+; CHECK-LABEL: am1:
+; CHECK: ldh r0, 0(r1)
+
+@foo = external global i16
+
+define i16 @am2() nounwind {
+ %1 = load i16* @foo
+ ret i16 %1
+}
+; CHECK-LABEL: am2:
+; CHECK: ldh r0, 0(r1)
+
+define i16 @am4() nounwind {
+ %1 = load volatile i16* inttoptr(i16 32 to i16*)
+ ret i16 %1
+}
+; CHECK-LABEL: am4:
+; CHECK: mov r1, 32
+; CHECK: ldh r0, 0(r1)
+
+define i16 @am5(i16* %a) nounwind {
+ %1 = getelementptr i16* %a, i16 2
+ %2 = load i16* %1
+ ret i16 %2
+}
+; CHECK-LABEL: am5:
+; CHECK: ldh r0, 4(r1)
+
+%S = type { i16, i16 }
+@baz = common global %S zeroinitializer, align 1
+
+define i16 @am6() nounwind {
+ %1 = load i16* getelementptr (%S* @baz, i32 0, i32 1)
+ ret i16 %1
+}
+; CHECK-LABEL: am6:
+; CHECK: ldh r0, 2(r1)
diff --git a/llvm/test/CodeGen/BPF/loops.ll b/llvm/test/CodeGen/BPF/loops.ll
new file mode 100644
index 00000000000..40bf4499e4b
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/loops.ll
@@ -0,0 +1,111 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+define zeroext i16 @add(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: add:
+; CHECK: add r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = add i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @sub(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: sub:
+; CHECK: sub r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = sub i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @or(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: or:
+; CHECK: or r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = or i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @xor(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: xor:
+; CHECK: xor r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = xor i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
+
+define zeroext i16 @and(i16* nocapture %a, i16 zeroext %n) nounwind readonly {
+entry:
+ %cmp8 = icmp eq i16 %n, 0 ; <i1> [#uses=1]
+ br i1 %cmp8, label %for.end, label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
+ %sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+; CHECK-LABEL: and:
+; CHECK: and r{{[0-9]+}}, r{{[0-9]+}}
+ %tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
+ %add = and i16 %tmp4, %sum.09 ; <i16> [#uses=2]
+ %inc = add i16 %i.010, 1 ; <i16> [#uses=2]
+ %exitcond = icmp eq i16 %inc, %n ; <i1> [#uses=1]
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
+ ret i16 %sum.0.lcssa
+}
diff --git a/llvm/test/CodeGen/BPF/many_args1.ll b/llvm/test/CodeGen/BPF/many_args1.ll
new file mode 100644
index 00000000000..08218f452d0
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/many_args1.ll
@@ -0,0 +1,12 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: too many args
+
+; Function Attrs: nounwind uwtable
+define i32 @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ %call = tail call i32 @bar(i32 %a, i32 %b, i32 %c, i32 1, i32 2, i32 3) #3
+ ret i32 %call
+}
+
+declare i32 @bar(i32, i32, i32, i32, i32, i32) #1
diff --git a/llvm/test/CodeGen/BPF/many_args2.ll b/llvm/test/CodeGen/BPF/many_args2.ll
new file mode 100644
index 00000000000..a69886c2b20
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/many_args2.ll
@@ -0,0 +1,15 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: too many args
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) #0 {
+entry:
+ ret i32 1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ ret i32 1
+}
diff --git a/llvm/test/CodeGen/BPF/sanity.ll b/llvm/test/CodeGen/BPF/sanity.ll
new file mode 100644
index 00000000000..db63c07b052
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/sanity.ll
@@ -0,0 +1,117 @@
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+@foo_printf.fmt = private unnamed_addr constant [9 x i8] c"hello \0A\00", align 1
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo_int(i32 %a, i32 %b) #0 {
+ %1 = add nsw i32 %b, %a
+ ret i32 %1
+; CHECK-LABEL: foo_int:
+; CHECK: add r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_char(i8 signext %a, i8 signext %b) #0 {
+ %1 = add i8 %b, %a
+ ret i8 %1
+; CHECK-LABEL: foo_char:
+; CHECK: add r2, r1
+; CHECK: slli r2, 56
+; CHECK: srai r2, 56
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i64 @foo_ll(i64 %a, i64 %b, i64 %c) #0 {
+ %1 = add nsw i64 %b, %a
+ %2 = sub i64 %1, %c
+ ret i64 %2
+; CHECK-LABEL: foo_ll:
+; CHECK: add r2, r1
+; CHECK: sub r2, r3
+; CHECK: mov r0, r2
+}
+
+; Function Attrs: nounwind uwtable
+define void @foo_call2(i32 %a, i32 %b) #1 {
+ %1 = trunc i32 %b to i8
+ tail call void @foo_2arg(i8 signext %1, i32 %a) #3
+ ret void
+; CHECK-LABEL: foo_call2:
+; CHECK: slli r2, 56
+; CHECK: srai r2, 56
+; CHECK: mov r1, r2
+}
+
+declare void @foo_2arg(i8 signext, i32) #2
+
+; Function Attrs: nounwind uwtable
+define i32 @foo_call5(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #1 {
+ %1 = tail call i32 @bar(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #3
+ ret i32 0
+; CHECK-LABEL: foo_call5:
+; CHECK: call bar
+}
+
+declare i32 @bar(i8 signext, i16 signext, i32, i64) #2
+
+; Function Attrs: nounwind readnone uwtable
+define signext i8 @foo_cmp(i8 signext %a, i8 signext %b) #0 {
+ %1 = icmp slt i8 %a, %b
+ %a.b = select i1 %1, i8 %a, i8 %b
+ ret i8 %a.b
+; CHECK-LABEL: foo_cmp:
+; CHECK: jsgt r2, r1
+}
+
+; Function Attrs: nounwind readnone uwtable
+define i32 @foo_muldiv(i8 signext %a, i16 signext %b, i32 %c, i64 %d) #0 {
+ %1 = icmp eq i8 %a, 0
+ br i1 %1, label %5, label %2
+
+; <label>:2 ; preds = %0
+ %3 = sext i16 %b to i32
+ %4 = mul nsw i32 %3, %c
+ br label %8
+
+; <label>:5 ; preds = %0
+ %6 = trunc i64 %d to i32
+ %7 = udiv i32 %6, %c
+ br label %8
+
+; <label>:8 ; preds = %5, %2
+ %.0 = phi i32 [ %4, %2 ], [ %7, %5 ]
+ ret i32 %.0
+; CHECK-LABEL: foo_muldiv:
+; CHECK: mul r2, r3
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @foo_optimized() #1 {
+ %1 = tail call i32 @manyarg(i32 1, i32 2, i32 3, i32 4, i32 5) #3
+ ret i32 %1
+; CHECK-LABEL: foo_optimized:
+; CHECK: mov r1, 1
+; CHECK: mov r2, 2
+; CHECK: mov r3, 3
+; CHECK: mov r4, 4
+; CHECK: mov r5, 5
+}
+
+declare i32 @manyarg(i32, i32, i32, i32, i32) #2
+
+; Function Attrs: nounwind uwtable
+define void @foo_printf() #1 {
+ %fmt = alloca [9 x i8], align 1
+ %1 = getelementptr inbounds [9 x i8]* %fmt, i64 0, i64 0
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i32 1, i1 false)
+; CHECK-LABEL: foo_printf:
+; CHECK: ld_64 r1, 729618802566522216
+ %2 = call i32 (i8*, ...)* @printf(i8* %1) #3
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #3
+
+; Function Attrs: nounwind
+declare i32 @printf(i8* nocapture, ...) #4
diff --git a/llvm/test/CodeGen/BPF/setcc.ll b/llvm/test/CodeGen/BPF/setcc.ll
new file mode 100644
index 00000000000..eabb6c9bf2d
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/setcc.ll
@@ -0,0 +1,99 @@
+; RUN: llc -march=bpf < %s | FileCheck %s
+
+define i16 @sccweqand(i16 %a, i16 %b) nounwind {
+ %t1 = and i16 %a, %b
+ %t2 = icmp eq i16 %t1, 0
+ %t3 = zext i1 %t2 to i16
+ ret i16 %t3
+}
+; CHECK-LABEL: sccweqand:
+; CHECK: jeq r1, r2
+
+define i16 @sccwneand(i16 %a, i16 %b) nounwind {
+ %t1 = and i16 %a, %b
+ %t2 = icmp ne i16 %t1, 0
+ %t3 = zext i1 %t2 to i16
+ ret i16 %t3
+}
+; CHECK-LABEL: sccwneand:
+; CHECK: jne r1, r2
+
+define i16 @sccwne(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ne i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwne:
+; CHECK: jne r1, r2
+
+define i16 @sccweq(i16 %a, i16 %b) nounwind {
+ %t1 = icmp eq i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccweq:
+; CHECK: jeq r1, r2
+
+define i16 @sccwugt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ugt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwugt:
+; CHECK: jgt r1, r2
+
+define i16 @sccwuge(i16 %a, i16 %b) nounwind {
+ %t1 = icmp uge i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwuge:
+; CHECK: jge r1, r2
+
+define i16 @sccwult(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ult i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwult:
+; CHECK: jgt r2, r1
+
+define i16 @sccwule(i16 %a, i16 %b) nounwind {
+ %t1 = icmp ule i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwule:
+; CHECK: jge r2, r1
+
+define i16 @sccwsgt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sgt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwsgt:
+; CHECK: jsgt r1, r2
+
+define i16 @sccwsge(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sge i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwsge:
+; CHECK: jsge r1, r2
+
+define i16 @sccwslt(i16 %a, i16 %b) nounwind {
+ %t1 = icmp slt i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwslt:
+; CHECK: jsgt r2, r1
+
+define i16 @sccwsle(i16 %a, i16 %b) nounwind {
+ %t1 = icmp sle i16 %a, %b
+ %t2 = zext i1 %t1 to i16
+ ret i16 %t2
+}
+; CHECK-LABEL:sccwsle:
+; CHECK: jsge r2, r1
diff --git a/llvm/test/CodeGen/BPF/shifts.ll b/llvm/test/CodeGen/BPF/shifts.ll
new file mode 100644
index 00000000000..898ae2d4612
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/shifts.ll
@@ -0,0 +1,101 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+define zeroext i8 @lshr8(i8 zeroext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr8:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i8 %a, %cnt
+ ret i8 %shr
+}
+
+define signext i8 @ashr8(i8 signext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr8:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i8 %a, %cnt
+ ret i8 %shr
+}
+
+define zeroext i8 @shl8(i8 zeroext %a, i8 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK: shl8
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i8 %a, %cnt
+ ret i8 %shl
+}
+
+define zeroext i16 @lshr16(i16 zeroext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr16:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i16 %a, %cnt
+ ret i16 %shr
+}
+
+define signext i16 @ashr16(i16 signext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr16:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i16 %a, %cnt
+ ret i16 %shr
+}
+
+define zeroext i16 @shl16(i16 zeroext %a, i16 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl16:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i16 %a, %cnt
+ ret i16 %shl
+}
+
+define zeroext i32 @lshr32(i32 zeroext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr32:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: slli r1, 32 # encoding: [0x67,0x01,0x00,0x00,0x20,0x00,0x00,0x00]
+ %shr = lshr i32 %a, %cnt
+ ret i32 %shr
+}
+
+define signext i32 @ashr32(i32 signext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr32:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i32 %a, %cnt
+ ret i32 %shr
+}
+
+define zeroext i32 @shl32(i32 zeroext %a, i32 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl32:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i32 %a, %cnt
+ ret i32 %shl
+}
+
+define zeroext i64 @lshr64(i64 zeroext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: lshr64:
+; CHECK: srl r1, r2 # encoding: [0x7f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = lshr i64 %a, %cnt
+ ret i64 %shr
+}
+
+define signext i64 @ashr64(i64 signext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: ashr64:
+; CHECK: sra r1, r2 # encoding: [0xcf,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shr = ashr i64 %a, %cnt
+ ret i64 %shr
+}
+
+define zeroext i64 @shl64(i64 zeroext %a, i64 zeroext %cnt) nounwind readnone {
+entry:
+; CHECK-LABEL: shl64:
+; CHECK: sll r1, r2 # encoding: [0x6f,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: mov r0, r1 # encoding: [0xbf,0x10,0x00,0x00,0x00,0x00,0x00,0x00]
+; CHECK: ret # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+ %shl = shl i64 %a, %cnt
+ ret i64 %shl
+}
diff --git a/llvm/test/CodeGen/BPF/sockex2.ll b/llvm/test/CodeGen/BPF/sockex2.ll
new file mode 100644
index 00000000000..6ae5e1c8d6b
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/sockex2.ll
@@ -0,0 +1,326 @@
+; RUN: llc < %s -march=bpf -show-mc-encoding | FileCheck %s
+; test little endian only for now
+
+%struct.bpf_map_def = type { i32, i32, i32, i32 }
+%struct.sk_buff = type opaque
+
+@hash_map = global %struct.bpf_map_def { i32 1, i32 4, i32 8, i32 1024 }, section "maps", align 4
+
+; Function Attrs: nounwind uwtable
+define i32 @bpf_prog2(%struct.sk_buff* %skb) #0 section "socket2" {
+ %key = alloca i32, align 4
+ %val = alloca i64, align 8
+ %1 = bitcast %struct.sk_buff* %skb to i8*
+ %2 = call i64 @llvm.bpf.load.half(i8* %1, i64 12) #2
+ %3 = icmp eq i64 %2, 34984
+ br i1 %3, label %4, label %6
+
+; <label>:4 ; preds = %0
+ %5 = call i64 @llvm.bpf.load.half(i8* %1, i64 16) #2
+ br label %6
+
+; <label>:6 ; preds = %4, %0
+ %proto.0.i = phi i64 [ %5, %4 ], [ %2, %0 ]
+ %nhoff.0.i = phi i64 [ 18, %4 ], [ 14, %0 ]
+ %7 = icmp eq i64 %proto.0.i, 33024
+ br i1 %7, label %8, label %12
+
+; <label>:8 ; preds = %6
+ %9 = add i64 %nhoff.0.i, 2
+ %10 = call i64 @llvm.bpf.load.half(i8* %1, i64 %9) #2
+ %11 = add i64 %nhoff.0.i, 4
+ br label %12
+
+; <label>:12 ; preds = %8, %6
+ %proto.1.i = phi i64 [ %10, %8 ], [ %proto.0.i, %6 ]
+ %nhoff.1.i = phi i64 [ %11, %8 ], [ %nhoff.0.i, %6 ]
+ switch i64 %proto.1.i, label %flow_dissector.exit.thread [
+ i64 2048, label %13
+ i64 34525, label %39
+ ]
+
+; <label>:13 ; preds = %12
+ %14 = add i64 %nhoff.1.i, 6
+ %15 = call i64 @llvm.bpf.load.half(i8* %1, i64 %14) #2
+ %16 = and i64 %15, 16383
+ %17 = icmp eq i64 %16, 0
+ br i1 %17, label %18, label %.thread.i.i
+
+; <label>:18 ; preds = %13
+ %19 = add i64 %nhoff.1.i, 9
+ %20 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %19) #2
+ %21 = icmp eq i64 %20, 47
+ br i1 %21, label %28, label %.thread.i.i
+
+.thread.i.i: ; preds = %18, %13
+ %22 = phi i64 [ %20, %18 ], [ 0, %13 ]
+ %23 = add i64 %nhoff.1.i, 12
+ %24 = call i64 @llvm.bpf.load.word(i8* %1, i64 %23) #2
+ %25 = add i64 %nhoff.1.i, 16
+ %26 = call i64 @llvm.bpf.load.word(i8* %1, i64 %25) #2
+ %27 = trunc i64 %26 to i32
+ br label %28
+
+; <label>:28 ; preds = %.thread.i.i, %18
+ %29 = phi i32 [ %27, %.thread.i.i ], [ undef, %18 ]
+ %30 = phi i64 [ %22, %.thread.i.i ], [ 47, %18 ]
+ %31 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.1.i) #2
+ %32 = icmp eq i64 %31, 69
+ br i1 %32, label %33, label %35
+
+; <label>:33 ; preds = %28
+ %34 = add i64 %nhoff.1.i, 20
+ br label %parse_ip.exit.i
+
+; <label>:35 ; preds = %28
+ %36 = shl i64 %31, 2
+ %37 = and i64 %36, 60
+ %38 = add i64 %37, %nhoff.1.i
+ br label %parse_ip.exit.i
+
+; <label>:39 ; preds = %12
+ %40 = add i64 %nhoff.1.i, 6
+ %41 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %40) #2
+ %42 = add i64 %nhoff.1.i, 8
+ %43 = call i64 @llvm.bpf.load.word(i8* %1, i64 %42) #2
+ %44 = add i64 %nhoff.1.i, 12
+ %45 = call i64 @llvm.bpf.load.word(i8* %1, i64 %44) #2
+ %46 = add i64 %nhoff.1.i, 16
+ %47 = call i64 @llvm.bpf.load.word(i8* %1, i64 %46) #2
+ %48 = add i64 %nhoff.1.i, 20
+ %49 = call i64 @llvm.bpf.load.word(i8* %1, i64 %48) #2
+ %50 = add i64 %nhoff.1.i, 24
+ %51 = call i64 @llvm.bpf.load.word(i8* %1, i64 %50) #2
+ %52 = add i64 %nhoff.1.i, 28
+ %53 = call i64 @llvm.bpf.load.word(i8* %1, i64 %52) #2
+ %54 = add i64 %nhoff.1.i, 32
+ %55 = call i64 @llvm.bpf.load.word(i8* %1, i64 %54) #2
+ %56 = add i64 %nhoff.1.i, 36
+ %57 = call i64 @llvm.bpf.load.word(i8* %1, i64 %56) #2
+ %58 = xor i64 %53, %51
+ %59 = xor i64 %58, %55
+ %60 = xor i64 %59, %57
+ %61 = trunc i64 %60 to i32
+ %62 = add i64 %nhoff.1.i, 40
+ br label %parse_ip.exit.i
+
+parse_ip.exit.i: ; preds = %39, %35, %33
+ %63 = phi i32 [ %61, %39 ], [ %29, %33 ], [ %29, %35 ]
+ %64 = phi i64 [ %41, %39 ], [ %30, %33 ], [ %30, %35 ]
+ %nhoff.2.i = phi i64 [ %62, %39 ], [ %34, %33 ], [ %38, %35 ]
+ switch i64 %64, label %187 [
+ i64 47, label %65
+ i64 4, label %137
+ i64 41, label %163
+ ]
+
+; <label>:65 ; preds = %parse_ip.exit.i
+ %66 = call i64 @llvm.bpf.load.half(i8* %1, i64 %nhoff.2.i) #2
+ %67 = add i64 %nhoff.2.i, 2
+ %68 = call i64 @llvm.bpf.load.half(i8* %1, i64 %67) #2
+ %69 = and i64 %66, 1856
+ %70 = icmp eq i64 %69, 0
+ br i1 %70, label %71, label %187
+
+; <label>:71 ; preds = %65
+ %72 = lshr i64 %66, 5
+ %73 = and i64 %72, 4
+ %74 = add i64 %nhoff.2.i, 4
+ %..i = add i64 %74, %73
+ %75 = and i64 %66, 32
+ %76 = icmp eq i64 %75, 0
+ %77 = add i64 %..i, 4
+ %nhoff.4.i = select i1 %76, i64 %..i, i64 %77
+ %78 = and i64 %66, 16
+ %79 = icmp eq i64 %78, 0
+ %80 = add i64 %nhoff.4.i, 4
+ %nhoff.4..i = select i1 %79, i64 %nhoff.4.i, i64 %80
+ %81 = icmp eq i64 %68, 33024
+ br i1 %81, label %82, label %86
+
+; <label>:82 ; preds = %71
+ %83 = add i64 %nhoff.4..i, 2
+ %84 = call i64 @llvm.bpf.load.half(i8* %1, i64 %83) #2
+ %85 = add i64 %nhoff.4..i, 4
+ br label %86
+
+; <label>:86 ; preds = %82, %71
+ %proto.2.i = phi i64 [ %84, %82 ], [ %68, %71 ]
+ %nhoff.6.i = phi i64 [ %85, %82 ], [ %nhoff.4..i, %71 ]
+ switch i64 %proto.2.i, label %flow_dissector.exit.thread [
+ i64 2048, label %87
+ i64 34525, label %113
+ ]
+
+; <label>:87 ; preds = %86
+ %88 = add i64 %nhoff.6.i, 6
+ %89 = call i64 @llvm.bpf.load.half(i8* %1, i64 %88) #2
+ %90 = and i64 %89, 16383
+ %91 = icmp eq i64 %90, 0
+ br i1 %91, label %92, label %.thread.i4.i
+
+; <label>:92 ; preds = %87
+ %93 = add i64 %nhoff.6.i, 9
+ %94 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %93) #2
+ %95 = icmp eq i64 %94, 47
+ br i1 %95, label %102, label %.thread.i4.i
+
+.thread.i4.i: ; preds = %92, %87
+ %96 = phi i64 [ %94, %92 ], [ 0, %87 ]
+ %97 = add i64 %nhoff.6.i, 12
+ %98 = call i64 @llvm.bpf.load.word(i8* %1, i64 %97) #2
+ %99 = add i64 %nhoff.6.i, 16
+ %100 = call i64 @llvm.bpf.load.word(i8* %1, i64 %99) #2
+ %101 = trunc i64 %100 to i32
+ br label %102
+
+; <label>:102 ; preds = %.thread.i4.i, %92
+ %103 = phi i32 [ %101, %.thread.i4.i ], [ %63, %92 ]
+ %104 = phi i64 [ %96, %.thread.i4.i ], [ 47, %92 ]
+ %105 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.6.i) #2
+ %106 = icmp eq i64 %105, 69
+ br i1 %106, label %107, label %109
+
+; <label>:107 ; preds = %102
+ %108 = add i64 %nhoff.6.i, 20
+ br label %187
+
+; <label>:109 ; preds = %102
+ %110 = shl i64 %105, 2
+ %111 = and i64 %110, 60
+ %112 = add i64 %111, %nhoff.6.i
+ br label %187
+
+; <label>:113 ; preds = %86
+ %114 = add i64 %nhoff.6.i, 6
+ %115 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %114) #2
+ %116 = add i64 %nhoff.6.i, 8
+ %117 = call i64 @llvm.bpf.load.word(i8* %1, i64 %116) #2
+ %118 = add i64 %nhoff.6.i, 12
+ %119 = call i64 @llvm.bpf.load.word(i8* %1, i64 %118) #2
+ %120 = add i64 %nhoff.6.i, 16
+ %121 = call i64 @llvm.bpf.load.word(i8* %1, i64 %120) #2
+ %122 = add i64 %nhoff.6.i, 20
+ %123 = call i64 @llvm.bpf.load.word(i8* %1, i64 %122) #2
+ %124 = add i64 %nhoff.6.i, 24
+ %125 = call i64 @llvm.bpf.load.word(i8* %1, i64 %124) #2
+ %126 = add i64 %nhoff.6.i, 28
+ %127 = call i64 @llvm.bpf.load.word(i8* %1, i64 %126) #2
+ %128 = add i64 %nhoff.6.i, 32
+ %129 = call i64 @llvm.bpf.load.word(i8* %1, i64 %128) #2
+ %130 = add i64 %nhoff.6.i, 36
+ %131 = call i64 @llvm.bpf.load.word(i8* %1, i64 %130) #2
+ %132 = xor i64 %127, %125
+ %133 = xor i64 %132, %129
+ %134 = xor i64 %133, %131
+ %135 = trunc i64 %134 to i32
+ %136 = add i64 %nhoff.6.i, 40
+ br label %187
+
+; <label>:137 ; preds = %parse_ip.exit.i
+ %138 = add i64 %nhoff.2.i, 6
+ %139 = call i64 @llvm.bpf.load.half(i8* %1, i64 %138) #2
+ %140 = and i64 %139, 16383
+ %141 = icmp eq i64 %140, 0
+ br i1 %141, label %142, label %.thread.i1.i
+
+; <label>:142 ; preds = %137
+ %143 = add i64 %nhoff.2.i, 9
+ %144 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %143) #2
+ %145 = icmp eq i64 %144, 47
+ br i1 %145, label %152, label %.thread.i1.i
+
+.thread.i1.i: ; preds = %142, %137
+ %146 = phi i64 [ %144, %142 ], [ 0, %137 ]
+ %147 = add i64 %nhoff.2.i, 12
+ %148 = call i64 @llvm.bpf.load.word(i8* %1, i64 %147) #2
+ %149 = add i64 %nhoff.2.i, 16
+ %150 = call i64 @llvm.bpf.load.word(i8* %1, i64 %149) #2
+ %151 = trunc i64 %150 to i32
+ br label %152
+
+; <label>:152 ; preds = %.thread.i1.i, %142
+ %153 = phi i32 [ %151, %.thread.i1.i ], [ %63, %142 ]
+ %154 = phi i64 [ %146, %.thread.i1.i ], [ 47, %142 ]
+ %155 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %nhoff.2.i) #2
+ %156 = icmp eq i64 %155, 69
+ br i1 %156, label %157, label %159
+
+; <label>:157 ; preds = %152
+ %158 = add i64 %nhoff.2.i, 20
+ br label %187
+
+; <label>:159 ; preds = %152
+ %160 = shl i64 %155, 2
+ %161 = and i64 %160, 60
+ %162 = add i64 %161, %nhoff.2.i
+ br label %187
+
+; <label>:163 ; preds = %parse_ip.exit.i
+ %164 = add i64 %nhoff.2.i, 6
+ %165 = call i64 @llvm.bpf.load.byte(i8* %1, i64 %164) #2
+ %166 = add i64 %nhoff.2.i, 8
+ %167 = call i64 @llvm.bpf.load.word(i8* %1, i64 %166) #2
+ %168 = add i64 %nhoff.2.i, 12
+ %169 = call i64 @llvm.bpf.load.word(i8* %1, i64 %168) #2
+ %170 = add i64 %nhoff.2.i, 16
+ %171 = call i64 @llvm.bpf.load.word(i8* %1, i64 %170) #2
+ %172 = add i64 %nhoff.2.i, 20
+ %173 = call i64 @llvm.bpf.load.word(i8* %1, i64 %172) #2
+ %174 = add i64 %nhoff.2.i, 24
+ %175 = call i64 @llvm.bpf.load.word(i8* %1, i64 %174) #2
+ %176 = add i64 %nhoff.2.i, 28
+ %177 = call i64 @llvm.bpf.load.word(i8* %1, i64 %176) #2
+ %178 = add i64 %nhoff.2.i, 32
+ %179 = call i64 @llvm.bpf.load.word(i8* %1, i64 %178) #2
+ %180 = add i64 %nhoff.2.i, 36
+ %181 = call i64 @llvm.bpf.load.word(i8* %1, i64 %180) #2
+ %182 = xor i64 %177, %175
+ %183 = xor i64 %182, %179
+ %184 = xor i64 %183, %181
+ %185 = trunc i64 %184 to i32
+ %186 = add i64 %nhoff.2.i, 40
+ br label %187
+
+; <label>:187 ; preds = %163, %159, %157, %113, %109, %107, %65, %parse_ip.exit.i
+ %188 = phi i32 [ %63, %parse_ip.exit.i ], [ %185, %163 ], [ %63, %65 ], [ %135, %113 ], [ %103, %107 ], [ %103, %109 ], [ %153, %157 ], [ %153, %159 ]
+ %189 = phi i64 [ %64, %parse_ip.exit.i ], [ %165, %163 ], [ 47, %65 ], [ %115, %113 ], [ %104, %107 ], [ %104, %109 ], [ %154, %157 ], [ %154, %159 ]
+ %nhoff.7.i = phi i64 [ %nhoff.2.i, %parse_ip.exit.i ], [ %186, %163 ], [ %nhoff.2.i, %65 ], [ %136, %113 ], [ %108, %107 ], [ %112, %109 ], [ %158, %157 ], [ %162, %159 ]
+ %cond.i.i = icmp eq i64 %189, 51
+ %190 = select i1 %cond.i.i, i64 4, i64 0
+ %191 = add i64 %190, %nhoff.7.i
+ %192 = call i64 @llvm.bpf.load.word(i8* %1, i64 %191) #2
+ store i32 %188, i32* %key, align 4
+ %193 = bitcast i32* %key to i8*
+ %194 = call i8* inttoptr (i64 1 to i8* (i8*, i8*)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193) #2
+ %195 = icmp eq i8* %194, null
+ br i1 %195, label %199, label %196
+
+; <label>:196 ; preds = %187
+ %197 = bitcast i8* %194 to i64*
+ %198 = atomicrmw add i64* %197, i64 1 seq_cst
+ br label %flow_dissector.exit.thread
+
+; <label>:199 ; preds = %187
+ store i64 1, i64* %val, align 8
+ %200 = bitcast i64* %val to i8*
+ %201 = call i32 inttoptr (i64 2 to i32 (i8*, i8*, i8*, i64)*)(i8* bitcast (%struct.bpf_map_def* @hash_map to i8*), i8* %193, i8* %200, i64 0) #2
+ br label %flow_dissector.exit.thread
+
+flow_dissector.exit.thread: ; preds = %86, %12, %196, %199
+ ret i32 0
+; CHECK-LABEL: bpf_prog2:
+; CHECK: ldabs_h r0, r6.data + 12 # encoding: [0x28,0x00,0x00,0x00,0x0c,0x00,0x00,0x00]
+; CHECK: ldabs_h r0, r6.data + 16 # encoding: [0x28,0x00,0x00,0x00,0x10,0x00,0x00,0x00]
+; CHECK-NOT: implicit
+; CHECK: ld_64 r1
+; CHECK-NOT: ori
+; CHECK: call 1 # encoding: [0x85,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
+}
+
+declare i64 @llvm.bpf.load.half(i8*, i64) #1
+
+declare i64 @llvm.bpf.load.word(i8*, i64) #1
+
+declare i64 @llvm.bpf.load.byte(i8*, i64) #1
diff --git a/llvm/test/CodeGen/BPF/struct_ret1.ll b/llvm/test/CodeGen/BPF/struct_ret1.ll
new file mode 100644
index 00000000000..1477c565cd9
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/struct_ret1.ll
@@ -0,0 +1,17 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: only integer returns
+
+%struct.S = type { i32, i32, i32 }
+
+@s = common global %struct.S zeroinitializer, align 4
+
+; Function Attrs: nounwind readonly uwtable
+define { i64, i32 } @bar(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) #0 {
+entry:
+ %retval.sroa.0.0.copyload = load i64* bitcast (%struct.S* @s to i64*), align 4
+ %retval.sroa.2.0.copyload = load i32* getelementptr inbounds (%struct.S* @s, i64 0, i32 2), align 4
+ %.fca.0.insert = insertvalue { i64, i32 } undef, i64 %retval.sroa.0.0.copyload, 0
+ %.fca.1.insert = insertvalue { i64, i32 } %.fca.0.insert, i32 %retval.sroa.2.0.copyload, 1
+ ret { i64, i32 } %.fca.1.insert
+}
diff --git a/llvm/test/CodeGen/BPF/struct_ret2.ll b/llvm/test/CodeGen/BPF/struct_ret2.ll
new file mode 100644
index 00000000000..90461205f7c
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/struct_ret2.ll
@@ -0,0 +1,12 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: only small returns
+
+; Function Attrs: nounwind uwtable
+define { i64, i32 } @foo(i32 %a, i32 %b, i32 %c) #0 {
+entry:
+ %call = tail call { i64, i32 } @bar(i32 %a, i32 %b, i32 %c, i32 1, i32 2) #3
+ ret { i64, i32 } %call
+}
+
+declare { i64, i32 } @bar(i32, i32, i32, i32, i32) #1
diff --git a/llvm/test/CodeGen/BPF/vararg1.ll b/llvm/test/CodeGen/BPF/vararg1.ll
new file mode 100644
index 00000000000..4a22db65e69
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/vararg1.ll
@@ -0,0 +1,9 @@
+; RUN: not llc -march=bpf < %s 2> %t1
+; RUN: FileCheck %s < %t1
+; CHECK: with VarArgs
+
+; Function Attrs: nounwind readnone uwtable
+define void @foo(i32 %a, ...) #0 {
+entry:
+ ret void
+}