diff options
| author | Eli Friedman <efriedma@codeaurora.org> | 2016-12-19 23:09:51 +0000 |
|---|---|---|
| committer | Eli Friedman <efriedma@codeaurora.org> | 2016-12-19 23:09:51 +0000 |
| commit | 1a9a887a290095e805bf1c1b8252296c00862b38 (patch) | |
| tree | c6d3bfa087fdade026e98c47fd11e4d34d7aa5ad /llvm/test/CodeGen | |
| parent | 8beac28564e9769efa9f473bafe6a5ff8d0e88b3 (diff) | |
| download | bcm5719-llvm-1a9a887a290095e805bf1c1b8252296c00862b38.tar.gz bcm5719-llvm-1a9a887a290095e805bf1c1b8252296c00862b38.zip | |
Add ARM support to update_llc_test_checks.py
Just the minimal support to get it working at the moment.
Includes checks for test/CodeGen/ARM/vzip.ll as an example.
Differential Revision: https://reviews.llvm.org/D27829
llvm-svn: 290144
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/ARM/vzip.ll | 98 |
1 file changed, 64 insertions, 34 deletions
diff --git a/llvm/test/CodeGen/ARM/vzip.ll b/llvm/test/CodeGen/ARM/vzip.ll index 10c15fa291d..0e3a10bfe57 100644 --- a/llvm/test/CodeGen/ARM/vzip.ll +++ b/llvm/test/CodeGen/ARM/vzip.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { @@ -20,11 +21,11 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_Qres: ; CHECK: @ BB#0: -; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] -; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] -; CHECK-NEXT: vzip.8 [[LDR0]], [[LDR1]] -; CHECK-NEXT: vmov r0, r1, [[LDR0]] -; CHECK-NEXT: vmov r2, r3, [[LDR1]] +; CHECK-NEXT: vldr d17, [r1] +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vzip.8 d16, d17 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 ; CHECK-NEXT: mov pc, lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B @@ -52,11 +53,11 @@ define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vzipi16_Qres: ; CHECK: @ BB#0: -; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] -; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] -; CHECK-NEXT: vzip.16 [[LDR0]], [[LDR1]] -; CHECK-NEXT: vmov r0, r1, [[LDR0]] -; CHECK-NEXT: vmov r2, r3, [[LDR1]] +; CHECK-NEXT: vldr d17, [r1] +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vzip.16 d16, d17 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 ; CHECK-NEXT: mov pc, lr %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B @@ -220,11 +221,11 @@ define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_undef_Qres: ; CHECK: @ BB#0: -; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] -; CHECK-NEXT: 
vldr [[LDR0:d[0-9]+]], [r0] -; CHECK-NEXT: vzip.8 [[LDR0]], [[LDR1]] -; CHECK-NEXT: vmov r0, r1, [[LDR0]] -; CHECK-NEXT: vmov r2, r3, [[LDR1]] +; CHECK-NEXT: vldr d17, [r1] +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vzip.8 d16, d17 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 ; CHECK-NEXT: mov pc, lr %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B @@ -266,9 +267,15 @@ define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { } define <8 x i16> @vzip_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) { +; CHECK-LABEL: vzip_lower_shufflemask_undef: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vldr d17, [r1] +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vzip.16 d16, d17 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vzip_lower_shufflemask_undef - ; CHECK: vzip %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B %0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7> @@ -276,30 +283,45 @@ entry: } define <4 x i32> @vzip_lower_shufflemask_zeroed(<2 x i32>* %A) { +; CHECK-LABEL: vzip_lower_shufflemask_zeroed: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vdup.32 q9, d16[0] +; CHECK-NEXT: vzip.32 q8, q9 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 +; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vzip_lower_shufflemask_zeroed - ; CHECK-NOT: vtrn - ; CHECK: vzip %tmp1 = load <2 x i32>, <2 x i32>* %A %0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp1, <4 x i32> <i32 0, i32 0, i32 1, i32 0> ret <4 x i32> %0 } define <4 x i32> @vzip_lower_shufflemask_vuzp(<2 x i32>* %A) { +; CHECK-LABEL: vzip_lower_shufflemask_vuzp: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vdup.32 q9, d16[0] +; CHECK-NEXT: vzip.32 q8, q9 +; CHECK-NEXT: vmov r0, r1, d16 +; CHECK-NEXT: vmov r2, r3, d17 
+; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vzip_lower_shufflemask_vuzp - ; CHECK-NOT: vuzp - ; CHECK: vzip %tmp1 = load <2 x i32>, <2 x i32>* %A %0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp1, <4 x i32> <i32 0, i32 2, i32 1, i32 0> ret <4 x i32> %0 } define void @vzip_undef_rev_shufflemask_vtrn(<2 x i32>* %A, <4 x i32>* %B) { +; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vldr d16, [r0] +; CHECK-NEXT: vorr q9, q8, q8 +; CHECK-NEXT: vzip.32 q8, q9 +; CHECK-NEXT: vext.32 q8, q8, q8, #2 +; CHECK-NEXT: vst1.64 {d16, d17}, [r1] +; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn - ; CHECK-NOT: vtrn - ; CHECK: vzip %tmp1 = load <2 x i32>, <2 x i32>* %A %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 0> store <4 x i32> %0, <4 x i32>* %B @@ -307,10 +329,16 @@ entry: } define void @vzip_vext_factor(<8 x i16>* %A, <4 x i16>* %B) { +; CHECK-LABEL: vzip_vext_factor: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vld1.64 {d16, d17}, [r0] +; CHECK-NEXT: vext.16 d16, d16, d17, #3 +; CHECK-NEXT: vext.16 d17, d16, d16, #1 +; CHECK-NEXT: vzip.16 d16, d17 +; CHECK-NEXT: vext.16 d16, d16, d16, #1 +; CHECK-NEXT: vstr d16, [r1] +; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vzip_vext_factor - ; CHECK: vext.16 d16, d16, d17, #3 - ; CHECK: vzip %tmp1 = load <8 x i16>, <8 x i16>* %A %0 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 4, i32 5, i32 3> store <4 x i16> %0, <4 x i16>* %B @@ -318,12 +346,14 @@ entry: } define <8 x i8> @vdup_zip(i8* nocapture readonly %x, i8* nocapture readonly %y) { +; CHECK-LABEL: vdup_zip: +; CHECK: @ BB#0: @ %entry +; CHECK-NEXT: vld1.8 {d16[]}, [r1] +; CHECK-NEXT: vld1.8 {d17[]}, [r0] +; CHECK-NEXT: vzip.8 d17, d16 +; CHECK-NEXT: vmov r0, r1, d17 +; CHECK-NEXT: mov pc, lr entry: - ; CHECK-LABEL: vdup_zip: - ; CHECK: vld1.8 - ; CHECK-NEXT: vld1.8 - ; CHECK-NEXT: vzip.8 - ; CHECK-NEXT: vmov r0, r1 %0 = load i8, 
i8* %x, align 1 %1 = insertelement <8 x i8> undef, i8 %0, i32 0 %lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 undef> |

