| author | Ahmed Bougacha <ahmed.bougacha@gmail.com> | 2015-03-10 20:45:38 +0000 |
|---|---|---|
| committer | Ahmed Bougacha <ahmed.bougacha@gmail.com> | 2015-03-10 20:45:38 +0000 |
| commit | fab5892f8b762a83d151976db4666895e5e4198b (patch) | |
| tree | dfa567ccee7be507bb58fd24a55c5028292e48dd /llvm/test | |
| parent | e6cdf34116305bae21caeff1738625ce375bc196 (diff) | |
[AArch64] Avoid going through GPRs for across-vector instructions.
This adds new node types for each intrinsic.
For instance, for addv, we have AArch64ISD::UADDV, such that:
    (v4i32 (uaddv ...))
is the same as
    (v4i32 (scalar_to_vector (i32 (int_aarch64_neon_uaddv ...))))
that is,
    (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                          (i32 (int_aarch64_neon_uaddv ...)),
                          ssub))
In a combine, we transform all such across-vector-lanes intrinsics to:
    (i32 (extract_vector_elt (uaddv ...), 0))
This has one big advantage: by making the extract_vector_elt explicit, we
enable the existing patterns for lane-aware instructions to fire.
This lets us avoid needlessly going through the GPRs. Consider:
    uint32x4_t test_mul(uint32x4_t a, uint32x4_t b) {
      return vmulq_n_u32(a, vaddvq_u32(b));
    }
We now generate:
    addv.4s  s1, v1
    mul.4s   v0, v0, v1[0]
instead of the previous:
    addv.4s  s1, v1
    fmov     w8, s1
    dup.4s   v1, w8
    mul.4s   v0, v1, v0
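The shape of such a combine is mechanical. Below is a minimal sketch, in the style of a DAG combine in AArch64ISelLowering.cpp; the helper name combineAcrossLanesIntrinsic, its exact signature, and the modern getConstant overload are illustrative assumptions, not the literal code of this commit:

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Hypothetical helper: rewrite an across-vector-lanes intrinsic node into
// the dedicated AArch64ISD reduction node plus an explicit lane-0 extract.
// Opc is the matching AArch64ISD opcode (e.g. UADDV for uaddv); N is the
// ISD::INTRINSIC_WO_CHAIN node, whose operand 1 is the input vector.
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
                                           SelectionDAG &DAG) {
  SDLoc DL(N);
  // Run the reduction with a vector result type: the scalar result stays in
  // lane 0 of a SIMD register instead of being copied out to a GPR.
  SDValue Reduce =
      DAG.getNode(Opc, DL, N->getOperand(1).getValueType(), N->getOperand(1));
  // Make the scalar use explicit as an extract of lane 0, so the existing
  // lane-indexed patterns (mul-by-element, ins, ...) can match the users.
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce,
                     DAG.getConstant(0, DL, MVT::i64));
}
```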
rdar://20044838
llvm-svn: 231840
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/AArch64/arm64-smaxv.ll | 72 |
| -rw-r--r-- | llvm/test/CodeGen/AArch64/arm64-sminv.ll | 72 |
| -rw-r--r-- | llvm/test/CodeGen/AArch64/arm64-umaxv.ll | 74 |
| -rw-r--r-- | llvm/test/CodeGen/AArch64/arm64-uminv.ll | 73 |
| -rw-r--r-- | llvm/test/CodeGen/AArch64/arm64-vaddv.ll | 164 |
5 files changed, 450 insertions, 5 deletions
diff --git a/llvm/test/CodeGen/AArch64/arm64-smaxv.ll b/llvm/test/CodeGen/AArch64/arm64-smaxv.ll
index 183e667643c..8cc4502f6ca 100644
--- a/llvm/test/CodeGen/AArch64/arm64-smaxv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-smaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
 
 define signext i8 @test_vmaxv_s8(<8 x i8> %a1) {
 ; CHECK: test_vmaxv_s8
@@ -65,6 +65,76 @@ entry:
   ret i32 %vmaxv.i
 }
 
+define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vmaxv_s8_used_by_laneop:
+; CHECK: smaxv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
+define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vmaxv_s16_used_by_laneop:
+; CHECK: smaxv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
+define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vmaxv_s32_used_by_laneop:
+; CHECK: smaxp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vmaxvq_s8_used_by_laneop:
+; CHECK: smaxv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vmaxvq_s16_used_by_laneop:
+; CHECK: smaxv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vmaxvq_s32_used_by_laneop:
+; CHECK: smaxv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
+
 declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
 declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
 declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-sminv.ll b/llvm/test/CodeGen/AArch64/arm64-sminv.ll
index 195c4e59dc4..c1650b5fb29 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sminv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
 
 define signext i8 @test_vminv_s8(<8 x i8> %a1) {
 ; CHECK: test_vminv_s8
@@ -65,6 +65,76 @@ entry:
   ret i32 %vminv.i
 }
 
+define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vminv_s8_used_by_laneop:
+; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
+define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vminv_s16_used_by_laneop:
+; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
+define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vminv_s32_used_by_laneop:
+; CHECK: sminp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vminvq_s8_used_by_laneop:
+; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vminvq_s16_used_by_laneop:
+; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vminvq_s32_used_by_laneop:
+; CHECK: sminv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
+
 declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
 declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
 declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
index d523f317d08..a77f228cb15 100644
--- a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 
 define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmax_u8x8:
@@ -86,7 +86,79 @@ return:
   ret i32 %retval.0
 }
 
+define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vmaxv_u8_used_by_laneop:
+; CHECK: umaxv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
+define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vmaxv_u16_used_by_laneop:
+; CHECK: umaxv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
+define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vmaxv_u32_used_by_laneop:
+; CHECK: umaxp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vmaxvq_u8_used_by_laneop:
+; CHECK: umaxv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vmaxvq_u16_used_by_laneop:
+; CHECK: umaxv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vmaxvq_u32_used_by_laneop:
+; CHECK: umaxv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
+
 declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>) nounwind readnone
 declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>) nounwind readnone
 declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>) nounwind readnone
 declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>) nounwind readnone
+declare i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32>) nounwind readnone
+declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>) nounwind readnone
diff --git a/llvm/test/CodeGen/AArch64/arm64-uminv.ll b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
index 3bade4b28b8..2181db46ea9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uminv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 
 define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmin_u8x8:
@@ -86,7 +86,78 @@ return:
   ret i32 %retval.0
 }
 
+define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vminv_u8_used_by_laneop:
+; CHECK: uminv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
+define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vminv_u16_used_by_laneop:
+; CHECK: uminv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
+define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vminv_u32_used_by_laneop:
+; CHECK: uminp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vminvq_u8_used_by_laneop:
+; CHECK: uminv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vminvq_u16_used_by_laneop:
+; CHECK: uminv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vminvq_u32_used_by_laneop:
+; CHECK: uminv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
 declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>) nounwind readnone
+declare i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32>) nounwind readnone
+declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>) nounwind readnone
diff --git a/llvm/test/CodeGen/AArch64/arm64-vaddv.ll b/llvm/test/CodeGen/AArch64/arm64-vaddv.ll
index 2d92ce6ea57..589319bb322 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vaddv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vaddv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -asm-verbose=false -mcpu=cyclone | FileCheck %s
 
 define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
 ; CHECK-LABEL: test_vaddv_s8:
@@ -11,6 +11,18 @@ entry:
   ret i8 %0
 }
 
+define <8 x i8> @test_vaddv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vaddv_s8_used_by_laneop:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
 define signext i16 @test_vaddv_s16(<4 x i16> %a1) {
 ; CHECK-LABEL: test_vaddv_s16:
 ; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
@@ -22,6 +34,18 @@ entry:
   ret i16 %0
 }
 
+define <4 x i16> @test_vaddv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vaddv_s16_used_by_laneop:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
 define i32 @test_vaddv_s32(<2 x i32> %a1) {
 ; CHECK-LABEL: test_vaddv_s32:
 ; 2 x i32 is not supported by the ISA, thus, this is a special case
@@ -33,6 +57,17 @@ entry:
   ret i32 %vaddv.i
 }
 
+define <2 x i32> @test_vaddv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vaddv_s32_used_by_laneop:
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
 define i64 @test_vaddv_s64(<2 x i64> %a1) {
 ; CHECK-LABEL: test_vaddv_s64:
 ; CHECK: addp.2d [[REGNUM:d[0-9]+]], v0
@@ -43,6 +78,17 @@ entry:
   ret i64 %vaddv.i
 }
 
+define <2 x i64> @test_vaddv_s64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
+; CHECK-LABEL: test_vaddv_s64_used_by_laneop:
+; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a2)
+  %1 = insertelement <2 x i64> %a1, i64 %0, i64 1
+  ret <2 x i64> %1
+}
+
 define zeroext i8 @test_vaddv_u8(<8 x i8> %a1) {
 ; CHECK-LABEL: test_vaddv_u8:
 ; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
@@ -54,6 +100,18 @@ entry:
   ret i8 %0
 }
 
+define <8 x i8> @test_vaddv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
+; CHECK-LABEL: test_vaddv_u8_used_by_laneop:
+; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
+  ret <8 x i8> %2
+}
+
 define i32 @test_vaddv_u8_masked(<8 x i8> %a1) {
 ; CHECK-LABEL: test_vaddv_u8_masked:
 ; CHECK: addv.8b b[[REGNUM:[0-9]+]], v0
@@ -76,6 +134,18 @@ entry:
   ret i16 %0
 }
 
+define <4 x i16> @test_vaddv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
+; CHECK-LABEL: test_vaddv_u16_used_by_laneop:
+; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
+  ret <4 x i16> %2
+}
+
 define i32 @test_vaddv_u16_masked(<4 x i16> %a1) {
 ; CHECK-LABEL: test_vaddv_u16_masked:
 ; CHECK: addv.4h h[[REGNUM:[0-9]+]], v0
@@ -98,6 +168,17 @@ entry:
   ret i32 %vaddv.i
 }
 
+define <2 x i32> @test_vaddv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
+; CHECK-LABEL: test_vaddv_u32_used_by_laneop:
+; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
+; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a2)
+  %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
+  ret <2 x i32> %1
+}
+
 define float @test_vaddv_f32(<2 x float> %a1) {
 ; CHECK-LABEL: test_vaddv_f32:
 ; CHECK: faddp.2s s0, v0
@@ -136,6 +217,17 @@ entry:
   ret i64 %vaddv.i
 }
 
+define <2 x i64> @test_vaddv_u64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
+; CHECK-LABEL: test_vaddv_u64_used_by_laneop:
+; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a2)
+  %1 = insertelement <2 x i64> %a1, i64 %0, i64 1
+  ret <2 x i64> %1
+}
+
 define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1) {
 ; CHECK-LABEL: test_vaddv_u64_to_vec:
 ; CHECK: addp.2d d0, v0
@@ -159,6 +251,18 @@ entry:
   ret i8 %0
 }
 
+define <16 x i8> @test_vaddvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vaddvq_s8_used_by_laneop:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
 define signext i16 @test_vaddvq_s16(<8 x i16> %a1) {
 ; CHECK-LABEL: test_vaddvq_s16:
 ; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
@@ -170,6 +274,18 @@ entry:
   ret i16 %0
 }
 
+define <8 x i16> @test_vaddvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vaddvq_s16_used_by_laneop:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
 define i32 @test_vaddvq_s32(<4 x i32> %a1) {
 ; CHECK-LABEL: test_vaddvq_s32:
 ; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
@@ -180,6 +296,17 @@ entry:
   ret i32 %vaddv.i
 }
 
+define <4 x i32> @test_vaddvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vaddvq_s32_used_by_laneop:
+; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
+
 define zeroext i8 @test_vaddvq_u8(<16 x i8> %a1) {
 ; CHECK-LABEL: test_vaddvq_u8:
 ; CHECK: addv.16b b[[REGNUM:[0-9]+]], v0
@@ -191,6 +318,18 @@ entry:
   ret i8 %0
 }
 
+define <16 x i8> @test_vaddvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-LABEL: test_vaddvq_u8_used_by_laneop:
+; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a2)
+  %1 = trunc i32 %0 to i8
+  %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
+  ret <16 x i8> %2
+}
+
 define zeroext i16 @test_vaddvq_u16(<8 x i16> %a1) {
 ; CHECK-LABEL: test_vaddvq_u16:
 ; CHECK: addv.8h h[[REGNUM:[0-9]+]], v0
@@ -202,6 +341,18 @@ entry:
   ret i16 %0
 }
 
+define <8 x i16> @test_vaddvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-LABEL: test_vaddvq_u16_used_by_laneop:
+; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a2)
+  %1 = trunc i32 %0 to i16
+  %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
+  ret <8 x i16> %2
+}
+
 define i32 @test_vaddvq_u32(<4 x i32> %a1) {
 ; CHECK-LABEL: test_vaddvq_u32:
 ; CHECK: addv.4s [[REGNUM:s[0-9]+]], v0
@@ -212,6 +363,17 @@ entry:
   ret i32 %vaddv.i
 }
 
+define <4 x i32> @test_vaddvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-LABEL: test_vaddvq_u32_used_by_laneop:
+; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
+; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a2)
+  %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
+  ret <4 x i32> %1
+}
+
 declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>)
 declare i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16>)

