Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-across.ll | 476
-rw-r--r--  llvm/test/MC/AArch64/neon-across.s       | 101
-rw-r--r--  llvm/test/MC/AArch64/neon-diagnostics.s  | 163
3 files changed, 740 insertions(+), 0 deletions(-)
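
For context: the tests below exercise the A64 across-lanes reduction instructions (SADDLV/UADDLV, SMAXV/UMAXV, SMINV/UMINV, ADDV, plus the floating-point FMAXV/FMINV/FMAXNMV/FMINNMV), each of which folds every lane of a vector register into a single scalar result. A minimal sketch of what two of these reductions compute at the source level, using the standard ACLE intrinsics from arm_neon.h rather than anything added by this patch (assumes an AArch64 toolchain):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int8x8_t v = vdup_n_s8(3);    /* eight lanes, each holding 3 */
        int16_t sum = vaddlv_s8(v);   /* SADDLV: widening add across lanes -> 24, as i16 */
        int8_t  max = vmaxv_s8(v);    /* SMAXV: signed max across lanes -> 3, lane-width result */
        printf("%d %d\n", (int)sum, (int)max);
        return 0;
    }

Note how the long-form reductions (SADDLV/UADDLV) produce a result one step wider than the input lanes, which is why the IR tests below pair v8i8 inputs with i16 results.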
diff --git a/llvm/test/CodeGen/AArch64/neon-across.ll b/llvm/test/CodeGen/AArch64/neon-across.ll
new file mode 100644
index 00000000000..733db970cf3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-across.ll
@@ -0,0 +1,476 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
+
+declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
+
+define i16 @test_vaddlv_s8(<8 x i8> %a) {
+; CHECK: test_vaddlv_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_s16(<4 x i16> %a) {
+; CHECK: test_vaddlv_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlv_u8(<8 x i8> %a) {
+; CHECK: test_vaddlv_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_u16(<4 x i16> %a) {
+; CHECK: test_vaddlv_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_s32:
+; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %saddlv.i, i32 0
+ ret i64 %0
+}
+
+define i16 @test_vaddlvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_u32:
+; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %uaddlv.i, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vmaxv_s8(<8 x i8> %a) {
+; CHECK: test_vmaxv_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_s16(<4 x i16> %a) {
+; CHECK: test_vmaxv_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxv_u8(<8 x i8> %a) {
+; CHECK: test_vmaxv_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_u16(<4 x i16> %a) {
+; CHECK: test_vmaxv_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxvq_s8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_s16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_s32:
+; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %smaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vmaxvq_u8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_u16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_u32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_u32:
+; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %umaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminv_s8(<8 x i8> %a) {
+; CHECK: test_vminv_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_s16(<4 x i16> %a) {
+; CHECK: test_vminv_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminv_u8(<8 x i8> %a) {
+; CHECK: test_vminv_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_u16(<4 x i16> %a) {
+; CHECK: test_vminv_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminvq_s8(<16 x i8> %a) {
+; CHECK: test_vminvq_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_s16(<8 x i16> %a) {
+; CHECK: test_vminvq_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a) {
+; CHECK: test_vminvq_s32:
+; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %sminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminvq_u8(<16 x i8> %a) {
+; CHECK: test_vminvq_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_u16(<8 x i16> %a) {
+; CHECK: test_vminvq_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_u32(<4 x i32> %a) {
+; CHECK: test_vminvq_u32:
+; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %uminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddv_s8(<8 x i8> %a) {
+; CHECK: test_vaddv_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_s16(<4 x i16> %a) {
+; CHECK: test_vaddv_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddv_u8(<8 x i8> %a) {
+; CHECK: test_vaddv_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_u16(<4 x i16> %a) {
+; CHECK: test_vaddv_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddvq_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddvq_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddvq_s32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddvq_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddvq_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddvq_u32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define float @test_vmaxvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxvq_f32:
+; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminvq_f32(<4 x float> %a) {
+; CHECK: test_vminvq_f32:
+; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminv.i, i32 0
+ ret float %0
+}
+
+define float @test_vmaxnmvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxnmvq_f32:
+; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminnmvq_f32(<4 x float> %a) {
+; CHECK: test_vminnmvq_f32:
+; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminnmv.i, i32 0
+ ret float %0
+}
+
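As an aside on the four floating-point reductions tested just above: FMAXV/FMINV propagate a NaN lane to the result, while FMAXNMV/FMINNMV implement IEEE-754 maxNum/minNum and prefer a numeric operand over a quiet NaN. A minimal sketch with the standard ACLE intrinsics (again an illustration assuming arm_neon.h, not part of the patch):

    #include <arm_neon.h>
    #include <math.h>
    #include <stdio.h>

    int main(void) {
        float32x4_t v = {1.0f, NAN, 3.0f, 2.0f};
        float prop = vmaxvq_f32(v);    /* FMAXV: the NaN lane propagates -> NaN */
        float num  = vmaxnmvq_f32(v);  /* FMAXNMV: IEEE maxNum, skips quiet NaN -> 3.0 */
        printf("%f %f\n", (double)prop, (double)num);
        return 0;
    }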
diff --git a/llvm/test/MC/AArch64/neon-across.s b/llvm/test/MC/AArch64/neon-across.s
new file mode 100644
index 00000000000..8b1c2d421ba
--- /dev/null
+++ b/llvm/test/MC/AArch64/neon-across.s
@@ -0,0 +1,101 @@
+// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//------------------------------------------------------------------------------
+// Instructions across vector registers
+//------------------------------------------------------------------------------
+
+ saddlv h0, v1.8b
+ saddlv h0, v1.16b
+ saddlv s0, v1.4h
+ saddlv s0, v1.8h
+ saddlv d0, v1.4s
+
+// CHECK: saddlv h0, v1.8b // encoding: [0x20,0x38,0x30,0x0e]
+// CHECK: saddlv h0, v1.16b // encoding: [0x20,0x38,0x30,0x4e]
+// CHECK: saddlv s0, v1.4h // encoding: [0x20,0x38,0x70,0x0e]
+// CHECK: saddlv s0, v1.8h // encoding: [0x20,0x38,0x70,0x4e]
+// CHECK: saddlv d0, v1.4s // encoding: [0x20,0x38,0xb0,0x4e]
+
+ uaddlv h0, v1.8b
+ uaddlv h0, v1.16b
+ uaddlv s0, v1.4h
+ uaddlv s0, v1.8h
+ uaddlv d0, v1.4s
+
+// CHECK: uaddlv h0, v1.8b // encoding: [0x20,0x38,0x30,0x2e]
+// CHECK: uaddlv h0, v1.16b // encoding: [0x20,0x38,0x30,0x6e]
+// CHECK: uaddlv s0, v1.4h // encoding: [0x20,0x38,0x70,0x2e]
+// CHECK: uaddlv s0, v1.8h // encoding: [0x20,0x38,0x70,0x6e]
+// CHECK: uaddlv d0, v1.4s // encoding: [0x20,0x38,0xb0,0x6e]
+
+ smaxv b0, v1.8b
+ smaxv b0, v1.16b
+ smaxv h0, v1.4h
+ smaxv h0, v1.8h
+ smaxv s0, v1.4s
+
+// CHECK: smaxv b0, v1.8b // encoding: [0x20,0xa8,0x30,0x0e]
+// CHECK: smaxv b0, v1.16b // encoding: [0x20,0xa8,0x30,0x4e]
+// CHECK: smaxv h0, v1.4h // encoding: [0x20,0xa8,0x70,0x0e]
+// CHECK: smaxv h0, v1.8h // encoding: [0x20,0xa8,0x70,0x4e]
+// CHECK: smaxv s0, v1.4s // encoding: [0x20,0xa8,0xb0,0x4e]
+
+ sminv b0, v1.8b
+ sminv b0, v1.16b
+ sminv h0, v1.4h
+ sminv h0, v1.8h
+ sminv s0, v1.4s
+
+// CHECK: sminv b0, v1.8b // encoding: [0x20,0xa8,0x31,0x0e]
+// CHECK: sminv b0, v1.16b // encoding: [0x20,0xa8,0x31,0x4e]
+// CHECK: sminv h0, v1.4h // encoding: [0x20,0xa8,0x71,0x0e]
+// CHECK: sminv h0, v1.8h // encoding: [0x20,0xa8,0x71,0x4e]
+// CHECK: sminv s0, v1.4s // encoding: [0x20,0xa8,0xb1,0x4e]
+
+ umaxv b0, v1.8b
+ umaxv b0, v1.16b
+ umaxv h0, v1.4h
+ umaxv h0, v1.8h
+ umaxv s0, v1.4s
+
+// CHECK: umaxv b0, v1.8b // encoding: [0x20,0xa8,0x30,0x2e]
+// CHECK: umaxv b0, v1.16b // encoding: [0x20,0xa8,0x30,0x6e]
+// CHECK: umaxv h0, v1.4h // encoding: [0x20,0xa8,0x70,0x2e]
+// CHECK: umaxv h0, v1.8h // encoding: [0x20,0xa8,0x70,0x6e]
+// CHECK: umaxv s0, v1.4s // encoding: [0x20,0xa8,0xb0,0x6e]
+
+ uminv b0, v1.8b
+ uminv b0, v1.16b
+ uminv h0, v1.4h
+ uminv h0, v1.8h
+ uminv s0, v1.4s
+
+// CHECK: uminv b0, v1.8b // encoding: [0x20,0xa8,0x31,0x2e]
+// CHECK: uminv b0, v1.16b // encoding: [0x20,0xa8,0x31,0x6e]
+// CHECK: uminv h0, v1.4h // encoding: [0x20,0xa8,0x71,0x2e]
+// CHECK: uminv h0, v1.8h // encoding: [0x20,0xa8,0x71,0x6e]
+// CHECK: uminv s0, v1.4s // encoding: [0x20,0xa8,0xb1,0x6e]
+
+ addv b0, v1.8b
+ addv b0, v1.16b
+ addv h0, v1.4h
+ addv h0, v1.8h
+ addv s0, v1.4s
+
+// CHECK: addv b0, v1.8b // encoding: [0x20,0xb8,0x31,0x0e]
+// CHECK: addv b0, v1.16b // encoding: [0x20,0xb8,0x31,0x4e]
+// CHECK: addv h0, v1.4h // encoding: [0x20,0xb8,0x71,0x0e]
+// CHECK: addv h0, v1.8h // encoding: [0x20,0xb8,0x71,0x4e]
+// CHECK: addv s0, v1.4s // encoding: [0x20,0xb8,0xb1,0x4e]
+
+ fmaxnmv s0, v1.4s
+ fminnmv s0, v1.4s
+ fmaxv s0, v1.4s
+ fminv s0, v1.4s
+
+// CHECK: fmaxnmv s0, v1.4s // encoding: [0x20,0xc8,0x30,0x6e]
+// CHECK: fminnmv s0, v1.4s // encoding: [0x20,0xc8,0xb0,0x6e]
+// CHECK: fmaxv s0, v1.4s // encoding: [0x20,0xf8,0x30,0x6e]
+// CHECK: fminv s0, v1.4s // encoding: [0x20,0xf8,0xb0,0x6e]
diff --git a/llvm/test/MC/AArch64/neon-diagnostics.s b/llvm/test/MC/AArch64/neon-diagnostics.s
index 211bc9aa5ca..a86796ff2aa 100644
--- a/llvm/test/MC/AArch64/neon-diagnostics.s
+++ b/llvm/test/MC/AArch64/neon-diagnostics.s
@@ -3608,3 +3608,166 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrdmulh v0.2d, v1.2d, v22.d[1]
// CHECK-ERROR: ^
+
+//----------------------------------------------------------------------
+// Across vectors
+//----------------------------------------------------------------------
+
+ saddlv b0, v1.8b
+ saddlv b0, v1.16b
+ saddlv h0, v1.4h
+ saddlv h0, v1.8h
+ saddlv s0, v1.2s
+ saddlv s0, v1.4s
+ saddlv d0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv b0, v1.8b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv h0, v1.4h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv s0, v1.4s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv d0, v1.2s
+// CHECK-ERROR: ^
+
+ uaddlv b0, v1.8b
+ uaddlv b0, v1.16b
+ uaddlv h0, v1.4h
+ uaddlv h0, v1.8h
+ uaddlv s0, v1.2s
+ uaddlv s0, v1.4s
+ uaddlv d0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv b0, v1.8b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv h0, v1.4h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv s0, v1.4s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv d0, v1.2s
+// CHECK-ERROR: ^
+
+ smaxv s0, v1.2s
+ sminv s0, v1.2s
+ umaxv s0, v1.2s
+ uminv s0, v1.2s
+ addv s0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smaxv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sminv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umaxv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uminv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: addv s0, v1.2s
+// CHECK-ERROR: ^
+
+ smaxv d0, v1.2d
+ sminv d0, v1.2d
+ umaxv d0, v1.2d
+ uminv d0, v1.2d
+ addv d0, v1.2d
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sminv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uminv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: addv d0, v1.2d
+// CHECK-ERROR: ^
+
+ fmaxnmv b0, v1.16b
+ fminnmv b0, v1.16b
+ fmaxv b0, v1.16b
+ fminv b0, v1.16b
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv b0, v1.16b
+// CHECK-ERROR: ^
+
+ fmaxnmv h0, v1.8h
+ fminnmv h0, v1.8h
+ fmaxv h0, v1.8h
+ fminv h0, v1.8h
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv h0, v1.8h
+// CHECK-ERROR: ^
+
+ fmaxnmv d0, v1.2d
+ fminnmv d0, v1.2d
+ fmaxv d0, v1.2d
+ fminv d0, v1.2d
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv d0, v1.2d
+// CHECK-ERROR: ^
+