author      Mikhail Maltsev <mikhail.maltsev@arm.com>    2019-12-09 12:05:59 +0000
committer   Mikhail Maltsev <mikhail.maltsev@arm.com>    2019-12-09 12:05:59 +0000
commit      0d1490bf6a68d9397a6402c8f702e30f07adacf1 (patch)
tree        0f7db4dee46916f7a5150ea8384e452d2db3e1a6 /clang
parent      d6642ed1c867f97fdf951aac751c7854fbc7c51f (diff)
download    bcm5719-llvm-0d1490bf6a68d9397a6402c8f702e30f07adacf1.tar.gz
            bcm5719-llvm-0d1490bf6a68d9397a6402c8f702e30f07adacf1.zip
[ARM][MVE] Add complex vector intrinsics
Summary:
This patch adds intrinsics for the following MVE instructions:
* VCADD, VHCADD
* VCMUL
* VCMLA

Each of the above 3 groups has a corresponding new LLVM IR intrinsic.

Reviewers: simon_tatham, MarkMurrayARM, ostannard, dmgreen

Reviewed By: MarkMurrayARM

Subscribers: merge_guards_bot, kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D71190
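For orientation, a minimal usage sketch of the new user-facing intrinsics follows. This is a hypothetical example, not part of the patch; it assumes an MVE-enabled target and <arm_mve.h>, and the wrapper function names are made up for illustration. The intrinsic names and signatures themselves match the tests added below.

#include <arm_mve.h>

/* Complex multiply-accumulate acc += x * y on interleaved complex
   float32 data: the rotation-0 and rotation-90 VCMLA forms together
   make up a full complex multiply (the usual VCMLA/FCMLA idiom). */
float32x4_t complex_mac(float32x4_t acc, float32x4_t x, float32x4_t y)
{
    acc = vcmlaq_f32(acc, x, y);
    acc = vcmlaq_rot90_f32(acc, x, y);
    return acc;
}

/* Complex add with the second operand rotated by 90 degrees (VCADD),
   and the halving integer variant (VHCADD) with a 270-degree rotation. */
float32x4_t rot90_add(float32x4_t a, float32x4_t b)
{
    return vcaddq_rot90_f32(a, b);
}

int32x4_t halving_rot270_add(int32x4_t a, int32x4_t b)
{
    return vhcaddq_rot270_s32(a, b);
}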
Diffstat (limited to 'clang')
-rw-r--r--clang/include/clang/Basic/arm_mve.td62
-rw-r--r--clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c742
-rw-r--r--clang/test/CodeGen/arm-mve-intrinsics/vcmlaq.c246
-rw-r--r--clang/test/CodeGen/arm-mve-intrinsics/vcmulq.c373
-rw-r--r--clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c281
5 files changed, 1704 insertions, 0 deletions
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index 5fa9fc00820..19852702c1b 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -596,6 +596,68 @@ def vadciq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
(xval $pair, 0))>;
}
+multiclass VectorComplexAddPred<dag halving, dag angle> {
+ def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b),
+ (IRInt<"vcaddq", [Vector]> halving, angle, $a, $b)>;
+ def _m : Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+ Predicate:$pred),
+ (IRInt<"vcaddq_predicated", [Vector, Predicate]>
+ halving, angle, $inactive, $a, $b, $pred)>;
+ def _x : Intrinsic<Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vcaddq_predicated", [Vector, Predicate]>
+ halving, angle, (undef Vector), $a, $b, $pred)>;
+}
+
+multiclass VectorComplexMulPred<dag angle> {
+ def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b),
+ (IRInt<"vcmulq", [Vector]> angle, $a, $b)>;
+ def _m : Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+ Predicate:$pred),
+ (IRInt<"vcmulq_predicated", [Vector, Predicate]> angle, $inactive, $a, $b,
+ $pred)>;
+ def _x : Intrinsic<Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
+ (IRInt<"vcmulq_predicated", [Vector, Predicate]> angle, (undef Vector), $a,
+ $b, $pred)>;
+}
+
+multiclass VectorComplexMLAPred<dag angle> {
+ def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c),
+ (IRInt<"vcmlaq", [Vector]> angle, $a, $b, $c)>;
+ def _m : Intrinsic<Vector, (args Vector:$a, Vector:$b, Vector:$c,
+ Predicate:$pred),
+ (IRInt<"vcmlaq_predicated", [Vector, Predicate]> angle, $a, $b, $c, $pred)>;
+}
+
+multiclass VectorComplexAddAngle<dag halving> {
+ defm _rot90 : VectorComplexAddPred<halving, (u32 0)>;
+ defm _rot270 : VectorComplexAddPred<halving, (u32 1)>;
+}
+
+multiclass VectorComplexMulAngle {
+ defm "" : VectorComplexMulPred<(u32 0)>;
+ defm _rot90 : VectorComplexMulPred<(u32 1)>;
+ defm _rot180 : VectorComplexMulPred<(u32 2)>;
+ defm _rot270 : VectorComplexMulPred<(u32 3)>;
+}
+
+multiclass VectorComplexMLAAngle {
+ defm "" : VectorComplexMLAPred<(u32 0)>;
+ defm _rot90 : VectorComplexMLAPred<(u32 1)>;
+ defm _rot180 : VectorComplexMLAPred<(u32 2)>;
+ defm _rot270 : VectorComplexMLAPred<(u32 3)>;
+}
+
+let params = T.Usual in
+defm vcaddq : VectorComplexAddAngle<(u32 0)>;
+
+let params = T.Signed in
+defm vhcaddq : VectorComplexAddAngle<(u32 1)>;
+
+let params = T.Float in {
+defm vcmulq : VectorComplexMulAngle;
+defm vcmlaq : VectorComplexMLAAngle;
+}
+
foreach desttype = T.All in {
// We want a vreinterpretq between every pair of supported vector types
// _except_ that there shouldn't be one from a type to itself.
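The multiclasses above encode each rotation suffix as an integer immediate on the underlying IR intrinsic: for VCADD/VHCADD the first immediate selects halving (0 for vcaddq, 1 for vhcaddq) and the second selects the angle (0 for _rot90, 1 for _rot270); for VCMUL/VCMLA a single immediate selects the angle (0, 1, 2, 3 for rotations of 0, 90, 180, 270 degrees). For example, as the autogenerated tests below confirm (the %a, %b, %r value names here are illustrative):

  ; vhcaddq_rot270_s32(a, b) becomes:
  %r = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)

  ; vcmulq_rot180_f32(a, b) becomes:
  %r = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 2, <4 x float> %a, <4 x float> %b)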
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c b/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
new file mode 100644
index 00000000000..3e00384eeb6
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
@@ -0,0 +1,742 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcaddq_rot90_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vcaddq_rot90_u8(uint8x16_t a, uint8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_u8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcaddq_rot90_u16(uint16x8_t a, uint16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_u16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcaddq_rot90_u32(uint32x4_t a, uint32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_u32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vcaddq_rot90_s8(int8x16_t a, int8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_s8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcaddq_rot90_s16(int16x8_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_s16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcaddq_rot90_s32(int32x4_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_s32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcaddq_rot90_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcaddq_rot90_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90(a, b);
+#else
+ return vcaddq_rot90_f32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vcaddq_rot270_u8(uint8x16_t a, uint8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_u8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcaddq_rot270_u16(uint16x8_t a, uint16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_u16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcaddq_rot270_u32(uint32x4_t a, uint32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_u32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vcaddq_rot270_s8(int8x16_t a, int8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_s8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcaddq_rot270_s16(int16x8_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_s16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcaddq_rot270_s32(int32x4_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_s32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcaddq_rot270_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcaddq_rot270_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270(a, b);
+#else
+ return vcaddq_rot270_f32(a, b);
+#endif
+}
+
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vcaddq_rot90_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_u8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcaddq_rot90_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_u16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcaddq_rot90_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_u32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_s8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_s16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_s32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcaddq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcaddq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vcaddq_rot90_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vcaddq_rot270_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_u8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcaddq_rot270_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_u16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcaddq_rot270_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_u32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_s8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_s16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_s32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcaddq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcaddq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vcaddq_rot270_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vcaddq_rot90_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_u8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcaddq_rot90_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_u16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcaddq_rot90_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_u32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_s8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_s16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_s32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcaddq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot90_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcaddq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot90_x(a, b, p);
+#else
+ return vcaddq_rot90_x_f32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+uint8x16_t test_vcaddq_rot270_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_u8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vcaddq_rot270_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_u16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcaddq_rot270_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_u32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_s8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_s16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_s32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcaddq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcaddq_rot270_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcaddq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcaddq_rot270_x(a, b, p);
+#else
+ return vcaddq_rot270_x_f32(a, b, p);
+#endif
+}
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcmlaq.c b/clang/test/CodeGen/arm-mve-intrinsics/vcmlaq.c
new file mode 100644
index 00000000000..f470a3bd188
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcmlaq.c
@@ -0,0 +1,246 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcmlaq_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmlaq_f16(float16x8_t a, float16x8_t b, float16x8_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq(a, b, c);
+#else
+ return vcmlaq_f16(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmlaq_f32(float32x4_t a, float32x4_t b, float32x4_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq(a, b, c);
+#else
+ return vcmlaq_f32(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot90_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmlaq_rot90_f16(float16x8_t a, float16x8_t b, float16x8_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot90(a, b, c);
+#else
+ return vcmlaq_rot90_f16(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot90_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmlaq_rot90_f32(float32x4_t a, float32x4_t b, float32x4_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot90(a, b, c);
+#else
+ return vcmlaq_rot90_f32(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot180_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 2, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmlaq_rot180_f16(float16x8_t a, float16x8_t b, float16x8_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot180(a, b, c);
+#else
+ return vcmlaq_rot180_f16(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot180_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 2, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmlaq_rot180_f32(float32x4_t a, float32x4_t b, float32x4_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot180(a, b, c);
+#else
+ return vcmlaq_rot180_f32(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot270_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 3, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmlaq_rot270_f16(float16x8_t a, float16x8_t b, float16x8_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot270(a, b, c);
+#else
+ return vcmlaq_rot270_f16(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot270_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 3, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmlaq_rot270_f32(float32x4_t a, float32x4_t b, float32x4_t c)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot270(a, b, c);
+#else
+ return vcmlaq_rot270_f32(a, b, c);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmlaq_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_m(a, b, c, p);
+#else
+ return vcmlaq_m_f16(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmlaq_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_m(a, b, c, p);
+#else
+ return vcmlaq_m_f32(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot90_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmlaq_rot90_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot90_m(a, b, c, p);
+#else
+ return vcmlaq_rot90_m_f16(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot90_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmlaq_rot90_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot90_m(a, b, c, p);
+#else
+ return vcmlaq_rot90_m_f32(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot180_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 2, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmlaq_rot180_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot180_m(a, b, c, p);
+#else
+ return vcmlaq_rot180_m_f16(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot180_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 2, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmlaq_rot180_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot180_m(a, b, c, p);
+#else
+ return vcmlaq_rot180_m_f32(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot270_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmlaq.predicated.v8f16.v8i1(i32 3, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmlaq_rot270_m_f16(float16x8_t a, float16x8_t b, float16x8_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot270_m(a, b, c, p);
+#else
+ return vcmlaq_rot270_m_f16(a, b, c, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmlaq_rot270_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmlaq.predicated.v4f32.v4i1(i32 3, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmlaq_rot270_m_f32(float32x4_t a, float32x4_t b, float32x4_t c, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmlaq_rot270_m(a, b, c, p);
+#else
+ return vcmlaq_rot270_m_f32(a, b, c, p);
+#endif
+}
+
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcmulq.c b/clang/test/CodeGen/arm-mve-intrinsics/vcmulq.c
new file mode 100644
index 00000000000..08d6a606efc
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcmulq.c
@@ -0,0 +1,373 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcmulq_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmulq_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq(a, b);
+#else
+ return vcmulq_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmulq_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq(a, b);
+#else
+ return vcmulq_f32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmulq_rot90_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90(a, b);
+#else
+ return vcmulq_rot90_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmulq_rot90_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90(a, b);
+#else
+ return vcmulq_rot90_f32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmulq_rot180_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180(a, b);
+#else
+ return vcmulq_rot180_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 2, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmulq_rot180_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180(a, b);
+#else
+ return vcmulq_rot180_f32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT: ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vcmulq_rot270_f16(float16x8_t a, float16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270(a, b);
+#else
+ return vcmulq_rot270_f16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 3, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vcmulq_rot270_f32(float32x4_t a, float32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270(a, b);
+#else
+ return vcmulq_rot270_f32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_m(inactive, a, b, p);
+#else
+ return vcmulq_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_m(inactive, a, b, p);
+#else
+ return vcmulq_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90_m(inactive, a, b, p);
+#else
+ return vcmulq_rot90_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90_m(inactive, a, b, p);
+#else
+ return vcmulq_rot90_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot180_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180_m(inactive, a, b, p);
+#else
+ return vcmulq_rot180_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot180_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180_m(inactive, a, b, p);
+#else
+ return vcmulq_rot180_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_m_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270_m(inactive, a, b, p);
+#else
+ return vcmulq_rot270_m_f16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_m_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270_m(inactive, a, b, p);
+#else
+ return vcmulq_rot270_m_f32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_x(a, b, p);
+#else
+ return vcmulq_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_x(a, b, p);
+#else
+ return vcmulq_x_f32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90_x(a, b, p);
+#else
+ return vcmulq_rot90_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot90_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot90_x(a, b, p);
+#else
+ return vcmulq_rot90_x_f32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot180_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180_x(a, b, p);
+#else
+ return vcmulq_rot180_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot180_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot180_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot180_x(a, b, p);
+#else
+ return vcmulq_rot180_x_f32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_x_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x half> [[TMP2]]
+//
+float16x8_t test_vcmulq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270_x(a, b, p);
+#else
+ return vcmulq_rot270_x_f16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vcmulq_rot270_x_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x float> [[TMP2]]
+//
+float32x4_t test_vcmulq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vcmulq_rot270_x(a, b, p);
+#else
+ return vcmulq_rot270_x_f32(a, b, p);
+#endif
+}
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c b/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
new file mode 100644
index 00000000000..7da8db5f164
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
@@ -0,0 +1,281 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vhcaddq_rot90_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vhcaddq_rot90_s8(int8x16_t a, int8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90(a, b);
+#else
+ return vhcaddq_rot90_s8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vhcaddq_rot90_s16(int16x8_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90(a, b);
+#else
+ return vhcaddq_rot90_s16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vhcaddq_rot90_s32(int32x4_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90(a, b);
+#else
+ return vhcaddq_rot90_s32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vhcaddq_rot270_s8(int8x16_t a, int8x16_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270(a, b);
+#else
+ return vhcaddq_rot270_s8(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vhcaddq_rot270_s16(int16x8_t a, int16x8_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270(a, b);
+#else
+ return vhcaddq_rot270_s16(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vhcaddq_rot270_s32(int32x4_t a, int32x4_t b)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270(a, b);
+#else
+ return vhcaddq_rot270_s32(a, b);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_x_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vhcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_x(a, b, p);
+#else
+ return vhcaddq_rot90_x_s8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_x_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vhcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_x(a, b, p);
+#else
+ return vhcaddq_rot90_x_s16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_x_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vhcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_x(a, b, p);
+#else
+ return vhcaddq_rot90_x_s32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_x_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vhcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_x(a, b, p);
+#else
+ return vhcaddq_rot270_x_s8(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_x_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vhcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_x(a, b, p);
+#else
+ return vhcaddq_rot270_x_s16(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_x_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vhcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_x(a, b, p);
+#else
+ return vhcaddq_rot270_x_s32(a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_m_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vhcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot90_m_s8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vhcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot90_m_s16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot90_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vhcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot90_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot90_m_s32(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_m_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+//
+int8x16_t test_vhcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot270_m_s8(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_m_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vhcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot270_m_s16(inactive, a, b, p);
+#endif
+}
+
+// CHECK-LABEL: @test_vhcaddq_rot270_m_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vhcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vhcaddq_rot270_m(inactive, a, b, p);
+#else
+ return vhcaddq_rot270_m_s32(inactive, a, b, p);
+#endif
+}
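+
+For readers skimming the tests: the predicated `_m` variants take an extra `inactive` vector, which supplies the result lanes whose predicate bits are clear, while the `_x` variants leave those lanes undefined. The following is a minimal usage sketch only, not part of this patch; the helper name `rotate_accumulate_s16` is invented for illustration, and it assumes an MVE-enabled build (for example `-march=armv8.1-m.main+mve`):
+
+#include <arm_mve.h>
+
+int16x8_t rotate_accumulate_s16(int16x8_t inactive, int16x8_t a,
+                                int16x8_t b, mve_pred16_t p)
+{
+    /* Halving complex add with a 90-degree rotation (VHCADD); lanes whose
+       predicate bit in 'p' is clear take their value from 'inactive'. */
+    return vhcaddq_rot90_m_s16(inactive, a, b, p);
+}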