summaryrefslogtreecommitdiffstats
path: root/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
diff options
context:
space:
mode:
Diffstat (limited to 'clang/test/CodeGen/arm-mve-intrinsics/load-store.c')
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/load-store.c  1325
1 files changed, 1325 insertions, 0 deletions
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
new file mode 100644
index 00000000000..5cbf6a3128c
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
@@ -0,0 +1,1325 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// == vld1q (unpredicated contiguous loads) ==
+// Each test checks that vld1q lowers to a plain LLVM vector load with
+// element-sized alignment (2 for 16-bit lanes, 4 for 32-bit, 1 for bytes).
+// Under -DPOLYMORPHIC the overloaded vld1q(base) spelling is compiled
+// instead of the type-suffixed name; both must produce identical IR.
+// CHECK-LABEL: @test_vld1q_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, <8 x half>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x half> [[TMP1]]
+//
+float16x8_t test_vld1q_f16(const float16_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_f16(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x float> [[TMP1]]
+//
+float32x4_t test_vld1q_f32(const float32_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_f32(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+//
+int8x16_t test_vld1q_s8(const int8_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_s8(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_vld1q_s16(const int16_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_s16(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_vld1q_s32(const int32_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_s32(base);
+#endif /* POLYMORPHIC */
+}
+
+// Unsigned variants generate the same IR as the signed ones above, since
+// an unpredicated full-width load is sign-agnostic.
+// CHECK-LABEL: @test_vld1q_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+//
+uint8x16_t test_vld1q_u8(const uint8_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_u8(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_vld1q_u16(const uint16_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_u16(base);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_vld1q_u32(const uint32_t *base)
+{
+#ifdef POLYMORPHIC
+ return vld1q(base);
+#else /* POLYMORPHIC */
+ return vld1q_u32(base);
+#endif /* POLYMORPHIC */
+}
+
+// == vld1q_z (zeroing-predicated loads) ==
+// The i16 predicate [[P]] is zero-extended to i32 and converted to a
+// vector of i1 lanes via @llvm.arm.mve.pred.i2v; the load then becomes
+// @llvm.masked.load with a zeroinitializer passthru, so inactive lanes
+// read as zero.
+// CHECK-LABEL: @test_vld1q_z_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer)
+// CHECK-NEXT: ret <8 x half> [[TMP3]]
+//
+float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_f16(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer)
+// CHECK-NEXT: ret <4 x float> [[TMP3]]
+//
+float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_f32(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+//
+int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_s8(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+//
+int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_s16(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+//
+int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_s32(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+//
+uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_u8(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+//
+uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_u16(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vld1q_z_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+//
+uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ return vld1q_z(base, p);
+#else /* POLYMORPHIC */
+ return vld1q_z_u32(base, p);
+#endif /* POLYMORPHIC */
+}
+
+// == vldrbq: byte loads with widening ==
+// vldrbq_s8/u8 are plain <16 x i8> loads. The _s16/_s32 (_u16/_u32)
+// variants load a narrower i8 vector (<8 x i8> or <4 x i8>) and then
+// sext (zext) it to the destination element width. These intrinsics have
+// no polymorphic spelling, so no POLYMORPHIC branch appears here.
+// CHECK-LABEL: @test_vldrbq_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+//
+int8x16_t test_vldrbq_s8(const int8_t *base)
+{
+ return vldrbq_s8(base);
+}
+
+// CHECK-LABEL: @test_vldrbq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = sext <8 x i8> [[TMP1]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+int16x8_t test_vldrbq_s16(const int8_t *base)
+{
+ return vldrbq_s16(base);
+}
+
+// CHECK-LABEL: @test_vldrbq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vldrbq_s32(const int8_t *base)
+{
+ return vldrbq_s32(base);
+}
+
+// CHECK-LABEL: @test_vldrbq_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret <16 x i8> [[TMP1]]
+//
+uint8x16_t test_vldrbq_u8(const uint8_t *base)
+{
+ return vldrbq_u8(base);
+}
+
+// CHECK-LABEL: @test_vldrbq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = zext <8 x i8> [[TMP1]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+//
+uint16x8_t test_vldrbq_u16(const uint8_t *base)
+{
+ return vldrbq_u16(base);
+}
+
+// CHECK-LABEL: @test_vldrbq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vldrbq_u32(const uint8_t *base)
+{
+ return vldrbq_u32(base);
+}
+
+// Predicated vldrbq_z: the predicate is converted as in vld1q_z, the
+// narrow vector is loaded with @llvm.masked.load (zero passthru), then
+// the same sext/zext widening is applied.
+// CHECK-LABEL: @test_vldrbq_z_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+//
+int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_s8(base, p);
+}
+
+// CHECK-LABEL: @test_vldrbq_z_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* [[TMP0]], i32 1, <8 x i1> [[TMP2]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[TMP3]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP4]]
+//
+int16x8_t test_vldrbq_z_s16(const int8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_s16(base, p);
+}
+
+// CHECK-LABEL: @test_vldrbq_z_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* [[TMP0]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i8> [[TMP3]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+//
+int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_s32(base, p);
+}
+
+// CHECK-LABEL: @test_vldrbq_z_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: ret <16 x i8> [[TMP3]]
+//
+uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_u8(base, p);
+}
+
+// CHECK-LABEL: @test_vldrbq_z_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* [[TMP0]], i32 1, <8 x i1> [[TMP2]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP4]]
+//
+uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_u16(base, p);
+}
+
+// CHECK-LABEL: @test_vldrbq_z_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* [[TMP0]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP3]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+//
+uint32x4_t test_vldrbq_z_u32(const uint8_t *base, mve_pred16_t p)
+{
+ return vldrbq_z_u32(base, p);
+}
+
+// == vldrhq: halfword loads with widening ==
+// Same shape as vldrbq but for 16-bit elements: f16/s16/u16 are plain
+// <8 x ...> loads, while _s32/_u32 load <4 x i16> and sext/zext to
+// <4 x i32>. Alignment is 2 throughout.
+// CHECK-LABEL: @test_vldrhq_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, <8 x half>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x half> [[TMP1]]
+//
+float16x8_t test_vldrhq_f16(const float16_t *base)
+{
+ return vldrhq_f16(base);
+}
+
+// CHECK-LABEL: @test_vldrhq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+//
+int16x8_t test_vldrhq_s16(const int16_t *base)
+{
+ return vldrhq_s16(base);
+}
+
+// CHECK-LABEL: @test_vldrhq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+int32x4_t test_vldrhq_s32(const int16_t *base)
+{
+ return vldrhq_s32(base);
+}
+
+// CHECK-LABEL: @test_vldrhq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret <8 x i16> [[TMP1]]
+//
+uint16x8_t test_vldrhq_u16(const uint16_t *base)
+{
+ return vldrhq_u16(base);
+}
+
+// CHECK-LABEL: @test_vldrhq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vldrhq_u32(const uint16_t *base)
+{
+ return vldrhq_u32(base);
+}
+
+// Predicated vldrhq_z: masked load of the (possibly narrow) halfword
+// vector with zero passthru, followed by the same widening conversions.
+// CHECK-LABEL: @test_vldrhq_z_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer)
+// CHECK-NEXT: ret <8 x half> [[TMP3]]
+//
+float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
+{
+ return vldrhq_z_f16(base, p);
+}
+
+// CHECK-LABEL: @test_vldrhq_z_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+//
+int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
+{
+ return vldrhq_z_s16(base, p);
+}
+
+// CHECK-LABEL: @test_vldrhq_z_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TMP0]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+//
+int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p)
+{
+ return vldrhq_z_s32(base, p);
+}
+
+// CHECK-LABEL: @test_vldrhq_z_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: ret <8 x i16> [[TMP3]]
+//
+uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
+{
+ return vldrhq_z_u16(base, p);
+}
+
+// CHECK-LABEL: @test_vldrhq_z_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TMP0]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i16> [[TMP3]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP4]]
+//
+uint32x4_t test_vldrhq_z_u32(const uint16_t *base, mve_pred16_t p)
+{
+ return vldrhq_z_u32(base, p);
+}
+
+// == vldrwq: word loads ==
+// 32-bit elements are already full width, so there are no widening
+// variants: unpredicated forms are plain <4 x ...> loads (align 4) and
+// _z forms are masked loads with zero passthru.
+// CHECK-LABEL: @test_vldrwq_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x float> [[TMP1]]
+//
+float32x4_t test_vldrwq_f32(const float32_t *base)
+{
+ return vldrwq_f32(base);
+}
+
+// CHECK-LABEL: @test_vldrwq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+//
+int32x4_t test_vldrwq_s32(const int32_t *base)
+{
+ return vldrwq_s32(base);
+}
+
+// CHECK-LABEL: @test_vldrwq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret <4 x i32> [[TMP1]]
+//
+uint32x4_t test_vldrwq_u32(const uint32_t *base)
+{
+ return vldrwq_u32(base);
+}
+
+// CHECK-LABEL: @test_vldrwq_z_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer)
+// CHECK-NEXT: ret <4 x float> [[TMP3]]
+//
+float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
+{
+ return vldrwq_z_f32(base, p);
+}
+
+// CHECK-LABEL: @test_vldrwq_z_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+//
+int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
+{
+ return vldrwq_z_s32(base, p);
+}
+
+// CHECK-LABEL: @test_vldrwq_z_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: ret <4 x i32> [[TMP3]]
+//
+uint32x4_t test_vldrwq_z_u32(const uint32_t *base, mve_pred16_t p)
+{
+ return vldrwq_z_u32(base, p);
+}
+
+// == vst1q (unpredicated contiguous stores) ==
+// Mirror image of the vld1q tests: each intrinsic lowers to a plain
+// vector store with element-sized alignment, and the polymorphic
+// vst1q(base, value) spelling must generate identical IR.
+// CHECK-LABEL: @test_vst1q_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: store <8 x half> [[VALUE:%.*]], <8 x half>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vst1q_f16(float16_t *base, float16x8_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_f16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: store <4 x float> [[VALUE:%.*]], <4 x float>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vst1q_f32(float32_t *base, float32x4_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_f32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: store <16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vst1q_s8(int8_t *base, int8x16_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_s8(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: store <8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vst1q_s16(int16_t *base, int16x8_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_s16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: store <4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vst1q_s32(int32_t *base, int32x4_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_s32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: store <16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vst1q_u8(uint8_t *base, uint8x16_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_u8(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: store <8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vst1q_u16(uint16_t *base, uint16x8_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_u16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: store <4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vst1q_u32(uint32_t *base, uint32x4_t value)
+{
+#ifdef POLYMORPHIC
+ vst1q(base, value);
+#else /* POLYMORPHIC */
+ vst1q_u32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// == vst1q_p (predicated stores) ==
+// The predicate is converted as in the _z loads; the store lowers to
+// @llvm.masked.store, so inactive lanes leave memory untouched.
+// CHECK-LABEL: @test_vst1q_p_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> [[VALUE:%.*]], <8 x half>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_f16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> [[VALUE:%.*]], <4 x float>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_f32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_s8(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_s16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_s32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_u8(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_u16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vst1q_p_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vst1q_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
+{
+#ifdef POLYMORPHIC
+ vst1q_p(base, value, p);
+#else /* POLYMORPHIC */
+ vst1q_p_u32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: store <16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_s8(int8_t *base, int8x16_t value)
+{
+ // Full-width byte store: lowers to a plain unmasked <16 x i8> store (align 1).
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_s8(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[TMP1]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_s16(int8_t *base, int16x8_t value)
+{
+ // Narrowing store: each i16 lane is truncated to i8, then stored unmasked (align 1).
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_s16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[TMP1]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_s32(int8_t *base, int32x4_t value)
+{
+ // Narrowing store: each i32 lane is truncated to i8, then stored unmasked (align 1).
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_s32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: store <16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_u8(uint8_t *base, uint8x16_t value)
+{
+ // Unsigned variant of vstrbq_s8: identical IR — plain unmasked <16 x i8> store (align 1).
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_u8(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[TMP1]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_u16(uint8_t *base, uint16x8_t value)
+{
+ // Narrowing store: i16 lanes truncated to i8, unmasked store (align 1); same IR as s16.
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_u16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[TMP1]], align 1
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_u32(uint8_t *base, uint32x4_t value)
+{
+ // Narrowing store: i32 lanes truncated to i8, unmasked store (align 1); same IR as s32.
+#ifdef POLYMORPHIC
+ vstrbq(base, value);
+#else /* POLYMORPHIC */
+ vstrbq_u32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
+{
+ // Predicated byte store: p -> <16 x i1> via @llvm.arm.mve.pred.i2v,
+ // then @llvm.masked.store (align 1).
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_s8(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> [[TMP0]], <8 x i8>* [[TMP1]], i32 1, <8 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store: i16 lanes truncated to i8, stored via
+ // @llvm.masked.store under the <8 x i1> mask from @llvm.arm.mve.pred.i2v.
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_s16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> [[TMP0]], <4 x i8>* [[TMP1]], i32 1, <4 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store: i32 lanes truncated to i8, stored via
+ // @llvm.masked.store under the <4 x i1> mask from @llvm.arm.mve.pred.i2v.
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_s32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <16 x i8>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> [[VALUE:%.*]], <16 x i8>* [[TMP0]], i32 1, <16 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
+{
+ // Unsigned variant of vstrbq_p_s8: identical IR — masked <16 x i8> store (align 1).
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_u8(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <8 x i8>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> [[TMP0]], <8 x i8>* [[TMP1]], i32 1, <8 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store (i16 -> i8, masked); same IR as the s16 variant.
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_u16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrbq_p_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[BASE:%.*]] to <4 x i8>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0v4i8(<4 x i8> [[TMP0]], <4 x i8>* [[TMP1]], i32 1, <4 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrbq_p_u32(uint8_t *base, uint32x4_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store (i32 -> i8, masked); same IR as the s32 variant.
+#ifdef POLYMORPHIC
+ vstrbq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrbq_p_u32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: store <8 x half> [[VALUE:%.*]], <8 x half>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_f16(float16_t *base, float16x8_t value)
+{
+ // Half-precision store: lowers to a plain unmasked <8 x half> store (align 2).
+#ifdef POLYMORPHIC
+ vstrhq(base, value);
+#else /* POLYMORPHIC */
+ vstrhq_f16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: store <8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_s16(int16_t *base, int16x8_t value)
+{
+ // Full-width halfword store: plain unmasked <8 x i16> store (align 2).
+#ifdef POLYMORPHIC
+ vstrhq(base, value);
+#else /* POLYMORPHIC */
+ vstrhq_s16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: store <4 x i16> [[TMP0]], <4 x i16>* [[TMP1]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_s32(int16_t *base, int32x4_t value)
+{
+ // Narrowing store: each i32 lane is truncated to i16, then stored unmasked (align 2).
+#ifdef POLYMORPHIC
+ vstrhq(base, value);
+#else /* POLYMORPHIC */
+ vstrhq_s32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: store <8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_u16(uint16_t *base, uint16x8_t value)
+{
+ // Unsigned variant of vstrhq_s16: identical IR — unmasked <8 x i16> store (align 2).
+#ifdef POLYMORPHIC
+ vstrhq(base, value);
+#else /* POLYMORPHIC */
+ vstrhq_u16(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: store <4 x i16> [[TMP0]], <4 x i16>* [[TMP1]], align 2
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_u32(uint16_t *base, uint32x4_t value)
+{
+ // Narrowing store (i32 -> i16, unmasked); same IR as the s32 variant.
+#ifdef POLYMORPHIC
+ vstrhq(base, value);
+#else /* POLYMORPHIC */
+ vstrhq_u32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_p_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <8 x half>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> [[VALUE:%.*]], <8 x half>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
+{
+ // Predicated half store: p -> <8 x i1> via @llvm.arm.mve.pred.i2v,
+ // then @llvm.masked.store of <8 x half> (align 2).
+#ifdef POLYMORPHIC
+ vstrhq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrhq_p_f16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_p_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
+{
+ // Predicated halfword store: masked <8 x i16> store under the i2v-derived mask (align 2).
+#ifdef POLYMORPHIC
+ vstrhq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrhq_p_s16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_p_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> [[TMP0]], <4 x i16>* [[TMP1]], i32 2, <4 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store: i32 lanes truncated to i16, stored via
+ // @llvm.masked.store under the <4 x i1> mask from @llvm.arm.mve.pred.i2v.
+#ifdef POLYMORPHIC
+ vstrhq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrhq_p_s32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_p_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <8 x i16>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> [[VALUE:%.*]], <8 x i16>* [[TMP0]], i32 2, <8 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
+{
+ // Unsigned variant of vstrhq_p_s16: identical IR — masked <8 x i16> store (align 2).
+#ifdef POLYMORPHIC
+ vstrhq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrhq_p_u16(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrhq_p_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[BASE:%.*]] to <4 x i16>*
+// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0v4i16(<4 x i16> [[TMP0]], <4 x i16>* [[TMP1]], i32 2, <4 x i1> [[TMP3]])
+// CHECK-NEXT: ret void
+//
+void test_vstrhq_p_u32(uint16_t *base, uint32x4_t value, mve_pred16_t p)
+{
+ // Narrowing predicated store (i32 -> i16, masked); same IR as the s32 variant.
+#ifdef POLYMORPHIC
+ vstrhq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrhq_p_u32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: store <4 x float> [[VALUE:%.*]], <4 x float>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_f32(float32_t *base, float32x4_t value)
+{
+ // Single-precision store: plain unmasked <4 x float> store (align 4).
+#ifdef POLYMORPHIC
+ vstrwq(base, value);
+#else /* POLYMORPHIC */
+ vstrwq_f32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: store <4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_s32(int32_t *base, int32x4_t value)
+{
+ // Full-width word store: plain unmasked <4 x i32> store (align 4).
+#ifdef POLYMORPHIC
+ vstrwq(base, value);
+#else /* POLYMORPHIC */
+ vstrwq_s32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: store <4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], align 4
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_u32(uint32_t *base, uint32x4_t value)
+{
+ // Unsigned variant of vstrwq_s32: identical IR — unmasked <4 x i32> store (align 4).
+#ifdef POLYMORPHIC
+ vstrwq(base, value);
+#else /* POLYMORPHIC */
+ vstrwq_u32(base, value);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_p_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <4 x float>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> [[VALUE:%.*]], <4 x float>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
+{
+ // Predicated float store: p -> <4 x i1> via @llvm.arm.mve.pred.i2v,
+ // then @llvm.masked.store of <4 x float> (align 4).
+#ifdef POLYMORPHIC
+ vstrwq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrwq_p_f32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_p_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
+{
+ // Predicated word store: masked <4 x i32> store under the i2v-derived mask (align 4).
+#ifdef POLYMORPHIC
+ vstrwq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrwq_p_s32(base, value, p);
+#endif /* POLYMORPHIC */
+}
+
+// CHECK-LABEL: @test_vstrwq_p_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <4 x i32>*
+// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[VALUE:%.*]], <4 x i32>* [[TMP0]], i32 4, <4 x i1> [[TMP2]])
+// CHECK-NEXT: ret void
+//
+void test_vstrwq_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
+{
+ // Unsigned variant of vstrwq_p_s32: identical IR — masked <4 x i32> store (align 4).
+#ifdef POLYMORPHIC
+ vstrwq_p(base, value, p);
+#else /* POLYMORPHIC */
+ vstrwq_p_u32(base, value, p);
+#endif /* POLYMORPHIC */
+}
OpenPOWER on IntegriCloud