path: root/clang/test/CodeGen/arm_neon_intrinsics.c
author    Ivan A. Kosarev <ikosarev@accesssoftek.com>  2018-06-27 13:58:43 +0000
committer Ivan A. Kosarev <ikosarev@accesssoftek.com>  2018-06-27 13:58:43 +0000
commit    a9f484ac4a96dad51c37ab8ac6b4d1e8dfadf26b (patch)
tree      ae0a5a404e24cdf429948afce189f4eaacefe3d1 /clang/test/CodeGen/arm_neon_intrinsics.c
parent    7231598fce4f89be34a93b328032d3ee3c7bae04 (diff)
[NEON] Support vldNq intrinsics in AArch32 (Clang part)
This patch reworks the support for dup NEON intrinsics as described in D48439.

Differential Revision: https://reviews.llvm.org/D48440

llvm-svn: 335734
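For context, the vldN_dup intrinsics exercised by the tests removed below load one N-element structure from memory and replicate each element across every lane of the corresponding result vector. A minimal usage sketch follows; the function name broadcast_pair and its use are illustrative only and not part of this patch:

#include <arm_neon.h>

// Hypothetical example: vld2_dup_u8 reads the two consecutive bytes p[0]
// and p[1]; the result's val[0] holds eight copies of p[0] and val[1]
// holds eight copies of p[1].
uint8x8x2_t broadcast_pair(const uint8_t *p) {
  return vld2_dup_u8(p);
}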
Diffstat (limited to 'clang/test/CodeGen/arm_neon_intrinsics.c')
-rw-r--r--  clang/test/CodeGen/arm_neon_intrinsics.c | 315
1 file changed, 0 insertions(+), 315 deletions(-)
diff --git a/clang/test/CodeGen/arm_neon_intrinsics.c b/clang/test/CodeGen/arm_neon_intrinsics.c
index 95ac3dca7a8..f6305062e8f 100644
--- a/clang/test/CodeGen/arm_neon_intrinsics.c
+++ b/clang/test/CodeGen/arm_neon_intrinsics.c
@@ -4732,111 +4732,6 @@ poly16x4x2_t test_vld2_p16(poly16_t const * a) {
return vld2_p16(a);
}
-// CHECK-LABEL: @test_vld2_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-uint8x8x2_t test_vld2_dup_u8(uint8_t const * a) {
- return vld2_dup_u8(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-uint16x4x2_t test_vld2_dup_u16(uint16_t const * a) {
- return vld2_dup_u16(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>
-uint32x2x2_t test_vld2_dup_u32(uint32_t const * a) {
- return vld2_dup_u32(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>
-uint64x1x2_t test_vld2_dup_u64(uint64_t const * a) {
- return vld2_dup_u64(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-int8x8x2_t test_vld2_dup_s8(int8_t const * a) {
- return vld2_dup_s8(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-int16x4x2_t test_vld2_dup_s16(int16_t const * a) {
- return vld2_dup_s16(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>
-int32x2x2_t test_vld2_dup_s32(int32_t const * a) {
- return vld2_dup_s32(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>
-int64x1x2_t test_vld2_dup_s64(int64_t const * a) {
- return vld2_dup_s64(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-float16x4x2_t test_vld2_dup_f16(float16_t const * a) {
- return vld2_dup_f16(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>
-float32x2x2_t test_vld2_dup_f32(float32_t const * a) {
- return vld2_dup_f32(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>
-poly8x8x2_t test_vld2_dup_p8(poly8_t const * a) {
- return vld2_dup_p8(a);
-}
-
-// CHECK-LABEL: @test_vld2_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x2_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x2_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>
-poly16x4x2_t test_vld2_dup_p16(poly16_t const * a) {
- return vld2_dup_p16(a);
-}
-
// CHECK-LABEL: @test_vld2q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
@@ -5473,111 +5368,6 @@ poly16x4x3_t test_vld3_p16(poly16_t const * a) {
return vld3_p16(a);
}
-// CHECK-LABEL: @test_vld3_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-uint8x8x3_t test_vld3_dup_u8(uint8_t const * a) {
- return vld3_dup_u8(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-uint16x4x3_t test_vld3_dup_u16(uint16_t const * a) {
- return vld3_dup_u16(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>
-uint32x2x3_t test_vld3_dup_u32(uint32_t const * a) {
- return vld3_dup_u32(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>
-uint64x1x3_t test_vld3_dup_u64(uint64_t const * a) {
- return vld3_dup_u64(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-int8x8x3_t test_vld3_dup_s8(int8_t const * a) {
- return vld3_dup_s8(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-int16x4x3_t test_vld3_dup_s16(int16_t const * a) {
- return vld3_dup_s16(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>
-int32x2x3_t test_vld3_dup_s32(int32_t const * a) {
- return vld3_dup_s32(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>
-int64x1x3_t test_vld3_dup_s64(int64_t const * a) {
- return vld3_dup_s64(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-float16x4x3_t test_vld3_dup_f16(float16_t const * a) {
- return vld3_dup_f16(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>, <2 x float>
-float32x2x3_t test_vld3_dup_f32(float32_t const * a) {
- return vld3_dup_f32(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x3_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>
-poly8x8x3_t test_vld3_dup_p8(poly8_t const * a) {
- return vld3_dup_p8(a);
-}
-
-// CHECK-LABEL: @test_vld3_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x3_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x3_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>
-poly16x4x3_t test_vld3_dup_p16(poly16_t const * a) {
- return vld3_dup_p16(a);
-}
-
// CHECK-LABEL: @test_vld3q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16
@@ -6293,111 +6083,6 @@ poly16x4x4_t test_vld4_p16(poly16_t const * a) {
return vld4_p16(a);
}
-// CHECK-LABEL: @test_vld4_dup_u8(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-uint8x8x4_t test_vld4_dup_u8(uint8_t const * a) {
- return vld4_dup_u8(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_u16(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-uint16x4x4_t test_vld4_dup_u16(uint16_t const * a) {
- return vld4_dup_u16(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_u32(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>
-uint32x2x4_t test_vld4_dup_u32(uint32_t const * a) {
- return vld4_dup_u32(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_u64(
-// CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x1x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>
-uint64x1x4_t test_vld4_dup_u64(uint64_t const * a) {
- return vld4_dup_u64(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_s8(
-// CHECK: [[__RET:%.*]] = alloca %struct.int8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-int8x8x4_t test_vld4_dup_s8(int8_t const * a) {
- return vld4_dup_s8(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_s16(
-// CHECK: [[__RET:%.*]] = alloca %struct.int16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-int16x4x4_t test_vld4_dup_s16(int16_t const * a) {
- return vld4_dup_s16(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_s32(
-// CHECK: [[__RET:%.*]] = alloca %struct.int32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i32* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>
-int32x2x4_t test_vld4_dup_s32(int32_t const * a) {
- return vld4_dup_s32(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_s64(
-// CHECK: [[__RET:%.*]] = alloca %struct.int64x1x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x1x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i64* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>
-int64x1x4_t test_vld4_dup_s64(int64_t const * a) {
- return vld4_dup_s64(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_f16(
-// CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast half* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-float16x4x4_t test_vld4_dup_f16(float16_t const * a) {
- return vld4_dup_f16(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_f32(
-// CHECK: [[__RET:%.*]] = alloca %struct.float32x2x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.float32x2x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast float* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <2 x float>, <2 x float>, <2 x float>, <2 x float>
-float32x2x4_t test_vld4_dup_f32(float32_t const * a) {
- return vld4_dup_f32(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_p8(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x4_t* [[__RET]] to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>
-poly8x8x4_t test_vld4_dup_p8(poly8_t const * a) {
- return vld4_dup_p8(a);
-}
-
-// CHECK-LABEL: @test_vld4_dup_p16(
-// CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x4_t, align 8
-// CHECK: [[TMP0:%.*]] = bitcast %struct.poly16x4x4_t* [[__RET]] to i8*
-// CHECK: [[TMP1:%.*]] = bitcast i16* %a to i8*
-// CHECK: [[VLD_DUP:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>
-poly16x4x4_t test_vld4_dup_p16(poly16_t const * a) {
- return vld4_dup_p16(a);
-}
-
// CHECK-LABEL: @test_vld4q_lane_u16(
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16