path: root/clang/test/CodeGen/arm_neon_intrinsics.c
author    Luke Cheeseman <luke.cheeseman@arm.com>    2015-06-12 15:52:39 +0000
committer Luke Cheeseman <luke.cheeseman@arm.com>    2015-06-12 15:52:39 +0000
commit    7f5571a129bd1c504fe2d7a5e3b5c7e3f6ebea66 (patch)
tree      2f0a6d1f3304ca658548fff1eff1035a07f27a48 /clang/test/CodeGen/arm_neon_intrinsics.c
parent    f8a5ce35a4e5141fad6737143b7d60abc01d1cb0 (diff)
This patch makes the NEON intrinsics vget_lane_f16, vgetq_lane_f16,
vset_lane_f16 and vsetq_lane_f16 available in AArch32.

Differential Revision: http://reviews.llvm.org/D10388

llvm-svn: 239610
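
As a reference for the change below, here is a minimal usage sketch of these
intrinsics on AArch32. It is not part of the patch: the function names and
build flags are illustrative, and it mirrors the test signatures below
(float16_t values are passed through pointers, and lane reads are consumed as
float32_t, since half precision is a storage-only format here).

#include <arm_neon.h>

// Read lane 1 of a 64-bit f16 vector; the result is consumed as
// float32_t, matching the tests added below.
float32_t read_lane1(float16x4_t v) {
  return vget_lane_f16(v, 1);
}

// Write the half-precision value *p into lane 3 of a 128-bit f16 vector,
// loading it through a pointer as the tests added below do.
float16x8_t write_lane3(float16_t *p, float16x8_t v) {
  return vsetq_lane_f16(*p, v, 3);
}

// Illustrative build command for an AArch32 target with fp16 NEON:
//   clang --target=armv7a-none-eabi -mfpu=neon-fp16 -c example.c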
Diffstat (limited to 'clang/test/CodeGen/arm_neon_intrinsics.c')
-rw-r--r--  clang/test/CodeGen/arm_neon_intrinsics.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/clang/test/CodeGen/arm_neon_intrinsics.c b/clang/test/CodeGen/arm_neon_intrinsics.c
index 756e3b43fc5..d92c32c476a 100644
--- a/clang/test/CodeGen/arm_neon_intrinsics.c
+++ b/clang/test/CodeGen/arm_neon_intrinsics.c
@@ -2399,6 +2399,12 @@ float32_t test_vget_lane_f32(float32x2_t a) {
return vget_lane_f32(a, 1);
}
+// CHECK-LABEL: test_vget_lane_f16
+// CHECK: vmov
+float32_t test_vget_lane_f16(float16x4_t a) {
+ return vget_lane_f16(a, 1);
+}
+
// CHECK-LABEL: test_vgetq_lane_u8
// CHECK: vmov
uint8_t test_vgetq_lane_u8(uint8x16_t a) {
@@ -2453,6 +2459,12 @@ float32_t test_vgetq_lane_f32(float32x4_t a) {
return vgetq_lane_f32(a, 3);
}
+// CHECK-LABEL: test_vgetq_lane_f16
+// CHECK: vmov
+float32_t test_vgetq_lane_f16(float16x8_t a) {
+ return vgetq_lane_f16(a, 3);
+}
+
// CHECK-LABEL: test_vget_lane_s64
// The optimizer is able to remove all moves now.
int64_t test_vget_lane_s64(int64x1_t a) {
@@ -9157,6 +9169,12 @@ float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) {
return vset_lane_f32(a, b, 1);
}
+// CHECK-LABEL: test_vset_lane_f16
+// CHECK: mov
+float16x4_t test_vset_lane_f16(float16_t *a, float16x4_t b) {
+ return vset_lane_f16(*a, b, 1);
+}
+
// CHECK-LABEL: test_vsetq_lane_u8
// CHECK: vmov
uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) {
@@ -9211,6 +9229,12 @@ float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) {
return vsetq_lane_f32(a, b, 3);
}
+// CHECK-LABEL: test_vsetq_lane_f16
+// CHECK: vmov
+float16x8_t test_vsetq_lane_f16(float16_t *a, float16x8_t b) {
+ return vsetq_lane_f16(*a, b, 3);
+}
+
// CHECK-LABEL: test_vset_lane_s64
// The optimizer is able to get rid of all moves now.
int64x1_t test_vset_lane_s64(int64_t a, int64x1_t b) {