path: root/clang/test/CodeGen/aarch64-neon-extract.c
author    Tim Northover <tnorthover@apple.com>    2014-03-29 15:09:45 +0000
committer    Tim Northover <tnorthover@apple.com>    2014-03-29 15:09:45 +0000
commit    a2ee433c8d99632419d4a13a66cc4d06eada4014 (patch)
tree    e6ab2db8facbc4c5ed2fb11df260db8138572ace    /clang/test/CodeGen/aarch64-neon-extract.c
parent    af3698066a1ea2e5ab4cc08ae9a59620cf18adb7 (diff)
ARM64: initial clang support commit.
This adds Clang support for the ARM64 backend. There are definitely still some rough edges, so please bring up any issues you see with this patch. As with the LLVM commit though, we think it'll be more useful for merging with AArch64 from within the tree.

llvm-svn: 205100
Diffstat (limited to 'clang/test/CodeGen/aarch64-neon-extract.c')
-rw-r--r--    clang/test/CodeGen/aarch64-neon-extract.c    45
1 file changed, 24 insertions(+), 21 deletions(-)
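Most of the diff relaxes the immediates expected by the CHECK lines: the existing AArch64 RUN line produces hex immediates such as #0x6, while the newly added arm64 RUN line presumably prints them in decimal, so each pattern is rewritten as #{{(0x)?N}} to accept either spelling. A minimal sketch of one relaxed check, taken from the diff below and assuming FileCheck's {{...}} regex syntax:

#include <arm_neon.h>

// The pattern #{{(0x)?6}} matches both "#0x6" and "#6" in the generated assembly.
int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) {
  // CHECK-LABEL: test_vext_s16
  return vext_s16(a, b, 3);
  // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}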
diff --git a/clang/test/CodeGen/aarch64-neon-extract.c b/clang/test/CodeGen/aarch64-neon-extract.c
index 6e2d9691c9c..77d574cf0e2 100644
--- a/clang/test/CodeGen/aarch64-neon-extract.c
+++ b/clang/test/CodeGen/aarch64-neon-extract.c
@@ -1,6 +1,9 @@
// REQUIRES: aarch64-registered-target
+// REQUIRES: arm64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +neon \
// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu \
+// RUN: -ffp-contract=fast -S -O3 -o - %s | FileCheck %s
// Test new aarch64 intrinsics and types
@@ -9,19 +12,19 @@
int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) {
// CHECK-LABEL: test_vext_s8
return vext_s8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) {
// CHECK-LABEL: test_vext_s16
return vext_s16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
int32x2_t test_vext_s32(int32x2_t a, int32x2_t b) {
// CHECK-LABEL: test_vext_s32
return vext_s32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
@@ -32,43 +35,43 @@ int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) {
// CHECK-LABEL: test_vextq_s8
return vextq_s8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
int16x8_t test_vextq_s16(int16x8_t a, int16x8_t b) {
// CHECK-LABEL: test_vextq_s16
return vextq_s16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
int32x4_t test_vextq_s32(int32x4_t a, int32x4_t b) {
// CHECK-LABEL: test_vextq_s32
return vextq_s32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
int64x2_t test_vextq_s64(int64x2_t a, int64x2_t b) {
// CHECK-LABEL: test_vextq_s64
return vextq_s64(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
uint8x8_t test_vext_u8(uint8x8_t a, uint8x8_t b) {
// CHECK-LABEL: test_vext_u8
return vext_u8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) {
// CHECK-LABEL: test_vext_u16
return vext_u16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
uint32x2_t test_vext_u32(uint32x2_t a, uint32x2_t b) {
// CHECK-LABEL: test_vext_u32
return vext_u32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
@@ -79,31 +82,31 @@ uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) {
// CHECK-LABEL: test_vextq_u8
return vextq_u8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
uint16x8_t test_vextq_u16(uint16x8_t a, uint16x8_t b) {
// CHECK-LABEL: test_vextq_u16
return vextq_u16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
uint32x4_t test_vextq_u32(uint32x4_t a, uint32x4_t b) {
// CHECK-LABEL: test_vextq_u32
return vextq_u32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
uint64x2_t test_vextq_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: test_vextq_u64
return vextq_u64(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
float32x2_t test_vext_f32(float32x2_t a, float32x2_t b) {
// CHECK-LABEL: test_vext_f32
return vext_f32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?4}}
}
float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
@@ -114,35 +117,35 @@ float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
float32x4_t test_vextq_f32(float32x4_t a, float32x4_t b) {
// CHECK-LABEL: test_vextq_f32
return vextq_f32(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?4}}
}
float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) {
// CHECK-LABEL: test_vextq_f64
return vextq_f64(a, b, 1);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?8}}
}
poly8x8_t test_vext_p8(poly8x8_t a, poly8x8_t b) {
// CHECK-LABEL: test_vext_p8
return vext_p8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?2}}
}
poly16x4_t test_vext_p16(poly16x4_t a, poly16x4_t b) {
// CHECK-LABEL: test_vext_p16
return vext_p16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+ // CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #{{(0x)?6}}
}
poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) {
// CHECK-LABEL: test_vextq_p8
return vextq_p8(a, b, 2);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?2}}
}
poly16x8_t test_vextq_p16(poly16x8_t a, poly16x8_t b) {
// CHECK-LABEL: test_vextq_p16
return vextq_p16(a, b, 3);
- // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+ // CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #{{(0x)?6}}
}
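The immediates checked above follow from the vext semantics: the instruction extracts from the concatenation of the two operands starting at the given element index, and the ext immediate is that index expressed in bytes (index times element size). A minimal sketch of that relationship, using a hypothetical helper name vext_byte_offset that is not part of the test:

#include <stddef.h>

/* Hypothetical helper, not part of the test: the byte immediate that the
 * "ext" instruction encodes for a vext at the given element index. */
static inline size_t vext_byte_offset(size_t lane_index, size_t elem_size_bytes) {
  return lane_index * elem_size_bytes;
}

/* Matches the CHECK lines above:
 *   vext_s16(a, b, 3)  -> 3 * 2 == 6 bytes  ("#0x6" or "#6")
 *   vext_s32(a, b, 1)  -> 1 * 4 == 4 bytes  ("#0x4" or "#4")
 *   vextq_s64(a, b, 1) -> 1 * 8 == 8 bytes  ("#0x8" or "#8")
 */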