diff options
author | Craig Topper <craig.topper@intel.com> | 2019-07-10 17:11:23 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@intel.com> | 2019-07-10 17:11:23 +0000 |
commit | f9cb127ca92fddeca203ce01592f10e9a53ef077 (patch) | |
tree | c0b88f0b59f1af54a8bca808c6e6b8051b3032d6 /clang/test/CodeGen/tbm-builtins.c | |
parent | ab5a30ac9dcec8c92d8e964b0701c6f164e3be88 (diff) | |
download | bcm5719-llvm-f9cb127ca92fddeca203ce01592f10e9a53ef077.tar.gz bcm5719-llvm-f9cb127ca92fddeca203ce01592f10e9a53ef077.zip |
[X86] Add guards to some of the x86 intrinsic tests to skip 64-bit mode only intrinsics when compiled for 32-bit mode.
All the command lines are for 64-bit mode, but sometimes I compile
the tests in 32-bit mode to see what assembly we get, and we need
to skip these to do that.
llvm-svn: 365668
Diffstat (limited to 'clang/test/CodeGen/tbm-builtins.c')
-rw-r--r-- | clang/test/CodeGen/tbm-builtins.c | 20 |
1 file changed, 20 insertions, 0 deletions
diff --git a/clang/test/CodeGen/tbm-builtins.c b/clang/test/CodeGen/tbm-builtins.c index 136a1d41c4e..ad4247be9a4 100644 --- a/clang/test/CodeGen/tbm-builtins.c +++ b/clang/test/CodeGen/tbm-builtins.c @@ -10,6 +10,7 @@ unsigned int test__bextri_u32(unsigned int a) { return __bextri_u32(a, 1); } +#ifdef __x86_64__ unsigned long long test__bextri_u64(unsigned long long a) { // CHECK-LABEL: test__bextri_u64 // CHECK: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 2) @@ -21,6 +22,7 @@ unsigned long long test__bextri_u64_bigint(unsigned long long a) { // CHECK: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 549755813887) return __bextri_u64(a, 0x7fffffffffLL); } +#endif unsigned int test__blcfill_u32(unsigned int a) { // CHECK-LABEL: test__blcfill_u32 @@ -29,12 +31,14 @@ unsigned int test__blcfill_u32(unsigned int a) { return __blcfill_u32(a); } +#ifdef __x86_64__ unsigned long long test__blcfill_u64(unsigned long long a) { // CHECK-LABEL: test__blcfill_u64 // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 // CHECK: %{{.*}} = and i64 %{{.*}}, [[TMP]] return __blcfill_u64(a); } +#endif unsigned int test__blci_u32(unsigned int a) { // CHECK-LABEL: test__blci_u32 @@ -44,6 +48,7 @@ unsigned int test__blci_u32(unsigned int a) { return __blci_u32(a); } +#ifdef __x86_64__ unsigned long long test__blci_u64(unsigned long long a) { // CHECK-LABEL: test__blci_u64 // CHECK: [[TMP1:%.*]] = add i64 %{{.*}}, 1 @@ -51,6 +56,7 @@ unsigned long long test__blci_u64(unsigned long long a) { // CHECK: %{{.*}} = or i64 %{{.*}}, [[TMP2]] return __blci_u64(a); } +#endif unsigned int test__blcic_u32(unsigned int a) { // CHECK-LABEL: test__blcic_u32 @@ -60,6 +66,7 @@ unsigned int test__blcic_u32(unsigned int a) { return __blcic_u32(a); } +#ifdef __x86_64__ unsigned long long test__blcic_u64(unsigned long long a) { // CHECK-LABEL: test__blcic_u64 // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 @@ -67,6 +74,7 @@ unsigned long long test__blcic_u64(unsigned long long a) { // CHECK-NEXT: {{.*}} = and 
i64 [[TMP1]], [[TMP2]] return __blcic_u64(a); } +#endif unsigned int test__blcmsk_u32(unsigned int a) { // CHECK-LABEL: test__blcmsk_u32 @@ -75,12 +83,14 @@ unsigned int test__blcmsk_u32(unsigned int a) { return __blcmsk_u32(a); } +#ifdef __x86_64__ unsigned long long test__blcmsk_u64(unsigned long long a) { // CHECK-LABEL: test__blcmsk_u64 // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 // CHECK-NEXT: {{.*}} = xor i64 %{{.*}}, [[TMP]] return __blcmsk_u64(a); } +#endif unsigned int test__blcs_u32(unsigned int a) { // CHECK-LABEL: test__blcs_u32 @@ -89,12 +99,14 @@ unsigned int test__blcs_u32(unsigned int a) { return __blcs_u32(a); } +#ifdef __x86_64__ unsigned long long test__blcs_u64(unsigned long long a) { // CHECK-LABEL: test__blcs_u64 // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] return __blcs_u64(a); } +#endif unsigned int test__blsfill_u32(unsigned int a) { // CHECK-LABEL: test__blsfill_u32 @@ -103,12 +115,14 @@ unsigned int test__blsfill_u32(unsigned int a) { return __blsfill_u32(a); } +#ifdef __x86_64__ unsigned long long test__blsfill_u64(unsigned long long a) { // CHECK-LABEL: test__blsfill_u64 // CHECK: [[TMP:%.*]] = sub i64 %{{.*}}, 1 // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] return __blsfill_u64(a); } +#endif unsigned int test__blsic_u32(unsigned int a) { // CHECK-LABEL: test__blsic_u32 @@ -118,6 +132,7 @@ unsigned int test__blsic_u32(unsigned int a) { return __blsic_u32(a); } +#ifdef __x86_64__ unsigned long long test__blsic_u64(unsigned long long a) { // CHECK-LABEL: test__blsic_u64 // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 @@ -125,6 +140,7 @@ unsigned long long test__blsic_u64(unsigned long long a) { // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] return __blsic_u64(a); } +#endif unsigned int test__t1mskc_u32(unsigned int a) { // CHECK-LABEL: test__t1mskc_u32 @@ -134,6 +150,7 @@ unsigned int test__t1mskc_u32(unsigned int a) { return __t1mskc_u32(a); } +#ifdef __x86_64__ unsigned long long 
test__t1mskc_u64(unsigned long long a) { // CHECK-LABEL: test__t1mskc_u64 // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 @@ -141,6 +158,7 @@ unsigned long long test__t1mskc_u64(unsigned long long a) { // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] return __t1mskc_u64(a); } +#endif unsigned int test__tzmsk_u32(unsigned int a) { // CHECK-LABEL: test__tzmsk_u32 @@ -150,6 +168,7 @@ unsigned int test__tzmsk_u32(unsigned int a) { return __tzmsk_u32(a); } +#ifdef __x86_64__ unsigned long long test__tzmsk_u64(unsigned long long a) { // CHECK-LABEL: test__tzmsk_u64 // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 @@ -157,3 +176,4 @@ unsigned long long test__tzmsk_u64(unsigned long long a) { // CHECK-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]] return __tzmsk_u64(a); } +#endif |