| author | Albert Gutowski <agutowski@google.com> | 2016-09-13 21:51:37 +0000 |
|---|---|---|
| committer | Albert Gutowski <agutowski@google.com> | 2016-09-13 21:51:37 +0000 |
| commit | fc19fa3721927159a1ad7a6767d94b96c841ec03 | |
| tree | 4dc5e0872c60642b6d8c06000d592ed922566e01 /clang/test/CodeGen/ms-intrinsics.c | |
| parent | f76b56cb9c711e147ebb0407f153a0c4f38cd644 | |
Temporary fix for MS _Interlocked intrinsics
llvm-svn: 281401
Diffstat (limited to 'clang/test/CodeGen/ms-intrinsics.c')
| -rw-r--r-- | clang/test/CodeGen/ms-intrinsics.c | 66 |
|---|---|---|

1 file changed, 0 insertions, 66 deletions
```diff
diff --git a/clang/test/CodeGen/ms-intrinsics.c b/clang/test/CodeGen/ms-intrinsics.c
index 7ab9f0d01d8..8547c63142b 100644
--- a/clang/test/CodeGen/ms-intrinsics.c
+++ b/clang/test/CodeGen/ms-intrinsics.c
@@ -86,14 +86,6 @@ long test_InterlockedExchange(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedExchange64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
   return _InterlockedExchangeAdd8(value, mask);
 }
@@ -118,14 +110,6 @@ long test_InterlockedExchangeAdd(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedExchangeAdd64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedExchangeSub8(char volatile *value, char mask) {
   return _InterlockedExchangeSub8(value, mask);
 }
@@ -150,14 +134,6 @@ long test_InterlockedExchangeSub(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedExchangeSub64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedOr8(char volatile *value, char mask) {
   return _InterlockedOr8(value, mask);
 }
@@ -182,14 +158,6 @@ long test_InterlockedOr(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedOr64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedXor8(char volatile *value, char mask) {
   return _InterlockedXor8(value, mask);
 }
@@ -214,14 +182,6 @@ long test_InterlockedXor(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedXor64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedAnd8(char volatile *value, char mask) {
   return _InterlockedAnd8(value, mask);
 }
@@ -246,14 +206,6 @@ long test_InterlockedAnd(long volatile *value, long mask) {
 // CHECK: ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
-__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
-  return _InterlockedAnd64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
 char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
   return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
 }
@@ -308,15 +260,6 @@ long test_InterlockedIncrement(long volatile *Addend) {
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
 
-__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
-  return _InterlockedIncrement64(Addend);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
-// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
-// CHECK: ret i64 [[RESULT]]
-// CHECK: }
-
 short test_InterlockedDecrement16(short volatile *Addend) {
   return _InterlockedDecrement16(Addend);
 }
@@ -334,12 +277,3 @@ long test_InterlockedDecrement(long volatile *Addend) {
 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
-
-__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
-  return _InterlockedDecrement64(Addend);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
-// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
-// CHECK: ret i64 [[RESULT]]
-// CHECK: }
```
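For context, the kind of coverage the deleted cases provided can be reproduced by hand. The snippet below is a minimal sketch, not part of this commit: the file name, the function name `xchg64`, and the exact compile invocation are assumptions for illustration; the expected `atomicrmw` form is taken from the deleted CHECK lines above.

```c
/* repro.c -- hypothetical standalone example, not part of this commit.
 * Assumes an MSVC-compatible 64-bit target where _InterlockedExchange64
 * is available as a compiler builtin, e.g.:
 *   clang -target x86_64-windows-msvc -S -emit-llvm repro.c -o -
 */
#include <intrin.h>

__int64 xchg64(__int64 volatile *value, __int64 mask) {
  /* The deleted CHECK lines expected this call to lower to a sequentially
   * consistent atomic exchange, i.e. something like:
   *   atomicrmw xchg i64* %value, i64 %mask seq_cst
   */
  return _InterlockedExchange64(value, mask);
}
```

The 8-, 16-, and 32-bit test cases that remain in ms-intrinsics.c follow the same pattern with the correspondingly narrower types.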

