diff options
Diffstat (limited to 'clang/test/CodeGen/bittest-intrin.c')
| -rw-r--r-- | clang/test/CodeGen/bittest-intrin.c | 121 |
1 file changed, 105 insertions, 16 deletions
diff --git a/clang/test/CodeGen/bittest-intrin.c b/clang/test/CodeGen/bittest-intrin.c index 839d5656a9e..21a26efa107 100644 --- a/clang/test/CodeGen/bittest-intrin.c +++ b/clang/test/CodeGen/bittest-intrin.c @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=X64 +// RUN: %clang_cc1 -fms-extensions -triple thumbv7-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM +// RUN: %clang_cc1 -fms-extensions -triple aarch64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM volatile unsigned char sink = 0; void test32(long *base, long idx) { @@ -18,18 +20,105 @@ void test64(__int64 *base, __int64 idx) { sink = _interlockedbittestandset64(base, idx); } -// CHECK-LABEL: define dso_local void @test32(i32* %base, i32 %idx) -// CHECK: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// CHECK: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// CHECK: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// CHECK: call i8 asm sideeffect "btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// CHECK: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// CHECK: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) - -// CHECK-LABEL: define dso_local void @test64(i64* %base, i64 %idx) -// CHECK: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) -// CHECK: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) -// CHECK: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 
{{.*}}) -// CHECK: call i8 asm sideeffect "btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) -// CHECK: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) -// CHECK: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64-LABEL: define dso_local void @test32(i32* %base, i32 %idx) +// X64: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) +// X64: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) +// X64: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) +// X64: call i8 asm sideeffect "btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) +// X64: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) +// X64: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) + +// X64-LABEL: define dso_local void @test64(i64* %base, i64 %idx) +// X64: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64: call i8 asm sideeffect "btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) +// X64: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) + +// ARM-LABEL: define dso_local {{.*}}void @test32(i32* %base, i32 %idx) +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: 
%[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1 +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]] +// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1 +// ARM: %[[NEWBYTE:[^ ]*]] = xor i8 %[[BYTE]], %[[MASK]] +// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1 +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]] +// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1 +// ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1 +// ARM: %[[NEWBYTE:[^ ]*]] = and i8 %[[BYTE]], %[[NOTMASK]] +// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1 +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* 
%[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]] +// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1 +// ARM: %[[NEWBYTE:[^ ]*]] = or i8 %[[BYTE]], %[[MASK]] +// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1 +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]] +// ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1 +// ARM: %[[BYTE:[^ ]*]] = atomicrmw and i8* %[[BYTEADDR]], i8 %[[NOTMASK]] seq_cst +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]] +// ARM: %[[BYTE:[^ ]*]] = atomicrmw or i8* %[[BYTEADDR]], i8 %[[MASK]] seq_cst +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ARM-LABEL: define dso_local {{.*}}void @test64(i64* %base, i64 %idx) +// ARM: %[[IDXHI:[^ ]*]] = ashr i64 %{{.*}}, 3 +// ARM: %[[BASE:[^ ]*]] = bitcast i64* %{{.*}} to i8* +// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i64 
%[[IDXHI]] +// ARM: %[[IDX8:[^ ]*]] = trunc i64 %{{.*}} to i8 +// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7 +// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1 +// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]] +// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1 +// ARM: store volatile i8 %[[RES]], i8* @sink, align 1 + +// ... the rest is the same, but with i64 instead of i32. |

