path: root/clang/test/CodeGen/ms-intrinsics.c
author     Reid Kleckner <rnk@google.com>  2019-03-28 22:59:09 +0000
committer  Reid Kleckner <rnk@google.com>  2019-03-28 22:59:09 +0000
commit     73253bdefccad49caf123a61ddbecf340d0547d4 (patch)
tree       288b19d370e00d58fa4431e36d6156e32a0b5800  /clang/test/CodeGen/ms-intrinsics.c
parent     6c826957532816b4c7d69306aa184372a1c9c698 (diff)
download   bcm5719-llvm-73253bdefccad49caf123a61ddbecf340d0547d4.tar.gz
           bcm5719-llvm-73253bdefccad49caf123a61ddbecf340d0547d4.zip
[MS] Make __iso_volatile_* available on all targets
Future versions of MSVC make these intrinsics available on x86 & x64, according to: http://lists.llvm.org/pipermail/cfe-dev/2019-March/061711.html

The purpose of these builtins is to emit plain, non-atomic, volatile loads and stores even when /volatile:ms (-cc1 -fms-volatile) is enabled.

llvm-svn: 357220
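For context (not part of the patch), a minimal usage sketch of these intrinsics. The function names write_flag/read_flag are purely illustrative, and the <intrin.h> include is assumed to provide the declarations in an MSVC-compatible environment; under /volatile:ms an ordinary volatile access may be given extra (acquire/release) ordering depending on the target, while the __iso_volatile_* builtins always lower to the plain volatile loads and stores that the CHECK lines below expect.

  #include <intrin.h>  /* assumed to declare the __iso_volatile_* intrinsics */

  /* Illustrative only: these names are hypothetical, not from the patch. */
  void write_flag(int volatile *flag) {
    *flag = 1;                        /* under /volatile:ms this store may get extra ordering */
    __iso_volatile_store32(flag, 1);  /* always a plain, non-atomic `store volatile i32` */
  }

  int read_flag(int volatile *flag) {
    return __iso_volatile_load32(flag);  /* always a plain `load volatile i32` */
  }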
Diffstat (limited to 'clang/test/CodeGen/ms-intrinsics.c')
-rw-r--r--  clang/test/CodeGen/ms-intrinsics.c  29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/clang/test/CodeGen/ms-intrinsics.c b/clang/test/CodeGen/ms-intrinsics.c
index 59ddaf8c107..cf41d23d35e 100644
--- a/clang/test/CodeGen/ms-intrinsics.c
+++ b/clang/test/CodeGen/ms-intrinsics.c
@@ -494,6 +494,35 @@ long test_InterlockedDecrement(long volatile *Addend) {
// CHECK: ret i32 [[RESULT]]
// CHECK: }
+char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); }
+short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); }
+int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
+__int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }
+
+// CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i8, i8* %p
+// CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i16, i16* %p
+// CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i32, i32* %p
+// CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i64, i64* %p
+
+void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
+void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
+void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
+void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }
+
+// CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i8 %v, i8* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i16 %v, i16* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i32 %v, i32* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i64 %v, i64* %p
+
+
#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
return _InterlockedExchange64(value, mask);