author     David Majnemer <david.majnemer@gmail.com>  2016-01-22 16:36:44 +0000
committer  David Majnemer <david.majnemer@gmail.com>  2016-01-22 16:36:44 +0000
commit     fc80b6e5d816005a6ea33fc9107579a16bb73ae0 (patch)
tree       691e7a2dbb09ccdfb0734fbeb199670d96f3b40c
parent     f7ed399881ecd7b133ab3b885ba8113b113a0a58 (diff)
[MSVC Compat] Don't provide /volatile:ms semantics to types > pointer
Volatile loads of types wider than a pointer get split by MSVC because the base x86 ISA doesn't provide loads wider than the pointer width. LLVM assumes that it can emit a cmpxchg8b, but this is problematic if the memory is in a CONST memory segment. Instead, provide behavior compatible with MSVC: split loads wider than a pointer.

llvm-svn: 258506
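A rough source-level illustration of the change (a sketch, not part of the patch; the function name is hypothetical, and it assumes a 32-bit x86 Windows target built under /volatile:ms, matching the test10 case added below): a copy through a volatile 64-bit object is now emitted as ordinary volatile loads and stores rather than atomic ones, so no cmpxchg8b is needed for the read.

void copy64(volatile long long *dst, volatile long long *src) {
  /* before: load atomic volatile ... acquire / store atomic volatile ... release,
     which x86-32 can only lower through cmpxchg8b (a write, so it faults on
     read-only CONST memory)
     after:  load volatile / store volatile, matching MSVC, which splits the
     access into two pointer-width moves */
  *dst = *src;
}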
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp    | 19
-rw-r--r--  clang/test/CodeGen/ms-volatile.c  | 16
2 files changed, 30 insertions(+), 5 deletions(-)
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 1ef5d1035a0..4e52c3630c7 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -1295,10 +1295,23 @@ bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
 /// performing such an operation can be performed without a libcall.
 bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                     bool IsVolatile) const {
+  // The operation must be volatile for us to make it atomic.
+  if (!IsVolatile)
+    return false;
+  // The -fms-volatile flag must be passed for us to adopt this behavior.
+  if (!CGM.getCodeGenOpts().MSVolatile)
+    return false;
+
   // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
-  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
-      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
-  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+  if (!getContext().getTargetInfo().hasBuiltinAtomic(
+          getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)))
+    return false;
+
+  // MSVC doesn't seem to do this for types wider than a pointer.
+  if (getContext().getTypeSize(Ty) >
+      getContext().getTypeSize(getContext().getIntPtrType()))
+    return false;
+  return true;
 }
 
 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
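A quick way to see which types the new width check affects (a stand-alone sketch, not part of the patch; it uses sizeof(void *) as a stand-in for the IntPtrType width compared above, which coincide on x86, and the assertions as written hold only for a 32-bit target):

/* On a 32-bit x86 target, sizeof(void *) == 4: types no wider than a pointer
   keep the atomic volatile lowering under /volatile:ms, while wider types now
   get plain volatile accesses. */
_Static_assert(sizeof(float)     <= sizeof(void *), "float keeps the atomic lowering");
_Static_assert(sizeof(double)    >  sizeof(void *), "double falls back to plain volatile");
_Static_assert(sizeof(long long) >  sizeof(void *), "long long falls back to plain volatile");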
diff --git a/clang/test/CodeGen/ms-volatile.c b/clang/test/CodeGen/ms-volatile.c
index 87393e794f8..242ce067d62 100644
--- a/clang/test/CodeGen/ms-volatile.c
+++ b/clang/test/CodeGen/ms-volatile.c
@@ -52,11 +52,23 @@ void test7(volatile struct bar *p, volatile struct bar *q) {
 void test8(volatile double *p, volatile double *q) {
   *p = *q;
   // CHECK-LABEL: @test8
-  // CHECK: load atomic volatile {{.*}} acquire
-  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+  // CHECK: load volatile {{.*}}
+  // CHECK: store volatile {{.*}}, {{.*}}
 }
 void test9(volatile baz *p, baz *q) {
   *p = *q;
   // CHECK-LABEL: @test9
   // CHECK: store atomic volatile {{.*}}, {{.*}} release
 }
+void test10(volatile long long *p, volatile long long *q) {
+  *p = *q;
+  // CHECK-LABEL: @test10
+  // CHECK: load volatile {{.*}}
+  // CHECK: store volatile {{.*}}, {{.*}}
+}
+void test11(volatile float *p, volatile float *q) {
+  *p = *q;
+  // CHECK-LABEL: @test11
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}