diff options
author:    David Majnemer <david.majnemer@gmail.com>  2015-02-13 07:55:47 +0000
committer: David Majnemer <david.majnemer@gmail.com>  2015-02-13 07:55:47 +0000
commit:    abc482effcabd373f5c96fb8f9dd493968a112dd (patch)
tree:      2b1eb59e9ebf337925c18d2ad7537c0d60995c27 /clang/lib/CodeGen
parent:    2a903b900d212dfaf5482408042a6f7b9e87c9a6 (diff)
download:  bcm5719-llvm-abc482effcabd373f5c96fb8f9dd493968a112dd.tar.gz
           bcm5719-llvm-abc482effcabd373f5c96fb8f9dd493968a112dd.zip
MS ABI: Implement /volatile:ms
The /volatile:ms semantics turn volatile loads and stores into atomic
acquire and release operations. This distinction is important because
volatile memory operations do not form a happens-before relationship
with non-atomic memory. This means that a volatile store is not
sufficient for implementing a mutex unlock routine.
Differential Revision: http://reviews.llvm.org/D7580
llvm-svn: 229082
Diffstat (limited to 'clang/lib/CodeGen'):
  clang/lib/CodeGen/CGAtomic.cpp      | 61
  clang/lib/CodeGen/CGExpr.cpp        |  5
  clang/lib/CodeGen/CGExprAgg.cpp     |  8
  clang/lib/CodeGen/CGExprComplex.cpp |  3
  clang/lib/CodeGen/CGStmtOpenMP.cpp  |  7
  clang/lib/CodeGen/CodeGenFunction.h | 10
6 files changed, 81 insertions, 13 deletions
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index 927195083d4..a0b6073f6bf 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -1006,9 +1006,45 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal, return convertTempToRValue(Temp, ResultSlot, Loc); } +/// An LValue is a candidate for having its loads and stores be made atomic if +/// we are operating under /volatile:ms *and* the LValue itself is volatile and +/// performing such an operation can be performed without a libcall. +bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { + AtomicInfo AI(*this, LV); + bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType()); + // An atomic is inline if we don't need to use a libcall. + bool AtomicIsInline = !AI.shouldUseLibcall(); + return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline; +} + +/// An type is a candidate for having its loads and stores be made atomic if +/// we are operating under /volatile:ms *and* we know the access is volatile and +/// performing such an operation can be performed without a libcall. +bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty, + bool IsVolatile) const { + // An atomic is inline if we don't need to use a libcall (e.g. it is builtin). + bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic( + getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)); + return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline; +} + +RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL, + AggValueSlot Slot) { + llvm::AtomicOrdering AO; + bool IsVolatile = LV.isVolatileQualified(); + if (LV.getType()->isAtomicType()) { + AO = llvm::SequentiallyConsistent; + } else { + AO = llvm::Acquire; + IsVolatile = true; + } + return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot); +} + /// Emit a load from an l-value of atomic type. 
Note that the r-value /// we produce is an r-value of the atomic *value* type. RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc, + llvm::AtomicOrdering AO, bool IsVolatile, AggValueSlot resultSlot) { AtomicInfo atomics(*this, src); LValue LVal = atomics.getAtomicLValue(); @@ -1060,11 +1096,11 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc, // Okay, we're doing this natively. llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr); llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load"); - load->setAtomic(llvm::SequentiallyConsistent); + load->setAtomic(AO); // Other decoration. load->setAlignment(src.getAlignment().getQuantity()); - if (src.isVolatileQualified()) + if (IsVolatile) load->setVolatile(true); if (src.getTBAAInfo()) CGM.DecorateInstruction(load, src.getTBAAInfo()); @@ -1161,12 +1197,27 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { getAtomicAlignment().getQuantity()); } +void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue, + bool isInit) { + bool IsVolatile = lvalue.isVolatileQualified(); + llvm::AtomicOrdering AO; + if (lvalue.getType()->isAtomicType()) { + AO = llvm::SequentiallyConsistent; + } else { + AO = llvm::Release; + IsVolatile = true; + } + return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit); +} + /// Emit a store to an l-value of atomic type. /// /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. -void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) { +void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, + llvm::AtomicOrdering AO, bool IsVolatile, + bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. 
assert(!rvalue.isAggregate() || @@ -1209,11 +1260,11 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) { llvm::StoreInst *store = Builder.CreateStore(intValue, addr); // Initializations don't need to be atomic. - if (!isInit) store->setAtomic(llvm::SequentiallyConsistent); + if (!isInit) store->setAtomic(AO); // Other decoration. store->setAlignment(dest.getAlignment().getQuantity()); - if (dest.isVolatileQualified()) + if (IsVolatile) store->setVolatile(true); if (dest.getTBAAInfo()) CGM.DecorateInstruction(store, dest.getTBAAInfo()); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 568f949c342..18aa262160b 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -1136,7 +1136,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, } // Atomic operations have to be done on integral types. - if (Ty->isAtomicType()) { + if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) { LValue lvalue = LValue::MakeAddr(Addr, Ty, CharUnits::fromQuantity(Alignment), getContext(), TBAAInfo); @@ -1255,7 +1255,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, Value = EmitToMemory(Value, Ty); - if (Ty->isAtomicType()) { + if (Ty->isAtomicType() || + (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) { EmitAtomicStore(RValue::get(Value), LValue::MakeAddr(Addr, Ty, CharUnits::fromQuantity(Alignment), diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 80b16dd5ba3..dd2da23cd85 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -212,7 +212,7 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { LValue LV = CGF.EmitLValue(E); // If the type of the l-value is atomic, then do an atomic load. 
- if (LV.getType()->isAtomicType()) { + if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) { CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest); return; } @@ -865,7 +865,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); // That copy is an atomic copy if the LHS is atomic. - if (LHS.getType()->isAtomicType()) { + if (LHS.getType()->isAtomicType() || + CGF.LValueIsSuitableForInlineAtomic(LHS)) { CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); return; } @@ -882,7 +883,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { // If we have an atomic type, evaluate into the destination and then // do an atomic copy. - if (LHS.getType()->isAtomicType()) { + if (LHS.getType()->isAtomicType() || + CGF.LValueIsSuitableForInlineAtomic(LHS)) { EnsureDest(E->getRHS()->getType()); Visit(E->getRHS()); CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp index ceec85a9a89..1fea5a127c0 100644 --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -336,7 +336,8 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue, /// specified value pointer. 
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue, bool isInit) { - if (lvalue.getType()->isAtomicType()) + if (lvalue.getType()->isAtomicType() || + (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue))) return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit); llvm::Value *Ptr = lvalue.getAddress(); diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp index b4dcadc5fdc..6b3faa14162 100644 --- a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -829,8 +829,11 @@ static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); LValue XLValue = CGF.EmitLValue(X); LValue VLValue = CGF.EmitLValue(V); - RValue Res = XLValue.isGlobalReg() ? CGF.EmitLoadOfLValue(XLValue, Loc) - : CGF.EmitAtomicLoad(XLValue, Loc); + RValue Res = XLValue.isGlobalReg() + ? CGF.EmitLoadOfLValue(XLValue, Loc) + : CGF.EmitAtomicLoad(XLValue, Loc, + IsSeqCst ? 
llvm::SequentiallyConsistent + : llvm::Monotonic); // OpenMP, 2.12.6, atomic Construct // Any atomic construct with a seq_cst clause forces the atomically // performed operation to include an implicit flush operation without a diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 12f066bf171..ea8166b2827 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -2147,11 +2147,21 @@ public: void EmitAtomicInit(Expr *E, LValue lvalue); + bool LValueIsSuitableForInlineAtomic(LValue Src); + bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const; + + RValue EmitAtomicLoad(LValue LV, SourceLocation SL, + AggValueSlot Slot = AggValueSlot::ignored()); + RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc, + llvm::AtomicOrdering AO, bool IsVolatile = false, AggValueSlot slot = AggValueSlot::ignored()); void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit); + void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO, + bool IsVolatile, bool isInit); + std::pair<RValue, RValue> EmitAtomicCompareExchange( LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, |