Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp      | 61
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp        |  5
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp     |  8
-rw-r--r--  clang/lib/CodeGen/CGExprComplex.cpp |  3
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp  |  7
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 10
6 files changed, 13 insertions, 81 deletions
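This change backs out the /volatile:ms handling in CodeGen: the LValueIsSuitableForInlineAtomic and typeIsSuitableForInlineAtomic predicates go away, along with the EmitAtomicLoad/EmitAtomicStore overloads that took an explicit llvm::AtomicOrdering, so atomic loads and stores are once again emitted with SequentiallyConsistent ordering unconditionally. As a hypothetical illustration (not part of the patch), this is the kind of source the removed path affected: under the deleted code, a plain volatile access compiled with /volatile:ms could be lowered as an inline atomic with acquire/release ordering.

    /* Hypothetical example of source affected by the removed /volatile:ms
     * path; 'flag' and 'read_flag' are made-up names. With the deleted code
     * in place, the load below could be emitted as an inline atomic load
     * with acquire ordering (and a volatile store as a release store);
     * after this change it is an ordinary volatile access again. */
    volatile int flag;

    int read_flag(void) {
      return flag;   /* was:  load atomic volatile ... acquire */
                     /* now:  load volatile ...                */
    }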
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index a0b6073f6bf..927195083d4 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -1006,45 +1006,9 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
return convertTempToRValue(Temp, ResultSlot, Loc);
}
-/// An LValue is a candidate for having its loads and stores be made atomic if
-/// we are operating under /volatile:ms *and* the LValue itself is volatile and
-/// performing such an operation can be performed without a libcall.
-bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
- AtomicInfo AI(*this, LV);
- bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
- // An atomic is inline if we don't need to use a libcall.
- bool AtomicIsInline = !AI.shouldUseLibcall();
- return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
-}
-
-/// An type is a candidate for having its loads and stores be made atomic if
-/// we are operating under /volatile:ms *and* we know the access is volatile and
-/// performing such an operation can be performed without a libcall.
-bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
- bool IsVolatile) const {
- // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
- bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
- getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
- return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
-}
-
-RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
- AggValueSlot Slot) {
- llvm::AtomicOrdering AO;
- bool IsVolatile = LV.isVolatileQualified();
- if (LV.getType()->isAtomicType()) {
- AO = llvm::SequentiallyConsistent;
- } else {
- AO = llvm::Acquire;
- IsVolatile = true;
- }
- return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
-}
-
/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
- llvm::AtomicOrdering AO, bool IsVolatile,
AggValueSlot resultSlot) {
AtomicInfo atomics(*this, src);
LValue LVal = atomics.getAtomicLValue();
@@ -1096,11 +1060,11 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
// Okay, we're doing this natively.
llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
- load->setAtomic(AO);
+ load->setAtomic(llvm::SequentiallyConsistent);
// Other decoration.
load->setAlignment(src.getAlignment().getQuantity());
- if (IsVolatile)
+ if (src.isVolatileQualified())
load->setVolatile(true);
if (src.getTBAAInfo())
CGM.DecorateInstruction(load, src.getTBAAInfo());
@@ -1197,27 +1161,12 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
getAtomicAlignment().getQuantity());
}
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
- bool isInit) {
- bool IsVolatile = lvalue.isVolatileQualified();
- llvm::AtomicOrdering AO;
- if (lvalue.getType()->isAtomicType()) {
- AO = llvm::SequentiallyConsistent;
- } else {
- AO = llvm::Release;
- IsVolatile = true;
- }
- return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
-}
-
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
- llvm::AtomicOrdering AO, bool IsVolatile,
- bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
@@ -1260,11 +1209,11 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
// Initializations don't need to be atomic.
- if (!isInit) store->setAtomic(AO);
+ if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
// Other decoration.
store->setAlignment(dest.getAlignment().getQuantity());
- if (IsVolatile)
+ if (dest.isVolatileQualified())
store->setVolatile(true);
if (dest.getTBAAInfo())
CGM.DecorateInstruction(store, dest.getTBAAInfo());
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 18aa262160b..568f949c342 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1136,7 +1136,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
}
// Atomic operations have to be done on integral types.
- if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
+ if (Ty->isAtomicType()) {
LValue lvalue = LValue::MakeAddr(Addr, Ty,
CharUnits::fromQuantity(Alignment),
getContext(), TBAAInfo);
@@ -1255,8 +1255,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
Value = EmitToMemory(Value, Ty);
- if (Ty->isAtomicType() ||
- (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
+ if (Ty->isAtomicType()) {
EmitAtomicStore(RValue::get(Value),
LValue::MakeAddr(Addr, Ty,
CharUnits::fromQuantity(Alignment),
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index dd2da23cd85..80b16dd5ba3 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -212,7 +212,7 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
LValue LV = CGF.EmitLValue(E);
// If the type of the l-value is atomic, then do an atomic load.
- if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
+ if (LV.getType()->isAtomicType()) {
CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
return;
}
@@ -865,8 +865,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// That copy is an atomic copy if the LHS is atomic.
- if (LHS.getType()->isAtomicType() ||
- CGF.LValueIsSuitableForInlineAtomic(LHS)) {
+ if (LHS.getType()->isAtomicType()) {
CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
return;
}
@@ -883,8 +882,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// If we have an atomic type, evaluate into the destination and then
// do an atomic copy.
- if (LHS.getType()->isAtomicType() ||
- CGF.LValueIsSuitableForInlineAtomic(LHS)) {
+ if (LHS.getType()->isAtomicType()) {
EnsureDest(E->getRHS()->getType());
Visit(E->getRHS());
CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 1fea5a127c0..ceec85a9a89 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -336,8 +336,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
/// specified value pointer.
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
bool isInit) {
- if (lvalue.getType()->isAtomicType() ||
- (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
+ if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
llvm::Value *Ptr = lvalue.getAddress();
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 6b3faa14162..b4dcadc5fdc 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -829,11 +829,8 @@ static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
LValue VLValue = CGF.EmitLValue(V);
- RValue Res = XLValue.isGlobalReg()
- ? CGF.EmitLoadOfLValue(XLValue, Loc)
- : CGF.EmitAtomicLoad(XLValue, Loc,
- IsSeqCst ? llvm::SequentiallyConsistent
- : llvm::Monotonic);
+ RValue Res = XLValue.isGlobalReg() ? CGF.EmitLoadOfLValue(XLValue, Loc)
+ : CGF.EmitAtomicLoad(XLValue, Loc);
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index ea8166b2827..12f066bf171 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -2147,21 +2147,11 @@ public:
void EmitAtomicInit(Expr *E, LValue lvalue);
- bool LValueIsSuitableForInlineAtomic(LValue Src);
- bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const;
-
- RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
- AggValueSlot Slot = AggValueSlot::ignored());
-
RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
- llvm::AtomicOrdering AO, bool IsVolatile = false,
AggValueSlot slot = AggValueSlot::ignored());
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
- void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
- bool IsVolatile, bool isInit);
-
std::pair<RValue, RValue> EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
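For reference, a minimal sketch (not part of the diff, assuming C11 atomics and made-up names) of what the surviving entry points do after this change: with the ordering parameter gone, every load or store routed through EmitAtomicLoad/EmitAtomicStore is seq_cst again.

    /* Sketch only. After this patch, both accesses below go through the
     * single remaining EmitAtomicLoad/EmitAtomicStore overloads, which
     * hardcode llvm::SequentiallyConsistent, so the emitted IR is roughly:
     *   load atomic i32 ... seq_cst, align 4
     *   store atomic i32 ... seq_cst, align 4 */
    _Atomic int counter;

    int read_counter(void)      { return counter; }
    void write_counter(int v)   { counter = v; }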