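This patch converts LLVM's AtomicOrdering from an unscoped enum to a scoped `enum class AtomicOrdering` and, throughout llvm/lib, replaces raw integer comparisons on ordering values with the named strength predicates from llvm/IR/AtomicOrdering.h (isStrongerThan, isStrongerThanMonotonic, isAcquireOrStronger, isReleaseOrStronger, toIRString, and friends).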
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp                        |  4
-rw-r--r--  llvm/lib/Analysis/AliasSetTracker.cpp                      |  4
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp             | 15
-rw-r--r--  llvm/lib/AsmParser/LLParser.cpp                            | 54
-rw-r--r--  llvm/lib/Bitcode/Reader/BitcodeReader.cpp                  | 39
-rw-r--r--  llvm/lib/Bitcode/Writer/BitcodeWriter.cpp                  | 14
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp                      | 38
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp      |  2
-rw-r--r--  llvm/lib/IR/AsmWriter.cpp                                  | 36
-rw-r--r--  llvm/lib/IR/Core.cpp                                       | 30
-rw-r--r--  llvm/lib/IR/Instruction.cpp                                |  4
-rw-r--r--  llvm/lib/IR/Instructions.cpp                               | 38
-rw-r--r--  llvm/lib/IR/Verifier.cpp                                   | 37
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp            |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp            |  4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrAtomics.td             | 11
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp                    | 46
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrInfo.td                        |  4
-rw-r--r--  llvm/lib/Target/CppBackend/CPPBackend.cpp                  | 15
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp                |  6
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp                | 10
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp            |  6
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                    |  8
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp                | 10
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp                      |  5
-rw-r--r--  llvm/lib/Transforms/IPO/MergeFunctions.cpp                 | 25
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp    | 44
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp  |  2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp    | 18
-rw-r--r--  llvm/lib/Transforms/Scalar/EarlyCSE.cpp                    |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerAtomic.cpp                 |  4
-rw-r--r--  llvm/lib/Transforms/Utils/GlobalStatus.cpp                 | 12
32 files changed, 284 insertions(+), 265 deletions(-)
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 453bc3dbe67..117f8cb123f 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -389,7 +389,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
- if (CX->getSuccessOrdering() > Monotonic)
+ if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
return MRI_ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
@@ -402,7 +402,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
- if (RMW->getOrdering() > Monotonic)
+ if (isStrongerThanMonotonic(RMW->getOrdering()))
return MRI_ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
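
The mechanical theme of the patch shows up here first: comparisons such as `getSuccessOrdering() > Monotonic` leaned on the old unscoped enum's integer values, which no longer compile once AtomicOrdering is an `enum class` — and which were about to become wrong anyway, since the scoped enum reserves a Consume slot that breaks the "larger value means stronger ordering" assumption. The named predicates live in llvm/IR/AtomicOrdering.h; a minimal self-contained sketch of their semantics (an illustration, not the header's exact lookup-table implementation):

#include <cstdint>

enum class AtomicOrdering : uint8_t {
  NotAtomic = 0, Unordered = 1, Monotonic = 2, /* Consume = 3 (reserved) */
  Acquire = 4, Release = 5, AcquireRelease = 6, SequentiallyConsistent = 7
};

// Ordering strength is only a partial order: Acquire and Release are
// incomparable. With Consume unused (as in LLVM IR), every other pair
// happens to follow the numeric encoding above.
static bool isStrongerThan(AtomicOrdering A, AtomicOrdering B) {
  if ((A == AtomicOrdering::Acquire && B == AtomicOrdering::Release) ||
      (A == AtomicOrdering::Release && B == AtomicOrdering::Acquire))
    return false;
  return (uint8_t)A > (uint8_t)B;
}

static bool isStrongerThanMonotonic(AtomicOrdering AO) {
  return isStrongerThan(AO, AtomicOrdering::Monotonic);
}
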
diff --git a/llvm/lib/Analysis/AliasSetTracker.cpp b/llvm/lib/Analysis/AliasSetTracker.cpp
index e3a055f2d1b..3cafb46f289 100644
--- a/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -300,7 +300,7 @@ bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {
bool AliasSetTracker::add(LoadInst *LI) {
- if (LI->getOrdering() > Monotonic) return addUnknown(LI);
+ if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI);
AAMDNodes AAInfo;
LI->getAAMetadata(AAInfo);
@@ -316,7 +316,7 @@ bool AliasSetTracker::add(LoadInst *LI) {
}
bool AliasSetTracker::add(StoreInst *SI) {
- if (SI->getOrdering() > Monotonic) return addUnknown(SI);
+ if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI);
AAMDNodes AAInfo;
SI->getAAMetadata(AAInfo);
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index ce6519abb4e..e23cd819f07 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -93,7 +93,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
Loc = MemoryLocation::get(LI);
return MRI_Ref;
}
- if (LI->getOrdering() == Monotonic) {
+ if (LI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(LI);
return MRI_ModRef;
}
@@ -106,7 +106,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
Loc = MemoryLocation::get(SI);
return MRI_Mod;
}
- if (SI->getOrdering() == Monotonic) {
+ if (SI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(SI);
return MRI_ModRef;
}
@@ -518,11 +518,11 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// A Monotonic (or higher) load is OK if the query inst is itself not
// atomic.
// FIXME: This is overly conservative.
- if (LI->isAtomic() && LI->getOrdering() > Unordered) {
+ if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(LI);
- if (LI->getOrdering() != Monotonic)
+ if (LI->getOrdering() != AtomicOrdering::Monotonic)
return MemDepResult::getClobber(LI);
}
@@ -588,7 +588,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(SI);
- if (SI->getOrdering() != Monotonic)
+ if (SI->getOrdering() != AtomicOrdering::Monotonic)
return MemDepResult::getClobber(SI);
}
@@ -644,9 +644,9 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// loads. DSE uses this to find preceding stores to delete and thus we
// can't bypass the fence if the query instruction is a store.
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
- if (isLoad && FI->getOrdering() == Release)
+ if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
continue;
-
+
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
// If necessary, perform additional analysis.
@@ -1708,4 +1708,3 @@ bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
MemDep.emplace(AA, AC, TLI, DT);
return false;
}
-
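
The `> Unordered` guard becomes `isStrongerThanUnordered`, i.e. "monotonic or stronger". Under the sketch after the AliasAnalysis.cpp hunk (helper name mirrors the real header):

static bool isStrongerThanUnordered(AtomicOrdering AO) {
  return isStrongerThan(AO, AtomicOrdering::Unordered);
}
// It holds for Monotonic, so the clobber checks above fire only for
// monotonic-or-stronger atomic loads and stores.
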
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index c8543206e21..bf1d2f005ee 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -1810,12 +1810,16 @@ bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
switch (Lex.getKind()) {
default: return TokError("Expected ordering on atomic instruction");
- case lltok::kw_unordered: Ordering = Unordered; break;
- case lltok::kw_monotonic: Ordering = Monotonic; break;
- case lltok::kw_acquire: Ordering = Acquire; break;
- case lltok::kw_release: Ordering = Release; break;
- case lltok::kw_acq_rel: Ordering = AcquireRelease; break;
- case lltok::kw_seq_cst: Ordering = SequentiallyConsistent; break;
+ case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break;
+ case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break;
+ // Not specified yet:
+ // case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break;
+ case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break;
+ case lltok::kw_release: Ordering = AtomicOrdering::Release; break;
+ case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break;
+ case lltok::kw_seq_cst:
+ Ordering = AtomicOrdering::SequentiallyConsistent;
+ break;
}
Lex.Lex();
return false;
@@ -5884,7 +5888,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
unsigned Alignment = 0;
bool AteExtraComma = false;
bool isAtomic = false;
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SynchronizationScope Scope = CrossThread;
if (Lex.getKind() == lltok::kw_atomic) {
@@ -5911,7 +5915,8 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
return Error(Loc, "load operand must be a pointer to a first class type");
if (isAtomic && !Alignment)
return Error(Loc, "atomic load must have explicit non-zero alignment");
- if (Ordering == Release || Ordering == AcquireRelease)
+ if (Ordering == AtomicOrdering::Release ||
+ Ordering == AtomicOrdering::AcquireRelease)
return Error(Loc, "atomic load cannot use Release ordering");
if (Ty != cast<PointerType>(Val->getType())->getElementType())
@@ -5932,7 +5937,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
unsigned Alignment = 0;
bool AteExtraComma = false;
bool isAtomic = false;
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SynchronizationScope Scope = CrossThread;
if (Lex.getKind() == lltok::kw_atomic) {
@@ -5961,7 +5966,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
return Error(Loc, "stored value and pointer type do not match");
if (isAtomic && !Alignment)
return Error(Loc, "atomic store must have explicit non-zero alignment");
- if (Ordering == Acquire || Ordering == AcquireRelease)
+ if (Ordering == AtomicOrdering::Acquire ||
+ Ordering == AtomicOrdering::AcquireRelease)
return Error(Loc, "atomic store cannot use Acquire ordering");
Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
@@ -5974,8 +5980,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
bool AteExtraComma = false;
- AtomicOrdering SuccessOrdering = NotAtomic;
- AtomicOrdering FailureOrdering = NotAtomic;
+ AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
+ AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
SynchronizationScope Scope = CrossThread;
bool isVolatile = false;
bool isWeak = false;
@@ -5995,12 +6001,16 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
ParseOrdering(FailureOrdering))
return true;
- if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
+ if (SuccessOrdering == AtomicOrdering::Unordered ||
+ FailureOrdering == AtomicOrdering::Unordered)
return TokError("cmpxchg cannot be unordered");
- if (SuccessOrdering < FailureOrdering)
- return TokError("cmpxchg must be at least as ordered on success as failure");
- if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
- return TokError("cmpxchg failure ordering cannot include release semantics");
+ if (isStrongerThan(FailureOrdering, SuccessOrdering))
+ return TokError("cmpxchg failure argument shall be no stronger than the "
+ "success argument");
+ if (FailureOrdering == AtomicOrdering::Release ||
+ FailureOrdering == AtomicOrdering::AcquireRelease)
+ return TokError(
+ "cmpxchg failure ordering cannot include release semantics");
if (!Ptr->getType()->isPointerTy())
return Error(PtrLoc, "cmpxchg operand must be a pointer");
if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
@@ -6023,7 +6033,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
bool AteExtraComma = false;
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SynchronizationScope Scope = CrossThread;
bool isVolatile = false;
AtomicRMWInst::BinOp Operation;
@@ -6053,7 +6063,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
return true;
- if (Ordering == Unordered)
+ if (Ordering == AtomicOrdering::Unordered)
return TokError("atomicrmw cannot be unordered");
if (!Ptr->getType()->isPointerTy())
return Error(PtrLoc, "atomicrmw operand must be a pointer");
@@ -6076,14 +6086,14 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
/// ParseFence
/// ::= 'fence' 'singlethread'? AtomicOrdering
int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
- AtomicOrdering Ordering = NotAtomic;
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SynchronizationScope Scope = CrossThread;
if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
return true;
- if (Ordering == Unordered)
+ if (Ordering == AtomicOrdering::Unordered)
return TokError("fence cannot be unordered");
- if (Ordering == Monotonic)
+ if (Ordering == AtomicOrdering::Monotonic)
return TokError("fence cannot be monotonic");
Inst = new FenceInst(Context, Ordering, Scope);
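
The one semantically interesting rewrite in the parser is the cmpxchg check: `SuccessOrdering < FailureOrdering` compared enum integers, but on the strength lattice "failure no stronger than success" is not the complement of a numeric `<` — Acquire and Release are incomparable, and the scoped enum's numeric values no longer track strength once the Consume slot is reserved. The full validation, sketched with `isStrongerThan` from the earlier sketch:

static bool validCmpXchgOrderings(AtomicOrdering Success,
                                  AtomicOrdering Failure) {
  if (Success == AtomicOrdering::Unordered ||
      Failure == AtomicOrdering::Unordered)
    return false;                         // cmpxchg cannot be unordered
  if (isStrongerThan(Failure, Success))
    return false;                         // failure must be no stronger
  return Failure != AtomicOrdering::Release &&
         Failure != AtomicOrdering::AcquireRelease; // no release on failure
}

Concretely, success=release with failure=acquire is accepted (the two are incomparable), while success=monotonic with failure=acquire is rejected.
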
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 85a331cc00d..0e74fe38d63 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -808,14 +808,14 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
static AtomicOrdering getDecodedOrdering(unsigned Val) {
switch (Val) {
- case bitc::ORDERING_NOTATOMIC: return NotAtomic;
- case bitc::ORDERING_UNORDERED: return Unordered;
- case bitc::ORDERING_MONOTONIC: return Monotonic;
- case bitc::ORDERING_ACQUIRE: return Acquire;
- case bitc::ORDERING_RELEASE: return Release;
- case bitc::ORDERING_ACQREL: return AcquireRelease;
+ case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
+ case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
+ case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
+ case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
+ case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
+ case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
default: // Map unknown orderings to sequentially-consistent.
- case bitc::ORDERING_SEQCST: return SequentiallyConsistent;
+ case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
}
}
@@ -4936,10 +4936,11 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
Ty = cast<PointerType>(Op->getType())->getElementType();
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
- if (Ordering == NotAtomic || Ordering == Release ||
- Ordering == AcquireRelease)
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Release ||
+ Ordering == AtomicOrdering::AcquireRelease)
return error("Invalid record");
- if (Ordering != NotAtomic && Record[OpNum] == 0)
+ if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
@@ -4992,11 +4993,12 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
return EC;
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
- if (Ordering == NotAtomic || Ordering == Acquire ||
- Ordering == AcquireRelease)
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Acquire ||
+ Ordering == AtomicOrdering::AcquireRelease)
return error("Invalid record");
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
- if (Ordering != NotAtomic && Record[OpNum] == 0)
+ if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
unsigned Align;
@@ -5022,7 +5024,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
return error("Invalid record");
AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
- if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
+ if (SuccessOrdering == AtomicOrdering::NotAtomic ||
+ SuccessOrdering == AtomicOrdering::Unordered)
return error("Invalid record");
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
@@ -5067,7 +5070,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
Operation > AtomicRMWInst::LAST_BINOP)
return error("Invalid record");
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
- if (Ordering == NotAtomic || Ordering == Unordered)
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Unordered)
return error("Invalid record");
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
@@ -5079,8 +5083,9 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
if (2 != Record.size())
return error("Invalid record");
AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
- if (Ordering == NotAtomic || Ordering == Unordered ||
- Ordering == Monotonic)
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Unordered ||
+ Ordering == AtomicOrdering::Monotonic)
return error("Invalid record");
SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
I = new FenceInst(Context, Ordering, SynchScope);
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 99046d993a8..cbaaea8bbc6 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -133,13 +133,13 @@ static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
- case NotAtomic: return bitc::ORDERING_NOTATOMIC;
- case Unordered: return bitc::ORDERING_UNORDERED;
- case Monotonic: return bitc::ORDERING_MONOTONIC;
- case Acquire: return bitc::ORDERING_ACQUIRE;
- case Release: return bitc::ORDERING_RELEASE;
- case AcquireRelease: return bitc::ORDERING_ACQREL;
- case SequentiallyConsistent: return bitc::ORDERING_SEQCST;
+ case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC;
+ case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED;
+ case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC;
+ case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE;
+ case AtomicOrdering::Release: return bitc::ORDERING_RELEASE;
+ case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL;
+ case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST;
}
llvm_unreachable("Invalid ordering");
}
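
A payoff of the scoped enum: switches like GetEncodedOrdering are fully covered, so they need no default case and the compiler's -Wswitch can flag any enumerator added later, with the llvm_unreachable after the switch catching corrupt values. The pattern in isolation (encoding numbers are illustrative of bitc::ORDERING_*, not authoritative):

static unsigned encodeOrdering(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:              return 0;
  case AtomicOrdering::Unordered:              return 1;
  case AtomicOrdering::Monotonic:              return 2;
  case AtomicOrdering::Acquire:                return 3;
  case AtomicOrdering::Release:                return 4;
  case AtomicOrdering::AcquireRelease:         return 5;
  case AtomicOrdering::SequentiallyConsistent: return 6;
  }
  __builtin_unreachable(); // after, not inside, the switch: keeps it covered
}
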
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 40140e41a03..8c0c0f4acba 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -101,37 +101,37 @@ bool AtomicExpand::runOnFunction(Function &F) {
assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
if (TLI->shouldInsertFencesForAtomic(I)) {
- auto FenceOrdering = Monotonic;
+ auto FenceOrdering = AtomicOrdering::Monotonic;
bool IsStore, IsLoad;
- if (LI && isAtLeastAcquire(LI->getOrdering())) {
+ if (LI && isAcquireOrStronger(LI->getOrdering())) {
FenceOrdering = LI->getOrdering();
- LI->setOrdering(Monotonic);
+ LI->setOrdering(AtomicOrdering::Monotonic);
IsStore = false;
IsLoad = true;
- } else if (SI && isAtLeastRelease(SI->getOrdering())) {
+ } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
FenceOrdering = SI->getOrdering();
- SI->setOrdering(Monotonic);
+ SI->setOrdering(AtomicOrdering::Monotonic);
IsStore = true;
IsLoad = false;
- } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
- isAtLeastAcquire(RMWI->getOrdering()))) {
+ } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
+ isAcquireOrStronger(RMWI->getOrdering()))) {
FenceOrdering = RMWI->getOrdering();
- RMWI->setOrdering(Monotonic);
+ RMWI->setOrdering(AtomicOrdering::Monotonic);
IsStore = IsLoad = true;
} else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
- (isAtLeastRelease(CASI->getSuccessOrdering()) ||
- isAtLeastAcquire(CASI->getSuccessOrdering()))) {
+ (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
+ isAcquireOrStronger(CASI->getSuccessOrdering()))) {
// If a compare and swap is lowered to LL/SC, we can do smarter fence
// insertion, with a stronger one on the success path than on the
// failure path. As a result, fence insertion is directly done by
// expandAtomicCmpXchg in that case.
FenceOrdering = CASI->getSuccessOrdering();
- CASI->setSuccessOrdering(Monotonic);
- CASI->setFailureOrdering(Monotonic);
+ CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
+ CASI->setFailureOrdering(AtomicOrdering::Monotonic);
IsStore = IsLoad = true;
}
- if (FenceOrdering != Monotonic) {
+ if (FenceOrdering != AtomicOrdering::Monotonic) {
MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
}
}
@@ -520,7 +520,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// should preserve the ordering.
bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
AtomicOrdering MemOpOrder =
- ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder;
+ ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;
// In implementations which use a barrier to achieve release semantics, we can
// delay emitting this barrier until we know a store is actually going to be
@@ -532,8 +532,9 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// minimal loop. Unfortunately, this puts too much stress on later
// optimisations so we avoid emitting the extra logic in those cases too.
bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
- SuccessOrder != Monotonic &&
- SuccessOrder != Acquire && !F->optForMinSize();
+ SuccessOrder != AtomicOrdering::Monotonic &&
+ SuccessOrder != AtomicOrdering::Acquire &&
+ !F->optForMinSize();
// There's no overhead for sinking the release barrier in a weak cmpxchg, so
// do it even on minsize.
@@ -767,8 +768,9 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
CreateCmpXchgInstFun CreateCmpXchg) {
assert(AI);
- AtomicOrdering MemOpOrder =
- AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
+ AtomicOrdering MemOpOrder = AI->getOrdering() == AtomicOrdering::Unordered
+ ? AtomicOrdering::Monotonic
+ : AI->getOrdering();
Value *Addr = AI->getPointerOperand();
BasicBlock *BB = AI->getParent();
Function *F = BB->getParent();
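
For targets that opt in via shouldInsertFencesForAtomic, the pass strips the instruction down to monotonic and lets explicit fences carry the ordering. Roughly what bracketInstWithFences produces for an acquire load — a sketch against the IRBuilder API of this era, not the pass's exact code (the real pass defers to the target's emitLeadingFence/emitTrailingFence hooks):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static LoadInst *emitFencedAcquireLoad(IRBuilder<> &Builder, Value *Ptr) {
  LoadInst *LI = Builder.CreateLoad(Ptr);
  LI->setAtomic(AtomicOrdering::Monotonic);     // ordering moved off the load
  Builder.CreateFence(AtomicOrdering::Acquire); // ...onto a trailing fence
  return LI;
}
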
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f6b4262ffd7..5ad42936169 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3903,7 +3903,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Ops[3];
Ops[0] = getRoot();
- Ops[1] = DAG.getConstant(I.getOrdering(), dl,
+ Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
TLI.getPointerTy(DAG.getDataLayout()));
Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
TLI.getPointerTy(DAG.getDataLayout()));
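
This cast is where `enum class` shows through most plainly: scoped enumerators no longer convert implicitly to integers, so packing the ordering into a constant node needs an explicit conversion. In isolation:

AtomicOrdering Ord = AtomicOrdering::SequentiallyConsistent;
// unsigned Raw = Ord;          // ill-formed: scoped enums don't convert
unsigned Raw = (unsigned)Ord;   // explicit cast, as in the hunk above
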
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index a347bc60654..0dd49a43022 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -2110,7 +2110,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
SynchronizationScope SynchScope) {
- if (Ordering == NotAtomic)
+ if (Ordering == AtomicOrdering::NotAtomic)
return;
switch (SynchScope) {
@@ -2118,46 +2118,22 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
case CrossThread: break;
}
- switch (Ordering) {
- default: Out << " <bad ordering " << int(Ordering) << ">"; break;
- case Unordered: Out << " unordered"; break;
- case Monotonic: Out << " monotonic"; break;
- case Acquire: Out << " acquire"; break;
- case Release: Out << " release"; break;
- case AcquireRelease: Out << " acq_rel"; break;
- case SequentiallyConsistent: Out << " seq_cst"; break;
- }
+ Out << " " << toIRString(Ordering);
}
void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
- assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
+ assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
+ FailureOrdering != AtomicOrdering::NotAtomic);
switch (SynchScope) {
case SingleThread: Out << " singlethread"; break;
case CrossThread: break;
}
- switch (SuccessOrdering) {
- default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
- case Unordered: Out << " unordered"; break;
- case Monotonic: Out << " monotonic"; break;
- case Acquire: Out << " acquire"; break;
- case Release: Out << " release"; break;
- case AcquireRelease: Out << " acq_rel"; break;
- case SequentiallyConsistent: Out << " seq_cst"; break;
- }
-
- switch (FailureOrdering) {
- default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
- case Unordered: Out << " unordered"; break;
- case Monotonic: Out << " monotonic"; break;
- case Acquire: Out << " acquire"; break;
- case Release: Out << " release"; break;
- case AcquireRelease: Out << " acq_rel"; break;
- case SequentiallyConsistent: Out << " seq_cst"; break;
- }
+ Out << " " << toIRString(SuccessOrdering);
+ Out << " " << toIRString(FailureOrdering);
}
void AssemblyWriter::writeParamOperand(const Value *Operand,
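
The three copy-pasted printing switches collapse into `toIRString`, also provided by AtomicOrdering.h. A sketch of its behavior (the spellings match the IR keywords; treat the exact implementation as an assumption):

static const char *toIRString(AtomicOrdering Ord) {
  static const char *const Names[8] = {
      "not_atomic", "unordered", "monotonic", "consume",
      "acquire",    "release",   "acq_rel",   "seq_cst"};
  return Names[(unsigned)Ord];
}
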
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index b2c2740218e..0bdd2d6483c 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -2602,14 +2602,15 @@ LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
switch (Ordering) {
- case LLVMAtomicOrderingNotAtomic: return NotAtomic;
- case LLVMAtomicOrderingUnordered: return Unordered;
- case LLVMAtomicOrderingMonotonic: return Monotonic;
- case LLVMAtomicOrderingAcquire: return Acquire;
- case LLVMAtomicOrderingRelease: return Release;
- case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
+ case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
+ case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered;
+ case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic;
+ case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire;
+ case LLVMAtomicOrderingRelease: return AtomicOrdering::Release;
+ case LLVMAtomicOrderingAcquireRelease:
+ return AtomicOrdering::AcquireRelease;
case LLVMAtomicOrderingSequentiallyConsistent:
- return SequentiallyConsistent;
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Invalid LLVMAtomicOrdering value!");
@@ -2617,13 +2618,14 @@ static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
- case NotAtomic: return LLVMAtomicOrderingNotAtomic;
- case Unordered: return LLVMAtomicOrderingUnordered;
- case Monotonic: return LLVMAtomicOrderingMonotonic;
- case Acquire: return LLVMAtomicOrderingAcquire;
- case Release: return LLVMAtomicOrderingRelease;
- case AcquireRelease: return LLVMAtomicOrderingAcquireRelease;
- case SequentiallyConsistent:
+ case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic;
+ case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered;
+ case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic;
+ case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire;
+ case AtomicOrdering::Release: return LLVMAtomicOrderingRelease;
+ case AtomicOrdering::AcquireRelease:
+ return LLVMAtomicOrderingAcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
return LLVMAtomicOrderingSequentiallyConsistent;
}
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 4b33d2e66ea..e2b5e2777fa 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -461,9 +461,9 @@ bool Instruction::isAtomic() const {
case Instruction::Fence:
return true;
case Instruction::Load:
- return cast<LoadInst>(this)->getOrdering() != NotAtomic;
+ return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
case Instruction::Store:
- return cast<StoreInst>(this)->getOrdering() != NotAtomic;
+ return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
}
}
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 9c2f765687d..fcc9cc4ab2e 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1209,13 +1209,13 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
- : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
- InsertBef) {}
+ : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+ CrossThread, InsertBef) {}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
- : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
-}
+ : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+ CrossThread, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
@@ -1247,7 +1247,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
- setAtomic(NotAtomic);
+ setAtomic(AtomicOrdering::NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -1257,7 +1257,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
- setAtomic(NotAtomic);
+ setAtomic(AtomicOrdering::NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -1268,7 +1268,7 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(0);
- setAtomic(NotAtomic);
+ setAtomic(AtomicOrdering::NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -1279,7 +1279,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
- setAtomic(NotAtomic);
+ setAtomic(AtomicOrdering::NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -1324,13 +1324,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
Instruction *InsertBefore)
- : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
- InsertBefore) {}
+ : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+ CrossThread, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
BasicBlock *InsertAtEnd)
- : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
- InsertAtEnd) {}
+ : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+ CrossThread, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
@@ -1398,13 +1398,15 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
assert(getOperand(2)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to NewVal type!");
- assert(SuccessOrdering != NotAtomic &&
+ assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
- assert(FailureOrdering != NotAtomic &&
+ assert(FailureOrdering != AtomicOrdering::NotAtomic &&
"AtomicCmpXchg instructions must be atomic!");
- assert(SuccessOrdering >= FailureOrdering &&
- "AtomicCmpXchg success ordering must be at least as strong as fail");
- assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
+ assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
+ "AtomicCmpXchg failure argument shall be no stronger than the success "
+ "argument");
+ assert(FailureOrdering != AtomicOrdering::Release &&
+ FailureOrdering != AtomicOrdering::AcquireRelease &&
"AtomicCmpXchg failure ordering cannot include release semantics");
}
@@ -1454,7 +1456,7 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
assert(getOperand(1)->getType() ==
cast<PointerType>(getOperand(0)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
- assert(Ordering != NotAtomic &&
+ assert(Ordering != AtomicOrdering::NotAtomic &&
"AtomicRMW instructions must be atomic!");
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 212e2bf38cd..24e92e14dfa 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2919,7 +2919,8 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Assert(LI.getAlignment() <= Value::MaximumAlignment,
"huge alignment values are unsupported", &LI);
if (LI.isAtomic()) {
- Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+ Assert(LI.getOrdering() != AtomicOrdering::Release &&
+ LI.getOrdering() != AtomicOrdering::AcquireRelease,
"Load cannot have Release ordering", &LI);
Assert(LI.getAlignment() != 0,
"Atomic load must specify explicit alignment", &LI);
@@ -2946,7 +2947,8 @@ void Verifier::visitStoreInst(StoreInst &SI) {
Assert(SI.getAlignment() <= Value::MaximumAlignment,
"huge alignment values are unsupported", &SI);
if (SI.isAtomic()) {
- Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
+ Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
+ SI.getOrdering() != AtomicOrdering::AcquireRelease,
"Store cannot have Acquire ordering", &SI);
Assert(SI.getAlignment() != 0,
"Atomic store must specify explicit alignment", &SI);
@@ -3022,19 +3024,20 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
// FIXME: more conditions???
- Assert(CXI.getSuccessOrdering() != NotAtomic,
+ Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
"cmpxchg instructions must be atomic.", &CXI);
- Assert(CXI.getFailureOrdering() != NotAtomic,
+ Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
"cmpxchg instructions must be atomic.", &CXI);
- Assert(CXI.getSuccessOrdering() != Unordered,
+ Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
"cmpxchg instructions cannot be unordered.", &CXI);
- Assert(CXI.getFailureOrdering() != Unordered,
+ Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
"cmpxchg instructions cannot be unordered.", &CXI);
- Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
- "cmpxchg instructions be at least as constrained on success as fail",
+ Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
+ "cmpxchg instructions failure argument shall be no stronger than the "
+ "success argument",
&CXI);
- Assert(CXI.getFailureOrdering() != Release &&
- CXI.getFailureOrdering() != AcquireRelease,
+ Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
+ CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
"cmpxchg failure ordering cannot include release semantics", &CXI);
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
@@ -3053,9 +3056,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
}
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
- Assert(RMWI.getOrdering() != NotAtomic,
+ Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
"atomicrmw instructions must be atomic.", &RMWI);
- Assert(RMWI.getOrdering() != Unordered,
+ Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
"atomicrmw instructions cannot be unordered.", &RMWI);
PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
@@ -3074,10 +3077,12 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
void Verifier::visitFenceInst(FenceInst &FI) {
const AtomicOrdering Ordering = FI.getOrdering();
- Assert(Ordering == Acquire || Ordering == Release ||
- Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
- "fence instructions may only have "
- "acquire, release, acq_rel, or seq_cst ordering.",
+ Assert(Ordering == AtomicOrdering::Acquire ||
+ Ordering == AtomicOrdering::Release ||
+ Ordering == AtomicOrdering::AcquireRelease ||
+ Ordering == AtomicOrdering::SequentiallyConsistent,
+ "fence instructions may only have acquire, release, acq_rel, or "
+ "seq_cst ordering.",
&FI);
visitInstruction(FI);
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 134f107698d..e4391996041 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -608,7 +608,7 @@ static bool isWorthFoldingADDlow(SDValue N) {
// ldar and stlr have much more restrictive addressing modes (just a
// register).
- if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
+ if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
return false;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2d3ddaa060f..2efe8473832 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10132,7 +10132,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
- bool IsAcquire = isAtLeastAcquire(Ord);
+ bool IsAcquire = isAcquireOrStronger(Ord);
// Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
// intrinsic must return {i64, i64} and we have to recombine them into a
@@ -10174,7 +10174,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
Value *Val, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsRelease = isAtLeastRelease(Ord);
+ bool IsRelease = isReleaseOrStronger(Ord);
// Since the intrinsics must have legal type, the i128 intrinsics take two
// parameters: "i64, i64". We must marshal Val into the appropriate form
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index 4923a1161df..a88e7e8491a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -29,7 +29,7 @@ def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastAcquire(Ordering);
+ return isAcquireOrStronger(Ordering);
}]>;
// An atomic load operation that does not need either acquire or release
@@ -37,7 +37,7 @@ class acquiring_load<PatFrag base>
class relaxed_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return !isAtLeastAcquire(Ordering);
+ return !isAcquireOrStronger(Ordering);
}]>;
// 8-bit loads
@@ -112,15 +112,16 @@ def : Pat<(relaxed_load<atomic_load_64>
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- assert(Ordering != AcquireRelease && "unexpected store ordering");
- return isAtLeastRelease(Ordering);
+ assert(Ordering != AtomicOrdering::AcquireRelease &&
+ "unexpected store ordering");
+ return isReleaseOrStronger(Ordering);
}]>;
// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return !isAtLeastRelease(Ordering);
+ return !isReleaseOrStronger(Ordering);
}]>;
// 8-bit stores
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 00446b61ed7..4c006d862fa 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3011,7 +3011,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
if (Subtarget->isMClass()) {
// Only a full system barrier exists in the M-class architectures.
Domain = ARM_MB::SY;
- } else if (Subtarget->isSwift() && Ord == Release) {
+ } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
// Swift happens to implement ISHST barriers in a way that's compatible with
// Release semantics but weaker than ISH so we'd be fools not to use
// it. Beware: other processors probably don't!
@@ -6932,13 +6932,13 @@ void ARMTargetLowering::ExpandDIV_Windows(
}
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
- // Monotonic load/store is legal for all targets
- if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
- return Op;
+ if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+ // Acquire/Release load/store is not legal for targets without a dmb or
+ // equivalent available.
+ return SDValue();
- // Acquire/Release load/store is not legal for targets without a
- // dmb or equivalent available.
- return SDValue();
+ // Monotonic load/store is legal for all targets.
+ return Op;
}
static void ReplaceREADCYCLECOUNTER(SDNode *N,
@@ -12076,18 +12076,18 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
switch (Ord) {
- case NotAtomic:
- case Unordered:
+ case AtomicOrdering::NotAtomic:
+ case AtomicOrdering::Unordered:
llvm_unreachable("Invalid fence: unordered/non-atomic");
- case Monotonic:
- case Acquire:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
return nullptr; // Nothing to do
- case SequentiallyConsistent:
+ case AtomicOrdering::SequentiallyConsistent:
if (!IsStore)
return nullptr; // Nothing to do
/*FALLTHROUGH*/
- case Release:
- case AcquireRelease:
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
if (Subtarget->isSwift())
return makeDMB(Builder, ARM_MB::ISHST);
// FIXME: add a comment with a link to documentation justifying this.
@@ -12101,15 +12101,15 @@ Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
switch (Ord) {
- case NotAtomic:
- case Unordered:
+ case AtomicOrdering::NotAtomic:
+ case AtomicOrdering::Unordered:
llvm_unreachable("Invalid fence: unordered/not-atomic");
- case Monotonic:
- case Release:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
return nullptr; // Nothing to do
- case Acquire:
- case AcquireRelease:
- case SequentiallyConsistent:
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ case AtomicOrdering::SequentiallyConsistent:
return makeDMB(Builder, ARM_MB::ISH);
}
llvm_unreachable("Unknown fence ordering in emitTrailingFence");
@@ -12204,7 +12204,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
- bool IsAcquire = isAtLeastAcquire(Ord);
+ bool IsAcquire = isAcquireOrStronger(Ord);
// Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
// intrinsic must return {i32, i32} and we have to recombine them into a
@@ -12248,7 +12248,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsRelease = isAtLeastRelease(Ord);
+ bool IsRelease = isReleaseOrStronger(Ord);
// Since the intrinsics must have legal type, the i64 intrinsics take two
// parameters: "i32, i32". We must marshal Val into the appropriate form
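
Read together, the two ARM switches implement the usual barrier placement: nothing for monotonic, a trailing `dmb ish` for acquire, a leading `dmb` (store-only `ishst` on Swift) for release, and both for acq_rel and seq_cst, with seq_cst loads needing only the trailing barrier. A condensed sketch of the leading-fence half — `FenceKind` is hypothetical, and the non-Swift arm is assumed from context since the hunk truncates it:

enum class FenceKind { None, DMB_ISH, DMB_ISHST };

static FenceKind leadingFence(AtomicOrdering Ord, bool IsStore, bool IsSwift) {
  switch (Ord) {
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return FenceKind::None;                   // acquire fences go after
  case AtomicOrdering::SequentiallyConsistent:
    if (!IsStore)
      return FenceKind::None;                 // seq_cst load: trailing only
    /*FALLTHROUGH*/
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    return IsSwift ? FenceKind::DMB_ISHST : FenceKind::DMB_ISH;
  default:                                    // NotAtomic/Unordered
    return FenceKind::None;
  }
}
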
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index ca3b9d95f45..d116e9d08a3 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -4761,7 +4761,7 @@ def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastAcquire(Ordering);
+ return isAcquireOrStronger(Ordering);
}]>;
def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
@@ -4771,7 +4771,7 @@ def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastRelease(Ordering);
+ return isReleaseOrStronger(Ordering);
}]>;
def atomic_store_release_8 : releasing_store<atomic_store_8>;
diff --git a/llvm/lib/Target/CppBackend/CPPBackend.cpp b/llvm/lib/Target/CppBackend/CPPBackend.cpp
index cfa1f2ce619..a086e710d16 100644
--- a/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -1091,13 +1091,14 @@ std::string CppWriter::getOpName(const Value* V) {
static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
- case NotAtomic: return "NotAtomic";
- case Unordered: return "Unordered";
- case Monotonic: return "Monotonic";
- case Acquire: return "Acquire";
- case Release: return "Release";
- case AcquireRelease: return "AcquireRelease";
- case SequentiallyConsistent: return "SequentiallyConsistent";
+ case AtomicOrdering::NotAtomic: return "NotAtomic";
+ case AtomicOrdering::Unordered: return "Unordered";
+ case AtomicOrdering::Monotonic: return "Monotonic";
+ case AtomicOrdering::Acquire: return "Acquire";
+ case AtomicOrdering::Release: return "Release";
+ case AtomicOrdering::AcquireRelease: return "AcquireRelease";
+ case AtomicOrdering::SequentiallyConsistent:
+ return "SequentiallyConsistent";
}
llvm_unreachable("Unknown ordering");
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 70d00c2682a..0aedb419201 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8323,9 +8323,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (Ord == SequentiallyConsistent)
+ if (Ord == AtomicOrdering::SequentiallyConsistent)
return callIntrinsic(Builder, Intrinsic::ppc_sync);
- if (isAtLeastRelease(Ord))
+ if (isReleaseOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
return nullptr;
}
@@ -8333,7 +8333,7 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (IsLoad && isAtLeastAcquire(Ord))
+ if (IsLoad && isAcquireOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
// FIXME: this is too conservative, a dependent branch + isync is enough.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index ebb77924324..b0fa6a19a9a 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2929,12 +2929,12 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
- // Monotonic load/stores are legal.
- if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
- return Op;
-
- // Otherwise, expand with a fence.
+ if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+ // Expand with a fence.
return SDValue();
+
+ // Monotonic load/stores are legal.
+ return Op;
}
SDValue SparcTargetLowering::
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 9848c2a571a..31011063247 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3130,9 +3130,11 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
- if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceScope == CrossThread) {
return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
- Op.getOperand(0)), 0);
+ Op.getOperand(0)),
+ 0);
}
// MEMBARRIER is a compiler barrier; it codegens to a no-op.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index eb3f0350846..8ff00194b25 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -20464,7 +20464,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. A mfence flushes the store buffer,
// making the optimization clearly correct.
- // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+ // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
@@ -20503,7 +20503,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
- if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceScope == CrossThread) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
@@ -20986,7 +20987,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
- if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+ if (cast<AtomicSDNode>(Node)->getOrdering() ==
+ AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 90da23b8cdb..6dbaa84a680 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -970,8 +970,9 @@ SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
- assert(N->getOrdering() <= Monotonic &&
- "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ assert((N->getOrdering() == AtomicOrdering::Unordered ||
+ N->getOrdering() == AtomicOrdering::Monotonic) &&
+ "setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic load must be aligned");
@@ -1000,8 +1001,9 @@ SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
- assert(N->getOrdering() <= Monotonic &&
- "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ assert((N->getOrdering() == AtomicOrdering::Unordered ||
+ N->getOrdering() == AtomicOrdering::Monotonic) &&
+ "setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic store must be aligned");
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f6458f50696..e793d1b7d7b 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1503,7 +1503,7 @@ static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
- if (Ordering != NotAtomic)
+ if (Ordering != AtomicOrdering::NotAtomic)
return false;
// If this is an allocation of a fixed size array of structs, analyze as a
@@ -1982,7 +1982,7 @@ bool GlobalOpt::processInternalGlobal(GlobalVariable *GV,
// Otherwise, if the global was not a boolean, we can shrink it to be a
// boolean.
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
- if (GS.Ordering == NotAtomic) {
+ if (GS.Ordering == AtomicOrdering::NotAtomic) {
if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
++NumShrunkToBool;
return true;
@@ -2581,4 +2581,3 @@ bool GlobalOpt::runOnModule(Module &M) {
return Changed;
}
-
diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 7024c68c377..719603a38d8 100644
--- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -401,6 +401,7 @@ private:
int cmpTypes(Type *TyL, Type *TyR) const;
int cmpNumbers(uint64_t L, uint64_t R) const;
+ int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
int cmpAPInts(const APInt &L, const APInt &R) const;
int cmpAPFloats(const APFloat &L, const APFloat &R) const;
int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
@@ -477,6 +478,12 @@ int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
return 0;
}
+int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
+ if ((int)L < (int)R) return -1;
+ if ((int)L > (int)R) return 1;
+ return 0;
+}
+
int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
return Res;
@@ -939,7 +946,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
return Res;
if (int Res =
- cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
+ cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
if (int Res =
cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
@@ -955,7 +962,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
return Res;
if (int Res =
- cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
+ cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
return Res;
return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
}
@@ -1009,7 +1016,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
}
if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
if (int Res =
- cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
+ cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
return Res;
return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
}
@@ -1021,11 +1028,13 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(CXI->isWeak(),
cast<AtomicCmpXchgInst>(R)->isWeak()))
return Res;
- if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
- cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
+ if (int Res =
+ cmpOrderings(CXI->getSuccessOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
return Res;
- if (int Res = cmpNumbers(CXI->getFailureOrdering(),
- cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
+ if (int Res =
+ cmpOrderings(CXI->getFailureOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
return Res;
return cmpNumbers(CXI->getSynchScope(),
cast<AtomicCmpXchgInst>(R)->getSynchScope());
@@ -1037,7 +1046,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(RMWI->isVolatile(),
cast<AtomicRMWInst>(R)->isVolatile()))
return Res;
- if (int Res = cmpNumbers(RMWI->getOrdering(),
+ if (int Res = cmpOrderings(RMWI->getOrdering(),
cast<AtomicRMWInst>(R)->getOrdering()))
return Res;
return cmpNumbers(RMWI->getSynchScope(),
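
The new cmpOrderings exists purely because cmpNumbers takes uint64_t and a scoped enum no longer converts implicitly; the comparison itself is unchanged. Reduced to essentials:

int cmpNumbers(uint64_t L, uint64_t R);   // existing comparator: won't accept
                                          // an enum class without a cast
int cmpOrderings(AtomicOrdering L, AtomicOrdering R) {
  return (int)L < (int)R ? -1 : (int)L > (int)R ? 1 : 0;
}
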
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 1931e7d8bc1..ed3715a3c4f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1222,34 +1222,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Release:
- return Release;
- case Acquire:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
+ return AtomicOrdering::Release;
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Acquire:
- return Acquire;
- case Release:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
+ return AtomicOrdering::Acquire;
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
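
What the two helpers guarantee: the instrumented atomic keeps at least its original ordering and gains whichever release/acquire half the shadow access needs. A couple of concrete data points (hypothetical sanity checks):

assert(addReleaseOrdering(AtomicOrdering::Monotonic) ==
       AtomicOrdering::Release);
assert(addReleaseOrdering(AtomicOrdering::Acquire) ==
       AtomicOrdering::AcquireRelease);
assert(addAcquireOrdering(AtomicOrdering::Monotonic) ==
       AtomicOrdering::Acquire);
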
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 1fac003feeb..f7bf44c662b 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -551,7 +551,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
IRB.CreateCall(SanCovWithCheckFunction, GuardP);
} else {
LoadInst *Load = IRB.CreateLoad(GuardP);
- Load->setAtomic(Monotonic);
+ Load->setAtomic(AtomicOrdering::Monotonic);
Load->setAlignment(4);
SetNoSanitizeMetadata(Load);
Value *Cmp =
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 38d87cb1c58..dad42a81068 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -480,14 +480,16 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
uint32_t v = 0;
switch (ord) {
- case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
- case Unordered: // Fall-through.
- case Monotonic: v = 0; break;
- // case Consume: v = 1; break; // Not specified yet.
- case Acquire: v = 2; break;
- case Release: v = 3; break;
- case AcquireRelease: v = 4; break;
- case SequentiallyConsistent: v = 5; break;
+ case AtomicOrdering::NotAtomic:
+ llvm_unreachable("unexpected atomic ordering!");
+ case AtomicOrdering::Unordered: // Fall-through.
+ case AtomicOrdering::Monotonic: v = 0; break;
+ // Not specified yet:
+ // case AtomicOrdering::Consume: v = 1; break;
+ case AtomicOrdering::Acquire: v = 2; break;
+ case AtomicOrdering::Release: v = 3; break;
+ case AtomicOrdering::AcquireRelease: v = 4; break;
+ case AtomicOrdering::SequentiallyConsistent: v = 5; break;
}
return IRB->getInt32(v);
}
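
The `v` values are not arbitrary: they follow the C++11 std::memory_order numbering that the tsan runtime's __tsan_atomic* entry points expect, which is also why Unordered is folded into relaxed:

enum memory_order { // C++11 numbering assumed by the tsan runtime ABI
  memory_order_relaxed = 0, memory_order_consume = 1,
  memory_order_acquire = 2, memory_order_release = 3,
  memory_order_acq_rel = 4, memory_order_seq_cst = 5
};
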
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index e5bbad9f73f..3707cecd014 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -673,7 +673,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// to advance the generation. We do need to prevent DSE across the fence,
// but that's handled above.
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
- if (FI->getOrdering() == Release) {
+ if (FI->getOrdering() == AtomicOrdering::Release) {
assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
continue;
}
diff --git a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 41511bcb7b0..47d374248dc 100644
--- a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -100,12 +100,12 @@ static bool LowerFenceInst(FenceInst *FI) {
}
static bool LowerLoadInst(LoadInst *LI) {
- LI->setAtomic(NotAtomic);
+ LI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}
static bool LowerStoreInst(StoreInst *SI) {
- SI->setAtomic(NotAtomic);
+ SI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}
diff --git a/llvm/lib/Transforms/Utils/GlobalStatus.cpp b/llvm/lib/Transforms/Utils/GlobalStatus.cpp
index 3893a752503..8be42aed972 100644
--- a/llvm/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/llvm/lib/Transforms/Utils/GlobalStatus.cpp
@@ -20,11 +20,11 @@ using namespace llvm;
/// and release, then return AcquireRelease.
///
static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
- if (X == Acquire && Y == Release)
- return AcquireRelease;
- if (Y == Acquire && X == Release)
- return AcquireRelease;
- return (AtomicOrdering)std::max(X, Y);
+ if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
+ return AtomicOrdering::AcquireRelease;
+ if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
+ return AtomicOrdering::AcquireRelease;
+ return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
}
/// It is safe to destroy a constant iff it is only used by constants itself.
@@ -185,4 +185,4 @@ GlobalStatus::GlobalStatus()
: IsCompared(false), IsLoaded(false), StoredType(NotStored),
StoredOnceValue(nullptr), AccessingFunction(nullptr),
HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
- Ordering(NotAtomic) {}
+ Ordering(AtomicOrdering::NotAtomic) {}
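
The strongerOrdering hunk above can still lean on std::max because, once the single incomparable pair (Acquire vs Release) is special-cased to AcquireRelease, the remaining orderings are totally ordered and the scoped enum's numeric values respect that order — hence the explicit (unsigned) casts rather than a raw max over the enum class. A hypothetical sanity check:

#include <cassert>

static void strongerOrderingSanity() {
  assert(strongerOrdering(AtomicOrdering::Acquire, AtomicOrdering::Release) ==
         AtomicOrdering::AcquireRelease);
  assert(strongerOrdering(AtomicOrdering::Release,
                          AtomicOrdering::SequentiallyConsistent) ==
         AtomicOrdering::SequentiallyConsistent);
}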