Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp                       5
-rw-r--r--  llvm/lib/Transforms/IPO/MergeFunctions.cpp                 25
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp    44
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp   2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp    18
-rw-r--r--  llvm/lib/Transforms/Scalar/EarlyCSE.cpp                     2
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerAtomic.cpp                  4
-rw-r--r--  llvm/lib/Transforms/Utils/GlobalStatus.cpp                 12
8 files changed, 61 insertions(+), 51 deletions(-)
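Every hunk below makes the same mechanical change: AtomicOrdering becomes a scoped enumeration, so its enumerators have to be written with the AtomicOrdering:: qualifier and no longer convert implicitly to integers. A minimal sketch of the shape the patch codes against (enumerator names are taken from the hunks below; the omission of explicit values and the Consume placeholder are assumptions):

  // Sketch only; the real definition lives in LLVM's IR headers.
  enum class AtomicOrdering {
    NotAtomic,
    Unordered,
    Monotonic,
    // a slot is reserved here for Consume, which LLVM does not model yet
    Acquire,
    Release,
    AcquireRelease,
    SequentiallyConsistent
  };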
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f6458f50696..e793d1b7d7b 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1503,7 +1503,7 @@ static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
- if (Ordering != NotAtomic)
+ if (Ordering != AtomicOrdering::NotAtomic)
return false;
// If this is an allocation of a fixed size array of structs, analyze as a
@@ -1982,7 +1982,7 @@ bool GlobalOpt::processInternalGlobal(GlobalVariable *GV,
// Otherwise, if the global was not a boolean, we can shrink it to be a
// boolean.
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
- if (GS.Ordering == NotAtomic) {
+ if (GS.Ordering == AtomicOrdering::NotAtomic) {
if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
++NumShrunkToBool;
return true;
@@ -2581,4 +2581,3 @@ bool GlobalOpt::runOnModule(Module &M) {
return Changed;
}
-
diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 7024c68c377..719603a38d8 100644
--- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -401,6 +401,7 @@ private:
int cmpTypes(Type *TyL, Type *TyR) const;
int cmpNumbers(uint64_t L, uint64_t R) const;
+ int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
int cmpAPInts(const APInt &L, const APInt &R) const;
int cmpAPFloats(const APFloat &L, const APFloat &R) const;
int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
@@ -477,6 +478,12 @@ int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
return 0;
}
+int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
+ if ((int)L < (int)R) return -1;
+ if ((int)L > (int)R) return 1;
+ return 0;
+}
+
int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
return Res;
@@ -939,7 +946,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
return Res;
if (int Res =
- cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
+ cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
if (int Res =
cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
@@ -955,7 +962,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
return Res;
if (int Res =
- cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
+ cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
return Res;
return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
}
@@ -1009,7 +1016,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
}
if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
if (int Res =
- cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
+ cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
return Res;
return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
}
@@ -1021,11 +1028,13 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(CXI->isWeak(),
cast<AtomicCmpXchgInst>(R)->isWeak()))
return Res;
- if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
- cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
+ if (int Res =
+ cmpOrderings(CXI->getSuccessOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
return Res;
- if (int Res = cmpNumbers(CXI->getFailureOrdering(),
- cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
+ if (int Res =
+ cmpOrderings(CXI->getFailureOrdering(),
+ cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
return Res;
return cmpNumbers(CXI->getSynchScope(),
cast<AtomicCmpXchgInst>(R)->getSynchScope());
@@ -1037,7 +1046,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(RMWI->isVolatile(),
cast<AtomicRMWInst>(R)->isVolatile()))
return Res;
- if (int Res = cmpNumbers(RMWI->getOrdering(),
+ if (int Res = cmpOrderings(RMWI->getOrdering(),
cast<AtomicRMWInst>(R)->getOrdering()))
return Res;
return cmpNumbers(RMWI->getSynchScope(),
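The new cmpOrderings helper exists because a scoped enumeration does not convert implicitly to uint64_t, so the previous cmpNumbers(LI->getOrdering(), ...) calls would no longer compile. A standalone illustration with toy enums (not LLVM code):

  #include <cstdint>

  enum OldOrdering { OldMonotonic, OldAcquire };    // unscoped: promotes to an integer
  enum class NewOrdering { Monotonic, Acquire };    // scoped: no implicit conversion

  int cmpNumbers(uint64_t L, uint64_t R) { return L < R ? -1 : L > R ? 1 : 0; }

  int demo() {
    int a = cmpNumbers(OldMonotonic, OldAcquire);   // fine: implicit promotion
    // cmpNumbers(NewOrdering::Monotonic, NewOrdering::Acquire);  // error: no conversion
    return a;
  }

Hence cmpOrderings performs the comparison itself after an explicit (int) cast, exactly as the hunk above defines it.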
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 1931e7d8bc1..ed3715a3c4f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1222,34 +1222,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Release:
- return Release;
- case Acquire:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
+ return AtomicOrdering::Release;
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Acquire:
- return Acquire;
- case Release:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
+ return AtomicOrdering::Acquire;
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
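These two helpers strengthen the ordering of an atomic access before the pass adds its shadow operations: a load is raised to at least Acquire and a store to at least Release, so the extra shadow access stays ordered together with the application access. A sketch of the assumed call sites (member code of the visitor above; the real call sites and the surrounding instrumentation are elided and may differ):

  // Sketch only, inside MemorySanitizerVisitor; I and S are the atomic
  // accesses being instrumented.
  void strengthenAtomicOrderings(LoadInst &I, StoreInst &S) {
    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));  // e.g. Monotonic -> Acquire
    if (S.isAtomic())
      S.setOrdering(addReleaseOrdering(S.getOrdering()));  // e.g. Monotonic -> Release
  }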
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 1fac003feeb..f7bf44c662b 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -551,7 +551,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
IRB.CreateCall(SanCovWithCheckFunction, GuardP);
} else {
LoadInst *Load = IRB.CreateLoad(GuardP);
- Load->setAtomic(Monotonic);
+ Load->setAtomic(AtomicOrdering::Monotonic);
Load->setAlignment(4);
SetNoSanitizeMetadata(Load);
Value *Cmp =
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 38d87cb1c58..dad42a81068 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -480,14 +480,16 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
uint32_t v = 0;
switch (ord) {
- case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
- case Unordered: // Fall-through.
- case Monotonic: v = 0; break;
- // case Consume: v = 1; break; // Not specified yet.
- case Acquire: v = 2; break;
- case Release: v = 3; break;
- case AcquireRelease: v = 4; break;
- case SequentiallyConsistent: v = 5; break;
+ case AtomicOrdering::NotAtomic:
+ llvm_unreachable("unexpected atomic ordering!");
+ case AtomicOrdering::Unordered: // Fall-through.
+ case AtomicOrdering::Monotonic: v = 0; break;
+ // Not specified yet:
+ // case AtomicOrdering::Consume: v = 1; break;
+ case AtomicOrdering::Acquire: v = 2; break;
+ case AtomicOrdering::Release: v = 3; break;
+ case AtomicOrdering::AcquireRelease: v = 4; break;
+ case AtomicOrdering::SequentiallyConsistent: v = 5; break;
}
return IRB->getInt32(v);
}
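createOrdering translates the IR ordering into the integer encoding the TSan runtime expects, and that encoding follows the C++11 memory_order enumerators (relaxed = 0 through seq_cst = 5), with the Consume slot skipped because LLVM does not model it. A small standalone check of the numbering this relies on (the correspondence with the runtime's encoding is the assumption being stated):

  #include <atomic>

  static_assert(static_cast<int>(std::memory_order_relaxed) == 0, "Unordered/Monotonic -> 0");
  static_assert(static_cast<int>(std::memory_order_acquire) == 2, "Acquire -> 2");
  static_assert(static_cast<int>(std::memory_order_release) == 3, "Release -> 3");
  static_assert(static_cast<int>(std::memory_order_acq_rel) == 4, "AcquireRelease -> 4");
  static_assert(static_cast<int>(std::memory_order_seq_cst) == 5, "SequentiallyConsistent -> 5");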
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index e5bbad9f73f..3707cecd014 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -673,7 +673,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// to advance the generation. We do need to prevent DSE across the fence,
// but that's handled above.
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
- if (FI->getOrdering() == Release) {
+ if (FI->getOrdering() == AtomicOrdering::Release) {
assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
continue;
}
diff --git a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 41511bcb7b0..47d374248dc 100644
--- a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -100,12 +100,12 @@ static bool LowerFenceInst(FenceInst *FI) {
}
static bool LowerLoadInst(LoadInst *LI) {
- LI->setAtomic(NotAtomic);
+ LI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}
static bool LowerStoreInst(StoreInst *SI) {
- SI->setAtomic(NotAtomic);
+ SI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}
diff --git a/llvm/lib/Transforms/Utils/GlobalStatus.cpp b/llvm/lib/Transforms/Utils/GlobalStatus.cpp
index 3893a752503..8be42aed972 100644
--- a/llvm/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/llvm/lib/Transforms/Utils/GlobalStatus.cpp
@@ -20,11 +20,11 @@ using namespace llvm;
/// and release, then return AcquireRelease.
///
static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
- if (X == Acquire && Y == Release)
- return AcquireRelease;
- if (Y == Acquire && X == Release)
- return AcquireRelease;
- return (AtomicOrdering)std::max(X, Y);
+ if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
+ return AtomicOrdering::AcquireRelease;
+ if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
+ return AtomicOrdering::AcquireRelease;
+ return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
}
/// It is safe to destroy a constant iff it is only used by constants itself.
@@ -185,4 +185,4 @@ GlobalStatus::GlobalStatus()
: IsCompared(false), IsLoaded(false), StoredType(NotStored),
StoredOnceValue(nullptr), AccessingFunction(nullptr),
HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
- Ordering(NotAtomic) {}
+ Ordering(AtomicOrdering::NotAtomic) {}
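strongerOrdering folds together the orderings seen across all accesses to a global: Acquire combined with Release widens to AcquireRelease, and otherwise the numerically larger (stronger) value wins; the casts through unsigned make that numeric comparison explicit now that the enumerators are scoped. A toy, self-contained restatement with worked cases (the enumeration here is a stand-in whose values only preserve the relative order assumed above):

  #include <algorithm>
  #include <cassert>

  enum class Ord { NotAtomic, Unordered, Monotonic, Acquire,
                   Release, AcquireRelease, SequentiallyConsistent };

  static Ord stronger(Ord X, Ord Y) {
    if ((X == Ord::Acquire && Y == Ord::Release) ||
        (Y == Ord::Acquire && X == Ord::Release))
      return Ord::AcquireRelease;
    return (Ord)std::max((unsigned)X, (unsigned)Y);
  }

  int main() {
    assert(stronger(Ord::Acquire, Ord::Release) == Ord::AcquireRelease);
    assert(stronger(Ord::Monotonic, Ord::SequentiallyConsistent) == Ord::SequentiallyConsistent);
    assert(stronger(Ord::NotAtomic, Ord::Unordered) == Ord::Unordered);
  }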