author    Alina Sbirlea <asbirlea@google.com>    2017-12-07 22:41:34 +0000
committer Alina Sbirlea <asbirlea@google.com>    2017-12-07 22:41:34 +0000
commit    193429f0c8fb7ebc19b4293b937ad39db910b277 (patch)
tree      4173fe53eff08bdc99332ef18b19db9e6074a2ea /llvm/lib/Analysis
parent    a469acac0306069b26377240fb724f6eaa47953c (diff)
[ModRefInfo] Make enum ModRefInfo an enum class [NFC].
Summary:
Make enum ModRefInfo an enum class. Changes to ModRefInfo values should
be done using inline wrappers. This should prevent future bit-wise
operations from being added, which can be more error-prone.

Reviewers: sanjoy, dberlin, hfinkel, george.burgess.iv

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D40933

llvm-svn: 320107
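For reference, the scoped enum introduced by this change has roughly the shape sketched below. This is an illustrative reconstruction rather than a verbatim excerpt of the header; the enumerator names match the diff, but the exact numeric values and comments are assumptions:

    enum class ModRefInfo {
      NoModRef = 0,        // Neither references nor modifies memory.
      Ref = 1,             // May reference (read) memory.
      Mod = 2,             // May modify (write) memory.
      ModRef = Ref | Mod,  // May both read and write memory.
    };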
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp            | 96
-rw-r--r--  llvm/lib/Analysis/AliasAnalysisEvaluator.cpp   | 16
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp       | 38
-rw-r--r--  llvm/lib/Analysis/GlobalsModRef.cpp            | 38
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 32
-rw-r--r--  llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp     |  2
-rw-r--r--  llvm/lib/Analysis/ScopedNoAliasAA.cpp          |  8
-rw-r--r--  llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp   |  4
8 files changed, 119 insertions(+), 115 deletions(-)
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index d9a40b17a1f..dd2db1e5b27 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -119,7 +119,7 @@ bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
}
ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
@@ -138,8 +138,8 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// Check if the two calls modify the same memory
return getModRefInfo(CS, Call);
} else if (I->isFenceLike()) {
- // If this is a fence, just return MRI_ModRef.
- return MRI_ModRef;
+ // If this is a fence, just return ModRef.
+ return ModRefInfo::ModRef;
} else {
// Otherwise, check if the call modifies or references the
// location this memory access defines. The best we can say
@@ -150,12 +150,12 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
if (isModOrRefSet(MR))
return setModAndRef(MR);
}
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
@@ -170,7 +170,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
auto MRB = getModRefBehavior(CS);
if (MRB == FMRB_DoesNotAccessMemory ||
MRB == FMRB_OnlyAccessesInaccessibleMem)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (onlyReadsMemory(MRB))
Result = clearMod(Result);
@@ -179,7 +179,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
bool DoesAlias = false;
- ModRefInfo AllArgsMask = MRI_NoModRef;
+ ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
if (doesAccessArgPointees(MRB)) {
for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
const Value *Arg = *AI;
@@ -195,9 +195,9 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
}
}
}
- // Return MRI_NoModRef if no alias found with any argument.
+ // Return NoModRef if no alias found with any argument.
if (!DoesAlias)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Logical & between other AA analyses and argument analysis.
Result = intersectModRef(Result, AllArgsMask);
}
@@ -212,7 +212,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) {
- ModRefInfo Result = MRI_ModRef;
+ ModRefInfo Result = ModRefInfo::ModRef;
for (const auto &AA : AAs) {
Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
@@ -228,15 +228,15 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// If CS1 or CS2 are readnone, they don't interact.
auto CS1B = getModRefBehavior(CS1);
if (CS1B == FMRB_DoesNotAccessMemory)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
auto CS2B = getModRefBehavior(CS2);
if (CS2B == FMRB_DoesNotAccessMemory)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If they both only read from memory, there is no dependence.
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
@@ -249,7 +249,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// information from CS1's references to the memory referenced by
// CS2's arguments.
if (onlyAccessesArgPointees(CS2B)) {
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS2B)) {
for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
@@ -263,11 +263,11 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// - If CS2 modifies location, dependence exists if CS1 reads or writes.
// - If CS2 only reads location, dependence exists if CS1 writes.
ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
- ModRefInfo ArgMask = MRI_NoModRef;
+ ModRefInfo ArgMask = ModRefInfo::NoModRef;
if (isModSet(ArgModRefCS2))
- ArgMask = MRI_ModRef;
+ ArgMask = ModRefInfo::ModRef;
else if (isRefSet(ArgModRefCS2))
- ArgMask = MRI_Mod;
+ ArgMask = ModRefInfo::Mod;
// ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
// above ArgMask to update dependence info.
@@ -285,7 +285,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
if (onlyAccessesArgPointees(CS1B)) {
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
if (doesAccessArgPointees(CS1B)) {
for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
@@ -349,45 +349,45 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Otherwise, a load just reads.
- return MRI_Ref;
+ return ModRefInfo::Ref;
}
ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc) {
// Be conservative in the face of atomic.
if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (Loc.Ptr) {
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (!alias(MemoryLocation::get(S), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a store just writes.
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
// If we know that the location is a constant memory location, the fence
// cannot modify this location.
if (Loc.Ptr && pointsToConstantMemory(Loc))
- return MRI_Ref;
- return MRI_ModRef;
+ return ModRefInfo::Ref;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
@@ -396,16 +396,16 @@ ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (!alias(MemoryLocation::get(V), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a va_arg reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
@@ -414,11 +414,11 @@ ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a catchpad reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
@@ -427,37 +427,37 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
// If the pointer is a pointer to constant memory,
// then it could not have been modified by this catchpad.
if (pointsToConstantMemory(Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// Otherwise, a catchret reads and writes.
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (isStrongerThanMonotonic(RMW->getOrdering()))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
/// \brief Return information about whether a particular call site modifies
@@ -473,26 +473,26 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
DominatorTree *DT,
OrderedBasicBlock *OBB) {
if (!DT)
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
const Value *Object =
GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
/* include Object */ true,
/* OrderedBasicBlock */ OBB))
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
unsigned ArgNo = 0;
- ModRefInfo R = MRI_NoModRef;
+ ModRefInfo R = ModRefInfo::NoModRef;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
@@ -512,10 +512,10 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
- R = MRI_Ref;
+ R = ModRefInfo::Ref;
continue;
}
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
return R;
}
@@ -525,7 +525,7 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
const MemoryLocation &Loc) {
- return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
+ return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the
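The hunks above replace raw uses of the old MRI_* constants with the new scoped enumerators and route all bit-level manipulation through inline wrappers (intersectModRef, isModOrRefSet, setModAndRef, clearMod, isModSet, isRefSet, and similar). A minimal sketch of how such wrappers can be written is shown below; it is an assumption about their shape for illustration, not the authoritative definitions that accompany the enum in AliasAnalysis.h:

    // Illustrative-only sketches of the inline wrapper helpers.
    inline bool isModOrRefSet(ModRefInfo MRI) {
      return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef);
    }
    inline bool isModSet(ModRefInfo MRI) {
      return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
    }
    inline bool isRefSet(ModRefInfo MRI) {
      return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref);
    }
    inline ModRefInfo clearMod(ModRefInfo MRI) {
      // Drop the Mod bit, keeping only Ref (if set).
      return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
    }
    inline ModRefInfo intersectModRef(ModRefInfo MRI1, ModRefInfo MRI2) {
      // Keep only the effects both inputs agree on.
      return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
    }

Keeping the casts inside such helpers is what allows the call sites in the diff to stay free of explicit bitwise operations on ModRefInfo.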
diff --git a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 435c782d97a..423acf739f5 100644
--- a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -244,20 +244,20 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(C, Pointer, Size)) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer,
F.getParent());
++NoModRefCount;
break;
- case MRI_Mod:
+ case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent());
++ModCount;
break;
- case MRI_Ref:
+ case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent());
++RefCount;
break;
- case MRI_ModRef:
+ case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, Pointer,
F.getParent());
++ModRefCount;
@@ -272,19 +272,19 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
if (D == C)
continue;
switch (AA.getModRefInfo(*C, *D)) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
++NoModRefCount;
break;
- case MRI_Mod:
+ case ModRefInfo::Mod:
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
++ModCount;
break;
- case MRI_Ref:
+ case ModRefInfo::Ref:
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
++RefCount;
break;
- case MRI_ModRef:
+ case ModRefInfo::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
++ModRefCount;
break;
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 5522803b174..81b9f842249 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -687,13 +687,13 @@ ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
unsigned ArgIdx) {
// Checking for known builtin intrinsics and target library functions.
if (isWriteOnlyParam(CS, ArgIdx, TLI))
- return MRI_Mod;
+ return ModRefInfo::Mod;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
- return MRI_Ref;
+ return ModRefInfo::Ref;
if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}
@@ -770,7 +770,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
if (isa<AllocaInst>(Object))
if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// If the pointer is to a locally allocated object that does not escape,
// then the call can not mod/ref the pointer unless the call takes the pointer
@@ -780,7 +780,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// Optimistically assume that call doesn't touch Object and check this
// assumption in the following loop.
- ModRefInfo Result = MRI_NoModRef;
+ ModRefInfo Result = ModRefInfo::NoModRef;
unsigned OperandNo = 0;
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
@@ -818,7 +818,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
continue;
}
// This operand aliases 'Object' and call reads and writes into it.
- Result = MRI_ModRef;
+ Result = ModRefInfo::ModRef;
break;
}
@@ -838,7 +838,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
// The semantics of memcpy intrinsics forbid overlap between their respective
@@ -851,14 +851,14 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
Loc)) == MustAlias)
// Loc is exactly the memcpy source thus disjoint from memcpy dest.
- return MRI_Ref;
+ return ModRefInfo::Ref;
if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
Loc)) == MustAlias)
// The converse case.
- return MRI_Mod;
+ return ModRefInfo::Mod;
// It's also possible for Loc to alias both src and dest, or neither.
- ModRefInfo rv = MRI_NoModRef;
+ ModRefInfo rv = ModRefInfo::NoModRef;
if (SrcAA != NoAlias)
rv = setRef(rv);
if (DestAA != NoAlias)
@@ -870,7 +870,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// proper control dependencies will be maintained, it never aliases any
// particular memory location.
if (isIntrinsicCall(CS, Intrinsic::assume))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mods any
@@ -880,7 +880,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// heap state at the point the guard is issued needs to be consistent in case
// the guard invokes the "deopt" continuation.
if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
- return MRI_Ref;
+ return ModRefInfo::Ref;
// Like assumes, invariant.start intrinsics were also marked as arbitrarily
// writing so that proper control dependencies are maintained but they never
@@ -906,7 +906,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// rules of invariant.start) and print 40, while the first program always
// prints 50.
if (isIntrinsicCall(CS, Intrinsic::invariant_start))
- return MRI_Ref;
+ return ModRefInfo::Ref;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS, Loc);
@@ -919,7 +919,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// particular memory location.
if (isIntrinsicCall(CS1, Intrinsic::assume) ||
isIntrinsicCall(CS2, Intrinsic::assume))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
// Like assumes, guard intrinsics are also marked as arbitrarily writing so
// that proper control dependencies are maintained but they never mod any
@@ -933,12 +933,14 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// possibilities for guard intrinsics.
if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
- return isModSet(createModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref
- : MRI_NoModRef;
+ return isModSet(createModRefInfo(getModRefBehavior(CS2)))
+ ? ModRefInfo::Ref
+ : ModRefInfo::NoModRef;
if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
- return isModSet(createModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod
- : MRI_NoModRef;
+ return isModSet(createModRefInfo(getModRefBehavior(CS1)))
+ ? ModRefInfo::Mod
+ : ModRefInfo::NoModRef;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS1, CS2);
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index 6dc8b24508d..23109c67e5c 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -88,9 +88,9 @@ class GlobalsAAResult::FunctionInfo {
enum { MayReadAnyGlobal = 4 };
/// Checks to document the invariants of the bit packing here.
- static_assert((MayReadAnyGlobal & MRI_ModRef) == 0,
+ static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0,
"ModRef and the MayReadAnyGlobal flag bits overlap.");
- static_assert(((MayReadAnyGlobal | MRI_ModRef) >>
+ static_assert(((MayReadAnyGlobal | static_cast<int>(ModRefInfo::ModRef)) >>
AlignedMapPointerTraits::NumLowBitsAvailable) == 0,
"Insufficient low bits to store our flag and ModRef info.");
@@ -127,12 +127,12 @@ public:
/// Returns the \c ModRefInfo info for this function.
ModRefInfo getModRefInfo() const {
- return ModRefInfo(Info.getInt() & MRI_ModRef);
+ return ModRefInfo(Info.getInt() & static_cast<int>(ModRefInfo::ModRef));
}
/// Adds new \c ModRefInfo for this function to its state.
void addModRefInfo(ModRefInfo NewMRI) {
- Info.setInt(Info.getInt() | NewMRI);
+ Info.setInt(Info.getInt() | static_cast<int>(NewMRI));
}
/// Returns whether this function may read any global variable, and we don't
@@ -145,7 +145,8 @@ public:
/// Returns the \c ModRefInfo info for this function w.r.t. a particular
/// global, which may be more precise than the general information above.
ModRefInfo getModRefInfoForGlobal(const GlobalValue &GV) const {
- ModRefInfo GlobalMRI = mayReadAnyGlobal() ? MRI_Ref : MRI_NoModRef;
+ ModRefInfo GlobalMRI =
+ mayReadAnyGlobal() ? ModRefInfo::Ref : ModRefInfo::NoModRef;
if (AlignedMap *P = Info.getPointer()) {
auto I = P->Map.find(&GV);
if (I != P->Map.end())
@@ -155,7 +156,7 @@ public:
}
/// Add mod/ref info from another function into ours, saturating towards
- /// MRI_ModRef.
+ /// ModRef.
void addFunctionInfo(const FunctionInfo &FI) {
addModRefInfo(FI.getModRefInfo());
@@ -298,7 +299,7 @@ void GlobalsAAResult::AnalyzeGlobals(Module &M) {
Handles.emplace_front(*this, Reader);
Handles.front().I = Handles.begin();
}
- FunctionInfos[Reader].addModRefInfoForGlobal(GV, MRI_Ref);
+ FunctionInfos[Reader].addModRefInfoForGlobal(GV, ModRefInfo::Ref);
}
if (!GV.isConstant()) // No need to keep track of writers to constants
@@ -307,7 +308,7 @@ void GlobalsAAResult::AnalyzeGlobals(Module &M) {
Handles.emplace_front(*this, Writer);
Handles.front().I = Handles.begin();
}
- FunctionInfos[Writer].addModRefInfoForGlobal(GV, MRI_Mod);
+ FunctionInfos[Writer].addModRefInfoForGlobal(GV, ModRefInfo::Mod);
}
++NumNonAddrTakenGlobalVars;
@@ -503,13 +504,13 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (F->doesNotAccessMemory()) {
// Can't do better than that!
} else if (F->onlyReadsMemory()) {
- FI.addModRefInfo(MRI_Ref);
+ FI.addModRefInfo(ModRefInfo::Ref);
if (!F->isIntrinsic() && !F->onlyAccessesArgMemory())
// This function might call back into the module and read a global -
// consider every global as possibly being read by this function.
FI.setMayReadAnyGlobal();
} else {
- FI.addModRefInfo(MRI_ModRef);
+ FI.addModRefInfo(ModRefInfo::ModRef);
// Can't say anything useful unless it's an intrinsic - they don't
// read or write global variables of the kind considered here.
KnowNothing = !F->isIntrinsic();
@@ -564,7 +565,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) {
// FIXME: It is completely unclear why this is necessary and not
// handled by the above graph code.
- FI.addModRefInfo(MRI_ModRef);
+ FI.addModRefInfo(ModRefInfo::ModRef);
} else if (Function *Callee = CS.getCalledFunction()) {
// The callgraph doesn't include intrinsic calls.
if (Callee->isIntrinsic()) {
@@ -579,9 +580,9 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
// All non-call instructions we use the primary predicates for whether
// they read or write memory.
if (I.mayReadFromMemory())
- FI.addModRefInfo(MRI_Ref);
+ FI.addModRefInfo(ModRefInfo::Ref);
if (I.mayWriteToMemory())
- FI.addModRefInfo(MRI_Mod);
+ FI.addModRefInfo(ModRefInfo::Mod);
}
}
@@ -868,8 +869,9 @@ AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
const GlobalValue *GV) {
if (CS.doesNotAccessMemory())
- return MRI_NoModRef;
- ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef;
+ return ModRefInfo::NoModRef;
+ ModRefInfo ConservativeResult =
+ CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
// Iterate through all the arguments to the called function. If any argument
// is based on GV, return the conservative result.
@@ -890,12 +892,12 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
}
// We identified all objects in the argument list, and none of them were GV.
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
}
ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
- ModRefInfo Known = MRI_ModRef;
+ ModRefInfo Known = ModRefInfo::ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
@@ -909,7 +911,7 @@ ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
getModRefInfoForArgument(CS, GV));
if (!isModOrRefSet(Known))
- return MRI_NoModRef; // No need to query other mod/ref analyses
+ return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
}
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index eafc7061f88..cb0c6a53713 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -119,38 +119,38 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
- return MRI_Ref;
+ return ModRefInfo::Ref;
}
if (LI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(LI);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isUnordered()) {
Loc = MemoryLocation::get(SI);
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
if (SI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(SI);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
Loc = MemoryLocation();
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Loc = MemoryLocation::get(V);
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
}
if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
// calls to free() deallocate the entire structure
Loc = MemoryLocation(CI->getArgOperand(0));
- return MRI_Mod;
+ return ModRefInfo::Mod;
}
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -166,7 +166,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
- return MRI_Mod;
+ return ModRefInfo::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
Loc = MemoryLocation(
@@ -174,7 +174,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
- return MRI_Mod;
+ return ModRefInfo::Mod;
default:
break;
}
@@ -182,10 +182,10 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
// Otherwise, just do the coarse-grained thing that always works.
if (Inst->mayWriteToMemory())
- return MRI_ModRef;
+ return ModRefInfo::ModRef;
if (Inst->mayReadFromMemory())
- return MRI_Ref;
- return MRI_NoModRef;
+ return ModRefInfo::Ref;
+ return ModRefInfo::NoModRef;
}
/// Private helper for finding the local dependencies of a call site.
@@ -689,12 +689,12 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
if (isModAndRefSet(MR))
MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
switch (MR) {
- case MRI_NoModRef:
+ case ModRefInfo::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
- case MRI_Mod:
+ case ModRefInfo::Mod:
return MemDepResult::getClobber(Inst);
- case MRI_Ref:
+ case ModRefInfo::Ref:
// If the call is known to never store to the pointer, and if this is a
// load query, we can safely ignore it (scan past it).
if (isLoad)
diff --git a/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp b/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
index ed03406ca8c..096ea661ecb 100644
--- a/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
@@ -123,7 +123,7 @@ ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS,
// These functions don't access any memory visible to the compiler.
// Note that this doesn't include objc_retainBlock, because it updates
// pointers when it copies block data.
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
default:
break;
}
diff --git a/llvm/lib/Analysis/ScopedNoAliasAA.cpp b/llvm/lib/Analysis/ScopedNoAliasAA.cpp
index b97b08ad074..f12275aff38 100644
--- a/llvm/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/llvm/lib/Analysis/ScopedNoAliasAA.cpp
@@ -102,12 +102,12 @@ ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS,
if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
Loc.AATags.NoAlias))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
@@ -120,12 +120,12 @@ ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS1,
if (!mayAliasInScopes(
CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
if (!mayAliasInScopes(
CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 2b21b685c7a..c9ed026a1e3 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -371,7 +371,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS,
if (const MDNode *M =
CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(L, M))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS, Loc);
}
@@ -386,7 +386,7 @@ ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1,
if (const MDNode *M2 =
CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
if (!Aliases(M1, M2))
- return MRI_NoModRef;
+ return ModRefInfo::NoModRef;
return AAResultBase::getModRefInfo(CS1, CS2);
}