author    Bill Wendling <isanbard@gmail.com>  2012-10-09 07:45:08 +0000
committer Bill Wendling <isanbard@gmail.com>  2012-10-09 07:45:08 +0000
commit    c9b22d735a921e335bc02aea0bd4695c5a3e52a9 (patch)
tree      dbe50cfd0e6565cc868b4210a199c95a9fee5fb7 /llvm/lib/Transforms
parent    3b95e4da76df2084315368dca15a31a99fd3150e (diff)
Create enums for the different attributes.
We use the enums to query whether an Attributes object has that attribute. The opaque layer is responsible for knowing where that specific attribute is stored.

llvm-svn: 165488
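To make the new query style concrete, here is a minimal sketch of the before/after pattern, using only calls that appear in the diff below (getFnAttributes(), hasAttribute(), and the Attributes enumerators). The helper function and header paths are illustrative for LLVM of this era and are not part of the patch itself.

// Sketch only: the enum-based attribute query introduced by this patch.
// Header paths reflect the pre-3.3 source layout and may need adjusting.
#include "llvm/Attributes.h"
#include "llvm/Function.h"

using namespace llvm;

// Hypothetical helper showing the old vs. new spelling of the same query.
static bool isAlwaysInline(Function &F) {
  // Old style (removed by this patch):
  //   return F.getFnAttributes().hasAlwaysInlineAttr();
  // New style: one generic hasAttribute() entry point, keyed by an enum.
  // The opaque Attributes object decides how that bit is actually stored.
  return F.getFnAttributes().hasAttribute(Attributes::AlwaysInline);
}

The same substitution is applied mechanically throughout the files listed in the diffstat: each per-attribute helper such as hasXxxAttr() becomes hasAttribute(Attributes::Xxx).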
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/IPO/ArgumentPromotion.cpp              |  3
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp                      |  2
-rw-r--r--  llvm/lib/Transforms/IPO/InlineAlways.cpp                   |  4
-rw-r--r--  llvm/lib/Transforms/IPO/Inliner.cpp                        | 13
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp       |  6
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp   |  8
-rw-r--r--  llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp              |  5
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp              |  3
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnswitch.cpp                |  3
9 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 548965c6d7e..6f6ff9ca2d5 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -153,7 +153,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
SmallPtrSet<Argument*, 8> ArgsToPromote;
SmallPtrSet<Argument*, 8> ByValArgsToTransform;
for (unsigned i = 0; i != PointerArgs.size(); ++i) {
- bool isByVal=F->getParamAttributes(PointerArgs[i].second+1).hasByValAttr();
+ bool isByVal=F->getParamAttributes(PointerArgs[i].second+1).
+ hasAttribute(Attributes::ByVal);
Argument *PtrArg = PointerArgs[i].first;
Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 69e2c4458d3..e8e54ec9aef 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -2063,7 +2063,7 @@ static void ChangeCalleesToFastCall(Function *F) {
static AttrListPtr StripNest(const AttrListPtr &Attrs) {
for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
- if (!Attrs.getSlot(i).Attrs.hasNestAttr())
+ if (!Attrs.getSlot(i).Attrs.hasAttribute(Attributes::Nest))
continue;
// There can be only one.
diff --git a/llvm/lib/Transforms/IPO/InlineAlways.cpp b/llvm/lib/Transforms/IPO/InlineAlways.cpp
index 24341c42978..b1c36c15db0 100644
--- a/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -65,7 +65,7 @@ Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {
/// \brief Minimal filter to detect invalid constructs for inlining.
static bool isInlineViable(Function &F) {
- bool ReturnsTwice = F.getFnAttributes().hasReturnsTwiceAttr();
+ bool ReturnsTwice =F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
// Disallow inlining of functions which contain an indirect branch.
if (isa<IndirectBrInst>(BI->getTerminator()))
@@ -114,7 +114,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
if (Callee->isDeclaration()) return InlineCost::getNever();
// Return never for anything not marked as always inline.
- if (!Callee->getFnAttributes().hasAlwaysInlineAttr())
+ if (!Callee->getFnAttributes().hasAttribute(Attributes::AlwaysInline))
return InlineCost::getNever();
// Do some minimal analysis to preclude non-viable functions.
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
index 6ef671a7407..876afd19f18 100644
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -93,10 +93,10 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// If the inlined function had a higher stack protection level than the
// calling function, then bump up the caller's stack protection level.
- if (Callee->getFnAttributes().hasStackProtectReqAttr())
+ if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtectReq))
Caller->addFnAttr(Attribute::StackProtectReq);
- else if (Callee->getFnAttributes().hasStackProtectAttr() &&
- !Caller->getFnAttributes().hasStackProtectReqAttr())
+ else if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtect) &&
+ !Caller->getFnAttributes().hasAttribute(Attributes::StackProtectReq))
Caller->addFnAttr(Attribute::StackProtect);
// Look at all of the allocas that we inlined through this call site. If we
@@ -209,7 +209,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// would decrease the threshold.
Function *Caller = CS.getCaller();
bool OptSize = Caller && !Caller->isDeclaration() &&
- Caller->getFnAttributes().hasOptimizeForSizeAttr();
+ Caller->getFnAttributes().hasAttribute(Attributes::OptimizeForSize);
if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
OptSizeThreshold < thres)
thres = OptSizeThreshold;
@@ -217,7 +217,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// Listen to the inlinehint attribute when it would increase the threshold.
Function *Callee = CS.getCalledFunction();
bool InlineHint = Callee && !Callee->isDeclaration() &&
- Callee->getFnAttributes().hasInlineHintAttr();
+ Callee->getFnAttributes().hasAttribute(Attributes::InlineHint);
if (InlineHint && HintThreshold > thres)
thres = HintThreshold;
@@ -533,7 +533,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
// Handle the case when this function is called and we only want to care
// about always-inline functions. This is a bit of a hack to share code
// between here and the InlineAlways pass.
- if (AlwaysInlineOnly && !F->getFnAttributes().hasAlwaysInlineAttr())
+ if (AlwaysInlineOnly &&
+ !F->getFnAttributes().hasAttribute(Attributes::AlwaysInline))
continue;
// If the only remaining users of the function are dead constants, remove
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index f4802606d33..f92c4baeba7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1072,7 +1072,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
- if (ParamTy != ActTy && Attrs.hasByValAttr()) {
+ if (ParamTy != ActTy && Attrs.hasAttribute(Attributes::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
return false;
@@ -1264,7 +1264,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
for (unsigned I = 0, E = Attrs.getNumAttrs(); I != E; ++I)
- if (Attrs.getAttributesAtIndex(I).hasNestAttr())
+ if (Attrs.getAttributesAtIndex(I).hasAttribute(Attributes::Nest))
return 0;
assert(Tramp &&
@@ -1283,7 +1283,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// Look for a parameter marked with the 'nest' attribute.
for (FunctionType::param_iterator I = NestFTy->param_begin(),
E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
- if (NestAttrs.getParamAttributes(NestIdx).hasNestAttr()) {
+ if (NestAttrs.getParamAttributes(NestIdx).hasAttribute(Attributes::Nest)){
// Record the parameter type and any other attributes.
NestTy = *I;
NestAttr = NestAttrs.getParamAttributes(NestIdx);
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 7699a934070..10ab9cb6039 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -854,12 +854,14 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
// If needed, insert __asan_init before checking for AddressSafety attr.
maybeInsertAsanInitAtFunctionEntry(F);
- if (!F.getFnAttributes().hasAddressSafetyAttr()) return false;
+ if (!F.getFnAttributes().hasAttribute(Attributes::AddressSafety))
+ return false;
if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
return false;
- // We want to instrument every address only once per basic block
- // (unless there are calls between uses).
+
+ // We want to instrument every address only once per basic block (unless there
+ // are calls between uses).
SmallSet<Value*, 16> TempsToInstrument;
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<Instruction*, 8> NoReturnCalls;
diff --git a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index ca06118a62b..2a52580eaad 100644
--- a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -149,7 +149,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
- OptSize = F.getFnAttributes().hasOptimizeForSizeAttr();
+ OptSize = F.getFnAttributes().hasAttribute(Attributes::OptimizeForSize);
/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
@@ -715,7 +715,8 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if (CallerRetAttr.hasZExtAttr() || CallerRetAttr.hasSExtAttr())
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Make sure there are no instructions between the PHI and return, or that the
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index f412c3891d1..0d781ac9772 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -145,7 +145,8 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// not user specified.
unsigned Threshold = CurrentThreshold;
if (!UserThreshold &&
- Header->getParent()->getFnAttributes().hasOptimizeForSizeAttr())
+ Header->getParent()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
Threshold = OptSizeUnrollThreshold;
// Find trip count and trip multiple if count is not available
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 74c8f43ec20..15b168fe2aa 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -638,7 +638,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
// Check to see if it would be profitable to unswitch current loop.
// Do not do non-trivial unswitch while optimizing for size.
- if (OptimizeForSize || F->getFnAttributes().hasOptimizeForSizeAttr())
+ if (OptimizeForSize ||
+ F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize))
return false;
UnswitchNontrivialCondition(LoopCond, Val, currentLoop);