author    | Rafael Espindola <rafael.espindola@gmail.com> | 2014-02-21 00:06:31 +0000
committer | Rafael Espindola <rafael.espindola@gmail.com> | 2014-02-21 00:06:31 +0000
commit    | 37dc9e19f56be33b139f60478676541519af6103 (patch)
tree      | b7ae65d84d2000f7bf06a0d16bfc0b27c892165d /llvm/lib/Transforms/Utils
parent    | c95bd8d88f660d39ff7ffaed4a78b7f2622b5641 (diff)
download  | bcm5719-llvm-37dc9e19f56be33b139f60478676541519af6103.tar.gz, bcm5719-llvm-37dc9e19f56be33b139f60478676541519af6103.zip
Rename many DataLayout variables from TD to DL.
I am really sorry for the noise, but the current state where some parts of the
code use TD (from the old name: TargetData) and other parts use DL makes it
hard to write a patch that changes where those variables come from and how
they are passed along.
llvm-svn: 201827
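The change is purely mechanical: every DataLayout pointer previously held in a variable or parameter named TD (a leftover of the class's old name, TargetData) is now named DL, with no behavioral difference. The sketch below illustrates the pattern on a hypothetical helper; `needsPtrWidth` is not part of this patch, only the TD -> DL substitution it demonstrates is.

```cpp
// Illustrative sketch of the rename applied throughout this patch.
// `needsPtrWidth` is a made-up helper; only the TD -> DL naming change
// mirrors the actual diff.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Before: the DataLayout pointer was conventionally named TD.
//   static bool needsPtrWidth(Value *V, const DataLayout *TD) {
//     return TD && V->getType()->isPointerTy();
//   }

// After: the same code with the parameter renamed to DL. DataLayout may
// still be absent, so callers keep guarding against a null pointer.
static bool needsPtrWidth(Value *V, const DataLayout *DL) {
  return DL && V->getType()->isPointerTy();
}
```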
Diffstat (limited to 'llvm/lib/Transforms/Utils')
-rw-r--r-- | llvm/lib/Transforms/Utils/CloneFunction.cpp    |  14
-rw-r--r-- | llvm/lib/Transforms/Utils/SimplifyCFG.cpp      | 140
-rw-r--r-- | llvm/lib/Transforms/Utils/SimplifyIndVar.cpp   |   4
-rw-r--r-- | llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp | 170
4 files changed, 164 insertions, 164 deletions
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp index d105f5e24a2..32da80be65d 100644 --- a/llvm/lib/Transforms/Utils/CloneFunction.cpp +++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp @@ -205,17 +205,17 @@ namespace { bool ModuleLevelChanges; const char *NameSuffix; ClonedCodeInfo *CodeInfo; - const DataLayout *TD; + const DataLayout *DL; public: PruningFunctionCloner(Function *newFunc, const Function *oldFunc, ValueToValueMapTy &valueMap, bool moduleLevelChanges, const char *nameSuffix, ClonedCodeInfo *codeInfo, - const DataLayout *td) + const DataLayout *DL) : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap), ModuleLevelChanges(moduleLevelChanges), - NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) { + NameSuffix(nameSuffix), CodeInfo(codeInfo), DL(DL) { } /// CloneBlock - The specified block is found to be reachable, clone it and @@ -272,7 +272,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB, // If we can simplify this instruction to some other value, simply add // a mapping to that value rather than inserting a new instruction into // the basic block. - if (Value *V = SimplifyInstruction(NewInst, TD)) { + if (Value *V = SimplifyInstruction(NewInst, DL)) { // On the off-chance that this simplifies to an instruction in the old // function, map it back into the new function. if (Value *MappedV = VMap.lookup(V)) @@ -368,7 +368,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, SmallVectorImpl<ReturnInst*> &Returns, const char *NameSuffix, ClonedCodeInfo *CodeInfo, - const DataLayout *TD, + const DataLayout *DL, Instruction *TheCall) { assert(NameSuffix && "NameSuffix cannot be null!"); @@ -379,7 +379,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, #endif PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges, - NameSuffix, CodeInfo, TD); + NameSuffix, CodeInfo, DL); // Clone the entry block, and anything recursively reachable from it. std::vector<const BasicBlock*> CloneWorklist; @@ -509,7 +509,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, // node). for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx) if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]])) - recursivelySimplifyInstruction(PN, TD); + recursivelySimplifyInstruction(PN, DL); // Now that the inlined function body has been fully constructed, go through // and zap unconditional fall-through branches. 
This happen all the time when diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index effc08edaa3..e24acfffaef 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -90,7 +90,7 @@ namespace { class SimplifyCFGOpt { const TargetTransformInfo &TTI; - const DataLayout *const TD; + const DataLayout *const DL; Value *isValueEqualityComparison(TerminatorInst *TI); BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI, std::vector<ValueEqualityComparisonCase> &Cases); @@ -109,8 +109,8 @@ class SimplifyCFGOpt { bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder); public: - SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *TD) - : TTI(TTI), TD(TD) {} + SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL) + : TTI(TTI), DL(DL) {} bool run(BasicBlock *BB); }; } @@ -306,15 +306,15 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB, /// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr /// and PointerNullValue. Return NULL if value is not a constant int. -static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) { +static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) { // Normal constant int. ConstantInt *CI = dyn_cast<ConstantInt>(V); - if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy()) + if (CI || !DL || !isa<Constant>(V) || !V->getType()->isPointerTy()) return CI; // This is some kind of pointer constant. Turn it into a pointer-sized // ConstantInt if possible. - IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType())); + IntegerType *PtrTy = cast<IntegerType>(DL->getIntPtrType(V->getType())); // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*). if (isa<ConstantPointerNull>(V)) @@ -340,13 +340,13 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) { /// Values vector. static Value * GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra, - const DataLayout *TD, bool isEQ, unsigned &UsedICmps) { + const DataLayout *DL, bool isEQ, unsigned &UsedICmps) { Instruction *I = dyn_cast<Instruction>(V); if (I == 0) return 0; // If this is an icmp against a constant, handle this as one of the cases. 
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) { - if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) { + if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) { Value *RHSVal; ConstantInt *RHSC; @@ -405,11 +405,11 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra, unsigned NumValsBeforeLHS = Vals.size(); unsigned UsedICmpsBeforeLHS = UsedICmps; - if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD, + if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL, isEQ, UsedICmps)) { unsigned NumVals = Vals.size(); unsigned UsedICmpsBeforeRHS = UsedICmps; - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD, + if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL, isEQ, UsedICmps)) { if (LHS == RHS) return LHS; @@ -434,7 +434,7 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra, if (Extra == 0 || Extra == I->getOperand(0)) { Value *OldExtra = Extra; Extra = I->getOperand(0); - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD, + if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL, isEQ, UsedICmps)) return RHS; assert(Vals.size() == NumValsBeforeLHS); @@ -472,14 +472,14 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) { } else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) if (BI->isConditional() && BI->getCondition()->hasOneUse()) if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) - if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), TD)) + if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL)) CV = ICI->getOperand(0); // Unwrap any lossless ptrtoint cast. - if (TD && CV) { + if (DL && CV) { if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) { Value *Ptr = PTII->getPointerOperand(); - if (PTII->getType() == TD->getIntPtrType(Ptr->getType())) + if (PTII->getType() == DL->getIntPtrType(Ptr->getType())) CV = Ptr; } } @@ -504,7 +504,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI, ICmpInst *ICI = cast<ICmpInst>(BI->getCondition()); BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE); Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1), - TD), + DL), Succ)); return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ); } @@ -930,8 +930,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, Builder.SetInsertPoint(PTI); // Convert pointer to int before we switch. if (CV->getType()->isPointerTy()) { - assert(TD && "Cannot switch on pointer without DataLayout"); - CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()), + assert(DL && "Cannot switch on pointer without DataLayout"); + CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()), "magicptr"); } @@ -1606,7 +1606,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { /// that is defined in the same block as the branch and if any PHI entries are /// constants, thread edges corresponding to that entry to be branches to their /// ultimate destination. 
-static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) { +static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) { BasicBlock *BB = BI->getParent(); PHINode *PN = dyn_cast<PHINode>(BI->getCondition()); // NOTE: we currently cannot transform this case if the PHI node is used @@ -1675,7 +1675,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) { } // Check for trivial simplification. - if (Value *V = SimplifyInstruction(N, TD)) { + if (Value *V = SimplifyInstruction(N, DL)) { TranslateMap[BBI] = V; delete N; // Instruction folded away, don't need actual inst } else { @@ -1696,7 +1696,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) { } // Recurse, simplifying any other constants. - return FoldCondBranchOnPHI(BI, TD) | true; + return FoldCondBranchOnPHI(BI, DL) | true; } return false; @@ -1704,7 +1704,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) { /// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry /// PHI node, see if we can eliminate it. -static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) { +static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) { // Ok, this is a two entry PHI node. Check to see if this is a simple "if // statement", which has a very simple dominance structure. Basically, we // are trying to find the condition that is being branched on, which @@ -1738,7 +1738,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) { for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) { PHINode *PN = cast<PHINode>(II++); - if (Value *V = SimplifyInstruction(PN, TD)) { + if (Value *V = SimplifyInstruction(PN, DL)) { PN->replaceAllUsesWith(V); PN->eraseFromParent(); continue; @@ -2634,7 +2634,7 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { /// the PHI, merging the third icmp into the switch. static bool TryToSimplifyUncondBranchWithICmpInIt( ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI, - const DataLayout *TD) { + const DataLayout *DL) { BasicBlock *BB = ICI->getParent(); // If the block has any PHIs in it or the icmp has multiple uses, it is too @@ -2662,12 +2662,12 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( assert(VVal && "Should have a unique destination value"); ICI->setOperand(0, VVal); - if (Value *V = SimplifyInstruction(ICI, TD)) { + if (Value *V = SimplifyInstruction(ICI, DL)) { ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); } // BB is now empty, so it is likely to simplify away. - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } // Ok, the block is reachable from the default dest. If the constant we're @@ -2683,7 +2683,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); // BB is now empty, so it is likely to simplify away. - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } // The use of the icmp has to be in the 'end' block, by the only PHI node in @@ -2739,7 +2739,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt( /// SimplifyBranchOnICmpChain - The specified branch is a conditional branch. /// Check to see if it is branching on an or/and chain of icmp instructions, and /// fold it into a switch instruction if so. 
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD, +static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL, IRBuilder<> &Builder) { Instruction *Cond = dyn_cast<Instruction>(BI->getCondition()); if (Cond == 0) return false; @@ -2755,10 +2755,10 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD, unsigned UsedICmps = 0; if (Cond->getOpcode() == Instruction::Or) { - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true, + CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true, UsedICmps); } else if (Cond->getOpcode() == Instruction::And) { - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false, + CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false, UsedICmps); TrueWhenEqual = false; } @@ -2820,9 +2820,9 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD, Builder.SetInsertPoint(BI); // Convert pointer to int before we switch. if (CompVal->getType()->isPointerTy()) { - assert(TD && "Cannot switch on pointer without DataLayout"); + assert(DL && "Cannot switch on pointer without DataLayout"); CompVal = Builder.CreatePtrToInt(CompVal, - TD->getIntPtrType(CompVal->getType()), + DL->getIntPtrType(CompVal->getType()), "magicptr"); } @@ -3453,7 +3453,7 @@ namespace { ConstantInt *Offset, const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values, Constant *DefaultValue, - const DataLayout *TD); + const DataLayout *DL); /// BuildLookup - Build instructions with Builder to retrieve the value at /// the position given by Index in the lookup table. @@ -3461,7 +3461,7 @@ namespace { /// WouldFitInRegister - Return true if a table with TableSize elements of /// type ElementType would fit in a target-legal register. - static bool WouldFitInRegister(const DataLayout *TD, + static bool WouldFitInRegister(const DataLayout *DL, uint64_t TableSize, const Type *ElementType); @@ -3500,7 +3500,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M, ConstantInt *Offset, const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values, Constant *DefaultValue, - const DataLayout *TD) + const DataLayout *DL) : SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) { assert(Values.size() && "Can't build lookup table without values!"); assert(TableSize >= Values.size() && "Can't fit values in table!"); @@ -3546,7 +3546,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M, } // If the type is integer and the table fits in a register, build a bitmap. - if (WouldFitInRegister(TD, TableSize, ValueType)) { + if (WouldFitInRegister(DL, TableSize, ValueType)) { IntegerType *IT = cast<IntegerType>(ValueType); APInt TableInt(TableSize * IT->getBitWidth(), 0); for (uint64_t I = TableSize; I > 0; --I) { @@ -3611,10 +3611,10 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) { llvm_unreachable("Unknown lookup table kind!"); } -bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD, +bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL, uint64_t TableSize, const Type *ElementType) { - if (!TD) + if (!DL) return false; const IntegerType *IT = dyn_cast<IntegerType>(ElementType); if (!IT) @@ -3625,7 +3625,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD, // Avoid overflow, fitsInLegalInteger uses unsigned int for the width. 
if (TableSize >= UINT_MAX/IT->getBitWidth()) return false; - return TD->fitsInLegalInteger(TableSize * IT->getBitWidth()); + return DL->fitsInLegalInteger(TableSize * IT->getBitWidth()); } /// ShouldBuildLookupTable - Determine whether a lookup table should be built @@ -3634,7 +3634,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD, static bool ShouldBuildLookupTable(SwitchInst *SI, uint64_t TableSize, const TargetTransformInfo &TTI, - const DataLayout *TD, + const DataLayout *DL, const SmallDenseMap<PHINode*, Type*>& ResultTypes) { if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10) return false; // TableSize overflowed, or mul below might overflow. @@ -3650,7 +3650,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI, // Saturate this flag to false. AllTablesFitInRegister = AllTablesFitInRegister && - SwitchLookupTable::WouldFitInRegister(TD, TableSize, Ty); + SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty); // If both flags saturate, we're done. NOTE: This *only* works with // saturating flags, and all flags have to saturate first due to the @@ -3679,7 +3679,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI, static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder, const TargetTransformInfo &TTI, - const DataLayout* TD) { + const DataLayout* DL) { assert(SI->getNumCases() > 1 && "Degenerate switch?"); // Only build lookup table when we have a target that supports it. @@ -3723,7 +3723,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy; ResultsTy Results; if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest, - Results, TD)) + Results, DL)) return false; // Append the result from this case to the list for each phi. @@ -3748,7 +3748,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, // If the table has holes, we need a constant result for the default case. SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList; if (TableHasHoles && !GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest, - DefaultResultsList, TD)) + DefaultResultsList, DL)) return false; for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) { @@ -3757,7 +3757,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, DefaultResults[PHI] = Result; } - if (!ShouldBuildLookupTable(SI, TableSize, TTI, TD, ResultTypes)) + if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes)) return false; // Create the BB that does the lookups. @@ -3801,7 +3801,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, PHINode *PHI = PHIs[I]; SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI], - DefaultResults[PHI], TD); + DefaultResults[PHI], DL); Value *Result = Table.BuildLookup(TableIndex, Builder); @@ -3842,12 +3842,12 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { // see if that predecessor totally determines the outcome of this switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; Value *Cond = SI->getCondition(); if (SelectInst *Select = dyn_cast<SelectInst>(Cond)) if (SimplifySwitchOnSelect(SI, Select)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; // If the block only contains the switch, see if we can fold the block // away into any preds. 
@@ -3857,22 +3857,22 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { ++BBI; if (SI == &*BBI) if (FoldValueComparisonIntoPredecessors(SI, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } // Try to transform the switch into an icmp and a branch. if (TurnSwitchRangeIntoICmp(SI, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; // Remove unreachable cases. if (EliminateDeadSwitchCases(SI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; if (ForwardSwitchConditionToPHI(SI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; - if (SwitchToLookupTable(SI, Builder, TTI, TD)) - return SimplifyCFG(BB, TTI, TD) | true; + if (SwitchToLookupTable(SI, Builder, TTI, DL)) + return SimplifyCFG(BB, TTI, DL) | true; return false; } @@ -3909,7 +3909,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) { if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) { if (SimplifyIndirectBrOnSelect(IBI, SI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } return Changed; } @@ -3933,7 +3933,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ for (++I; isa<DbgInfoIntrinsic>(I); ++I) ; if (I->isTerminator() && - TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, TD)) + TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL)) return true; } @@ -3942,7 +3942,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ // predecessor and use logical operations to update the incoming value // for PHI nodes in common successor. if (FoldBranchToCommonDest(BI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; return false; } @@ -3957,7 +3957,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { // switch. if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; // This block must be empty, except for the setcond inst, if it exists. // Ignore dbg intrinsics. @@ -3967,26 +3967,26 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { ++I; if (&*I == BI) { if (FoldValueComparisonIntoPredecessors(BI, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } else if (&*I == cast<Instruction>(BI->getCondition())){ ++I; // Ignore dbg intrinsics. while (isa<DbgInfoIntrinsic>(I)) ++I; if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } } // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction. - if (SimplifyBranchOnICmpChain(BI, TD, Builder)) + if (SimplifyBranchOnICmpChain(BI, DL, Builder)) return true; // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and one of our successors, fold the comparison into the // predecessor and use logical operations to pick the right destination. if (FoldBranchToCommonDest(BI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; // We have a conditional branch to two blocks that are only reachable // from BI. 
We know that the condbr dominates the two blocks, so see if @@ -3995,7 +3995,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (BI->getSuccessor(0)->getSinglePredecessor() != 0) { if (BI->getSuccessor(1)->getSinglePredecessor() != 0) { if (HoistThenElseCodeToIf(BI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } else { // If Successor #1 has multiple preds, we may be able to conditionally // execute Successor #0 if it branches to successor #1. @@ -4003,7 +4003,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (Succ0TI->getNumSuccessors() == 1 && Succ0TI->getSuccessor(0) == BI->getSuccessor(1)) if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0))) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } } else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) { // If Successor #0 has multiple preds, we may be able to conditionally @@ -4012,22 +4012,22 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (Succ1TI->getNumSuccessors() == 1 && Succ1TI->getSuccessor(0) == BI->getSuccessor(0)) if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1))) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; } // If this is a branch on a phi node in the current block, thread control // through this block if any PHI node entries are constants. if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition())) if (PN->getParent() == BI->getParent()) - if (FoldCondBranchOnPHI(BI, TD)) - return SimplifyCFG(BB, TTI, TD) | true; + if (FoldCondBranchOnPHI(BI, DL)) + return SimplifyCFG(BB, TTI, DL) | true; // Scan predecessor blocks for conditional branches. for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator())) if (PBI != BI && PBI->isConditional()) if (SimplifyCondBranchToCondBranch(PBI, BI)) - return SimplifyCFG(BB, TTI, TD) | true; + return SimplifyCFG(BB, TTI, DL) | true; return false; } @@ -4139,7 +4139,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { // eliminate it, do so now. if (PHINode *PN = dyn_cast<PHINode>(BB->begin())) if (PN->getNumIncomingValues() == 2) - Changed |= FoldTwoEntryPHINode(PN, TD); + Changed |= FoldTwoEntryPHINode(PN, DL); Builder.SetInsertPoint(BB->getTerminator()); if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) { @@ -4171,6 +4171,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { /// of the CFG. It returns true if a modification was made. 
/// bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI, - const DataLayout *TD) { - return SimplifyCFGOpt(TTI, TD).run(BB); + const DataLayout *DL) { + return SimplifyCFGOpt(TTI, DL).run(BB); } diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp index 0b21d52b94b..a75bb94ff27 100644 --- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -48,7 +48,7 @@ namespace { Loop *L; LoopInfo *LI; ScalarEvolution *SE; - const DataLayout *TD; // May be NULL + const DataLayout *DL; // May be NULL SmallVectorImpl<WeakVH> &DeadInsts; @@ -60,7 +60,7 @@ namespace { L(Loop), LI(LPM->getAnalysisIfAvailable<LoopInfo>()), SE(SE), - TD(LPM->getAnalysisIfAvailable<DataLayout>()), + DL(LPM->getAnalysisIfAvailable<DataLayout>()), DeadInsts(Dead), Changed(false) { assert(LI && "IV simplification requires LoopInfo"); diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp index 2ffd03668c8..126160dbbc3 100644 --- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -43,7 +43,7 @@ namespace { class LibCallOptimization { protected: Function *Caller; - const DataLayout *TD; + const DataLayout *DL; const TargetLibraryInfo *TLI; const LibCallSimplifier *LCS; LLVMContext* Context; @@ -63,11 +63,11 @@ public: /// change the calling convention. virtual bool ignoreCallingConv() { return false; } - Value *optimizeCall(CallInst *CI, const DataLayout *TD, + Value *optimizeCall(CallInst *CI, const DataLayout *DL, const TargetLibraryInfo *TLI, const LibCallSimplifier *LCS, IRBuilder<> &B) { Caller = CI->getParent()->getParent(); - this->TD = TD; + this->DL = DL; this->TLI = TLI; this->LCS = LCS; if (CI->getCalledFunction()) @@ -184,8 +184,8 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(Context) || - FT->getParamType(3) != TD->getIntPtrType(Context)) + FT->getParamType(2) != DL->getIntPtrType(Context) || + FT->getParamType(3) != DL->getIntPtrType(Context)) return 0; if (isFoldable(3, 2, false)) { @@ -207,8 +207,8 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(Context) || - FT->getParamType(3) != TD->getIntPtrType(Context)) + FT->getParamType(2) != DL->getIntPtrType(Context) || + FT->getParamType(3) != DL->getIntPtrType(Context)) return 0; if (isFoldable(3, 2, false)) { @@ -230,8 +230,8 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isIntegerTy() || - FT->getParamType(2) != TD->getIntPtrType(Context) || - FT->getParamType(3) != TD->getIntPtrType(Context)) + FT->getParamType(2) != DL->getIntPtrType(Context) || + FT->getParamType(3) != DL->getIntPtrType(Context)) return 0; if (isFoldable(3, 2, false)) { @@ -256,7 +256,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization { FT->getReturnType() != FT->getParamType(0) || FT->getParamType(0) != FT->getParamType(1) || FT->getParamType(0) != Type::getInt8PtrTy(Context) 
|| - FT->getParamType(2) != TD->getIntPtrType(Context)) + FT->getParamType(2) != DL->getIntPtrType(Context)) return 0; Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); @@ -269,7 +269,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization { // TODO: It might be nice to get a maximum length out of the possible // string lengths for varying. if (isFoldable(2, 1, true)) { - Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6)); + Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6)); return Ret; } else { // Maybe we can stil fold __strcpy_chk to __memcpy_chk. @@ -277,12 +277,12 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization { if (Len == 0) return 0; // This optimization require DataLayout. - if (!TD) return 0; + if (!DL) return 0; Value *Ret = EmitMemCpyChk(Dst, Src, - ConstantInt::get(TD->getIntPtrType(Context), Len), - CI->getArgOperand(2), B, TD, TLI); + ConstantInt::get(DL->getIntPtrType(Context), Len), + CI->getArgOperand(2), B, DL, TLI); return Ret; } return 0; @@ -301,12 +301,12 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization { FT->getReturnType() != FT->getParamType(0) || FT->getParamType(0) != FT->getParamType(1) || FT->getParamType(0) != Type::getInt8PtrTy(Context) || - FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0))) + FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0))) return 0; Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x) - Value *StrLen = EmitStrLen(Src, B, TD, TLI); + Value *StrLen = EmitStrLen(Src, B, DL, TLI); return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0; } @@ -316,7 +316,7 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization { // TODO: It might be nice to get a maximum length out of the possible // string lengths for varying. if (isFoldable(2, 1, true)) { - Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6)); + Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6)); return Ret; } else { // Maybe we can stil fold __stpcpy_chk to __memcpy_chk. @@ -324,14 +324,14 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization { if (Len == 0) return 0; // This optimization require DataLayout. - if (!TD) return 0; + if (!DL) return 0; Type *PT = FT->getParamType(0); - Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len); + Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len); Value *DstEnd = B.CreateGEP(Dst, - ConstantInt::get(TD->getIntPtrType(PT), + ConstantInt::get(DL->getIntPtrType(PT), Len - 1)); - if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI)) + if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI)) return 0; return DstEnd; } @@ -351,12 +351,12 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization { FT->getParamType(0) != FT->getParamType(1) || FT->getParamType(0) != Type::getInt8PtrTy(Context) || !FT->getParamType(2)->isIntegerTy() || - FT->getParamType(3) != TD->getIntPtrType(Context)) + FT->getParamType(3) != DL->getIntPtrType(Context)) return 0; if (isFoldable(3, 2, false)) { Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1), - CI->getArgOperand(2), B, TD, TLI, + CI->getArgOperand(2), B, DL, TLI, Name.substr(2, 7)); return Ret; } @@ -392,7 +392,7 @@ struct StrCatOpt : public LibCallOptimization { return Dst; // These optimizations require DataLayout. 
- if (!TD) return 0; + if (!DL) return 0; return emitStrLenMemCpy(Src, Dst, Len, B); } @@ -401,7 +401,7 @@ struct StrCatOpt : public LibCallOptimization { IRBuilder<> &B) { // We need to find the end of the destination string. That's where the // memory is to be moved to. We just generate a call to strlen. - Value *DstLen = EmitStrLen(Dst, B, TD, TLI); + Value *DstLen = EmitStrLen(Dst, B, DL, TLI); if (!DstLen) return 0; @@ -413,7 +413,7 @@ struct StrCatOpt : public LibCallOptimization { // We have enough information to now generate the memcpy call to do the // concatenation for us. Make a memcpy to copy the nul byte with align = 1. B.CreateMemCpy(CpyDst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1); + ConstantInt::get(DL->getIntPtrType(*Context), Len + 1), 1); return Dst; } }; @@ -451,7 +451,7 @@ struct StrNCatOpt : public StrCatOpt { if (SrcLen == 0 || Len == 0) return Dst; // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // We don't optimize this case if (Len < SrcLen) return 0; @@ -479,23 +479,23 @@ struct StrChrOpt : public LibCallOptimization { ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1)); if (CharC == 0) { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; uint64_t Len = GetStringLength(SrcStr); if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32. return 0; return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul. - ConstantInt::get(TD->getIntPtrType(*Context), Len), - B, TD, TLI); + ConstantInt::get(DL->getIntPtrType(*Context), Len), + B, DL, TLI); } // Otherwise, the character is a constant, see if the first argument is // a string literal. If so, we can constant fold. StringRef Str; if (!getConstantStringInfo(SrcStr, Str)) { - if (TD && CharC->isZero()) // strchr(p, 0) -> p + strlen(p) - return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, TD, TLI), "strchr"); + if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p) + return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr"); return 0; } @@ -531,8 +531,8 @@ struct StrRChrOpt : public LibCallOptimization { StringRef Str; if (!getConstantStringInfo(SrcStr, Str)) { // strrchr(s, 0) -> strchr(s, 0) - if (TD && CharC->isZero()) - return EmitStrChr(SrcStr, '\0', B, TD, TLI); + if (DL && CharC->isZero()) + return EmitStrChr(SrcStr, '\0', B, DL, TLI); return 0; } @@ -581,11 +581,11 @@ struct StrCmpOpt : public LibCallOptimization { uint64_t Len2 = GetStringLength(Str2P); if (Len1 && Len2) { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; return EmitMemCmp(Str1P, Str2P, - ConstantInt::get(TD->getIntPtrType(*Context), - std::min(Len1, Len2)), B, TD, TLI); + ConstantInt::get(DL->getIntPtrType(*Context), + std::min(Len1, Len2)), B, DL, TLI); } return 0; @@ -617,8 +617,8 @@ struct StrNCmpOpt : public LibCallOptimization { if (Length == 0) // strncmp(x,y,0) -> 0 return ConstantInt::get(CI->getType(), 0); - if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1) - return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI); + if (DL && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1) + return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI); StringRef Str1, Str2; bool HasStr1 = getConstantStringInfo(Str1P, Str1); @@ -657,7 +657,7 @@ struct StrCpyOpt : public LibCallOptimization { return Src; // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // See if we can get the length of the input string. 
uint64_t Len = GetStringLength(Src); @@ -666,7 +666,7 @@ struct StrCpyOpt : public LibCallOptimization { // We have enough information to now generate the memcpy call to do the // copy for us. Make a memcpy to copy the nul byte with align = 1. B.CreateMemCpy(Dst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len), 1); + ConstantInt::get(DL->getIntPtrType(*Context), Len), 1); return Dst; } }; @@ -682,11 +682,11 @@ struct StpCpyOpt: public LibCallOptimization { return 0; // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x) - Value *StrLen = EmitStrLen(Src, B, TD, TLI); + Value *StrLen = EmitStrLen(Src, B, DL, TLI); return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0; } @@ -695,9 +695,9 @@ struct StpCpyOpt: public LibCallOptimization { if (Len == 0) return 0; Type *PT = FT->getParamType(0); - Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len); + Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len); Value *DstEnd = B.CreateGEP(Dst, - ConstantInt::get(TD->getIntPtrType(PT), + ConstantInt::get(DL->getIntPtrType(PT), Len - 1)); // We have enough information to now generate the memcpy call to do the @@ -740,7 +740,7 @@ struct StrNCpyOpt : public LibCallOptimization { if (Len == 0) return Dst; // strncpy(x, y, 0) -> x // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // Let strncpy handle the zero padding if (Len > SrcLen+1) return 0; @@ -748,7 +748,7 @@ struct StrNCpyOpt : public LibCallOptimization { Type *PT = FT->getParamType(0); // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant] B.CreateMemCpy(Dst, Src, - ConstantInt::get(TD->getIntPtrType(PT), Len), 1); + ConstantInt::get(DL->getIntPtrType(PT), Len), 1); return Dst; } @@ -805,8 +805,8 @@ struct StrPBrkOpt : public LibCallOptimization { } // strpbrk(s, "a") -> strchr(s, 'a') - if (TD && HasS2 && S2.size() == 1) - return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI); + if (DL && HasS2 && S2.size() == 1) + return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI); return 0; } @@ -885,8 +885,8 @@ struct StrCSpnOpt : public LibCallOptimization { } // strcspn(s, "") -> strlen(s) - if (TD && HasS2 && S2.empty()) - return EmitStrLen(CI->getArgOperand(0), B, TD, TLI); + if (DL && HasS2 && S2.empty()) + return EmitStrLen(CI->getArgOperand(0), B, DL, TLI); return 0; } @@ -906,12 +906,12 @@ struct StrStrOpt : public LibCallOptimization { return B.CreateBitCast(CI->getArgOperand(0), CI->getType()); // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0 - if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) { - Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI); + if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) { + Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI); if (!StrLen) return 0; Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1), - StrLen, B, TD, TLI); + StrLen, B, DL, TLI); if (!StrNCmp) return 0; for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end(); @@ -949,7 +949,7 @@ struct StrStrOpt : public LibCallOptimization { // fold strstr(x, "y") -> strchr(x, 'y'). if (HasStr2 && ToFindStr.size() == 1) { - Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI); + Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI); return StrChr ? 
B.CreateBitCast(StrChr, CI->getType()) : 0; } return 0; @@ -1011,13 +1011,13 @@ struct MemCmpOpt : public LibCallOptimization { struct MemCpyOpt : public LibCallOptimization { virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; FunctionType *FT = Callee->getFunctionType(); if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(*Context)) + FT->getParamType(2) != DL->getIntPtrType(*Context)) return 0; // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1) @@ -1030,13 +1030,13 @@ struct MemCpyOpt : public LibCallOptimization { struct MemMoveOpt : public LibCallOptimization { virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; FunctionType *FT = Callee->getFunctionType(); if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(*Context)) + FT->getParamType(2) != DL->getIntPtrType(*Context)) return 0; // memmove(x, y, n) -> llvm.memmove(x, y, n, 1) @@ -1049,13 +1049,13 @@ struct MemMoveOpt : public LibCallOptimization { struct MemSetOpt : public LibCallOptimization { virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; FunctionType *FT = Callee->getFunctionType(); if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isPointerTy() || !FT->getParamType(1)->isIntegerTy() || - FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0))) + FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0))) return 0; // memset(p, v, n) -> llvm.memset(p, v, n, 1) @@ -1632,7 +1632,7 @@ struct PrintFOpt : public LibCallOptimization { // printf("x") -> putchar('x'), even for '%'. if (FormatStr.size() == 1) { - Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI); + Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, DL, TLI); if (CI->use_empty() || !Res) return Res; return B.CreateIntCast(Res, CI->getType(), true); } @@ -1644,7 +1644,7 @@ struct PrintFOpt : public LibCallOptimization { // pass to be run after this pass, to merge duplicate strings. FormatStr = FormatStr.drop_back(); Value *GV = B.CreateGlobalString(FormatStr, "str"); - Value *NewCI = EmitPutS(GV, B, TD, TLI); + Value *NewCI = EmitPutS(GV, B, DL, TLI); return (CI->use_empty() || !NewCI) ? 
NewCI : ConstantInt::get(CI->getType(), FormatStr.size()+1); @@ -1654,7 +1654,7 @@ struct PrintFOpt : public LibCallOptimization { // printf("%c", chr) --> putchar(chr) if (FormatStr == "%c" && CI->getNumArgOperands() > 1 && CI->getArgOperand(1)->getType()->isIntegerTy()) { - Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI); + Value *Res = EmitPutChar(CI->getArgOperand(1), B, DL, TLI); if (CI->use_empty() || !Res) return Res; return B.CreateIntCast(Res, CI->getType(), true); @@ -1663,7 +1663,7 @@ struct PrintFOpt : public LibCallOptimization { // printf("%s\n", str) --> puts(str) if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 && CI->getArgOperand(1)->getType()->isPointerTy()) { - return EmitPutS(CI->getArgOperand(1), B, TD, TLI); + return EmitPutS(CI->getArgOperand(1), B, DL, TLI); } return 0; } @@ -1712,11 +1712,11 @@ struct SPrintFOpt : public LibCallOptimization { return 0; // we found a format specifier, bail out. // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1) B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), - ConstantInt::get(TD->getIntPtrType(*Context), // Copy the + ConstantInt::get(DL->getIntPtrType(*Context), // Copy the FormatStr.size() + 1), 1); // nul byte. return ConstantInt::get(CI->getType(), FormatStr.size()); } @@ -1742,12 +1742,12 @@ struct SPrintFOpt : public LibCallOptimization { if (FormatStr[1] == 's') { // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1) if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0; - Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI); + Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI); if (!Len) return 0; Value *IncLen = B.CreateAdd(Len, @@ -1812,12 +1812,12 @@ struct FPrintFOpt : public LibCallOptimization { return 0; // We found a format specifier. // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; return EmitFWrite(CI->getArgOperand(1), - ConstantInt::get(TD->getIntPtrType(*Context), + ConstantInt::get(DL->getIntPtrType(*Context), FormatStr.size()), - CI->getArgOperand(0), B, TD, TLI); + CI->getArgOperand(0), B, DL, TLI); } // The remaining optimizations require the format string to be "%s" or "%c" @@ -1830,14 +1830,14 @@ struct FPrintFOpt : public LibCallOptimization { if (FormatStr[1] == 'c') { // fprintf(F, "%c", chr) --> fputc(chr, F) if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0; - return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI); + return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI); } if (FormatStr[1] == 's') { // fprintf(F, "%s", str) --> fputs(str, F) if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0; - return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI); + return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI); } return 0; } @@ -1897,7 +1897,7 @@ struct FWriteOpt : public LibCallOptimization { // This optimisation is only valid, if the return value is unused. if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F) Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char"); - Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI); + Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI); return NewCI ? 
ConstantInt::get(CI->getType(), 1) : 0; } @@ -1911,7 +1911,7 @@ struct FPutsOpt : public LibCallOptimization { (void) ER.callOptimizer(Callee, CI, B); // These optimizations require DataLayout. - if (!TD) return 0; + if (!DL) return 0; // Require two pointers. Also, we can't optimize if return value is used. FunctionType *FT = Callee->getFunctionType(); @@ -1925,8 +1925,8 @@ struct FPutsOpt : public LibCallOptimization { if (!Len) return 0; // Known to have no uses (see above). return EmitFWrite(CI->getArgOperand(0), - ConstantInt::get(TD->getIntPtrType(*Context), Len-1), - CI->getArgOperand(1), B, TD, TLI); + ConstantInt::get(DL->getIntPtrType(*Context), Len-1), + CI->getArgOperand(1), B, DL, TLI); } }; @@ -1946,7 +1946,7 @@ struct PutsOpt : public LibCallOptimization { if (Str.empty() && CI->use_empty()) { // puts("") -> putchar('\n') - Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI); + Value *Res = EmitPutChar(B.getInt32('\n'), B, DL, TLI); if (CI->use_empty() || !Res) return Res; return B.CreateIntCast(Res, CI->getType(), true); } @@ -1960,7 +1960,7 @@ struct PutsOpt : public LibCallOptimization { namespace llvm { class LibCallSimplifierImpl { - const DataLayout *TD; + const DataLayout *DL; const TargetLibraryInfo *TLI; const LibCallSimplifier *LCS; bool UnsafeFPShrink; @@ -1970,11 +1970,11 @@ class LibCallSimplifierImpl { PowOpt Pow; Exp2Opt Exp2; public: - LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI, + LibCallSimplifierImpl(const DataLayout *DL, const TargetLibraryInfo *TLI, const LibCallSimplifier *LCS, bool UnsafeFPShrink = false) : Cos(UnsafeFPShrink), Pow(UnsafeFPShrink), Exp2(UnsafeFPShrink) { - this->TD = TD; + this->DL = DL; this->TLI = TLI; this->LCS = LCS; this->UnsafeFPShrink = UnsafeFPShrink; @@ -2233,15 +2233,15 @@ Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) { LibCallOptimization *LCO = lookupOptimization(CI); if (LCO) { IRBuilder<> Builder(CI); - return LCO->optimizeCall(CI, TD, TLI, LCS, Builder); + return LCO->optimizeCall(CI, DL, TLI, LCS, Builder); } return 0; } -LibCallSimplifier::LibCallSimplifier(const DataLayout *TD, +LibCallSimplifier::LibCallSimplifier(const DataLayout *DL, const TargetLibraryInfo *TLI, bool UnsafeFPShrink) { - Impl = new LibCallSimplifierImpl(TD, TLI, this, UnsafeFPShrink); + Impl = new LibCallSimplifierImpl(DL, TLI, this, UnsafeFPShrink); } LibCallSimplifier::~LibCallSimplifier() { |