author     Peter Collingbourne <peter@pcc.me.uk>   2016-12-02 02:24:42 +0000
committer  Peter Collingbourne <peter@pcc.me.uk>   2016-12-02 02:24:42 +0000
commit     ab85225be49b2abf5112823d84525e91d469d6bd (patch)
tree       409fc728e31d6bd0afc63bdb4ba06b7c96f6a816 /llvm/lib/Transforms
parent     6afcab35887245f36751790ba33debaf74b0d2ee (diff)
IR: Change the gep_type_iterator API to avoid always exposing the "current" type.
Instead, expose whether the current type is an array or a struct; if an array,
what the upper bound is; and if a struct, the struct type itself. This is in
preparation for a later change that will make PointerType derive from Type
rather than SequentialType.
Differential Revision: https://reviews.llvm.org/D26594
llvm-svn: 288458
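
For readers skimming the diff below, the recurring pattern is mechanical: code
that used to dereference the iterator and inspect the resulting Type now asks
the iterator directly. A minimal sketch of the new API in the
byte-offset-accumulation shape used by the InstCombine, MemCpyOpt, and SROA
callers (the function name and the skip-non-constant-indices simplification
are assumptions for illustration, not part of this commit):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/GetElementPtrTypeIterator.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Sketch only: accumulate the constant byte offset of a GEP, skipping
    // non-constant and zero indices for brevity.
    static int64_t accumulateConstantOffset(const GEPOperator *GEP,
                                            const DataLayout &DL) {
      int64_t Offset = 0;
      gep_type_iterator GTI = gep_type_begin(GEP);
      for (auto I = GEP->idx_begin(), E = GEP->idx_end(); I != E; ++I, ++GTI) {
        auto *CI = dyn_cast<ConstantInt>(*I);
        if (!CI || CI->isZero())
          continue;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          // Struct step: the iterator now hands back the struct type itself.
          Offset +=
              DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
        } else {
          // Sequential step: scale the index by the element's alloc size.
          Offset += CI->getSExtValue() *
                    int64_t(DL.getTypeAllocSize(GTI.getIndexedType()));
        }
      }
      return Offset;
    }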
Diffstat (limited to 'llvm/lib/Transforms')
 llvm/lib/Transforms/IPO/GlobalOpt.cpp                     | 20
 llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp   |  4
 llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  |  4
 llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp            |  2
 llvm/lib/Transforms/Scalar/NaryReassociate.cpp            |  7
 llvm/lib/Transforms/Scalar/SROA.cpp                       |  2
 llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 12
 llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp |  6
 llvm/lib/Transforms/Utils/SimplifyCFG.cpp                 |  2
9 files changed, 27 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f1686a0a615..1df9ee7a94f 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -371,14 +371,14 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
   ++GEPI; // Skip over the pointer index.
 
   // If this is a use of an array allocation, do a bit more checking for sanity.
-  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
-    uint64_t NumElements = AT->getNumElements();
+  if (GEPI.isSequential()) {
     ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
 
     // Check to make sure that index falls within the array.  If not,
     // something funny is going on, so we won't do the optimization.
     //
-    if (Idx->getZExtValue() >= NumElements)
+    if (GEPI.isBoundedSequential() &&
+        Idx->getZExtValue() >= GEPI.getSequentialNumElements())
       return false;
 
     // We cannot scalar repl this level of the array unless any array
@@ -391,19 +391,13 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
       for (++GEPI; // Skip array index.
            GEPI != E;
            ++GEPI) {
-        uint64_t NumElements;
-        if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
-          NumElements = SubArrayTy->getNumElements();
-        else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
-          NumElements = SubVectorTy->getNumElements();
-        else {
-          assert((*GEPI)->isStructTy() &&
-                 "Indexed GEP type is not array, vector, or struct!");
+        if (GEPI.isStruct())
           continue;
-        }
 
         ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
-        if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+        if (!IdxVal ||
+            (GEPI.isBoundedSequential() &&
+             IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
           return false;
       }
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bacb9ba5d59..c6100027abc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -517,7 +517,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
       if (CI->isZero()) continue;
 
       // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
       } else {
         uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
     if (CI->isZero()) continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index cdbc8eb4585..90eba686966 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1389,7 +1389,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
        ++I, ++GTI) {
     // Skip indices into struct types.
-    if (isa<StructType>(*GTI))
+    if (GTI.isStruct())
       continue;
 
     // Index type should have the same width as IntPtr
@@ -1546,7 +1546,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     bool EndsWithSequential = false;
     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
          I != E; ++I)
-      EndsWithSequential = !(*I)->isStructTy();
+      EndsWithSequential = I.isSequential();
 
     // Can we combine the two pointer arithmetics offsets?
     if (EndsWithSequential) {
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index c74d9111dc5..b19e663cc05 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -52,7 +52,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
     if (OpC->isZero()) continue;  // No offset.
 
     // Handle struct indices, which add their field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
       continue;
     }
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 81744c08cd3..0a3bf7b4c31 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -281,9 +281,10 @@ Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
     return nullptr;
 
   gep_type_iterator GTI = gep_type_begin(*GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (isa<SequentialType>(*GTI++)) {
-      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isSequential()) {
+      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+                                                  GTI.getIndexedType())) {
         return NewGEP;
       }
     }
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 54ce1d8fb60..66de397676e 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -692,7 +692,7 @@ private:
         break;
 
       // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         unsigned ElementIdx = OpC->getZExtValue();
         const StructLayout *SL = DL.getStructLayout(STy);
         GEPOffset +=
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 62c9f46089a..4d594532c36 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -722,7 +722,7 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
   for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
        I != E; ++I, ++GTI) {
     // Skip struct member indices which must be i32.
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       if ((*I)->getType() != IntPtrTy) {
         *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
         Changed = true;
@@ -739,7 +739,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
   int64_t AccumulativeByteOffset = 0;
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       // Tries to extract a constant offset from this GEP index.
       int64_t ConstantOffset =
           ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
             ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
       }
     } else if (LowerGEP) {
-      StructType *StTy = cast<StructType>(*GTI);
+      StructType *StTy = GTI.getStructType();
       uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
       // Skip field 0 as the offset is always 0.
       if (Field != 0) {
@@ -787,7 +787,7 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
   // Create an ugly GEP for each sequential index. We don't create GEPs for
   // structure indices, as they are accumulated in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
   // don't create arithmetics for structure indices, as they are accumulated
   // in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
   // handle the constant offset and won't need a new structure index.
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       // Splits this GEP index into a variadic part and a constant offset, and
       // uses the variadic part as the new index.
       Value *OldIdx = GEP->getOperand(I);
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index c4d18f1c156..2be3f5c533b 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -490,8 +490,8 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     IndexExprs.push_back(SE->getSCEV(*I));
 
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (!isa<SequentialType>(*GTI++))
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isStruct())
       continue;
 
     const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     // indices except this current one.
     const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
     Value *ArrayIdx = GEP->getOperand(I);
-    uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
     if (ArrayIdx->getType()->getIntegerBitWidth() <=
         DL->getPointerSizeInBits(GEP->getAddressSpace())) {
       // Skip factoring if ArrayIdx is wider than the pointer size, because
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 323b81ca815..2f4e3e99b3f 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1416,7 +1416,7 @@ static bool canReplaceOperandWithVariable(const Instruction *I,
     if (OpIdx == 0)
       return true;
     gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
-    return !It->isStructTy();
+    return It.isSequential();
   }
 }
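
One subtlety in the GlobalOpt hunks above: the old code read a concrete
element count off an ArrayType or VectorType, but once PointerType no longer
derives from SequentialType, a sequential step is not guaranteed to carry a
bound. The range check therefore becomes conditional on isBoundedSequential().
A sketch of that check in isolation, assuming the same headers as the sketch
earlier (indexWithinBounds is a hypothetical helper; the patch inlines this
logic):

    // Sketch only: reject a constant GEP index that is provably out of
    // range for a bounded sequential step (array or vector).
    static bool indexWithinBounds(gep_type_iterator GTI,
                                  const ConstantInt *Idx) {
      if (GTI.isStruct())
        return true; // Struct field indices are checked by the IR verifier.
      // Unbounded sequential steps have no count to compare against.
      return !GTI.isBoundedSequential() ||
             Idx->getZExtValue() < GTI.getSequentialNumElements();
    }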