Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp | 8
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp | 2
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp | 4
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 2
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 5
-rw-r--r--  llvm/lib/IR/ConstantFold.cpp | 33
-rw-r--r--  llvm/lib/IR/Constants.cpp | 13
-rw-r--r--  llvm/lib/IR/DataLayout.cpp | 9
-rw-r--r--  llvm/lib/IR/Operator.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/ARM/ARMFastISel.cpp | 2
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFastISel.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 2
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp | 20
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/NaryReassociate.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 12
-rw-r--r--  llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2
28 files changed, 70 insertions, 93 deletions
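
The same mechanical rewrite recurs throughout this patch: instead of dereferencing a gep_type_iterator and dyn_cast'ing the resulting type, callers now query the iterator itself via getStructTypeOrNull(), isStruct()/isSequential(), and getIndexedType(). The standalone sketch below (our own function name and simplifications, not part of the patch) shows the post-patch idiom for accumulating a constant byte offset from a GEP — the pattern that the InlineCost, Operator, SROA, and FastISel hunks all follow:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Walk a GEP's indices, accumulating a constant byte offset. Struct levels
// contribute a field offset from the StructLayout; sequential levels
// (pointer/array/vector) scale the index by the element's alloc size.
static bool accumulateConstantGEPOffset(const DataLayout &DL,
                                        const GEPOperator &GEP,
                                        int64_t &Offset) {
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    auto *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      return false; // Variable index: no constant offset.
    if (OpC->isZero())
      continue;
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct index: add the member's offset within the struct layout.
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += SL->getElementOffset(OpC->getZExtValue());
    } else {
      // Sequential index: scale by the size of the type being indexed.
      Offset += (int64_t)DL.getTypeAllocSize(GTI.getIndexedType()) *
                OpC->getSExtValue();
    }
  }
  return true;
}
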
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 942f03620b9..761c6d92f7d 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -412,10 +412,10 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
// Assume all GEP operands are constants until proven otherwise.
bool GepHasConstantOffset = true;
for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
- I != E; ++I) {
+ I != E; ++I, ++GTI) {
const Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0)
@@ -431,13 +431,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
if (CIdx->isZero())
continue;
Decomposed.OtherOffset +=
- DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+ DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
continue;
}
GepHasConstantOffset = false;
- uint64_t Scale = DL.getTypeAllocSize(*GTI);
+ uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
unsigned ZExtBits = 0, SExtBits = 0;
// If the integer type is smaller than the pointer size, it is implicitly
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 6c3525fad4e..02a27536276 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -318,7 +318,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 234d42dfa91..e7220f875e5 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1231,7 +1231,7 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
Value *Index = I->getOperand(i);
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
// Handle struct member offset arithmetic.
// Handle case when index is vector zeroinitializer
@@ -1730,7 +1730,7 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
GTI != GTE; ++GTI) {
// Struct types are easy -- they must always be indexed by a constant.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = Q.DL.getStructLayout(STy);
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 5a51fd8a524..7e598f435ff 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -107,11 +107,11 @@ unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
// Find the type we're currently indexing into.
gep_type_iterator GEPTI = gep_type_begin(Gep);
- std::advance(GEPTI, LastOperand - 1);
+ std::advance(GEPTI, LastOperand - 2);
// If it's a type with the same allocation size as the result of the GEP we
// can peel off the zero index.
- if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+ if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
break;
--LastOperand;
}
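
The "- 1" → "- 2" change above is the visible consequence of the iterator's new starting point: gep_type_begin() no longer yields a level for the pointer operand itself, so index operand k of the GEP now corresponds to iterator position k - 1. A hypothetical helper (our naming) capturing the correspondence this patch applies by hand here and in the AArch64ISelLowering and SimplifyCFG hunks below:

#include <cassert>
#include <iterator>

#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Map a GEP operand number (operand 0 is the pointer; operands 1..N are
// indices) to the gep_type_iterator level that operand indexes, under the
// new begin-at-first-index scheme.
static gep_type_iterator typeLevelForOperand(const GetElementPtrInst *GEP,
                                             unsigned OpNo) {
  assert(OpNo >= 1 && OpNo < GEP->getNumOperands() && "not an index operand");
  return std::next(gep_type_begin(GEP), OpNo - 1);
}
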
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 60ac557c49e..0ae57ce0c36 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -3261,7 +3261,7 @@ bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
int64_t ConstantOffset = 0;
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx =
cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 7bc0c9d0183..e2f33bb433b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -488,7 +488,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *StTy = GTI.getStructTypeOrNull()) {
uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 01c7c149f29..09f2bbcba6f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3274,7 +3274,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *StTy = GTI.getStructTypeOrNull()) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 1eb4f7d1934..923f6e7147d 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -999,7 +999,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
uint64_t Total = 0;
for (; I != E; ++I) {
- if (StructType *STy = dyn_cast<StructType>(*I)) {
+ if (StructType *STy = I.getStructTypeOrNull()) {
const StructLayout *SLO = getDataLayout().getStructLayout(STy);
const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -1007,7 +1007,6 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
Total += SLO->getElementOffset(Index);
} else {
- SequentialType *ST = cast<SequentialType>(*I);
// Get the index number for the array... which must be long type...
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
@@ -1020,7 +1019,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
assert(BitWidth == 64 && "Invalid index type for getelementptr");
Idx = (int64_t)IdxGV.IntVal.getZExtValue();
}
- Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+ Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
}
}
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 37bab3a0d28..60f6fbb6f65 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -2019,22 +2019,8 @@ static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
}
/// Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
- const ConstantInt *CI) {
- // And indices are valid when indexing along a pointer
- if (isa<PointerType>(STy))
- return true;
-
- uint64_t NumElements = 0;
- // Determine the number of elements in our sequential type.
- if (auto *ATy = dyn_cast<ArrayType>(STy))
- NumElements = ATy->getNumElements();
- else if (auto *VTy = dyn_cast<VectorType>(STy))
- NumElements = VTy->getNumElements();
-
- assert((isa<ArrayType>(STy) || NumElements > 0) &&
- "didn't expect non-array type to have zero elements!");
-
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+ const ConstantInt *CI) {
// We cannot bounds check the index if it doesn't fit in an int64_t.
if (CI->getValue().getActiveBits() > 64)
return false;
@@ -2089,10 +2075,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
// getelementptr instructions into a single instruction.
//
if (CE->getOpcode() == Instruction::GetElementPtr) {
- Type *LastTy = nullptr;
+ gep_type_iterator LastI = gep_type_end(CE);
for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
I != E; ++I)
- LastTy = *I;
+ LastI = I;
// We cannot combine indices if doing so would take us outside of an
// array or vector. Doing otherwise could trick us if we evaluated such a
@@ -2115,9 +2101,11 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
bool PerformFold = false;
if (Idx0->isNullValue())
PerformFold = true;
- else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+ else if (LastI.isSequential())
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
- PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+ PerformFold =
+ !LastI.isBoundedSequential() ||
+ isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
if (PerformFold) {
SmallVector<Value*, 16> NewIndices;
@@ -2228,7 +2216,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
Unknown = true;
continue;
}
- if (isIndexInRangeOfSequentialType(STy, CI))
+ if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+ ? cast<ArrayType>(STy)->getNumElements()
+ : cast<VectorType>(STy)->getNumElements(),
+ CI))
// It's in range, skip to the next index.
continue;
if (!isa<SequentialType>(Prev)) {
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index 0e5fa248caa..b6af6ed111a 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -1073,19 +1073,14 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
User::const_op_iterator OI = std::next(this->op_begin());
- // Skip the first index, as it has no static limit.
- ++GEPI;
- ++OI;
-
// The remaining indices must be compile-time known integers within the
// bounds of the corresponding notional static array types.
for (; GEPI != E; ++GEPI, ++OI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
- if (!CI) return false;
- if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
- if (CI->getValue().getActiveBits() > 64 ||
- CI->getZExtValue() >= ATy->getNumElements())
- return false;
+ if (!CI || (GEPI.isBoundedSequential() &&
+ (CI->getValue().getActiveBits() > 64 ||
+ CI->getZExtValue() >= GEPI.getSequentialNumElements())))
+ return false;
}
// All the indices checked out.
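
The two IR hunks above lean on the new bounded/unbounded distinction: array and vector levels are "bounded sequential" and expose a static element count, while indexing through the pointer operand is sequential but has no static limit — which is why the explicit "skip the first index" lines could be dropped. A minimal sketch of the bounds-check idiom (our naming; the in-tree code inlines it):

#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"

using namespace llvm;

// A constant index is acceptable at a sequential GEP level unless the level
// is bounded (array/vector) and the index is provably out of range. Indices
// wider than 64 bits cannot be bounds-checked, so treat them as out of range.
static bool constantIndexInBounds(gep_type_iterator GTI,
                                  const ConstantInt *CI) {
  if (!GTI.isBoundedSequential())
    return true; // Pointer-level indexing: no static bound to violate.
  return CI->getValue().getActiveBits() <= 64 &&
         CI->getZExtValue() < GTI.getSequentialNumElements();
}
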
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 3de1889996e..d15a34c0b93 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -737,15 +737,12 @@ int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
ArrayRef<Value *> Indices) const {
int64_t Result = 0;
- // We can use 0 as the address space as we don't need
- // to get pointer types back from gep_type_iterator.
- unsigned AS = 0;
generic_gep_type_iterator<Value* const*>
- GTI = gep_type_begin(ElemTy, AS, Indices),
- GTE = gep_type_end(ElemTy, AS, Indices);
+ GTI = gep_type_begin(ElemTy, Indices),
+ GTE = gep_type_end(ElemTy, Indices);
for (; GTI != GTE; ++GTI) {
Value *Idx = GTI.getOperand();
- if (auto *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
index 8a94053a72c..2fba24d99b3 100644
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -33,7 +33,7 @@ bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 405e7d0a211..fe2c2d4550a 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -557,7 +557,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
GTI != E; ++GTI) {
const Value *Op = GTI.getOperand();
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
@@ -4885,7 +4885,7 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+ if (auto *StTy = GTI.getStructTypeOrNull()) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
// N = N + Offset
if (Field)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 019d72da221..581958c3e0f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7157,8 +7157,8 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
case Instruction::GetElementPtr: {
gep_type_iterator GTI = gep_type_begin(Instr);
auto &DL = Ext->getModule()->getDataLayout();
- std::advance(GTI, U.getOperandNo());
- Type *IdxTy = *GTI;
+ std::advance(GTI, U.getOperandNo() - 1);
+ Type *IdxTy = GTI.getIndexedType();
// This extension will end up with a shift because of the scaling factor.
// 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
// Get the shift amount based on the scaling factor:
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 9b50458dd9b..df4dcb37575 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -733,7 +733,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index cfce60cad51..29f3e2c07e0 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -445,7 +445,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index b7f36035bb3..9b91b9ab8f8 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -358,7 +358,7 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
II != IE; ++II, ++GTI) {
const Value *Op = *II;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 628b5a501f7..4bdf06aaaf9 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -241,7 +241,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
GTI != E; ++GTI) {
const Value *Op = GTI.getOperand();
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 197943349cc..0cc061501e4 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -936,7 +936,7 @@ redo_gep:
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f1686a0a615..1df9ee7a94f 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -371,14 +371,14 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
++GEPI; // Skip over the pointer index.
// If this is a use of an array allocation, do a bit more checking for sanity.
- if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
- uint64_t NumElements = AT->getNumElements();
+ if (GEPI.isSequential()) {
ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
// Check to make sure that index falls within the array. If not,
// something funny is going on, so we won't do the optimization.
//
- if (Idx->getZExtValue() >= NumElements)
+ if (GEPI.isBoundedSequential() &&
+ Idx->getZExtValue() >= GEPI.getSequentialNumElements())
return false;
// We cannot scalar repl this level of the array unless any array
@@ -391,19 +391,13 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
for (++GEPI; // Skip array index.
GEPI != E;
++GEPI) {
- uint64_t NumElements;
- if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
- NumElements = SubArrayTy->getNumElements();
- else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
- NumElements = SubVectorTy->getNumElements();
- else {
- assert((*GEPI)->isStructTy() &&
- "Indexed GEP type is not array, vector, or struct!");
+ if (GEPI.isStruct())
continue;
- }
ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
- if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+ if (!IdxVal ||
+ (GEPI.isBoundedSequential() &&
+ IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
return false;
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bacb9ba5d59..c6100027abc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -517,7 +517,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index cdbc8eb4585..90eba686966 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1389,7 +1389,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
++I, ++GTI) {
// Skip indices into struct types.
- if (isa<StructType>(*GTI))
+ if (GTI.isStruct())
continue;
// Index type should have the same width as IntPtr
@@ -1546,7 +1546,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
bool EndsWithSequential = false;
for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
I != E; ++I)
- EndsWithSequential = !(*I)->isStructTy();
+ EndsWithSequential = I.isSequential();
// Can we combine the two pointer arithmetics offsets?
if (EndsWithSequential) {
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index c74d9111dc5..b19e663cc05 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -52,7 +52,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
if (OpC->isZero()) continue; // No offset.
// Handle struct indices, which add their field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 81744c08cd3..0a3bf7b4c31 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -281,9 +281,10 @@ Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
return nullptr;
gep_type_iterator GTI = gep_type_begin(*GEP);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
- if (isa<SequentialType>(*GTI++)) {
- if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (GTI.isSequential()) {
+ if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+ GTI.getIndexedType())) {
return NewGEP;
}
}
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 54ce1d8fb60..66de397676e 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -692,7 +692,7 @@ private:
break;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
GEPOffset +=
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 62c9f46089a..4d594532c36 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -722,7 +722,7 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
I != E; ++I, ++GTI) {
// Skip struct member indices which must be i32.
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
if ((*I)->getType() != IntPtrTy) {
*I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
Changed = true;
@@ -739,7 +739,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
int64_t AccumulativeByteOffset = 0;
gep_type_iterator GTI = gep_type_begin(*GEP);
for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
// Tries to extract a constant offset from this GEP index.
int64_t ConstantOffset =
ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
}
} else if (LowerGEP) {
- StructType *StTy = cast<StructType>(*GTI);
+ StructType *StTy = GTI.getStructType();
uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
// Skip field 0 as the offset is always 0.
if (Field != 0) {
@@ -787,7 +787,7 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
// Create an ugly GEP for each sequential index. We don't create GEPs for
// structure indices, as they are accumulated in the constant offset index.
for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
Value *Idx = Variadic->getOperand(I);
// Skip zero indices.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
// don't create arithmetics for structure indices, as they are accumulated
// in the constant offset index.
for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
Value *Idx = Variadic->getOperand(I);
// Skip zero indices.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
// handle the constant offset and won't need a new structure index.
gep_type_iterator GTI = gep_type_begin(*GEP);
for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
// Splits this GEP index into a variadic part and a constant offset, and
// uses the variadic part as the new index.
Value *OldIdx = GEP->getOperand(I);
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index c4d18f1c156..2be3f5c533b 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -490,8 +490,8 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
IndexExprs.push_back(SE->getSCEV(*I));
gep_type_iterator GTI = gep_type_begin(GEP);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
- if (!isa<SequentialType>(*GTI++))
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (GTI.isStruct())
continue;
const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
// indices except this current one.
const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
Value *ArrayIdx = GEP->getOperand(I);
- uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+ uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
if (ArrayIdx->getType()->getIntegerBitWidth() <=
DL->getPointerSizeInBits(GEP->getAddressSpace())) {
// Skip factoring if ArrayIdx is wider than the pointer size, because
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 323b81ca815..2f4e3e99b3f 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1416,7 +1416,7 @@ static bool canReplaceOperandWithVariable(const Instruction *I,
if (OpIdx == 0)
return true;
gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
- return !It->isStructTy();
+ return It.isSequential();
}
}