author    Elena Demikhovsky <elena.demikhovsky@intel.com>  2018-02-14 06:58:08 +0000
committer Elena Demikhovsky <elena.demikhovsky@intel.com>  2018-02-14 06:58:08 +0000
commit    945b7e5aa639353f5660415935e58601d01cf270 (patch)
tree      f986734d50dea15ca61f00eb2b248af7e61c0159 /llvm/lib
parent    5ecea9fff5832d85115387189297bcde38f0a43c (diff)
Adding the width of the GEP index to the Data Layout.
Make the width of the GEP index, which is used for address calculation, one of the pointer properties in the Data Layout: p[address space]:size:abi_alignment:pref_alignment:index_size_in_bits. The index size parameter is optional; if it is not specified, it is equal to the pointer size.

Until now, the InstCombiner normalized GEPs and extended the index operand to the pointer width. That works fine if you can convert a pointer to an integer for address calculation, and all registered targets do this. But some ISAs have a very restricted instruction set for pointer calculation, so it was decided in the llvm-dev discussion to retrieve the information for the GEP index from the Data Layout: http://lists.llvm.org/pipermail/llvm-dev/2018-January/120416.html

I added an interface to the Data Layout and changed the InstCombiner and some other passes to take the index width into account. This change does not affect any in-tree target. I added tests to cover data layouts with an explicitly specified index size.

Differential Revision: https://reviews.llvm.org/D42123

llvm-svn: 325102
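As an illustration (not part of the patch), a layout with 64-bit pointers but a 32-bit GEP index could be declared and queried through the new interface like this; the layout string itself is hypothetical:

    #include "llvm/IR/DataLayout.h"
    #include <cassert>

    using namespace llvm;

    void checkIndexWidth() {
      // p[addrspace]:size:abi:pref:idx -- the trailing :32 is the new
      // optional index-size field; everything before it is unchanged.
      DataLayout DL("e-p:64:64:64:32");
      assert(DL.getPointerSizeInBits(0) == 64); // storage width
      assert(DL.getIndexSizeInBits(0) == 32);   // GEP index width (new)
    }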
Diffstat (limited to 'llvm/lib')
-rw-r--r-- llvm/lib/Analysis/ConstantFolding.cpp                     | 40
-rw-r--r-- llvm/lib/Analysis/InlineCost.cpp                          |  4
-rw-r--r-- llvm/lib/Analysis/InstructionSimplify.cpp                 | 10
-rw-r--r-- llvm/lib/Analysis/Loads.cpp                               |  4
-rw-r--r-- llvm/lib/Analysis/LoopAccessAnalysis.cpp                  |  6
-rw-r--r-- llvm/lib/Analysis/ScalarEvolution.cpp                     |  2
-rw-r--r-- llvm/lib/Analysis/ValueTracking.cpp                       | 25
-rw-r--r-- llvm/lib/CodeGen/CodeGenPrepare.cpp                       |  2
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp            |  4
-rw-r--r-- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp     | 13
-rw-r--r-- llvm/lib/IR/DataLayout.cpp                                | 61
-rw-r--r-- llvm/lib/IR/Operator.cpp                                  |  4
-rw-r--r-- llvm/lib/IR/Value.cpp                                     |  4
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp      |  6
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp   |  6
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  | 57
-rw-r--r-- llvm/lib/Transforms/Scalar/SROA.cpp                       |  8
-rw-r--r-- llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp |  2
-rw-r--r-- llvm/lib/Transforms/Utils/Local.cpp                       |  4
-rw-r--r-- llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp     |  3
20 files changed, 162 insertions, 103 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index e88b8f14d54..ace7131e7d5 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -286,7 +286,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
APInt &Offset, const DataLayout &DL) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
- unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
Offset = APInt(BitWidth, 0);
return true;
}
@@ -305,7 +305,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (!GEP)
return false;
- unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
APInt TmpOffset(BitWidth, 0);
// If the base isn't a global+constant, we aren't either.
@@ -808,26 +808,26 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- if (!isa<ConstantInt>(Ops[i])) {
-
- // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
- // "inttoptr (sub (ptrtoint Ptr), V)"
- if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
- auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
- assert((!CE || CE->getType() == IntPtrTy) &&
- "CastGEPIndices didn't canonicalize index types!");
- if (CE && CE->getOpcode() == Instruction::Sub &&
- CE->getOperand(0)->isNullValue()) {
- Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
- Res = ConstantExpr::getSub(Res, CE->getOperand(1));
- Res = ConstantExpr::getIntToPtr(Res, ResTy);
- if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
- Res = FoldedRes;
- return Res;
+ if (!isa<ConstantInt>(Ops[i])) {
+
+ // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
+ // "inttoptr (sub (ptrtoint Ptr), V)"
+ if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
+ auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
+ assert((!CE || CE->getType() == IntPtrTy) &&
+ "CastGEPIndices didn't canonicalize index types!");
+ if (CE && CE->getOpcode() == Instruction::Sub &&
+ CE->getOperand(0)->isNullValue()) {
+ Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
+ Res = ConstantExpr::getSub(Res, CE->getOperand(1));
+ Res = ConstantExpr::getIntToPtr(Res, ResTy);
+ if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
+ Res = FoldedRes;
+ return Res;
+ }
}
+ return nullptr;
}
- return nullptr;
- }
unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
APInt Offset =
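The ConstantFolding changes above show the idiom that repeats throughout the patch: an APInt that accumulates a pointer offset is now sized by the index width of the pointer type rather than its storage width. A minimal sketch of the pattern (helper name hypothetical):

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"

    using namespace llvm;

    // Size the accumulator to the index width so the offset arithmetic
    // matches what GEP address calculation can express on the target.
    static Value *stripOffset(Value *Ptr, const DataLayout &DL,
                              APInt &Offset) {
      Offset = APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      return Ptr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
    }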
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index b0e0acd3629..ff69b2cb4f7 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -372,7 +372,7 @@ void CallAnalyzer::disableLoadElimination() {
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
- unsigned IntPtrWidth = DL.getPointerTypeSizeInBits(GEP.getType());
+ unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -1619,7 +1619,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
return nullptr;
unsigned AS = V->getType()->getPointerAddressSpace();
- unsigned IntPtrWidth = DL.getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 43fbeb88ea2..e81d7672b88 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3762,7 +3762,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
// The following transforms are only safe if the ptrtoint cast
// doesn't truncate the pointers.
if (Ops[1]->getType()->getScalarSizeInBits() ==
- Q.DL.getPointerSizeInBits(AS)) {
+ Q.DL.getIndexSizeInBits(AS)) {
auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
if (match(P, m_Zero()))
return Constant::getNullValue(GEPTy);
@@ -3802,10 +3802,10 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
if (Q.DL.getTypeAllocSize(LastType) == 1 &&
all_of(Ops.slice(1).drop_back(1),
[](Value *Idx) { return match(Idx, m_Zero()); })) {
- unsigned PtrWidth =
- Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
- if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) {
- APInt BasePtrOffset(PtrWidth, 0);
+ unsigned IdxWidth =
+ Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
+ if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
+ APInt BasePtrOffset(IdxWidth, 0);
Value *StrippedBasePtr =
Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
BasePtrOffset);
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 834727c9224..9be8456963d 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -80,7 +80,7 @@ static bool isDereferenceableAndAlignedPointer(
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
const Value *Base = GEP->getPointerOperand();
- APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
+ APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
!Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
return false;
@@ -146,7 +146,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
SmallPtrSet<const Value *, 32> Visited;
return ::isDereferenceableAndAlignedPointer(
- V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
+ V, Align, APInt(DL.getIndexTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
CtxI, DT, Visited);
}
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index e141d6c58b6..beef7007651 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1127,11 +1127,11 @@ bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
if (CheckType && PtrA->getType() != PtrB->getType())
return false;
- unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
+ unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
- APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));
+ APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));
- APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
+ APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index f95b68bb31d..46e7c9d72b6 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3672,6 +3672,8 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
+ if (Ty->isPointerTy())
+ return getDataLayout().getIndexTypeSizeInBits(Ty);
return getDataLayout().getTypeSizeInBits(Ty);
}
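In effect, SCEV now measures pointer types by their index width. A hedged illustration, reusing the hypothetical 32-bit-index layout from above:

    // Under DataLayout("e-p:64:64:64:32") (hypothetical layout):
    //   SE.getTypeSizeInBits(Type::getInt8PtrTy(Ctx)) == 32  // index width
    //   SE.getTypeSizeInBits(Type::getInt64Ty(Ctx))   == 64  // unchanged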
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ada31e09cbd..e4255997715 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -89,7 +89,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
- return DL.getPointerTypeSizeInBits(Ty);
+ return DL.getIndexTypeSizeInBits(Ty);
}
namespace {
@@ -1101,7 +1101,10 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
// which fall through here.
- SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
+ Type *ScalarTy = SrcTy->getScalarType();
+ SrcBitWidth = ScalarTy->isPointerTy() ?
+ Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+ Q.DL.getTypeSizeInBits(ScalarTy);
assert(SrcBitWidth && "SrcBitWidth can't be zero");
Known = Known.zextOrTrunc(SrcBitWidth);
@@ -1555,9 +1558,13 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
V->getType()->isPtrOrPtrVectorTy()) &&
"Not integer or pointer type!");
- assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
- "V and Known should have same BitWidth");
+
+ Type *ScalarTy = V->getType()->getScalarType();
+ unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
+ Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+ assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
(void)BitWidth;
+ (void)ExpectedWidth;
const APInt *C;
if (match(V, m_APInt(C))) {
@@ -2194,7 +2201,11 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// in V, so for undef we have to conservatively return 1. We don't have the
// same behavior for poison though -- that's a FIXME today.
- unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
+ Type *ScalarTy = V->getType()->getScalarType();
+ unsigned TyBits = ScalarTy->isPointerTy() ?
+ Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+ Q.DL.getTypeSizeInBits(ScalarTy);
+
unsigned Tmp, Tmp2;
unsigned FirstAnswer = 1;
@@ -3091,7 +3102,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
/// pointer plus a constant offset. Return the base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const DataLayout &DL) {
- unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
APInt ByteOffset(BitWidth, 0);
// We walk up the defs but use a visited set to handle unreachable code. In
@@ -3109,7 +3120,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
// means when we construct GEPOffset, we need to use the size
// of GEP's pointer type rather than the size of the original
// pointer type.
- APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
+ APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
if (!GEP->accumulateConstantOffset(DL, GEPOffset))
break;
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 78b1ec02ba3..1b1bad1ac93 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1581,7 +1581,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
// if size - offset meets the size threshold.
if (!Arg->getType()->isPointerTy())
continue;
- APInt Offset(DL->getPointerSizeInBits(
+ APInt Offset(DL->getIndexSizeInBits(
cast<PointerType>(Arg->getType())->getAddressSpace()),
0);
Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0dabfdf374e..72c53608bdd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7977,8 +7977,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
const GlobalValue *GV;
int64_t GVOffset = 0;
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
- unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
- KnownBits Known(PtrWidth);
+ unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
+ KnownBits Known(IdxWidth);
llvm::computeKnownBits(GV, Known, getDataLayout());
unsigned AlignBits = Known.countMinTrailingZeros();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3563c423e4f..0da491039fd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3424,10 +3424,9 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
DAG.getConstant(Offset, dl, N.getValueType()), Flags);
}
} else {
- MVT PtrTy =
- DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
- unsigned PtrSize = PtrTy.getSizeInBits();
- APInt ElementSize(PtrSize, DL->getTypeAllocSize(GTI.getIndexedType()));
+ unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
+ MVT IdxTy = MVT::getIntegerVT(IdxSize);
+ APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
// If this is a scalar constant or a splat vector of constants,
// handle it quickly.
@@ -3439,11 +3438,11 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
if (CI) {
if (CI->isZero())
continue;
- APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
+ APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
LLVMContext &Context = *DAG.getContext();
SDValue OffsVal = VectorWidth ?
- DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, PtrTy, VectorWidth)) :
- DAG.getConstant(Offs, dl, PtrTy);
+ DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
+ DAG.getConstant(Offs, dl, IdxTy);
// In an inbounds GEP with an offset that is nonnegative even when
// interpreted as signed, assume there is no unsigned overflow.
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index f4dddeb30d0..289001d2020 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -129,13 +129,15 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
PointerAlignElem
PointerAlignElem::get(uint32_t AddressSpace, unsigned ABIAlign,
- unsigned PrefAlign, uint32_t TypeByteWidth) {
+ unsigned PrefAlign, uint32_t TypeByteWidth,
+ uint32_t IndexWidth) {
assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
PointerAlignElem retval;
retval.AddressSpace = AddressSpace;
retval.ABIAlign = ABIAlign;
retval.PrefAlign = PrefAlign;
retval.TypeByteWidth = TypeByteWidth;
+ retval.IndexWidth = IndexWidth;
return retval;
}
@@ -144,7 +146,8 @@ PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
return (ABIAlign == rhs.ABIAlign
&& AddressSpace == rhs.AddressSpace
&& PrefAlign == rhs.PrefAlign
- && TypeByteWidth == rhs.TypeByteWidth);
+ && TypeByteWidth == rhs.TypeByteWidth
+ && IndexWidth == rhs.IndexWidth);
}
//===----------------------------------------------------------------------===//
@@ -189,7 +192,7 @@ void DataLayout::reset(StringRef Desc) {
setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
E.TypeBitWidth);
}
- setPointerAlignment(0, 8, 8, 8);
+ setPointerAlignment(0, 8, 8, 8, 8);
parseSpecifier(Desc);
}
@@ -287,6 +290,10 @@ void DataLayout::parseSpecifier(StringRef Desc) {
report_fatal_error(
"Pointer ABI alignment must be a power of 2");
+ // Size of the index used in GEP for address calculation. The parameter
+ // is optional; by default it is equal to the pointer size.
+ unsigned IndexSize = PointerMemSize;
+
// Preferred alignment.
unsigned PointerPrefAlign = PointerABIAlign;
if (!Rest.empty()) {
@@ -295,10 +302,17 @@ void DataLayout::parseSpecifier(StringRef Desc) {
if (!isPowerOf2_64(PointerPrefAlign))
report_fatal_error(
"Pointer preferred alignment must be a power of 2");
- }
+ // Now read the index. It is the second optional parameter here.
+ if (!Rest.empty()) {
+ Split = split(Rest, ':');
+ IndexSize = inBytes(getInt(Tok));
+ if (!IndexSize)
+ report_fatal_error("Invalid index size of 0 bytes");
+ }
+ }
setPointerAlignment(AddrSpace, PointerABIAlign, PointerPrefAlign,
- PointerMemSize);
+ PointerMemSize, IndexSize);
break;
}
case 'i':
@@ -467,8 +481,8 @@ DataLayout::findPointerLowerBound(uint32_t AddressSpace) {
}
void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
- unsigned PrefAlign,
- uint32_t TypeByteWidth) {
+ unsigned PrefAlign, uint32_t TypeByteWidth,
+ uint32_t IndexWidth) {
if (PrefAlign < ABIAlign)
report_fatal_error(
"Preferred alignment cannot be less than the ABI alignment");
@@ -476,11 +490,12 @@ void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
PointersTy::iterator I = findPointerLowerBound(AddrSpace);
if (I == Pointers.end() || I->AddressSpace != AddrSpace) {
Pointers.insert(I, PointerAlignElem::get(AddrSpace, ABIAlign, PrefAlign,
- TypeByteWidth));
+ TypeByteWidth, IndexWidth));
} else {
I->ABIAlign = ABIAlign;
I->PrefAlign = PrefAlign;
I->TypeByteWidth = TypeByteWidth;
+ I->IndexWidth = IndexWidth;
}
}
@@ -618,6 +633,22 @@ unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
return getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
}
+unsigned DataLayout::getIndexSize(unsigned AS) const {
+ PointersTy::const_iterator I = findPointerLowerBound(AS);
+ if (I == Pointers.end() || I->AddressSpace != AS) {
+ I = findPointerLowerBound(0);
+ assert(I->AddressSpace == 0);
+ }
+ return I->IndexWidth;
+}
+
+unsigned DataLayout::getIndexTypeSizeInBits(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "This should only be called with a pointer or pointer vector type");
+ Ty = Ty->getScalarType();
+ return getIndexSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
+}
+
/*!
\param abi_or_pref Flag that determines which alignment is returned. true
returns the ABI alignment, false returns the preferred alignment.
@@ -701,13 +732,13 @@ unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
unsigned AddressSpace) const {
- return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+ return IntegerType::get(C, getIndexSizeInBits(AddressSpace));
}
Type *DataLayout::getIntPtrType(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"Expected a pointer or pointer vector type.");
- unsigned NumBits = getPointerTypeSizeInBits(Ty);
+ unsigned NumBits = getIndexTypeSizeInBits(Ty);
IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
return VectorType::get(IntTy, VecTy->getNumElements());
@@ -726,6 +757,16 @@ unsigned DataLayout::getLargestLegalIntTypeSizeInBits() const {
return Max != LegalIntWidths.end() ? *Max : 0;
}
+Type *DataLayout::getIndexType(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "Expected a pointer or pointer vector type.");
+ unsigned NumBits = getIndexTypeSizeInBits(Ty);
+ IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(IntTy, VecTy->getNumElements());
+ return IntTy;
+}
+
int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
ArrayRef<Value *> Indices) const {
int64_t Result = 0;
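Taken together, the new DataLayout queries mirror the existing pointer ones. A rough sketch of the expected behavior, again assuming the hypothetical 32-bit-index layout:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cassert>

    using namespace llvm;

    void illustrateIndexType(LLVMContext &Ctx) {
      DataLayout DL("e-p:64:64:64:32");
      Type *PtrTy = Type::getInt8PtrTy(Ctx);
      // Pointer width and index width now diverge.
      assert(DL.getPointerTypeSizeInBits(PtrTy) == 64);
      assert(DL.getIndexTypeSizeInBits(PtrTy) == 32);
      // getIndexType parallels getIntPtrType but uses the index width,
      // preserving vector-ness for vectors of pointers.
      assert(DL.getIndexType(PtrTy)->isIntegerTy(32));
      assert(DL.getIndexType(VectorType::get(PtrTy, 4))->isVectorTy());
    }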
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
index 7d819f3aae8..5b4c7524b67 100644
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -35,8 +35,8 @@ Type *GEPOperator::getResultElementType() const {
bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
APInt &Offset) const {
assert(Offset.getBitWidth() ==
- DL.getPointerSizeInBits(getPointerAddressSpace()) &&
- "The offset must have exactly as many bits as our pointer.");
+ DL.getIndexSizeInBits(getPointerAddressSpace()) &&
+ "The offset bit width does not match DL specification.");
for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
GTI != GTE; ++GTI) {
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 01b7aff0f15..8c6658fd523 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -587,9 +587,9 @@ Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
if (!getType()->isPointerTy())
return this;
- assert(Offset.getBitWidth() == DL.getPointerSizeInBits(cast<PointerType>(
+ assert(Offset.getBitWidth() == DL.getIndexSizeInBits(cast<PointerType>(
getType())->getAddressSpace()) &&
- "The offset must have exactly as many bits as our pointer.");
+ "The offset bit width does not match the DL specification.");
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 4eaf2fd72f4..7026b24f4df 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1761,7 +1761,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
Type *Ty = CI.getType();
unsigned AS = CI.getPointerAddressSpace();
- if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
+ if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
return commonPointerCastTransforms(CI);
Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
@@ -2014,13 +2014,13 @@ static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
!BO->isBitwiseLogicOp())
return nullptr;
-
+
// FIXME: This transform is restricted to vector types to avoid backend
// problems caused by creating potentially illegal operations. If a fix-up is
// added to handle that situation, we can remove this check.
if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
return nullptr;
-
+
Value *X;
if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
X->getType() == DestTy && !isa<Constant>(X)) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index f90bb08b045..6d053d162b9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -682,7 +682,7 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
// 4. Emit GEPs to get the original pointers.
// 5. Remove the original instructions.
Type *IndexType = IntegerType::get(
- Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));
+ Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
DenseMap<Value *, Value *> NewInsts;
NewInsts[Base] = ConstantInt::getNullValue(IndexType);
@@ -790,7 +790,7 @@ static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
Type *IndexType = IntegerType::get(V->getContext(),
- DL.getPointerTypeSizeInBits(V->getType()));
+ DL.getIndexTypeSizeInBits(V->getType()));
Constant *Index = ConstantInt::getNullValue(IndexType);
while (true) {
@@ -4031,7 +4031,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// Get scalar or pointer size.
unsigned BitWidth = Ty->isIntOrIntVectorTy()
? Ty->getScalarSizeInBits()
- : DL.getTypeSizeInBits(Ty->getScalarType());
+ : DL.getIndexTypeSizeInBits(Ty->getScalarType());
if (!BitWidth)
return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 538d37192bd..8ac2325932d 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1115,7 +1115,7 @@ Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- Type *IntPtrTy = DL.getIntPtrType(PtrTy);
+ Type *IndexTy = DL.getIndexType(PtrTy);
int64_t FirstIdx = 0;
if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
@@ -1130,7 +1130,7 @@ Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
}
- NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
+ NewIndices.push_back(ConstantInt::get(IndexTy, FirstIdx));
// Index into the types. If we fail, set OrigBase to null.
while (Offset) {
@@ -1152,7 +1152,7 @@ Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
- NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
+ NewIndices.push_back(ConstantInt::get(IndexTy,Offset/EltSize));
Offset %= EltSize;
Ty = AT->getElementType();
} else {
@@ -1515,8 +1515,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Eliminate unneeded casts for indices, and replace indices which displace
// by multiples of a zero size type with zero.
bool MadeChange = false;
- Type *IntPtrTy =
- DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
+
+ // The index width may not be the same as the pointer width.
+ // The data layout chooses the right type based on supported integer types.
+ Type *NewScalarIndexTy =
+ DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
@@ -1525,10 +1528,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (GTI.isStruct())
continue;
- // Index type should have the same width as IntPtr
Type *IndexTy = (*I)->getType();
- Type *NewIndexType = IndexTy->isVectorTy() ?
- VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy;
+ Type *NewIndexType =
+ IndexTy->isVectorTy()
+ ? VectorType::get(NewScalarIndexTy, IndexTy->getVectorNumElements())
+ : NewScalarIndexTy;
// If the element type has zero size then any index over it is equivalent
// to an index of zero, so replace it with zero if it is not zero already.
@@ -1731,7 +1735,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (GEP.getNumIndices() == 1) {
unsigned AS = GEP.getPointerAddressSpace();
if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
- DL.getPointerSizeInBits(AS)) {
+ DL.getIndexSizeInBits(AS)) {
Type *Ty = GEP.getSourceElementType();
uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
@@ -1857,7 +1861,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (SrcElTy->isArrayTy() &&
DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
DL.getTypeAllocSize(ResElTy)) {
- Type *IdxType = DL.getIntPtrType(GEP.getType());
+ Type *IdxType = DL.getIndexType(GEP.getType());
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP =
GEP.isInBounds()
@@ -1884,10 +1888,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
uint64_t Scale = SrcSize / ResSize;
- // Earlier transforms ensure that the index has type IntPtrType, which
- // considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
- "Index not cast to pointer width?");
+ // Earlier transforms ensure that the index has the right type
+ // according to Data Layout, which considerably simplifies the
+ // logic by eliminating implicit casts.
+ assert(Idx->getType() == DL.getIndexType(GEP.getType()) &&
+ "Index type does not match the Data Layout preferences");
bool NSW;
if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
@@ -1923,19 +1928,19 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
uint64_t Scale = ArrayEltSize / ResSize;
- // Earlier transforms ensure that the index has type IntPtrType, which
- // considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
- "Index not cast to pointer width?");
+ // Earlier transforms ensure that the index has the right type
+ // according to the Data Layout, which considerably simplifies
+ // the logic by eliminating implicit casts.
+ assert(Idx->getType() == DL.getIndexType(GEP.getType()) &&
+ "Index type does not match the Data Layout preferences");
bool NSW;
if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
// Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
// If the multiplication NewIdx * Scale may overflow then the new
// GEP may not be "inbounds".
- Value *Off[2] = {
- Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
- NewIdx};
+ Type *IndTy = DL.getIndexType(GEP.getType());
+ Value *Off[2] = {Constant::getNullValue(IndTy), NewIdx};
Value *NewGEP = GEP.isInBounds() && NSW
? Builder.CreateInBoundsGEP(
@@ -1971,7 +1976,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
Value *Operand = BCI->getOperand(0);
PointerType *OpType = cast<PointerType>(Operand->getType());
- unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
+ unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
APInt Offset(OffsetBits, 0);
if (!isa<BitCastInst>(Operand) &&
GEP.accumulateConstantOffset(DL, Offset)) {
@@ -2020,16 +2025,16 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
if (!GEP.isInBounds()) {
- unsigned PtrWidth =
- DL.getPointerSizeInBits(PtrOp->getType()->getPointerAddressSpace());
- APInt BasePtrOffset(PtrWidth, 0);
+ unsigned IdxWidth =
+ DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
+ APInt BasePtrOffset(IdxWidth, 0);
Value *UnderlyingPtrOp =
PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
BasePtrOffset);
if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
BasePtrOffset.isNonNegative()) {
- APInt AllocSize(PtrWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
+ APInt AllocSize(IdxWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
if (BasePtrOffset.ule(AllocSize)) {
return GetElementPtrInst::CreateInBounds(
PtrOp, makeArrayRef(Ops).slice(1), GEP.getName());
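The net effect of the InstCombine changes: transforms that used to materialize GEP indices of getIntPtrType width now use getIndexType. A sketch of the new pattern (function and names hypothetical, not code from the patch):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Build `gep Ty, Ptr, 0, Idx` with indices of the layout's index
    // width rather than the pointer width.
    static Value *buildTwoIndexGEP(IRBuilder<> &B, const DataLayout &DL,
                                   Type *Ty, Value *Ptr, uint64_t Idx) {
      Type *IdxTy = DL.getIndexType(Ptr->getType());
      Value *Ops[2] = {Constant::getNullValue(IdxTy),
                       ConstantInt::get(IdxTy, Idx)};
      return B.CreateInBoundsGEP(Ty, Ptr, Ops);
    }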
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index fd9b19485b4..6089fc81f87 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3648,7 +3648,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto *PartPtrTy = PartTy->getPointerTo(AS);
LoadInst *PLoad = IRB.CreateAlignedLoad(
getAdjustedPtr(IRB, DL, BasePtr,
- APInt(DL.getPointerSizeInBits(AS), PartOffset),
+ APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, BasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
@@ -3704,7 +3704,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
StoreInst *PStore = IRB.CreateAlignedStore(
PLoad,
getAdjustedPtr(IRB, DL, StoreBasePtr,
- APInt(DL.getPointerSizeInBits(AS), PartOffset),
+ APInt(DL.getIndexSizeInBits(AS), PartOffset),
PartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
PStore->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);
@@ -3786,7 +3786,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
auto AS = LI->getPointerAddressSpace();
PLoad = IRB.CreateAlignedLoad(
getAdjustedPtr(IRB, DL, LoadBasePtr,
- APInt(DL.getPointerSizeInBits(AS), PartOffset),
+ APInt(DL.getIndexSizeInBits(AS), PartOffset),
LoadPartPtrTy, LoadBasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
@@ -3798,7 +3798,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
StoreInst *PStore = IRB.CreateAlignedStore(
PLoad,
getAdjustedPtr(IRB, DL, StoreBasePtr,
- APInt(DL.getPointerSizeInBits(AS), PartOffset),
+ APInt(DL.getIndexSizeInBits(AS), PartOffset),
StorePartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 4a96e0ddca1..a5e924d0ed3 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -1295,7 +1295,7 @@ void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
// We changed p+o+c to p+c+o, p+c may not be inbound anymore.
const DataLayout &DAL = First->getModule()->getDataLayout();
- APInt Offset(DAL.getPointerSizeInBits(
+ APInt Offset(DAL.getIndexSizeInBits(
cast<PointerType>(First->getType())->getAddressSpace()),
0);
Value *NewBase =
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index e170b22b740..34f7d418ced 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1530,7 +1530,7 @@ void llvm::salvageDebugInfo(Instruction &I) {
}
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
unsigned BitWidth =
- M.getDataLayout().getPointerSizeInBits(GEP->getPointerAddressSpace());
+ M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
// Rewrite a constant GEP into a DIExpression. Since we are performing
// arithmetic to compute the variable's *value* in the DIExpression, we
// need to mark the expression with a DW_OP_stack_value.
@@ -2157,7 +2157,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
if (!NewTy->isPointerTy())
return;
- unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
MDNode *NN = MDNode::get(OldLI.getContext(), None);
NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 2fd39766bd8..1b8a79ad41b 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -323,7 +323,8 @@ bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));
- APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
+ unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
+ APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);