author    | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2019-12-02 11:13:43 +0000
committer | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2019-12-12 10:07:01 +0000
commit    | 5f6208778ff92567c57d7c1e2e740c284d7e69a5 (patch)
tree      | 3806abe49eac6f431d22660d6e8a248377cabc84 /llvm/lib
parent    | 7d7789899f4d4684dac51f265a47b049db4d09f2 (diff)
download  | bcm5719-llvm-5f6208778ff92567c57d7c1e2e740c284d7e69a5.tar.gz bcm5719-llvm-5f6208778ff92567c57d7c1e2e740c284d7e69a5.zip
[DataLayout] Fix occurrences where the size and range of pointers are assumed to be the same.
The GEP index size can be specified in the DataLayout, a capability introduced in D42123. However, there were
still places in which getIndexSizeInBits was used interchangeably with getPointerSizeInBits. This notably caused
issues with InstCombine's visitPtrToInt; but the unit tests were incorrect, so this remained undiscovered.
Differential Revision: https://reviews.llvm.org/D68328
Patch by Joseph Faulls!
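To make the distinction concrete, here is a minimal sketch (not part of this patch) that queries both widths on a hypothetical target whose datalayout string declares 64-bit pointers with a 32-bit index width ("p:64:64:64:32", using the fourth field added by D42123). After this change, getIntPtrType() stays pointer-sized while getIndexType() yields the narrower index type.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  // Hypothetical layout: 64-bit pointers (size/abi/pref) with a 32-bit index width.
  llvm::DataLayout DL("e-p:64:64:64:32");

  llvm::outs() << "pointer size: " << DL.getPointerSizeInBits(0) << "\n"; // 64
  llvm::outs() << "index size:   " << DL.getIndexSizeInBits(0) << "\n";   // 32

  llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(Ctx);
  // getIntPtrType(): wide enough to hold a full ptrtoint result (i64 here).
  llvm::outs() << "getIntPtrType: i"
               << DL.getIntPtrType(PtrTy)->getScalarSizeInBits() << "\n";
  // getIndexType(): the type used for GEP offset arithmetic (i32 here).
  llvm::outs() << "getIndexType:  i"
               << DL.getIndexType(PtrTy)->getScalarSizeInBits() << "\n";
  return 0;
}

Throughout the patch the choice between the two is made explicit: offset and index arithmetic uses the index width, while anything that must hold a complete pointer value uses the pointer width.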
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Analysis/ConstantFolding.cpp | 20
-rw-r--r-- | llvm/lib/Analysis/InlineCost.cpp | 4
-rw-r--r-- | llvm/lib/Analysis/InstructionSimplify.cpp | 12
-rw-r--r-- | llvm/lib/Analysis/Loads.cpp | 2
-rw-r--r-- | llvm/lib/Analysis/MemoryBuiltins.cpp | 11
-rw-r--r-- | llvm/lib/Analysis/ScalarEvolution.cpp | 23
-rw-r--r-- | llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 4
-rw-r--r-- | llvm/lib/Analysis/ValueTracking.cpp | 8
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 4
-rw-r--r-- | llvm/lib/IR/DataLayout.cpp | 4
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 20
-rw-r--r-- | llvm/lib/Transforms/Utils/Local.cpp | 2
14 files changed, 64 insertions, 54 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 23e4b9b86fd..b32924e6497 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -766,8 +766,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                          Type *ResultTy, Optional<unsigned> InRangeIndex,
                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
-  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
-  Type *IntPtrScalarTy = IntPtrTy->getScalarType();
+  Type *IntIdxTy = DL.getIndexType(ResultTy);
+  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

   bool Any = false;
   SmallVector<Constant*, 32> NewIdxs;
@@ -775,11 +775,11 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
     if ((i == 1 ||
          !isa<StructType>(GetElementPtrInst::getIndexedType(
              SrcElemTy, Ops.slice(1, i - 1)))) &&
-        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
+        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
       Any = true;
       Type *NewType = Ops[i]->getType()->isVectorTy()
-                          ? IntPtrTy
-                          : IntPtrTy->getScalarType();
+                          ? IntIdxTy
+                          : IntIdxScalarTy;
       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                       true,
                                                                       NewType,
@@ -839,7 +839,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   if (!Ptr->getType()->isPointerTy())
     return nullptr;

-  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
+  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

   // If this is a constant expr gep that is effectively computing an
   // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -850,7 +850,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   // "inttoptr (sub (ptrtoint Ptr), V)"
   if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
     auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-    assert((!CE || CE->getType() == IntPtrTy) &&
+    assert((!CE || CE->getType() == IntIdxTy) &&
           "CastGEPIndices didn't canonicalize index types!");
     if (CE && CE->getOpcode() == Instruction::Sub &&
         CE->getOperand(0)->isNullValue()) {
@@ -865,7 +865,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
     return nullptr;
   }

-  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
+  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
   APInt Offset =
       APInt(BitWidth,
             DL.getIndexedOffsetInType(
@@ -945,7 +945,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         // The element size is 0. This may be [0 x Ty]*, so just use a zero
         // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
-        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
+        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
       } else {
         // The element size is non-zero divide the offset by the element
         // size (rounding down), to compute the index at this level.
@@ -954,7 +954,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         if (Overflow)
           break;
         Offset -= NewIdx * ElemSize;
-        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
+        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
       }
     } else {
       auto *STy = cast<StructType>(Ty);
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 55ce940bc3a..24af43af468 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1678,8 +1678,8 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);

-  Type *IntPtrTy = DL.getIntPtrType(V->getContext(), AS);
-  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
+  Type *IdxPtrTy = DL.getIndexType(V->getType());
+  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
 }

 /// Find dead blocks due to deleted CFG edges during inlining.
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index c27e2567d5f..75141f1ad95 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -662,16 +662,16 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                 bool AllowNonInbounds = false) {
   assert(V->getType()->isPtrOrPtrVectorTy());

-  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
-  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
+  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
+  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());

   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
   // As that strip may trace through `addrspacecast`, need to sext or trunc
   // the offset calculated.
-  IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
-  Offset = Offset.sextOrTrunc(IntPtrTy->getIntegerBitWidth());
+  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
+  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());

-  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
+  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
   if (V->getType()->isVectorTy())
     return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                     OffsetIntPtr);
@@ -4032,7 +4032,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
     // The following transforms are only safe if the ptrtoint cast
     // doesn't truncate the pointers.
     if (Ops[1]->getType()->getScalarSizeInBits() ==
-            Q.DL.getIndexSizeInBits(AS)) {
+            Q.DL.getPointerSizeInBits(AS)) {
       auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
         if (match(P, m_Zero()))
           return Constant::getNullValue(GEPTy);
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 3f03d53778f..a7d07c0b618 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
   // Require ABI alignment for loads without alignment specification
   const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);

-  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
+  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                    DL.getTypeStoreSize(Ty));
   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                             DT);
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 172c86eb464..427e6fd3ace 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -544,6 +544,7 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
       Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
   Value *UseZero =
       Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
+  ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
   return Builder.CreateSelect(UseZero, ConstantInt::get(ResultType, 0),
                               ResultSize);
 }
@@ -576,7 +577,7 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
 }

 SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
-  IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
+  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
   Zero = APInt::getNullValue(IntTyBits);

   V = V->stripPointerCasts();
@@ -746,7 +747,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {

 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
-  APInt Offset(IntTyBits, 0);
+  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
   if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
     return unknown();

@@ -834,7 +835,7 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(

 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
   // XXX - Are vectors of pointers possible here?
-  IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
+  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
   Zero = ConstantInt::get(IntTy, 0);

   SizeOffsetEvalType Result = compute_(V);
@@ -938,12 +939,12 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
   }

   Value *FirstArg = CS.getArgument(FnData->FstParam);
-  FirstArg = Builder.CreateZExt(FirstArg, IntTy);
+  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
   if (FnData->SndParam < 0)
     return std::make_pair(FirstArg, Zero);

   Value *SecondArg = CS.getArgument(FnData->SndParam);
-  SecondArg = Builder.CreateZExt(SecondArg, IntTy);
+  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
   return std::make_pair(Size, Zero);

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index d635afb0a29..ca4fb9371b8 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3495,7 +3495,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
   // getSCEV(Base)->getType() has the same address space as Base->getType()
   // because SCEV::getType() preserves the address space.
-  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
+  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
   // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
   // instruction to its SCEV, because the Instruction may be guarded by control
   // flow and the no-overflow bits may not be valid for the expression in any
@@ -3504,7 +3504,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                              : SCEV::FlagAnyWrap;

-  const SCEV *TotalOffset = getZero(IntPtrTy);
+  const SCEV *TotalOffset = getZero(IntIdxTy);
   // The array size is unimportant. The first thing we do on CurTy is getting
   // its element type.
   Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
@@ -3514,7 +3514,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // For a struct, add the member offset.
       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
       unsigned FieldNo = Index->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);

       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
@@ -3525,9 +3525,9 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // Update CurTy to its element type.
       CurTy = cast<SequentialType>(CurTy)->getElementType();
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
+      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
       // Getelementptr indices are signed.
-      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
+      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);

       // Multiply the index by the element size to compute the element offset.
       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
@@ -3786,7 +3786,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
 /// Return a type with the same bitwidth as the given type and which represents
 /// how SCEV will treat the given type, for which isSCEVable must return
-/// true. For pointer types, this is the pointer-sized integer type.
+/// true. For pointer types, this is the pointer index sized integer type.
 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
@@ -3795,7 +3795,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  return getDataLayout().getIntPtrType(Ty);
+  return getDataLayout().getIndexType(Ty);
 }

 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
@@ -5726,6 +5726,15 @@ ScalarEvolution::getRangeRef(const SCEV *S,
         assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
                "generalize as needed!");
         unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+        // If the pointer size is larger than the index size type, this can cause
+        // NS to be larger than BitWidth. So compensate for this.
+        if (U->getType()->isPointerTy()) {
+          unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
+          int ptrIdxDiff = ptrSize - BitWidth;
+          if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
+            NS -= ptrIdxDiff;
+        }
+
         if (NS > 1)
           ConservativeResult = ConservativeResult.intersectWith(
               ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index bceec921188..dc5d02aa3a3 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -414,7 +414,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);

-  Type *IntPtrTy = DL.getIntPtrType(PTy);
+  Type *IntIdxTy = DL.getIndexType(PTy);

   // Descend down the pointer's type and attempt to convert the other
   // operands into GEP indices, at each level. The first index in a GEP
@@ -426,7 +426,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     // array indexing.
     SmallVector<const SCEV *, 8> ScaledOps;
     if (ElTy->isSized()) {
-      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
+      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
       if (!ElSize->isZero()) {
         SmallVector<const SCEV *, 8> NewOps;
         for (const SCEV *Op : Ops) {
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 59e9d2eb928..ab021aece2f 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -90,7 +90,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;

-  return DL.getIndexTypeSizeInBits(Ty);
+  return DL.getPointerTypeSizeInBits(Ty);
 }

 namespace {
@@ -1137,7 +1137,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
     // which fall through here.
     Type *ScalarTy = SrcTy->getScalarType();
     SrcBitWidth = ScalarTy->isPointerTy() ?
-      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
       Q.DL.getTypeSizeInBits(ScalarTy);

     assert(SrcBitWidth && "SrcBitWidth can't be zero");
@@ -1664,7 +1664,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
-    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+    Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
   (void)BitWidth;
   (void)ExpectedWidth;
@@ -2409,7 +2409,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
   Type *ScalarTy = V->getType()->getScalarType();
   unsigned TyBits = ScalarTy->isPointerTy() ?
-    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+    Q.DL.getPointerTypeSizeInBits(ScalarTy) :
     Q.DL.getTypeSizeInBits(ScalarTy);

   unsigned Tmp, Tmp2;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9ca51e72ec7..cb0450cbfa8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9322,8 +9322,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
   const GlobalValue *GV = nullptr;
   int64_t GVOffset = 0;
   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
-    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
-    KnownBits Known(IdxWidth);
+    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
+    KnownBits Known(PtrWidth);
     llvm::computeKnownBits(GV, Known, getDataLayout());
     unsigned AlignBits = Known.countMinTrailingZeros();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 5fe7a2e94b6..94e0740663c 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -768,13 +768,13 @@ unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
 IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
                                        unsigned AddressSpace) const {
-  return IntegerType::get(C, getIndexSizeInBits(AddressSpace));
+  return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
 }

 Type *DataLayout::getIntPtrType(Type *Ty) const {
   assert(Ty->isPtrOrPtrVectorTy() &&
          "Expected a pointer or pointer vector type.");
-  unsigned NumBits = getIndexTypeSizeInBits(Ty);
+  unsigned NumBits = getPointerTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
     return VectorType::get(IntTy, VecTy->getNumElements());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 5112fb1a6c3..e42cd7555a0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1832,7 +1832,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
   Type *Ty = CI.getType();
   unsigned AS = CI.getPointerAddressSpace();

-  if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
+  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
     return commonPointerCastTransforms(CI);

   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index fd5a4682aa2..4abac988c2e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4930,7 +4930,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   // Get scalar or pointer size.
   unsigned BitWidth = Ty->isIntOrIntVectorTy() ?
                      Ty->getScalarSizeInBits()
-                     : DL.getIndexTypeSizeInBits(Ty->getScalarType());
+                     : DL.getPointerTypeSizeInBits(Ty->getScalarType());

   if (!BitWidth)
     return nullptr;
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 92783504ace..b1b87f81000 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -901,12 +901,12 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   SCEVExpander Expander(*SE, *DL, "loop-idiom");

   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
-  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
+  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

   const SCEV *Start = Ev->getStart();
   // Handle negative strided loops.
   if (NegStride)
-    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
+    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -934,7 +934,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   // Okay, everything looks good, insert the memset.
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -942,7 +942,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     return false;

   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

   CallInst *NewCall;
   if (SplatValue) {
@@ -955,7 +955,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     Module *M = TheStore->getModule();
     StringRef FuncName = "memset_pattern16";
     FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
-                                                Int8PtrTy, Int8PtrTy, IntPtr);
+                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
     inferLibFuncAttributes(M, FuncName, *TLI);

     // Otherwise we should form a memset_pattern16. PatternValue is known to be
@@ -1022,11 +1022,11 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   const SCEV *StrStart = StoreEv->getStart();
   unsigned StrAS = SI->getPointerAddressSpace();
-  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
+  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

   // Handle negative strided loops.
   if (NegStride)
-    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
+    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
   // this into a memcpy in the loop preheader now if we want.  However, this
@@ -1052,7 +1052,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   // Handle negative strided loops.
   if (NegStride)
-    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
+    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

   // For a memcpy, we have to make sure that the input array is not being
   // mutated by the loop.
@@ -1074,10 +1074,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   // Okay, everything is safe, we can transform this!
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 41f1dd951a8..d5690a04f53 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -2579,7 +2579,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
   if (!NewTy->isPointerTy())
     return;

-  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
+  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
     MDNode *NN = MDNode::get(OldLI.getContext(), None);
     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
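For illustration only (a sketch, not code from the tree; the helper name is invented for this note): the guard corrected in InstCombineCasts.cpp above can be read as the predicate below. A ptrtoint preserves every bit of the pointer only when the destination integer is as wide as the pointer itself, which may be wider than the index type.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Sketch of the corrected check in InstCombiner::visitPtrToInt: compare the
// destination width against the pointer width, not the index width, since a
// target may index with fewer bits than it uses to store a pointer.
static bool isLosslessPtrToInt(const llvm::PtrToIntInst &CI,
                               const llvm::DataLayout &DL) {
  unsigned AS = CI.getPointerAddressSpace();
  return CI.getType()->getScalarSizeInBits() == DL.getPointerSizeInBits(AS);
}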