author    | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2019-12-12 10:25:14 +0000
committer | Nicola Zaghen <nicola.zaghen@imgtec.com> | 2019-12-12 10:29:54 +0000
commit    | f798eb21eca97dc44ed40da52ece22780fb74230 (patch)
tree      | fad09859cc0f2aef457f421278e70d5e5f9ed037 /llvm/lib
parent    | 9c48c2f9c477007234c5bdad0bc8c0969afa0724 (diff)
download  | bcm5719-llvm-f798eb21eca97dc44ed40da52ece22780fb74230.tar.gz
          | bcm5719-llvm-f798eb21eca97dc44ed40da52ece22780fb74230.zip
Temporarily Revert "[DataLayout] Fix occurrences that size and range of pointers are assumed to be the same."
This reverts commit 5f6208778ff92567c57d7c1e2e740c284d7e69a5.
This caused failures in Transforms/PhaseOrdering/scev-custom-dl.ll
const: Assertion `getBitWidth() == CR.getBitWidth() && "ConstantRange types don't agree!"' failed.
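
For context on what the reverted change touched: DataLayout distinguishes a pointer's storage size from its index (offset) width, and the two can differ for some targets. The stand-alone sketch below is illustrative only and is not part of this commit; the layout string "p:64:64:64:32" and the small main() are hypothetical, chosen to show where mismatched bit widths, like the ConstantRange assertion quoted above, can come from when one code path uses the pointer width and another uses the index width.

    // Illustrative sketch only -- not part of this patch. Assumes an ordinary
    // LLVM C++ API build; the data layout string is hypothetical.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // "p:64:64:64:32": 64-bit pointers whose index (offset) width is 32 bits.
      DataLayout DL("p:64:64:64:32");
      Type *PtrTy = Type::getInt8PtrTy(Ctx);

      unsigned PtrBits = DL.getPointerSizeInBits(0); // 64: storage size of a pointer
      unsigned IdxBits = DL.getIndexSizeInBits(0);   // 32: width used for GEP offsets
      Type *IdxTy = DL.getIndexType(PtrTy);          // i32

      errs() << "pointer width = " << PtrBits << ", index width = " << IdxBits
             << ", index type = " << *IdxTy << "\n";

      // APInt/ConstantRange values built from these two widths do not mix:
      // intersecting a 64-bit range with a 32-bit range trips the
      // "ConstantRange types don't agree!" assertion mentioned in the revert.
      return 0;
    }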
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/ConstantFolding.cpp                    | 20
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp                         |  4
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp                | 12
-rw-r--r--  llvm/lib/Analysis/Loads.cpp                              |  2
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp                     | 11
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp                    | 23
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionExpander.cpp            |  4
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                      |  8
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp           |  4
-rw-r--r--  llvm/lib/IR/DataLayout.cpp                               |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp     |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp  |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp        | 20
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp                      |  2
14 files changed, 54 insertions, 64 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index b32924e6497..23e4b9b86fd 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -766,8 +766,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
 Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                          Type *ResultTy, Optional<unsigned> InRangeIndex,
                          const DataLayout &DL, const TargetLibraryInfo *TLI) {
-  Type *IntIdxTy = DL.getIndexType(ResultTy);
-  Type *IntIdxScalarTy = IntIdxTy->getScalarType();
+  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
+  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

   bool Any = false;
   SmallVector<Constant*, 32> NewIdxs;
@@ -775,11 +775,11 @@ Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
     if ((i == 1 ||
          !isa<StructType>(GetElementPtrInst::getIndexedType(
              SrcElemTy, Ops.slice(1, i - 1)))) &&
-        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
+        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
       Any = true;
       Type *NewType = Ops[i]->getType()->isVectorTy()
-                          ? IntIdxTy
-                          : IntIdxScalarTy;
+                          ? IntPtrTy
+                          : IntPtrTy->getScalarType();
       NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                       true,
                                                                       NewType,
@@ -839,7 +839,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   if (!Ptr->getType()->isPointerTy())
     return nullptr;

-  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
+  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

   // If this is a constant expr gep that is effectively computing an
   // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -850,7 +850,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   //    "inttoptr (sub (ptrtoint Ptr), V)"
   if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
     auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-    assert((!CE || CE->getType() == IntIdxTy) &&
+    assert((!CE || CE->getType() == IntPtrTy) &&
            "CastGEPIndices didn't canonicalize index types!");
     if (CE && CE->getOpcode() == Instruction::Sub &&
         CE->getOperand(0)->isNullValue()) {
@@ -865,7 +865,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
       return nullptr;
   }

-  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
+  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
   APInt Offset =
       APInt(BitWidth,
             DL.getIndexedOffsetInType(
@@ -945,7 +945,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
       // The element size is 0. This may be [0 x Ty]*, so just use a zero
       // index for this level and proceed to the next level to see if it can
       // accommodate the offset.
-      NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
+      NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
     } else {
       // The element size is non-zero divide the offset by the element
       // size (rounding down), to compute the index at this level.
@@ -954,7 +954,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
         if (Overflow)
           break;
         Offset -= NewIdx * ElemSize;
-        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
+        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
       }
     } else {
       auto *STy = cast<StructType>(Ty);
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 24af43af468..55ce940bc3a 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1678,8 +1678,8 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);

-  Type *IdxPtrTy = DL.getIndexType(V->getType());
-  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
+  Type *IntPtrTy = DL.getIntPtrType(V->getContext(), AS);
+  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
 }

 /// Find dead blocks due to deleted CFG edges during inlining.
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 75141f1ad95..c27e2567d5f 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -662,16 +662,16 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                 bool AllowNonInbounds = false) {
   assert(V->getType()->isPtrOrPtrVectorTy());

-  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
-  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
+  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
+  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());

   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
   // As that strip may trace through `addrspacecast`, need to sext or trunc
   // the offset calculated.
-  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
-  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());
+  IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
+  Offset = Offset.sextOrTrunc(IntPtrTy->getIntegerBitWidth());

-  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
+  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
   if (V->getType()->isVectorTy())
     return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                     OffsetIntPtr);
@@ -4032,7 +4032,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
     // The following transforms are only safe if the ptrtoint cast
     // doesn't truncate the pointers.
     if (Ops[1]->getType()->getScalarSizeInBits() ==
-        Q.DL.getPointerSizeInBits(AS)) {
+        Q.DL.getIndexSizeInBits(AS)) {
       auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
         if (match(P, m_Zero()))
           return Constant::getNullValue(GEPTy);
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index a7d07c0b618..3f03d53778f 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -150,7 +150,7 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
   // Require ABI alignment for loads without alignment specification
   const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);

-  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
+  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                    DL.getTypeStoreSize(Ty));
   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                             DT);
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 427e6fd3ace..172c86eb464 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -544,7 +544,6 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
         Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
     Value *UseZero =
         Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
-    ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
     return Builder.CreateSelect(UseZero, ConstantInt::get(ResultType, 0),
                                 ResultSize);
   }
@@ -577,7 +576,7 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
 }

 SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
-  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
+  IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
   Zero = APInt::getNullValue(IntTyBits);

   V = V->stripPointerCasts();
@@ -747,7 +746,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {

 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
-  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
+  APInt Offset(IntTyBits, 0);
   if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
     return unknown();

@@ -835,7 +834,7 @@ ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(

 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
   // XXX - Are vectors of pointers possible here?
-  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
+  IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
   Zero = ConstantInt::get(IntTy, 0);

   SizeOffsetEvalType Result = compute_(V);
@@ -939,12 +938,12 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
   }

   Value *FirstArg = CS.getArgument(FnData->FstParam);
-  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
+  FirstArg = Builder.CreateZExt(FirstArg, IntTy);
   if (FnData->SndParam < 0)
     return std::make_pair(FirstArg, Zero);

   Value *SecondArg = CS.getArgument(FnData->SndParam);
-  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
+  SecondArg = Builder.CreateZExt(SecondArg, IntTy);
   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
   return std::make_pair(Size, Zero);

diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index ca4fb9371b8..d635afb0a29 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3495,7 +3495,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
   // getSCEV(Base)->getType() has the same address space as Base->getType()
   // because SCEV::getType() preserves the address space.
-  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
+  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
   // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
   // instruction to its SCEV, because the Instruction may be guarded by control
   // flow and the no-overflow bits may not be valid for the expression in any
@@ -3504,7 +3504,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
   SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                              : SCEV::FlagAnyWrap;

-  const SCEV *TotalOffset = getZero(IntIdxTy);
+  const SCEV *TotalOffset = getZero(IntPtrTy);
   // The array size is unimportant. The first thing we do on CurTy is getting
   // its element type.
   Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
@@ -3514,7 +3514,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // For a struct, add the member offset.
       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
       unsigned FieldNo = Index->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
@@ -3525,9 +3525,9 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
       // Update CurTy to its element type.
       CurTy = cast<SequentialType>(CurTy)->getElementType();
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
+      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
       // Getelementptr indices are signed.
-      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
+      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

       // Multiply the index by the element size to compute the element offset.
       const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
@@ -3786,7 +3786,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
 /// Return a type with the same bitwidth as the given type and which represents
 /// how SCEV will treat the given type, for which isSCEVable must return
-/// true. For pointer types, this is the pointer index sized integer type.
+/// true. For pointer types, this is the pointer-sized integer type.
 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
@@ -3795,7 +3795,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {

   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  return getDataLayout().getIndexType(Ty);
+  return getDataLayout().getIntPtrType(Ty);
 }

 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
@@ -5726,15 +5726,6 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
              "generalize as needed!");
       unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
-      // If the pointer size is larger than the index size type, this can cause
-      // NS to be larger than BitWidth. So compensate for this.
-      if (U->getType()->isPointerTy()) {
-        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
-        int ptrIdxDiff = ptrSize - BitWidth;
-        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
-          NS -= ptrIdxDiff;
-      }
-
       if (NS > 1)
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
diff --git a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index dc5d02aa3a3..bceec921188 100644
--- a/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -414,7 +414,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);

-  Type *IntIdxTy = DL.getIndexType(PTy);
+  Type *IntPtrTy = DL.getIntPtrType(PTy);

   // Descend down the pointer's type and attempt to convert the other
   // operands into GEP indices, at each level. The first index in a GEP
@@ -426,7 +426,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     // array indexing.
     SmallVector<const SCEV *, 8> ScaledOps;
     if (ElTy->isSized()) {
-      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
+      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
       if (!ElSize->isZero()) {
         SmallVector<const SCEV *, 8> NewOps;
         for (const SCEV *Op : Ops) {
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ab021aece2f..59e9d2eb928 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -90,7 +90,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;

-  return DL.getPointerTypeSizeInBits(Ty);
+  return DL.getIndexTypeSizeInBits(Ty);
 }

 namespace {
@@ -1137,7 +1137,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
     // which fall through here.
     Type *ScalarTy = SrcTy->getScalarType();
     SrcBitWidth = ScalarTy->isPointerTy() ?
-      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
+      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
       Q.DL.getTypeSizeInBits(ScalarTy);

     assert(SrcBitWidth && "SrcBitWidth can't be zero");
@@ -1664,7 +1664,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,

   Type *ScalarTy = V->getType()->getScalarType();
   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
-    Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
   (void)BitWidth;
   (void)ExpectedWidth;
@@ -2409,7 +2409,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,

   Type *ScalarTy = V->getType()->getScalarType();
   unsigned TyBits = ScalarTy->isPointerTy() ?
-    Q.DL.getPointerTypeSizeInBits(ScalarTy) :
+    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
     Q.DL.getTypeSizeInBits(ScalarTy);

   unsigned Tmp, Tmp2;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cb0450cbfa8..9ca51e72ec7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9322,8 +9322,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
   const GlobalValue *GV = nullptr;
   int64_t GVOffset = 0;
   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
-    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
-    KnownBits Known(PtrWidth);
+    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
+    KnownBits Known(IdxWidth);
     llvm::computeKnownBits(GV, Known, getDataLayout());
     unsigned AlignBits = Known.countMinTrailingZeros();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 94e0740663c..5fe7a2e94b6 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -768,13 +768,13 @@ unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {

 IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
                                        unsigned AddressSpace) const {
-  return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+  return IntegerType::get(C, getIndexSizeInBits(AddressSpace));
 }

 Type *DataLayout::getIntPtrType(Type *Ty) const {
   assert(Ty->isPtrOrPtrVectorTy() &&
          "Expected a pointer or pointer vector type.");
-  unsigned NumBits = getPointerTypeSizeInBits(Ty);
+  unsigned NumBits = getIndexTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
     return VectorType::get(IntTy, VecTy->getNumElements());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index e42cd7555a0..5112fb1a6c3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1832,7 +1832,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
   Type *Ty = CI.getType();
   unsigned AS = CI.getPointerAddressSpace();

-  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
+  if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
     return commonPointerCastTransforms(CI);

   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 4abac988c2e..fd5a4682aa2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4930,7 +4930,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
   // Get scalar or pointer size.
   unsigned BitWidth = Ty->isIntOrIntVectorTy() ?
                      Ty->getScalarSizeInBits()
-                      : DL.getPointerTypeSizeInBits(Ty->getScalarType());
+                      : DL.getIndexTypeSizeInBits(Ty->getScalarType());
   if (!BitWidth)
     return nullptr;

diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b1b87f81000..92783504ace 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -901,12 +901,12 @@ bool LoopIdiomRecognize::processLoopStridedStore(
   SCEVExpander Expander(*SE, *DL, "loop-idiom");

   Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
-  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());
+  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);

   const SCEV *Start = Ev->getStart();
   // Handle negative strided loops.
   if (NegStride)
-    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);
+    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);

   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -934,7 +934,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(

   // Okay, everything looks good, insert the memset.
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);

   // TODO: ideally we should still be able to generate memset if SCEV expander
   // is taught to generate the dependencies at the latest point.
@@ -942,7 +942,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     return false;

   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

   CallInst *NewCall;
   if (SplatValue) {
@@ -955,7 +955,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
     Module *M = TheStore->getModule();
     StringRef FuncName = "memset_pattern16";
     FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
-                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
+                                                Int8PtrTy, Int8PtrTy, IntPtr);
     inferLibFuncAttributes(M, FuncName, *TLI);

     // Otherwise we should form a memset_pattern16.  PatternValue is known to be
@@ -1022,11 +1022,11 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,

   const SCEV *StrStart = StoreEv->getStart();
   unsigned StrAS = SI->getPointerAddressSpace();
-  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));
+  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);

   // Handle negative strided loops.
   if (NegStride)
-    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);
+    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);

   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
   // this into a memcpy in the loop preheader now if we want.  However, this
@@ -1052,7 +1052,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,

   // Handle negative strided loops.
   if (NegStride)
-    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);
+    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);

   // For a memcpy, we have to make sure that the input array is not being
   // mutated by the loop.
@@ -1074,10 +1074,10 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,

   // Okay, everything is safe, we can transform this!
   const SCEV *NumBytesS =
-      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);
+      getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);

   Value *NumBytes =
-      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());
+      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());

   CallInst *NewCall = nullptr;
   // Check whether to generate an unordered atomic memcpy:
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index d5690a04f53..41f1dd951a8 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -2579,7 +2579,7 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
   if (!NewTy->isPointerTy())
     return;

-  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
+  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
     MDNode *NN = MDNode::get(OldLI.getContext(), None);
     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);