Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r--   llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp    5
-rw-r--r--   llvm/lib/Transforms/Scalar/SROA.cpp               14
2 files changed, 13 insertions, 6 deletions
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 517657cf526..97fff9edd68 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -174,10 +174,11 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
   // this width can be stored. If so, check to see whether we will end up
   // actually reducing the number of stores used.
   unsigned Bytes = unsigned(End-Start);
-  unsigned NumPointerStores = Bytes/TD.getPointerSize();
+  unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
+  unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);
 
   // Assume the remaining bytes if any are done a byte at a time.
-  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
+  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);
 
   // If we will reduce the # stores (according to this heuristic), do the
   // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 7d2ce098aae..3e84a91c1db 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -447,6 +447,7 @@ protected:
   bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
     GEPOffset = Offset;
+    unsigned int AS = GEPI.getPointerAddressSpace();
     for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
          GTI != GTE; ++GTI) {
       ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
@@ -476,7 +477,7 @@ protected:
         continue;
       }
 
-      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS));
       Index *= APInt(Index.getBitWidth(),
                      TD.getTypeAllocSize(GTI.getIndexedType()));
       Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
@@ -1784,7 +1785,9 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
       break;
     if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
       ElementTy = SeqTy->getElementType();
-      Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
+      Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(
+                  ElementTy->isPointerTy() ?
+                  cast<PointerType>(ElementTy)->getAddressSpace(): 0), 0)));
     } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
       if (STy->element_begin() == STy->element_end())
         break; // Nothing left to descend into.
@@ -2322,7 +2325,8 @@ private:
 
   Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
     assert(BeginOffset >= NewAllocaBeginOffset);
-    APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
+    unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
+    APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
     return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
   }
 
@@ -2677,8 +2681,10 @@ private:
     const AllocaPartitioning::MemTransferOffsets &MTO
       = P.getMemTransferOffsets(II);
 
+    assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
+    unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
     // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = TD.getPointerSizeInBits();
+    unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
     APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
                                                        : MTO.SourceBegin));
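
Not part of the commit above: a minimal standalone sketch of the per-address-space DataLayout queries this patch switches to, assuming a recent LLVM tree (where the header is llvm/IR/DataLayout.h) and a made-up layout string that gives address space 1 a 32-bit pointer while address space 0 keeps a 64-bit one.

// Illustrative sketch only (not from the patch): shows why the calls above now
// pass an explicit address space instead of implicitly assuming AS 0.
// The layout string below is an assumption chosen for the example.
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // AS 0: 64-bit pointers, AS 1: 32-bit pointers.
  llvm::DataLayout DL("e-p:64:64-p1:32:32");
  llvm::outs() << DL.getPointerSizeInBits(0) << "\n";  // 64
  llvm::outs() << DL.getPointerSizeInBits(1) << "\n";  // 32
  // getPointerSize() returns bytes; AS-specific results differ accordingly.
  llvm::outs() << DL.getPointerSize(0) << " " << DL.getPointerSize(1) << "\n";  // 8 4
  return 0;
}

With such a layout, sizing memset-store heuristics or GEP offset widths by the default getPointerSize()/getPointerSizeInBits() would over- or under-count for pointers outside address space 0, which is what the changes in MemCpyOptimizer.cpp and SROA.cpp address.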