author     Chandler Carruth <chandlerc@gmail.com>  2012-11-01 09:14:31 +0000
committer  Chandler Carruth <chandlerc@gmail.com>  2012-11-01 09:14:31 +0000
commit     5da3f0512e783ff333968cc9bc6ddd261ef540da (patch)
tree       ade57846afc60573291dc4ad8e5c497768def89d /llvm/lib/Transforms
parent     7ec5085e0158276fe4d918f212ff9919c6d11f59 (diff)
Revert the majority of the next patch in the address space series:
r165941: Resubmit the changes to llvm core to update the functions to support different pointer sizes on a per address space basis.

Despite this commit log, this change primarily changed stuff outside of VMCore, and those changes do not carry any tests for correctness (or even plausibility), and we have consistently found questionable or flat out incorrect cases in these changes. Most of them are probably correct, but we need to devise a system that makes it more clear when we have handled the address space concerns correctly, and ideally each pass that gets updated would receive an accompanying test case that exercises that pass specifically w.r.t. alternate address spaces.

However, from this commit, I have retained the new C API entry points. Those were an orthogonal change that probably should have been split apart, but they seem entirely good.

In several places the changes were very obvious cleanups with no actual multiple address space code added; these I have not reverted when I spotted them.

In a few other places there were merge conflicts due to a cleaner solution being implemented later, often not using address spaces at all. In those cases, I've preserved the new code which isn't address space dependent.

This is part of my ongoing effort to clean out the partial address space code, which carries high risk and low test coverage and is not likely to be finished as the 3.2 release looms closer.

Duncan and I would both like to see the above issues addressed before we return to these changes.

llvm-svn: 167222
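For readers skimming the hunks below: every change swaps DataLayout's per-address-space pointer-size query for the default one. A minimal sketch of the two forms follows; it is not taken from this commit, and it assumes the present-day header location (llvm/IR/DataLayout.h) rather than the llvm/DataLayout.h path in use at the time. The function names are illustrative only.

    #include "llvm/IR/DataLayout.h"

    // Default query: answers for address space 0. This is the form the
    // revert falls back to throughout the hunks below.
    unsigned defaultPtrBits(const llvm::DataLayout &DL) {
      return DL.getPointerSizeInBits();   // e.g. 64 on a typical 64-bit target
    }

    // Per-address-space query: the form the reverted patches threaded through
    // the transforms, allowing differently sized pointers per address space.
    unsigned ptrBitsInAS(const llvm::DataLayout &DL, unsigned AS) {
      return DL.getPointerSizeInBits(AS); // width for the caller-supplied space
    }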
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp      10
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp    9
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp   2
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp             5
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                        10
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp                         3
6 files changed, 14 insertions, 25 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 55ee4661f09..bb59db8e7ba 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1296,16 +1296,15 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// If the source integer type is not the intptr_t type for this target, do a
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the
// cast to be exposed to other transforms.
- unsigned AS = CI.getAddressSpace();
if (TD) {
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
- TD->getPointerSizeInBits(AS)) {
+ TD->getPointerSizeInBits()) {
Value *P = Builder->CreateTrunc(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new IntToPtrInst(P, CI.getType());
}
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
- TD->getPointerSizeInBits(AS)) {
+ TD->getPointerSizeInBits()) {
Value *P = Builder->CreateZExt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new IntToPtrInst(P, CI.getType());
@@ -1372,14 +1371,13 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
// If the destination integer type is not the intptr_t type for this target,
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
// to be exposed to other transforms.
- unsigned AS = CI.getPointerAddressSpace();
if (TD) {
- if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
+ if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new TruncInst(P, CI.getType());
}
- if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
+ if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new ZExtInst(P, CI.getType());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index ac8b6c052dc..8cb4a59cba9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -365,12 +365,11 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// order the state machines in complexity of the generated code.
Value *Idx = GEP->getOperand(2);
- unsigned AS = GEP->getPointerAddressSpace();
// If the index is larger than the pointer size of the target, truncate the
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds() &&
- Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
+ Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits())
Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
// If the comparison is only true for one or two elements, emit direct
@@ -529,11 +528,10 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
}
}
- unsigned AS = cast<GetElementPtrInst>(GEP)->getPointerAddressSpace();
// Okay, we know we have a single variable index, which must be a
// pointer/array/vector index. If there is no offset, life is simple, return
// the index.
- unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
if (Offset == 0) {
// Cast to intptrty in case a truncation occurs. If an extension is needed,
// we don't need to bother extending: the extension won't affect where the
@@ -1554,8 +1552,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerSizeInBits(
- cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) ==
+ TD->getPointerSizeInBits() ==
cast<IntegerType>(DestTy)->getBitWidth()) {
Value *RHSOp = 0;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 4abaeca0c5c..adf7f0f2003 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -704,7 +704,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
BL.reset(new BlackList(ClBlackListFile));
C = &(M.getContext());
- LongSize = TD->getPointerSizeInBits(0);
+ LongSize = TD->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
IntptrPtrTy = PointerType::get(IntptrTy, 0);
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 97fff9edd68..517657cf526 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -174,11 +174,10 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
// this width can be stored. If so, check to see whether we will end up
// actually reducing the number of stores used.
unsigned Bytes = unsigned(End-Start);
- unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
- unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);
+ unsigned NumPointerStores = Bytes/TD.getPointerSize();
// Assume the remaining bytes if any are done a byte at a time.
- unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);
+ unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
// If we will reduce the # stores (according to this heuristic), do the
// transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 26d071219b0..d95c855ce7d 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -444,7 +444,6 @@ protected:
bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
GEPOffset = Offset;
- unsigned int AS = GEPI.getPointerAddressSpace();
for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
@@ -474,7 +473,7 @@ protected:
continue;
}
- APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS));
+ APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
Index *= APInt(Index.getBitWidth(),
TD.getTypeAllocSize(GTI.getIndexedType()));
Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
@@ -2395,8 +2394,7 @@ private:
Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
assert(BeginOffset >= NewAllocaBeginOffset);
- unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
- APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
+ APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
}
@@ -2793,10 +2791,8 @@ private:
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(II);
- assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
- unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
// Compute the relative offset within the transfer.
- unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
: MTO.SourceBegin));
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 9729687a83e..a954d82c05b 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -806,8 +806,7 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD) {
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
- unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
- unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
+ unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD);
unsigned TrailZ = KnownZero.countTrailingOnes();