path: root/llvm/lib/Analysis
author     Chandler Carruth <chandlerc@gmail.com>   2012-11-01 09:14:31 +0000
committer  Chandler Carruth <chandlerc@gmail.com>   2012-11-01 09:14:31 +0000
commit     5da3f0512e783ff333968cc9bc6ddd261ef540da (patch)
tree       ade57846afc60573291dc4ad8e5c497768def89d /llvm/lib/Analysis
parent     7ec5085e0158276fe4d918f212ff9919c6d11f59 (diff)
download   bcm5719-llvm-5da3f0512e783ff333968cc9bc6ddd261ef540da.tar.gz
           bcm5719-llvm-5da3f0512e783ff333968cc9bc6ddd261ef540da.zip
Revert the majority of the next patch in the address space series:
r165941: Resubmit the changes to llvm core to update the functions to
support different pointer sizes on a per address space basis.

Despite this commit log, this change primarily changed stuff outside of
VMCore, and those changes do not carry any tests for correctness (or even
plausibility), and we have consistently found questionable or flat out
incorrect cases in these changes. Most of them are probably correct, but
we need to devise a system that makes it more clear when we have handled
the address space concerns correctly, and ideally each pass that gets
updated would receive an accompanying test case that exercises that pass
specifically w.r.t. alternate address spaces.

However, from this commit, I have retained the new C API entry points.
Those were an orthogonal change that probably should have been split
apart, but they seem entirely good.

In several places the changes were very obvious cleanups with no actual
multiple address space code added; these I have not reverted when I
spotted them.

In a few other places there were merge conflicts due to a cleaner
solution being implemented later, often not using address spaces at all.
In those cases, I've preserved the new code which isn't address space
dependent.

This is part of my ongoing effort to clean out the partial address space
code, which carries high risk and low test coverage and is not likely to
be finished before the 3.2 release, which looms closer. Duncan and I
would both like to see the above issues addressed before we return to
these changes.

llvm-svn: 167222
Diffstat (limited to 'llvm/lib/Analysis')
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp   |  7
-rw-r--r--  llvm/lib/Analysis/CodeMetrics.cpp          |  6
-rw-r--r--  llvm/lib/Analysis/ConstantFolding.cpp      | 12
-rw-r--r--  llvm/lib/Analysis/InlineCost.cpp           | 15
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp  | 11
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp        |  6
6 files changed, 20 insertions, 37 deletions
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 36903f94e25..263bfc031fc 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -286,8 +286,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
V = GEPOp->getOperand(0);
continue;
}
-
- unsigned AS = GEPOp->getPointerAddressSpace();
+
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
gep_type_iterator GTI = gep_type_begin(GEPOp);
for (User::const_op_iterator I = GEPOp->op_begin()+1,
@@ -316,7 +315,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
- if (TD->getPointerSizeInBits(AS) > Width)
+ if (TD->getPointerSizeInBits() > Width)
Extension = EK_SignExt;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
@@ -345,7 +344,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// Make sure that we have a scale that makes sense for this target's
// pointer size.
- if (unsigned ShiftBits = 64-TD->getPointerSizeInBits(AS)) {
+ if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
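
[Editor's note: the scale clamp kept in the last hunk is a shift-pair sign
extension. A minimal standalone sketch of the idea, with a hypothetical
helper name and no LLVM dependency:]

#include <cassert>
#include <cstdint>
#include <iostream>

// Sketch of the trick in DecomposeGEPExpression above: shift the value's
// low PointerBits bits up to the top, then arithmetic-shift back down so
// the result is sign-extended from the pointer width. This makes GEP
// scales wrap the same way the target's pointer arithmetic would.
static int64_t clampToPointerWidth(uint64_t Scale, unsigned PointerBits) {
  assert(PointerBits > 0 && PointerBits <= 64 && "bad pointer width");
  if (unsigned ShiftBits = 64 - PointerBits) {
    Scale <<= ShiftBits;                              // drop high bits
    Scale = (uint64_t)((int64_t)Scale >> ShiftBits);  // sign-extend back
  }
  return (int64_t)Scale;
}

int main() {
  std::cout << clampToPointerWidth(0x100000001ULL, 32) << '\n'; // 1
  std::cout << clampToPointerWidth(0xFFFFFFFFULL, 32) << '\n';  // -1
}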
diff --git a/llvm/lib/Analysis/CodeMetrics.cpp b/llvm/lib/Analysis/CodeMetrics.cpp
index d6692684960..651a54be1b9 100644
--- a/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/llvm/lib/Analysis/CodeMetrics.cpp
@@ -91,16 +91,14 @@ bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
// which doesn't contain values outside the range of a pointer.
if (isa<IntToPtrInst>(CI) && TD &&
TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
- Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits(
- cast<IntToPtrInst>(CI)->getAddressSpace()))
+ Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
return true;
// A ptrtoint cast is free so long as the result is large enough to store
// the pointer, and a legal integer type.
if (isa<PtrToIntInst>(CI) && TD &&
TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
- Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits(
- cast<PtrToIntInst>(CI)->getPointerAddressSpace()))
+ Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
return true;
// trunc to a native type is free (assuming the target has compare and
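
[Editor's note: for context, a minimal sketch of the free-cast test this
hunk reverts, written against the modern DataLayout API with a
hypothetical helper name. The per-address-space query shown here is what
the reverted code did; the flat getPointerSizeInBits() call it restores
defaults to address space 0:]

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: a pointer<->integer cast is "free" when the integer side is a
// legal integer type and its width is compatible with the pointer width.
static bool isFreePointerCast(const CastInst *CI, const DataLayout &DL) {
  if (const auto *ITP = dyn_cast<IntToPtrInst>(CI)) {
    // int -> ptr: free if the integer cannot hold values a pointer can't.
    unsigned Bits = ITP->getSrcTy()->getScalarSizeInBits();
    return DL.isLegalInteger(Bits) &&
           Bits <= DL.getPointerSizeInBits(ITP->getAddressSpace());
  }
  if (const auto *PTI = dyn_cast<PtrToIntInst>(CI)) {
    // ptr -> int: free if the integer is wide enough to hold a pointer.
    unsigned Bits = PTI->getDestTy()->getScalarSizeInBits();
    return DL.isLegalInteger(Bits) &&
           Bits >= DL.getPointerSizeInBits(PTI->getPointerAddressSpace());
  }
  return false;
}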
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 146897ad675..b7bf044a368 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -916,11 +916,10 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
if (TD && CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
- unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
- if (TD->getPointerSizeInBits(AS) < InWidth) {
+ if (TD->getPointerSizeInBits() < InWidth) {
Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
- TD->getPointerSizeInBits(AS)));
+ TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
}
// Do a zext or trunc to get to the dest size.
@@ -933,10 +932,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
// the int size is >= the ptr size. This requires knowing the width of a
// pointer, so it can't be done in ConstantExpr::getCast.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
- if (TD && CE->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerSizeInBits(
- cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace())
- <= CE->getType()->getScalarSizeInBits())
+ if (TD &&
+ TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
+ CE->getOpcode() == Instruction::PtrToInt)
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
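
[Editor's note: the masking in the first hunk matters because an inttoptr
from a wider integer wraps to the pointer width before the value is
resized to the destination type. A minimal APInt-level sketch of those
semantics, with a hypothetical helper name:]

#include "llvm/ADT/APInt.h"

using namespace llvm;

// Sketch: model folding ptrtoint(inttoptr(In)) to DestBits. Bits above
// the pointer width are cleared first, because the round trip through a
// pointer would have dropped them; then the value is resized.
static APInt foldIntPtrRoundTrip(APInt In, unsigned PointerBits,
                                 unsigned DestBits) {
  if (PointerBits < In.getBitWidth())
    In &= APInt::getLowBitsSet(In.getBitWidth(), PointerBits);
  return In.zextOrTrunc(DestBits);
}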
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 95e58022ca1..5f51f775f14 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -243,8 +243,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
if (!TD)
return false;
- unsigned AS = GEP.getPointerAddressSpace();
- unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD->getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -392,8 +391,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- unsigned AS = I.getPointerAddressSpace();
- if (TD && IntegerSize >= TD->getPointerSizeInBits(AS)) {
+ if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
@@ -427,8 +425,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- unsigned AS = I.getAddressSpace();
- if (TD && IntegerSize <= TD->getPointerSizeInBits(AS)) {
+ if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -763,8 +760,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
if (!TD || !V->getType()->isPointerTy())
return 0;
- unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();;
- unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD->getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@@ -828,8 +824,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
- unsigned AS = PTy->getAddressSpace();
- unsigned PointerSize = TD->getPointerSizeInBits(AS);
+ unsigned PointerSize = TD->getPointerSizeInBits();
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
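
[Editor's note: the byval cost model in the last hunk charges one store
per pointer-sized chunk, using a ceiling division. A standalone sketch
with a hypothetical helper name:]

#include <cstdint>

// Sketch: estimate the stores needed to copy a byval argument by rounding
// the type size up to the next multiple of the pointer size.
static uint64_t numByValStores(uint64_t TypeSizeBits,
                               uint64_t PointerSizeBits) {
  return (TypeSizeBits + PointerSizeBits - 1) / PointerSizeBits;
}
// e.g. a 96-bit struct with 64-bit pointers: (96 + 63) / 64 = 2 stores.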
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 8e326122fa5..b3d62487fc1 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -666,8 +666,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// 'Offset' APInt must be the bitwidth of the target's pointer size.
static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) {
- unsigned AS = GEP->getPointerAddressSpace();
- unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
gep_type_iterator GTI = gep_type_begin(GEP);
@@ -697,14 +696,12 @@ static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
-/// FIXME: This function also exists in InlineCost.cpp.
static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) {
if (!V->getType()->isPointerTy())
return 0;
- unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();;
- unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@@ -1880,9 +1877,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
- Q.TD->getPointerSizeInBits(
- cast<PtrToIntInst>(LI)->getPointerAddressSpace()) ==
- DstTy->getPrimitiveSizeInBits()) {
+ Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
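
[Editor's note: accumulateGEPOffset, whose pointer-width query this hunk
flattens, walks a GEP's indices and adds up the constant byte offset. A
sketch of that pattern against the modern API, under a hypothetical
standalone name; the in-tree versions in InlineCost.cpp and
InstructionSimplify.cpp differ in minor details:]

#include <cassert>
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Sketch: accumulate a GEP's constant byte offset into an APInt whose
// width matches the pointer size in the GEP's address space. Returns
// false if any index is non-constant.
static bool accumulateConstantGEPOffset(const DataLayout &DL,
                                        GEPOperator *GEP, APInt &Offset) {
  assert(Offset.getBitWidth() ==
             DL.getPointerSizeInBits(GEP->getPointerAddressSpace()) &&
         "Offset width must match the pointer width");
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (auto I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
       ++I, ++GTI) {
    auto *OpC = dyn_cast<ConstantInt>(*I);
    if (!OpC)
      return false;                       // non-constant index: give up
    if (OpC->isZero())
      continue;
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct field: add its fixed offset from the struct layout.
      uint64_t FieldOffset =
          DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      Offset += APInt(Offset.getBitWidth(), FieldOffset);
      continue;
    }
    // Sequential index: scale by the element's allocation size.
    uint64_t ElemBytes = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) *
              APInt(Offset.getBitWidth(), ElemBytes);
  }
  return true;
}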
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2c3ac0e0756..3beb373dc5c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -40,8 +40,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
- return TD ?
- TD->getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace()) : 0;
+ return TD ? TD->getPointerSizeInBits() : 0;
}
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
@@ -1620,8 +1619,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
// Re-sign extend from the pointer size if needed to get overflow edge cases
// right.
- unsigned AS = GEP->getPointerAddressSpace();
- unsigned PtrSize = TD.getPointerSizeInBits(AS);
+ unsigned PtrSize = TD.getPointerSizeInBits();
if (PtrSize < 64)
Offset = SignExtend64(Offset, PtrSize);
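
[Editor's note: the sign extension in the final hunk keeps wraparound
correct when pointers are narrower than the 64-bit accumulator. A minimal
sketch with a hypothetical helper name, using llvm::SignExtend64:]

#include <cstdint>
#include "llvm/Support/MathExtras.h"

// Sketch: an offset accumulated in 64 bits is re-extended from the
// pointer width, so that overflow in a narrow address space turns into
// the corresponding negative offset.
static int64_t canonicalizeOffset(int64_t Offset, unsigned PtrSizeBits) {
  if (PtrSizeBits < 64)
    Offset = llvm::SignExtend64(Offset, PtrSizeBits);
  return Offset;
}
// e.g. with 32-bit pointers an accumulated 0xFFFFFFFF becomes -1.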