| field | value | date |
|---|---|---|
| author | Johannes Doerfert <jdoerfert@anl.gov> | 2019-07-11 01:14:48 +0000 |
| committer | Johannes Doerfert <jdoerfert@anl.gov> | 2019-07-11 01:14:48 +0000 |
| commit | 3ed286a388836d0cdd2c8a19fb4c295a19935b47 (patch) | |
| tree | f37b2e311f7a7847475e5e222331353a8d0ee2bf /llvm/lib/IR | |
| parent | 24830ea7108197c7880939aa2e32db3aa4bc6284 (diff) | |
Replace three "strip & accumulate" implementations with a single one
This patch replaces the three almost identical "strip & accumulate"
implementations for constant pointer offsets with a single one that
combines their functionality. The old interfaces are kept for now.
Differential Revision: https://reviews.llvm.org/D64468
llvm-svn: 365723
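For context, a minimal sketch of how a caller might use the unified interface. The signature comes straight from the patch below; the wrapper function `stripExample` and the names `Ptr` and `DL` are placeholders for this note, not part of the commit:

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Hypothetical caller: Ptr is some pointer-typed Value, DL is the module's
// DataLayout. The Offset APInt must be sized to the pointer's index width,
// as the assertion in the new implementation enforces.
const Value *stripExample(const Value *Ptr, const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  // AllowNonInbounds=true also strips GEPs that are not "inbounds";
  // passing false roughly matches the behavior of the old
  // stripAndAccumulateInBoundsConstantOffsets interface.
  const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds=*/true);
  // On return, Base is the underlying pointer and Offset holds the
  // accumulated constant byte offset of Ptr relative to Base.
  return Base;
}
```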
Diffstat (limited to 'llvm/lib/IR')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | llvm/lib/IR/Value.cpp | 48 |

1 file changed, 30 insertions, 18 deletions
```diff
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 9e0a43ce1e3..b7f77dc3043 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -555,13 +555,13 @@ const Value *Value::stripPointerCastsAndInvariantGroups() const {
 }
 
 const Value *
-Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
-                                                 APInt &Offset) const {
-  if (!getType()->isPointerTy())
+Value::stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
+                                         bool AllowNonInbounds) const {
+  if (!getType()->isPtrOrPtrVectorTy())
     return this;
 
-  assert(Offset.getBitWidth() == DL.getIndexSizeInBits(cast<PointerType>(
-                                     getType())->getAddressSpace()) &&
+  unsigned BitWidth = Offset.getBitWidth();
+  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
          "The offset bit width does not match the DL specification.");
 
   // Even though we don't look through PHI nodes, we could be called on an
@@ -571,27 +571,39 @@ Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
   const Value *V = this;
   do {
     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
-      if (!GEP->isInBounds())
+      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
+      if (!AllowNonInbounds && !GEP->isInBounds())
         return V;
-      APInt GEPOffset(Offset);
+
+      // If one of the values we have visited is an addrspacecast, then
+      // the pointer type of this GEP may be different from the type
+      // of the Ptr parameter which was passed to this function. This
+      // means when we construct GEPOffset, we need to use the size
+      // of GEP's pointer type rather than the size of the original
+      // pointer type.
+      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
         return V;
-      Offset = GEPOffset;
+
+      // Stop traversal if the pointer offset wouldn't fit in the bit-width
+      // provided by the Offset argument. This can happen due to AddrSpaceCast
+      // stripping.
+      if (GEPOffset.getMinSignedBits() > BitWidth)
+        return V;
+
+      Offset += GEPOffset.sextOrTrunc(BitWidth);
       V = GEP->getPointerOperand();
-    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
+               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
       V = cast<Operator>(V)->getOperand(0);
     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
-      V = GA->getAliasee();
-    } else {
-      if (const auto *Call = dyn_cast<CallBase>(V))
-        if (const Value *RV = Call->getReturnedArgOperand()) {
+      if (!GA->isInterposable())
+        V = GA->getAliasee();
+    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
+      if (const Value *RV = Call->getReturnedArgOperand())
         V = RV;
-          continue;
-        }
-
-      return V;
     }
-    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
   } while (Visited.insert(V).second);
 
   return V;
```
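A standalone illustration of why the `getMinSignedBits()` guard is needed before `sextOrTrunc`: once the walk strips an addrspacecast, a GEP can live in an address space whose index width differs from the width of the caller's `Offset`. The function name and all concrete values below are invented for this note, not taken from the patch:

```cpp
#include "llvm/ADT/APInt.h"
using namespace llvm;

void bitWidthGuardExample() {
  // Suppose a stripped addrspacecast leads to a GEP whose address space
  // uses 64-bit indices, while the caller's Offset is only 32 bits wide.
  APInt GEPOffset(64, -8, /*isSigned=*/true); // GEP offset of -8 bytes
  unsigned BitWidth = 32;                     // Offset.getBitWidth() in the caller

  // -8 needs only 4 signed bits, so narrowing to 32 bits is lossless and
  // accumulation can proceed. An offset needing more than 32 signed bits
  // would not fit, and the function above stops and returns the GEP.
  if (GEPOffset.getMinSignedBits() <= BitWidth) {
    APInt Narrowed = GEPOffset.sextOrTrunc(BitWidth);
    (void)Narrowed; // in the real code this is added into Offset
  }
}
```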

