author     Chandler Carruth <chandlerc@gmail.com>    2014-02-26 04:25:04 +0000
committer  Chandler Carruth <chandlerc@gmail.com>    2014-02-26 04:25:04 +0000
commit     ea27cf08d823dd8d01c8093d3c59f0686cbf31d0 (patch)
tree       8944cb8f4f935801b7a4ab925d7af46f0aa48e73 /llvm/lib/Transforms/Scalar/SROA.cpp
parent     c46b6eb302d0514d8bc1eed7d6cb407069170b56 (diff)
[SROA] Use the members for New{Begin,End}Offset in the rewrite helpers
rather than passing them as arguments.

While I generally prefer actual arguments, in this case the readability loss is substantial. By using members we avoid repeatedly calculating the offsets, and once we're using members it is useful to ensure that those names *always* refer to the original-alloca-relative new offset for a rewritten slice.

No functionality changed.

Follow-up refactoring, all toward getting the address space patch merged.

llvm-svn: 202228
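For reference, the shape of the refactoring is roughly the following. This is a minimal, self-contained C++ sketch with hypothetical names (SliceRewriter, visitSlice), not the actual SROA classes: the per-slice driver computes the offsets once and stores them in members, and the rewrite helpers read those members instead of taking uint64_t arguments.

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for SROA's slice rewriter; names are illustrative.
    struct SliceRewriter {
      // Offsets of the slice currently being rewritten. Set once per slice by
      // visitSlice() and read by every rewrite helper.
      uint64_t NewBeginOffset = 0;
      uint64_t NewEndOffset = 0;

      // Before the change, a helper would take the offsets explicitly:
      //   bool rewriteIntegerStore(..., uint64_t NewBeginOffset,
      //                            uint64_t NewEndOffset);
      // After the change, it simply reads the members.
      bool rewriteIntegerStore() {
        assert(NewEndOffset > NewBeginOffset && "Empty slice!");
        // ... rewrite the store using NewBeginOffset / NewEndOffset ...
        return true;
      }

      // Per-slice driver: compute the offsets once, then dispatch to helpers.
      void visitSlice(uint64_t BeginOffset, uint64_t EndOffset) {
        // Illustrative only: the real pass derives these from the slice and
        // the new alloca; here we just record the bounds we were given.
        NewBeginOffset = BeginOffset;
        NewEndOffset = EndOffset;
        rewriteIntegerStore();
      }
    };

    int main() {
      SliceRewriter R;
      R.visitSlice(/*BeginOffset=*/16, /*EndOffset=*/24);
      return 0;
    }

Once the values live in members, every helper is guaranteed to see the same, already-computed offsets for the slice being rewritten, which is the readability and consistency win the commit message describes.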
Diffstat (limited to 'llvm/lib/Transforms/Scalar/SROA.cpp')
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp | 22
1 file changed, 8 insertions, 14 deletions
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 3f0764fdd6d..2ecc517d485 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2169,8 +2169,7 @@ private:
Pass.DeadInsts.insert(I);
}
- Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteVectorizedLoadInst() {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
@@ -2180,8 +2179,7 @@ private:
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
- Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
@@ -2205,9 +2203,9 @@ private:
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
- V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
+ V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
- V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
+ V = rewriteIntegerLoad(LI);
} else if (NewBeginOffset == NewAllocaBeginOffset &&
canConvertValue(DL, NewAllocaTy, LI.getType())) {
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
@@ -2254,9 +2252,7 @@ private:
return !LI.isVolatile() && !IsPtrAdjusted;
}
- bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
- uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
if (V->getType() != VecTy) {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
@@ -2282,8 +2278,7 @@ private:
return true;
}
- bool rewriteIntegerStore(Value *V, StoreInst &SI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset) {
+ bool rewriteIntegerStore(Value *V, StoreInst &SI) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
@@ -2329,10 +2324,9 @@ private:
}
if (VecTy)
- return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
- NewEndOffset);
+ return rewriteVectorizedStoreInst(V, SI, OldOp);
if (IntTy && V->getType()->isIntegerTy())
- return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
+ return rewriteIntegerStore(V, SI);
StoreInst *NewSI;
if (NewBeginOffset == NewAllocaBeginOffset &&