summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Transforms
diff options
context:
space:
mode:
authorGuozhi Wei <carrot@google.com>2016-10-25 20:43:42 +0000
committerGuozhi Wei <carrot@google.com>2016-10-25 20:43:42 +0000
commitae541f6a71ebffc96411fc6ce7437ad1cd03181c (patch)
treea39b9b89758a8da7c59c13ef8c86ce239b71c252 /llvm/lib/Transforms
parentb3f709e10f37225ae65c1d48c4623f6abc2cac1e (diff)
downloadbcm5719-llvm-ae541f6a71ebffc96411fc6ce7437ad1cd03181c.tar.gz
bcm5719-llvm-ae541f6a71ebffc96411fc6ce7437ad1cd03181c.zip
[InstCombine] Resubmit the combine of A->B->A BitCast and fix for pr27996
The original patch of the A->B->A BitCast optimization was reverted by r274094 because it may cause an infinite loop inside the compiler (https://llvm.org/bugs/show_bug.cgi?id=27996). The problem occurs with the following code: xB = load (type B); xA = load (type A); yA = (A)xB; // B -> A zAn = PHI[yA, xA]; // PHI zBn = (B)zAn; // A -> B store zAn; store zBn; optimizeBitCastFromPhi generates zBn = (B)zAn; // A -> B and expects it to be combined with the following store instruction into another "store zAn". Unfortunately, before combineStoreToValueType is called on the store instruction, optimizeBitCastFromPhi is called on the new BitCast again, and this pattern repeats indefinitely. optimizeBitCastFromPhi only generates BitCasts for load/store instructions; only a BitCast before a store can cause the re-execution of optimizeBitCastFromPhi, and a BitCast before a store can easily be handled by InstCombineLoadStoreAlloca.cpp. So the solution to the problem is: if all users of a CI are store instructions, we should not do optimizeBitCastFromPhi on it. Then optimizeBitCastFromPhi will not be called on the new BitCast instructions. Differential Revision: https://reviews.llvm.org/D23896 llvm-svn: 285116
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp127
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineInternal.h1
2 files changed, 128 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index f05c714b661..07a373d6395 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
@@ -1779,6 +1780,127 @@ static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}
+/// Check if all users of CI are StoreInsts.
+/// Returns true for a cast whose every user is a StoreInst (vacuously true
+/// for a cast with no users). optimizeBitCastFromPhi bails out on such casts
+/// because a bitcast feeding only stores is folded by
+/// InstCombineLoadStoreAlloca.cpp instead; see the commit message (PR27996)
+/// for why handling it here would loop forever.
+static bool hasStoreUsersOnly(CastInst &CI) {
+ for (User *U : CI.users()) {
+ if (!isa<StoreInst>(U))
+ return false;
+ }
+ return true;
+}
+
+/// This function handles the following case
+///
+/// A -> B cast
+/// PHI
+/// B -> A cast
+///
+/// All the related PHI nodes can be replaced by new PHI nodes with type A.
+/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
+/// Returns the replacement instruction on success, or nullptr if the
+/// transform does not apply.
+Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
+ // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
+ // Bailing out here also breaks the infinite loop of PR27996: the bitcast
+ // this function would emit before a store is itself store-only, so it must
+ // not re-trigger this transform.
+ if (hasStoreUsersOnly(CI))
+ return nullptr;
+
+ Value *Src = CI.getOperand(0);
+ Type *SrcTy = Src->getType(); // Type B
+ Type *DestTy = CI.getType(); // Type A
+
+ SmallVector<PHINode *, 4> PhiWorklist;
+ SmallSetVector<PHINode *, 4> OldPhiNodes;
+
+ // Find all of the A->B casts and PHI nodes.
+ // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
+ // OldPhiNodes is used to track all known PHI nodes, before adding a new
+ // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
+ PhiWorklist.push_back(PN);
+ OldPhiNodes.insert(PN);
+ while (!PhiWorklist.empty()) {
+ auto *OldPN = PhiWorklist.pop_back_val();
+ for (Value *IncValue : OldPN->incoming_values()) {
+ // Constants can always be rebuilt with type A via a constant bitcast.
+ if (isa<Constant>(IncValue))
+ continue;
+
+ if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
+ // If there is a sequence of one or more load instructions, each loaded
+ // value is used as address of later load instruction, bitcast is
+ // necessary to change the value type, don't optimize it. For
+ // simplicity we give up if the load address comes from another load.
+ Value *Addr = LI->getOperand(0);
+ if (Addr == &CI || isa<LoadInst>(Addr))
+ return nullptr;
+ // A single-use, non-volatile/non-atomic load can be retyped by simply
+ // inserting one bitcast after it (done in the rewrite phase below).
+ if (LI->hasOneUse() && LI->isSimple())
+ continue;
+ // If a LoadInst has more than one use, changing the type of loaded
+ // value may create another bitcast.
+ return nullptr;
+ }
+
+ if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
+ // insert() returns true only for a PHI not seen before, so each node
+ // is queued (and thus visited) at most once even in cyclic PHI graphs.
+ if (OldPhiNodes.insert(PNode))
+ PhiWorklist.push_back(PNode);
+ continue;
+ }
+
+ auto *BCI = dyn_cast<BitCastInst>(IncValue);
+ // We can't handle other instructions.
+ if (!BCI)
+ return nullptr;
+
+ // Verify it's an A->B cast.
+ Type *TyA = BCI->getOperand(0)->getType();
+ Type *TyB = BCI->getType();
+ if (TyA != DestTy || TyB != SrcTy)
+ return nullptr;
+ }
+ }
+
+ // For each old PHI node, create a corresponding new PHI node with a type A.
+ // Operands are filled in a second pass so that forward references between
+ // the new PHIs already exist in NewPNodes.
+ SmallDenseMap<PHINode *, PHINode *> NewPNodes;
+ for (auto *OldPN : OldPhiNodes) {
+ Builder->SetInsertPoint(OldPN);
+ PHINode *NewPN = Builder->CreatePHI(DestTy, OldPN->getNumOperands());
+ NewPNodes[OldPN] = NewPN;
+ }
+
+ // Fill in the operands of new PHI nodes.
+ for (auto *OldPN : OldPhiNodes) {
+ PHINode *NewPN = NewPNodes[OldPN];
+ for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
+ Value *V = OldPN->getOperand(j);
+ Value *NewV = nullptr;
+ if (auto *C = dyn_cast<Constant>(V)) {
+ NewV = ConstantExpr::getBitCast(C, DestTy);
+ } else if (auto *LI = dyn_cast<LoadInst>(V)) {
+ // Insert the retyping bitcast immediately after the load so it is
+ // available to the PHI in the (possibly different) successor block.
+ Builder->SetInsertPoint(LI->getNextNode());
+ NewV = Builder->CreateBitCast(LI, DestTy);
+ Worklist.Add(LI);
+ } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
+ // The incoming A->B cast becomes redundant: feed its A-typed source
+ // straight into the new PHI.
+ NewV = BCI->getOperand(0);
+ } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
+ NewV = NewPNodes[PrevPN];
+ }
+ // The traversal above rejected every other incoming value kind.
+ assert(NewV);
+ NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
+ }
+ }
+
+ // If there is a store with type B, change it to type A.
+ for (User *U : PN->users()) {
+ auto *SI = dyn_cast<StoreInst>(U);
+ if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
+ Builder->SetInsertPoint(SI);
+ auto *NewBC =
+ cast<BitCastInst>(Builder->CreateBitCast(NewPNodes[PN], SrcTy));
+ SI->setOperand(0, NewBC);
+ Worklist.Add(SI);
+ // NewBC feeds only this store, so the hasStoreUsersOnly() guard above
+ // keeps this transform from re-running on it; combineStoreToValueType
+ // will fold it into the store instead.
+ assert(hasStoreUsersOnly(*NewBC));
+ }
+ }
+
+ // Replace the original B->A bitcast with the new A-typed PHI.
+ return replaceInstUsesWith(CI, NewPNodes[PN]);
+}
+
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If the operands are integer typed then apply the integer transforms,
// otherwise just apply the common ones.
@@ -1902,6 +2024,11 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
}
+ // Handle the A->B->A cast, and there is an intervening PHI node.
+ if (PHINode *PN = dyn_cast<PHINode>(Src))
+ if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
+ return I;
+
if (Instruction *I = canonicalizeBitCastExtElt(CI, *this, DL))
return I;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index dca24d2eb91..e85b3a58af5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -362,6 +362,7 @@ private:
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
+ Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
/// Determine if a pair of casts can be replaced by a single cast.
///
OpenPOWER on IntegriCloud