Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp       | 110
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h | 119
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/CMakeLists.txt                  |  11
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/LLVMBuild.txt                   |  22
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp            | 401
-rw-r--r--  llvm/lib/Transforms/CMakeLists.txt                                         |   1
-rw-r--r--  llvm/lib/Transforms/IPO/LLVMBuild.txt                                      |   2
-rw-r--r--  llvm/lib/Transforms/IPO/PassManagerBuilder.cpp                             |   4
-rw-r--r--  llvm/lib/Transforms/LLVMBuild.txt                                          |   2
-rw-r--r--  llvm/lib/Transforms/Scalar/LLVMBuild.txt                                   |   2
10 files changed, 671 insertions, 3 deletions
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
new file mode 100644
index 00000000000..b7364d2ecdf
--- /dev/null
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -0,0 +1,110 @@
+//===- AggressiveInstCombine.cpp ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the aggressive expression pattern combiner classes.
+// Currently, it handles expression patterns for:
+// * Truncate instruction
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
+#include "AggressiveInstCombineInternal.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Pass.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aggressive-instcombine"
+
+namespace {
+/// Contains expression pattern combiner logic.
+/// This class provides the logic to identify expression patterns and combine
+/// them. It differs from the InstCombiner class in that each pattern combiner
+/// runs only once, as opposed to InstCombine's multiple iterations, which
+/// allows a pattern combiner to have higher complexity than the O(1) required
+/// by the instruction combiner.
+class AggressiveInstCombinerLegacyPass : public FunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
+ initializeAggressiveInstCombinerLegacyPassPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// Run all expression pattern optimizations on the given function \p F.
+ ///
+ /// \param F function to optimize.
+ /// \returns true if the IR is changed.
+ bool runOnFunction(Function &F) override;
+};
+} // namespace
+
+void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
+ AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addPreserved<AAResultsWrapperPass>();
+ AU.addPreserved<BasicAAWrapperPass>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
+}
+
+bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
+ auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ auto &DL = F.getParent()->getDataLayout();
+
+ bool MadeIRChange = false;
+
+ // Handle TruncInst patterns
+ TruncInstCombine TIC(TLI, DL);
+ MadeIRChange |= TIC.run(F);
+
+ // TODO: add more patterns to handle...
+
+ return MadeIRChange;
+}
+
+PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
+ auto &DL = F.getParent()->getDataLayout();
+ bool MadeIRChange = false;
+
+ // Handle TruncInst patterns
+ TruncInstCombine TIC(TLI, DL);
+ MadeIRChange |= TIC.run(F);
+ if (!MadeIRChange)
+ // No changes, all analyses are preserved.
+ return PreservedAnalyses::all();
+
+ // Mark all the analyses that instcombine updates as preserved.
+ PreservedAnalyses PA;
+ PA.preserveSet<CFGAnalyses>();
+ PA.preserve<AAManager>();
+ PA.preserve<GlobalsAA>();
+ return PA;
+}
+
+char AggressiveInstCombinerLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
+ "aggressive-instcombine",
+ "Combine pattern based expressions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
+ "Combine pattern based expressions", false, false)
+
+FunctionPass *llvm::createAggressiveInstCombinerPass() {
+ return new AggressiveInstCombinerLegacyPass();
+}
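
For reference, a minimal sketch (not part of this patch) of driving the new legacy pass over a module: the create function and its header path come from the file above, while the surrounding pass-manager boilerplate is assumed.

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"

    // Run aggressive-instcombine over every function definition in M.
    static void runAggressiveInstCombine(llvm::Module &M) {
      llvm::legacy::FunctionPassManager FPM(&M);
      FPM.add(llvm::createAggressiveInstCombinerPass());
      FPM.doInitialization();
      for (llvm::Function &F : M)
        if (!F.isDeclaration())
          FPM.run(F);
      FPM.doFinalization();
    }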
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
new file mode 100644
index 00000000000..63255dd28f8
--- /dev/null
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
@@ -0,0 +1,119 @@
+//===- AggressiveInstCombineInternal.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the instruction pattern combiner classes.
+// Currently, it handles expression patterns for:
+// * Truncate instruction
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Pass.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// TruncInstCombine - looks for expression dags post-dominated by trunc
+// instructions and, for each eligible dag, creates a reduced bit-width
+// expression, replaces the old expression with the new one, and removes the
+// old one.
+// An eligible expression dag is one that:
+// 1. Contains only supported instructions.
+// 2. Supported leaves: ZExtInst, SExtInst, TruncInst and Constant value.
+// 3. Can be evaluated into a type with a reduced legal bit-width (or the
+//    Trunc type).
+// 4. All instructions in the dag must not have users outside the dag.
+//    The only exception is for {ZExt, SExt}Inst with operand type equal to
+//    the new reduced type chosen in (3).
+//
+// The motivation for this optimization is that evaluating an expression using
+// a smaller bit-width is preferable, especially for vectorization, where we
+// can fit more values in one vectorized instruction. In addition, this
+// optimization may decrease the number of cast instructions, but will not
+// increase it.
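+//
+// For example (an illustrative case, assuming i16 is a legal integer type for
+// the target):
+//   %za  = zext i8 %a to i32
+//   %zb  = zext i8 %b to i32
+//   %add = add i32 %za, %zb
+//   %t   = trunc i32 %add to i16
+// can be evaluated directly in i16, making the trunc redundant:
+//   %za  = zext i8 %a to i16
+//   %zb  = zext i8 %b to i16
+//   %add = add i16 %za, %zb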
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+ class DataLayout;
+ class TargetLibraryInfo;
+
+class TruncInstCombine {
+ TargetLibraryInfo &TLI;
+ const DataLayout &DL;
+
+ /// List of all TruncInst instructions to be processed.
+ SmallVector<TruncInst *, 4> Worklist;
+
+ /// The TruncInst instruction currently being processed.
+ TruncInst *CurrentTruncInst;
+
+ /// Information for each instruction in the expression dag.
+ struct Info {
+ /// Number of LSBs that are needed to generate a valid expression.
+ unsigned ValidBitWidth = 0;
+ /// Minimum number of LSBs needed to generate the ValidBitWidth.
+ unsigned MinBitWidth = 0;
+ /// The reduced value generated to replace the old instruction.
+ Value *NewValue = nullptr;
+ };
+ /// An ordered map representing the expression dag post-dominated by the
+ /// currently processed TruncInst. It maps each instruction in the dag to its
+ /// Info structure. The map is ordered such that each instruction appears
+ /// before all other instructions in the dag that use it.
+ MapVector<Instruction *, Info> InstInfoMap;
+
+public:
+ TruncInstCombine(TargetLibraryInfo &TLI, const DataLayout &DL)
+ : TLI(TLI), DL(DL), CurrentTruncInst(nullptr) {}
+
+ /// Perform TruncInst pattern optimization on the given function.
+ bool run(Function &F);
+
+private:
+ /// Build the expression dag dominated by \p CurrentTruncInst and append it
+ /// to the InstInfoMap container.
+ ///
+ /// \return true only if we succeeded in generating an eligible
+ /// sub-expression dag.
+ bool buildTruncExpressionDag();
+
+ /// Calculate the minimal allowed bit-width of the chain ending with the
+ /// currently visited truncate's operand.
+ ///
+ /// \return the minimum number of bits to which the chain ending with the
+ /// truncate's operand can be shrunk.
+ unsigned getMinBitWidth();
+
+ /// Build an expression dag dominated by the currently processed TruncInst
+ /// and check whether it is eligible to be reduced to a smaller type.
+ ///
+ /// \return the scalar version of the new type to be used for the reduced
+ /// expression dag, or nullptr if the expression dag is not eligible
+ /// to be reduced.
+ Type *getBestTruncatedType();
+
+ /// Given a value \p V and a scalar type \p SclTy, return the generated
+ /// reduced value of \p V based on the type \p SclTy.
+ ///
+ /// \param V value to be reduced.
+ /// \param SclTy scalar version of new type to reduce to.
+ /// \return the new reduced value.
+ Value *getReducedOperand(Value *V, Type *SclTy);
+
+ /// Create a new expression dag using the reduced type \p SclTy, and replace
+ /// the old expression dag with it. Also erase all instructions in the old
+ /// dag, except those that are still needed outside the dag.
+ ///
+ /// \param SclTy scalar version of new type to reduce expression dag into.
+ void ReduceExpressionDag(Type *SclTy);
+};
+} // end namespace llvm.
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/CMakeLists.txt b/llvm/lib/Transforms/AggressiveInstCombine/CMakeLists.txt
new file mode 100644
index 00000000000..386314801e3
--- /dev/null
+++ b/llvm/lib/Transforms/AggressiveInstCombine/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_llvm_library(LLVMAggressiveInstCombine
+ AggressiveInstCombine.cpp
+ TruncInstCombine.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/AggressiveInstCombine
+
+ DEPENDS
+ intrinsics_gen
+ )
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/LLVMBuild.txt b/llvm/lib/Transforms/AggressiveInstCombine/LLVMBuild.txt
new file mode 100644
index 00000000000..c05844f33de
--- /dev/null
+++ b/llvm/lib/Transforms/AggressiveInstCombine/LLVMBuild.txt
@@ -0,0 +1,22 @@
+;===- ./lib/Transforms/AggressiveInstCombine/LLVMBuild.txt -----*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = AggressiveInstCombine
+parent = Transforms
+required_libraries = Analysis Core Support TransformUtils
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
new file mode 100644
index 00000000000..55dcb539dbb
--- /dev/null
+++ b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
@@ -0,0 +1,401 @@
+//===- TruncInstCombine.cpp -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TruncInstCombine - looks for expression dags post-dominated by TruncInst
+// and, for each eligible dag, creates a reduced bit-width expression, replaces
+// the old expression with the new one, and removes the old expression.
+// An eligible expression dag is one that:
+// 1. Contains only supported instructions.
+// 2. Supported leaves: ZExtInst, SExtInst, TruncInst and Constant value.
+// 3. Can be evaluated into a type with a reduced legal bit-width.
+// 4. All instructions in the dag must not have users outside the dag.
+//    The only exception is for {ZExt, SExt}Inst with operand type equal to
+//    the new reduced type evaluated in (3).
+//
+// The motivation for this optimization is that evaluating an expression using
+// a smaller bit-width is preferable, especially for vectorization, where we
+// can fit more values in one vectorized instruction. In addition, this
+// optimization may decrease the number of cast instructions, but will not
+// increase it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AggressiveInstCombineInternal.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRBuilder.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "aggressive-instcombine"
+
+/// Given an instruction and a container, fill the container with all operands
+/// of that instruction that are relevant to the Trunc expression dag
+/// optimization.
+static void getRelevantOperands(Instruction *I, SmallVectorImpl<Value *> &Ops) {
+ unsigned Opc = I->getOpcode();
+ switch (Opc) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ // These CastInsts are considered leaves of the evaluated expression, so
+ // their operands are not relevant.
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ Ops.push_back(I->getOperand(0));
+ Ops.push_back(I->getOperand(1));
+ break;
+ default:
+ llvm_unreachable("Unreachable!");
+ }
+}
+
+bool TruncInstCombine::buildTruncExpressionDag() {
+ SmallVector<Value *, 8> Worklist;
+ SmallVector<Instruction *, 8> Stack;
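+ // Worklist drives an iterative, stack-based DFS: an instruction is pushed
+ // onto Stack when first visited and its operands are pushed onto Worklist;
+ // it is moved into InstInfoMap only when it is seen again, i.e., after all
+ // of its operands have been handled (post-order).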
+ // Clear old expression dag.
+ InstInfoMap.clear();
+
+ Worklist.push_back(CurrentTruncInst->getOperand(0));
+
+ while (!Worklist.empty()) {
+ Value *Curr = Worklist.back();
+
+ if (isa<Constant>(Curr)) {
+ Worklist.pop_back();
+ continue;
+ }
+
+ auto *I = dyn_cast<Instruction>(Curr);
+ if (!I)
+ return false;
+
+ if (!Stack.empty() && Stack.back() == I) {
+ // Already handled all instruction operands, can remove it from both the
+ // Worklist and the Stack, and add it to the instruction info map.
+ Worklist.pop_back();
+ Stack.pop_back();
+ // Insert I to the Info map.
+ InstInfoMap.insert(std::make_pair(I, Info()));
+ continue;
+ }
+
+ if (InstInfoMap.count(I)) {
+ Worklist.pop_back();
+ continue;
+ }
+
+ // Add the instruction to the stack before we start handling its operands.
+ Stack.push_back(I);
+
+ unsigned Opc = I->getOpcode();
+ switch (Opc) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ // trunc(trunc(x)) -> trunc(x)
+ // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
+ // trunc(ext(x)) -> trunc(x) if the source type is larger than the new
+ // dest
+ break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ SmallVector<Value *, 2> Operands;
+ getRelevantOperands(I, Operands);
+ for (Value *Operand : Operands)
+ Worklist.push_back(Operand);
+ break;
+ }
+ default:
+ // TODO: Can handle more cases here:
+ // 1. select, shufflevector, extractelement, insertelement
+ // 2. udiv, urem
+ // 3. shl, lshr, ashr
+ // 4. phi node(and loop handling)
+ // ...
+ return false;
+ }
+ }
+ return true;
+}
+
+unsigned TruncInstCombine::getMinBitWidth() {
+ SmallVector<Value *, 8> Worklist;
+ SmallVector<Instruction *, 8> Stack;
+
+ Value *Src = CurrentTruncInst->getOperand(0);
+ Type *DstTy = CurrentTruncInst->getType();
+ unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
+ unsigned OrigBitWidth =
+ CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();
+
+ if (isa<Constant>(Src))
+ return TruncBitWidth;
+
+ Worklist.push_back(Src);
+ InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;
+
+ while (!Worklist.empty()) {
+ Value *Curr = Worklist.back();
+
+ if (isa<Constant>(Curr)) {
+ Worklist.pop_back();
+ continue;
+ }
+
+ // Otherwise, it must be an instruction.
+ auto *I = cast<Instruction>(Curr);
+
+ auto &Info = InstInfoMap[I];
+
+ SmallVector<Value *, 2> Operands;
+ getRelevantOperands(I, Operands);
+
+ if (!Stack.empty() && Stack.back() == I) {
+ // Already handled all instruction operands, so we can remove it from both
+ // the Worklist and the Stack, and update MinBitWidth.
+ Worklist.pop_back();
+ Stack.pop_back();
+ for (auto *Operand : Operands)
+ if (auto *IOp = dyn_cast<Instruction>(Operand))
+ Info.MinBitWidth =
+ std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
+ continue;
+ }
+
+ // Add the instruction to the stack before we start handling its operands.
+ Stack.push_back(I);
+ unsigned ValidBitWidth = Info.ValidBitWidth;
+
+ // Update minimum bit-width before handling its operands. This is required
+ // when the instruction is part of a loop.
+ Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);
+
+ for (auto *Operand : Operands)
+ if (auto *IOp = dyn_cast<Instruction>(Operand)) {
+ // If we already calculated the minimum bit-width for this valid
+ // bit-width, or for a smaller valid bit-width, then just keep the
+ // answer we already calculated.
+ unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
+ if (IOpBitwidth >= ValidBitWidth)
+ continue;
+ InstInfoMap[IOp].ValidBitWidth = std::max(ValidBitWidth, IOpBitwidth);
+ Worklist.push_back(IOp);
+ }
+ }
+ unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
+ assert(MinBitWidth >= TruncBitWidth);
+
+ if (MinBitWidth > TruncBitWidth) {
+ // In this case reducing expression with vector type might generate a new
+ // vector type, which is not preferable as it might result in generating
+ // sub-optimal code.
+ if (DstTy->isVectorTy())
+ return OrigBitWidth;
+ // Use the smallest integer type in the range [MinBitWidth, OrigBitWidth).
+ Type *Ty = DL.getSmallestLegalIntType(DstTy->getContext(), MinBitWidth);
+ // Update the minimum bit-width with the bit-width of the new destination
+ // type if we succeeded in finding one; otherwise, use the original bit-width.
+ MinBitWidth = Ty ? Ty->getScalarSizeInBits() : OrigBitWidth;
+ } else { // MinBitWidth == TruncBitWidth
+ // In this case the expression can be evaluated with the trunc instruction
+ // destination type, and trunc instruction can be omitted. However, we
+ // should not perform the evaluation if the original type is a legal scalar
+ // type and the target type is illegal.
+ bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
+ bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
+ if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
+ return OrigBitWidth;
+ }
+ return MinBitWidth;
+}
+
+Type *TruncInstCombine::getBestTruncatedType() {
+ if (!buildTruncExpressionDag())
+ return nullptr;
+
+ // We don't want to duplicate instructions, since that isn't profitable.
+ // Thus, we can't shrink something that has multiple users, unless all of its
+ // users are post-dominated by the trunc instruction, i.e., were visited
+ // during the expression evaluation.
+ unsigned DesiredBitWidth = 0;
+ for (auto Itr : InstInfoMap) {
+ Instruction *I = Itr.first;
+ if (I->hasOneUse())
+ continue;
+ bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
+ for (auto *U : I->users())
+ if (auto *UI = dyn_cast<Instruction>(U))
+ if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
+ if (!IsExtInst)
+ return nullptr;
+ // If this is an extension from the dest type, we can eliminate it, even if
+ // it has multiple users. Thus, update the DesiredBitWidth and validate that
+ // all extension instructions agree on the same DesiredBitWidth.
+ unsigned ExtInstBitWidth =
+ I->getOperand(0)->getType()->getScalarSizeInBits();
+ if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
+ return nullptr;
+ DesiredBitWidth = ExtInstBitWidth;
+ }
+ }
+
+ unsigned OrigBitWidth =
+ CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();
+
+ // Calculate the minimum bit-width allowed for shrinking the currently
+ // visited truncate's operand.
+ unsigned MinBitWidth = getMinBitWidth();
+
+ // Check that we can shrink to a smaller bit-width than the original one, and
+ // that it matches the DesiredBitWidth, if such exists.
+ if (MinBitWidth >= OrigBitWidth ||
+ (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
+ return nullptr;
+
+ return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
+}
+
+/// Given a reduced scalar type \p Ty and a value \p V, return a reduced type
+/// for \p V: if \p V has a vector type, return the vector version of \p Ty;
+/// otherwise, return \p Ty itself.
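+/// For example, for \p V of type <4 x i32> and \p Ty == i16, the reduced type
+/// is <4 x i16>.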
+static Type *getReducedType(Value *V, Type *Ty) {
+ assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
+ if (auto *VTy = dyn_cast<VectorType>(V->getType()))
+ return VectorType::get(Ty, VTy->getNumElements());
+ return Ty;
+}
+
+Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
+ Type *Ty = getReducedType(V, SclTy);
+ if (auto *C = dyn_cast<Constant>(V)) {
+ C = ConstantExpr::getIntegerCast(C, Ty, false);
+ // If we got a constantexpr back, try to simplify it with DL info.
+ if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
+ C = FoldedC;
+ return C;
+ }
+
+ auto *I = cast<Instruction>(V);
+ Info Entry = InstInfoMap.lookup(I);
+ assert(Entry.NewValue);
+ return Entry.NewValue;
+}
+
+void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
+ for (auto &Itr : InstInfoMap) { // Forward
+ Instruction *I = Itr.first;
+
+ assert(!InstInfoMap.lookup(I).NewValue && "Instruction has been evaluated");
+
+ IRBuilder<> Builder(I);
+ Value *Res = nullptr;
+ unsigned Opc = I->getOpcode();
+ switch (Opc) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt: {
+ Type *Ty = getReducedType(I, SclTy);
+ // If the source type of the cast is the type we're trying for then we can
+ // just return the source. There's no need to insert it because it is not
+ // new.
+ if (I->getOperand(0)->getType() == Ty) {
+ InstInfoMap[I].NewValue = I->getOperand(0);
+ continue;
+ }
+ // Otherwise, must be the same type of cast, so just reinsert a new one.
+ // This also handles the case of zext(trunc(x)) -> zext(x).
+ Res = Builder.CreateIntCast(I->getOperand(0), Ty,
+ Opc == Instruction::SExt);
+
+ // Update Worklist entries with new value if needed.
+ if (auto *NewCI = dyn_cast<TruncInst>(Res)) {
+ auto Entry = find(Worklist, I);
+ if (Entry != Worklist.end())
+ *Entry = NewCI;
+ }
+ break;
+ }
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
+ Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
+ Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
+ break;
+ }
+ default:
+ llvm_unreachable("Unhandled instruction");
+ }
+
+ InstInfoMap[I].NewValue = Res;
+ cast<Instruction>(Res)->takeName(I);
+ }
+
+ Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
+ Type *DstTy = CurrentTruncInst->getType();
+ if (Res->getType() != DstTy) {
+ IRBuilder<> Builder(CurrentTruncInst);
+ Res = Builder.CreateIntCast(Res, DstTy, false);
+ cast<Instruction>(Res)->takeName(CurrentTruncInst);
+ }
+ CurrentTruncInst->replaceAllUsesWith(Res);
+
+ // Erase the old expression dag, which was replaced by the reduced expression
+ // dag. We iterate backward, which means we visit an instruction before we
+ // visit any of its operands; this way, when we get to an operand, we have
+ // already removed the instructions (from the expression dag) that use it.
+ CurrentTruncInst->eraseFromParent();
+ for (auto I = InstInfoMap.rbegin(), E = InstInfoMap.rend(); I != E; ++I) {
+ // We still need to check that the instruction has no users before we erase
+ // it, because a {SExt, ZExt}Inst might have other users that were not
+ // reduced; in such a case, we need to keep that instruction.
+ if (!I->first->getNumUses())
+ I->first->eraseFromParent();
+ }
+}
+
+bool TruncInstCombine::run(Function &F) {
+ bool MadeIRChange = false;
+
+ // Collect all TruncInst in the function into the Worklist for evaluation.
+ for (auto &BB : F)
+ for (auto &I : BB)
+ if (auto *CI = dyn_cast<TruncInst>(&I))
+ Worklist.push_back(CI);
+
+ // Process all TruncInst in the Worklist, for each instruction:
+ // 1. Check if it dominates an eligible expression dag to be reduced.
+ // 2. Create a reduced expression dag and replace the old one with it.
+ while (!Worklist.empty()) {
+ CurrentTruncInst = Worklist.pop_back_val();
+
+ if (Type *NewDstSclTy = getBestTruncatedType()) {
+ DEBUG(dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
+ "dominated by: "
+ << CurrentTruncInst << '\n');
+ ReduceExpressionDag(NewDstSclTy);
+ MadeIRChange = true;
+ }
+ }
+
+ return MadeIRChange;
+}
diff --git a/llvm/lib/Transforms/CMakeLists.txt b/llvm/lib/Transforms/CMakeLists.txt
index 67bdeb27212..74db9e53304 100644
--- a/llvm/lib/Transforms/CMakeLists.txt
+++ b/llvm/lib/Transforms/CMakeLists.txt
@@ -1,5 +1,6 @@
add_subdirectory(Utils)
add_subdirectory(Instrumentation)
+add_subdirectory(AggressiveInstCombine)
add_subdirectory(InstCombine)
add_subdirectory(Scalar)
add_subdirectory(IPO)
diff --git a/llvm/lib/Transforms/IPO/LLVMBuild.txt b/llvm/lib/Transforms/IPO/LLVMBuild.txt
index a8b0f32fd78..54ce23876e6 100644
--- a/llvm/lib/Transforms/IPO/LLVMBuild.txt
+++ b/llvm/lib/Transforms/IPO/LLVMBuild.txt
@@ -20,4 +20,4 @@ type = Library
name = IPO
parent = Transforms
library_name = ipo
-required_libraries = Analysis BitReader BitWriter Core InstCombine IRReader Linker Object ProfileData Scalar Support TransformUtils Vectorize Instrumentation
+required_libraries = AggressiveInstCombine Analysis BitReader BitWriter Core InstCombine IRReader Linker Object ProfileData Scalar Support TransformUtils Vectorize Instrumentation
diff --git a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 3855e6245d8..f8dfa4d3fe7 100644
--- a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -318,6 +318,8 @@ void PassManagerBuilder::addFunctionSimplificationPasses(
MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
// Combine silly seq's
+ if (OptLevel > 2)
+ MPM.add(createAggressiveInstCombinerPass());
addInstructionCombiningPass(MPM);
if (SizeLevel == 0 && !DisableLibCallsShrinkWrap)
MPM.add(createLibCallsShrinkWrapPass());
@@ -765,6 +767,8 @@ void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
+ if (OptLevel > 2)
+ PM.add(createAggressiveInstCombinerPass());
addInstructionCombiningPass(PM);
addExtensionsToPM(EP_Peephole, PM);
diff --git a/llvm/lib/Transforms/LLVMBuild.txt b/llvm/lib/Transforms/LLVMBuild.txt
index 95482ad2022..f061c6d9285 100644
--- a/llvm/lib/Transforms/LLVMBuild.txt
+++ b/llvm/lib/Transforms/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = Coroutines IPO InstCombine Instrumentation Scalar Utils Vectorize ObjCARC
+subdirectories = AggressiveInstCombine Coroutines IPO InstCombine Instrumentation Scalar Utils Vectorize ObjCARC
[component_0]
type = Group
diff --git a/llvm/lib/Transforms/Scalar/LLVMBuild.txt b/llvm/lib/Transforms/Scalar/LLVMBuild.txt
index 8a99df86b84..ffe35f041b3 100644
--- a/llvm/lib/Transforms/Scalar/LLVMBuild.txt
+++ b/llvm/lib/Transforms/Scalar/LLVMBuild.txt
@@ -20,4 +20,4 @@ type = Library
name = Scalar
parent = Transforms
library_name = ScalarOpts
-required_libraries = Analysis Core InstCombine Support TransformUtils
+required_libraries = AggressiveInstCombine Analysis Core InstCombine Support TransformUtils