Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/CMakeLists.txt                                          1
-rw-r--r--  llvm/lib/CodeGen/CodeGen.cpp                                             1
-rw-r--r--  llvm/lib/CodeGen/ExpandMemCmp.cpp (renamed from llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp)  124
-rw-r--r--  llvm/lib/CodeGen/TargetPassConfig.cpp                                   13
-rw-r--r--  llvm/lib/Transforms/IPO/PassManagerBuilder.cpp                          14
-rw-r--r--  llvm/lib/Transforms/Scalar/CMakeLists.txt                                1
-rw-r--r--  llvm/lib/Transforms/Scalar/MergeICmps.cpp                                2
-rw-r--r--  llvm/lib/Transforms/Scalar/Scalar.cpp                                    1
8 files changed, 66 insertions, 91 deletions
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index d8ce9a95bfb..5d8a1382867 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -21,6 +21,7 @@ add_llvm_library(LLVMCodeGen
EarlyIfConversion.cpp
EdgeBundles.cpp
ExecutionDomainFix.cpp
+ ExpandMemCmp.cpp
ExpandPostRAPseudos.cpp
ExpandReductions.cpp
FaultMaps.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index ea315794b2e..c37ed57781d 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -30,6 +30,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeEarlyIfConverterPass(Registry);
initializeEarlyMachineLICMPass(Registry);
initializeEarlyTailDuplicatePass(Registry);
+ initializeExpandMemCmpPassPass(Registry);
initializeExpandPostRAPass(Registry);
initializeFEntryInserterPass(Registry);
initializeFinalizeISelPass(Registry);
diff --git a/llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index ceaa570402f..b425482e6ad 100644
--- a/llvm/lib/Transforms/Scalar/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -13,15 +13,13 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DomTreeUpdater.h"
-#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
-#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -48,6 +46,7 @@ static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
namespace {
+
// This class provides helper functions to expand a memcmp library call into an
// inline expansion.
class MemCmpExpansion {
@@ -66,18 +65,18 @@ class MemCmpExpansion {
uint64_t NumLoadsNonOneByte;
const uint64_t NumLoadsPerBlockForZeroCmp;
std::vector<BasicBlock *> LoadCmpBlocks;
- BasicBlock *EndBlock = nullptr;
+ BasicBlock *EndBlock;
PHINode *PhiRes;
const bool IsUsedForZeroCmp;
const DataLayout &DL;
IRBuilder<> Builder;
- DomTreeUpdater DTU;
// Represents the decomposition in blocks of the expansion. For example,
// comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
// 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {32, 1}.
struct LoadEntry {
LoadEntry(unsigned LoadSize, uint64_t Offset)
- : LoadSize(LoadSize), Offset(Offset) {}
+ : LoadSize(LoadSize), Offset(Offset) {
+ }
// The size of the load for this block, in bytes.
unsigned LoadSize;
@@ -114,8 +113,7 @@ class MemCmpExpansion {
public:
MemCmpExpansion(CallInst *CI, uint64_t Size,
const TargetTransformInfo::MemCmpExpansionOptions &Options,
- const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
- DominatorTree *DT);
+ const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout);
unsigned getNumBlocks();
uint64_t getNumLoads() const { return LoadSequence.size(); }
@@ -204,13 +202,10 @@ MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
MemCmpExpansion::MemCmpExpansion(
CallInst *const CI, uint64_t Size,
const TargetTransformInfo::MemCmpExpansionOptions &Options,
- const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
- DominatorTree *DT)
+ const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout)
: CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
- IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI),
- DTU(DT, /*PostDominator*/ nullptr,
- DomTreeUpdater::UpdateStrategy::Eager) {
+ IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI) {
assert(Size > 0 && "zero blocks");
// Scale the max size down if the target can load more bytes than we need.
llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
@@ -250,7 +245,6 @@ unsigned MemCmpExpansion::getNumBlocks() {
}
void MemCmpExpansion::createLoadCmpBlocks() {
- assert(ResBlock.BB && "ResBlock must be created before LoadCmpBlocks");
for (unsigned i = 0; i < getNumBlocks(); i++) {
BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
EndBlock->getParent(), EndBlock);
@@ -259,7 +253,6 @@ void MemCmpExpansion::createLoadCmpBlocks() {
}
void MemCmpExpansion::createResultBlock() {
- assert(EndBlock && "EndBlock must be created before ResultBlock");
ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
EndBlock->getParent(), EndBlock);
}
@@ -284,8 +277,7 @@ Value *MemCmpExpansion::getPtrToElementAtOffset(Value *Source,
// final phi node for selecting the memcmp result.
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
unsigned OffsetBytes) {
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
- Builder.SetInsertPoint(BB);
+ Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
Value *Source1 =
getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
@@ -306,16 +298,13 @@ void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
// next LoadCmpBlock,
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
ConstantInt::get(Diff->getType(), 0));
- BasicBlock *const NextBB = LoadCmpBlocks[BlockIndex + 1];
- BranchInst *CmpBr = BranchInst::Create(EndBlock, NextBB, Cmp);
+ BranchInst *CmpBr =
+ BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
Builder.Insert(CmpBr);
- DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock},
- {DominatorTree::Insert, BB, NextBB}});
} else {
// The last block has an unconditional branch to EndBlock.
BranchInst *CmpBr = BranchInst::Create(EndBlock);
Builder.Insert(CmpBr);
- DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
}
}
@@ -423,17 +412,14 @@ void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
// continue to next LoadCmpBlock or EndBlock.
BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
Builder.Insert(CmpBr);
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
// Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
// since early exit to ResultBlock was not taken (no difference was found in
// any of the bytes).
if (BlockIndex == LoadCmpBlocks.size() - 1) {
Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
- PhiRes->addIncoming(Zero, BB);
+ PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
}
- DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
- {DominatorTree::Insert, BB, NextBB}});
}
// This function creates the IR intructions for loading and comparing using the
@@ -459,8 +445,7 @@ void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
- Builder.SetInsertPoint(BB);
+ Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
CurLoadEntry.Offset);
@@ -504,10 +489,8 @@ void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
// any of the bytes).
if (BlockIndex == LoadCmpBlocks.size() - 1) {
Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
- PhiRes->addIncoming(Zero, BB);
+ PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
}
- DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
- {DominatorTree::Insert, BB, NextBB}});
}
// This function populates the ResultBlock with a sequence to calculate the
@@ -523,7 +506,6 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
PhiRes->addIncoming(Res, ResBlock.BB);
BranchInst *NewBr = BranchInst::Create(EndBlock);
Builder.Insert(NewBr);
- DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
return;
}
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
@@ -539,7 +521,6 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
BranchInst *NewBr = BranchInst::Create(EndBlock);
Builder.Insert(NewBr);
PhiRes->addIncoming(Res, ResBlock.BB);
- DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
}
void MemCmpExpansion::setupResultBlockPHINodes() {
@@ -631,7 +612,6 @@ Value *MemCmpExpansion::getMemCmpExpansion() {
if (getNumBlocks() != 1) {
BasicBlock *StartBlock = CI->getParent();
EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
- DTU.applyUpdates({{DominatorTree::Insert, StartBlock, EndBlock}});
setupEndBlockPHINodes();
createResultBlock();
@@ -639,18 +619,14 @@ Value *MemCmpExpansion::getMemCmpExpansion() {
// calculate which source was larger. The calculation requires the
// two loaded source values of each load compare block.
// These will be saved in the phi nodes created by setupResultBlockPHINodes.
- if (!IsUsedForZeroCmp)
- setupResultBlockPHINodes();
+ if (!IsUsedForZeroCmp) setupResultBlockPHINodes();
// Create the number of required load compare basic blocks.
createLoadCmpBlocks();
// Update the terminator added by splitBasicBlock to branch to the first
// LoadCmpBlock.
- BasicBlock *const FirstLoadBB = LoadCmpBlocks[0];
- StartBlock->getTerminator()->setSuccessor(0, FirstLoadBB);
- DTU.applyUpdates({{DominatorTree::Delete, StartBlock, EndBlock},
- {DominatorTree::Insert, StartBlock, FirstLoadBB}});
+ StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
}
Builder.SetCurrentDebugLocation(CI->getDebugLoc());
@@ -744,7 +720,7 @@ Value *MemCmpExpansion::getMemCmpExpansion() {
/// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
/// ret i32 %phi.res
static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
- const DataLayout *DL, DominatorTree *DT) {
+ const TargetLowering *TLI, const DataLayout *DL) {
NumMemCmpCalls++;
// Early exit from expansion if -Oz.
@@ -767,8 +743,7 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
auto Options = TTI->enableMemCmpExpansion(CI->getFunction()->hasOptSize(),
IsUsedForZeroCmp);
- if (!Options)
- return false;
+ if (!Options) return false;
if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
@@ -780,7 +755,7 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
if (!CI->getFunction()->hasOptSize() && MaxLoadsPerMemcmp.getNumOccurrences())
Options.MaxNumLoads = MaxLoadsPerMemcmp;
- MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DT);
+ MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL);
// Don't expand if this will require more loads than desired by the target.
if (Expansion.getNumLoads() == 0) {
@@ -799,6 +774,8 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
return true;
}
+
+
class ExpandMemCmpPass : public FunctionPass {
public:
static char ID;
@@ -808,17 +785,20 @@ public:
}
bool runOnFunction(Function &F) override {
- if (skipFunction(F))
+ if (skipFunction(F)) return false;
+
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC) {
return false;
+ }
+ const TargetLowering* TL =
+ TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
const TargetTransformInfo *TTI =
&getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- // ExpandMemCmp does not need the DominatorTree, but we update it if it's
- // already available.
- auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- auto PA = runImpl(F, TLI, TTI, DTWP ? &DTWP->getDomTree() : nullptr);
+ auto PA = runImpl(F, TLI, TTI, TL);
return !PA.areAllPreserved();
}
@@ -826,24 +806,23 @@ private:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.addUsedIfAvailable<DominatorTreeWrapperPass>();
- AU.addPreserved<GlobalsAAWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
}
PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, DominatorTree *DT);
+ const TargetTransformInfo *TTI,
+ const TargetLowering* TL);
// Returns true if a change was made.
bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, const DataLayout &DL,
- DominatorTree *DT);
+ const TargetTransformInfo *TTI, const TargetLowering* TL,
+ const DataLayout& DL);
};
-bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI,
- const DataLayout &DL, DominatorTree *DT) {
- for (Instruction &I : BB) {
+bool ExpandMemCmpPass::runOnBlock(
+ BasicBlock &BB, const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI, const TargetLowering* TL,
+ const DataLayout& DL) {
+ for (Instruction& I : BB) {
CallInst *CI = dyn_cast<CallInst>(&I);
if (!CI) {
continue;
@@ -851,21 +830,21 @@ bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
LibFunc Func;
if (TLI->getLibFunc(ImmutableCallSite(CI), Func) &&
(Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
- expandMemCmp(CI, TTI, &DL, DT)) {
+ expandMemCmp(CI, TTI, TL, &DL)) {
return true;
}
}
return false;
}
-PreservedAnalyses ExpandMemCmpPass::runImpl(Function &F,
- const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI,
- DominatorTree *DT) {
- const DataLayout &DL = F.getParent()->getDataLayout();
+
+PreservedAnalyses ExpandMemCmpPass::runImpl(
+ Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
+ const TargetLowering* TL) {
+ const DataLayout& DL = F.getParent()->getDataLayout();
bool MadeChanges = false;
for (auto BBIt = F.begin(); BBIt != F.end();) {
- if (runOnBlock(*BBIt, TLI, TTI, DL, DT)) {
+ if (runOnBlock(*BBIt, TLI, TTI, TL, DL)) {
MadeChanges = true;
// If changes were made, restart the function from the beginning, since
// the structure of the function was changed.
@@ -874,12 +853,7 @@ PreservedAnalyses ExpandMemCmpPass::runImpl(Function &F,
++BBIt;
}
}
- if (!MadeChanges)
- return PreservedAnalyses::all();
- PreservedAnalyses PA;
- PA.preserve<GlobalsAA>();
- PA.preserve<DominatorTreeAnalysis>();
- return PA;
+ return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
} // namespace
@@ -892,4 +866,6 @@ INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
"Expand memcmp() to load/stores", false, false)
-Pass *llvm::createExpandMemCmpPass() { return new ExpandMemCmpPass(); }
+FunctionPass *llvm::createExpandMemCmpPass() {
+ return new ExpandMemCmpPass();
+}
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index 7f1189fec3c..98b4742be1c 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -99,6 +99,9 @@ static cl::opt<bool> EnableImplicitNullChecks(
"enable-implicit-null-checks",
cl::desc("Fold null checks into faulting memory operations"),
cl::init(false), cl::Hidden);
+static cl::opt<bool> DisableMergeICmps("disable-mergeicmps",
+ cl::desc("Disable MergeICmps Pass"),
+ cl::init(false), cl::Hidden);
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -637,6 +640,16 @@ void TargetPassConfig::addIRPasses() {
addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
}
+ if (getOptLevel() != CodeGenOpt::None) {
+ // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
+ // loads and compares. ExpandMemCmpPass then tries to expand those calls
+ // into optimally-sized loads and compares. The transforms are enabled by a
+ // target lowering hook.
+ if (!DisableMergeICmps)
+ addPass(createMergeICmpsLegacyPass());
+ addPass(createExpandMemCmpPass());
+ }
+
// Run GC lowering passes for builtin collectors
// TODO: add a pass insertion point here
addPass(createGCLoweringPass());
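The addIRPasses change above schedules MergeICmps followed by ExpandMemCmp whenever the optimization level is above None. As a hedged example of the kind of code this pairing targets (types and names here are hypothetical, not from this patch): MergeICmps can rewrite the chain of adjacent-field equality tests below into a single memcmp over the 12 contiguous bytes, and ExpandMemCmp then expands that memcmp into wide loads and compares, so no libcall remains.

// Illustration only: a comparison chain over adjacent struct fields.
#include <cstdint>

struct Key {
  int32_t A;
  int32_t B;
  int32_t C;
};

bool sameKey(const Key &X, const Key &Y) {
  return X.A == Y.A && X.B == Y.B && X.C == Y.C;
}
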
diff --git a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index bd5d01c8ad2..d451653533a 100644
--- a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -246,18 +246,6 @@ void PassManagerBuilder::addInstructionCombiningPass(
PM.add(createInstructionCombiningPass(ExpensiveCombines));
}
-void PassManagerBuilder::addMemcmpPasses(legacy::PassManagerBase &PM) const {
- if (OptLevel > 0) {
- // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
- // loads and compares. ExpandMemCmpPass then tries to expand those calls
- // into optimally-sized loads and compares. The transforms are enabled by a
- // target transform info hook.
- PM.add(createMergeICmpsLegacyPass());
- PM.add(createExpandMemCmpPass());
- PM.add(createEarlyCSEPass());
- }
-}
-
void PassManagerBuilder::populateFunctionPassManager(
legacy::FunctionPassManager &FPM) {
addExtensionsToPM(EP_EarlyAsPossible, FPM);
@@ -403,7 +391,6 @@ void PassManagerBuilder::addFunctionSimplificationPasses(
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
}
MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
- addMemcmpPasses(MPM); // Merge/Expand comparisons.
MPM.add(createSCCPPass()); // Constant prop with SCCP
// Delete dead bit computations (instcombine runs after to fold away the dead
@@ -923,7 +910,6 @@ void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
PM.add(NewGVN ? createNewGVNPass()
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
- addMemcmpPasses(PM); // Merge/Expand comparisons.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
diff --git a/llvm/lib/Transforms/Scalar/CMakeLists.txt b/llvm/lib/Transforms/Scalar/CMakeLists.txt
index 18bb9e873d8..e6f8901ec81 100644
--- a/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -10,7 +10,6 @@ add_llvm_library(LLVMScalarOpts
DeadStoreElimination.cpp
DivRemPairs.cpp
EarlyCSE.cpp
- ExpandMemCmp.cpp
FlattenCFGPass.cpp
Float2Int.cpp
GuardWidening.cpp
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index c8fb1bb6e52..630f029850c 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -866,7 +866,7 @@ static bool runImpl(Function &F, const TargetLibraryInfo &TLI,
// We only try merging comparisons if the target wants to expand memcmp later.
// The rationale is to avoid turning small chains into memcmp calls.
- if (!TTI.enableMemCmpExpansion(F.hasOptSize(), /*IsZeroCmp*/ true))
+ if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
return false;
// If we don't have memcmp avaiable we can't emit calls to it.
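MergeICmps gates itself on the same enableMemCmpExpansion hook that ExpandMemCmp queries. As a sketch only, a target opts in roughly as below; MyTTIImpl is a hypothetical target TTI implementation, the concrete limits are invented, and only the MemCmpExpansionOptions fields visible in the ExpandMemCmp diff above (LoadSizes, MaxNumLoads, NumLoadsPerBlock) are assumed.

// Sketch under assumptions: hypothetical target hook returning expansion
// options; values are for illustration, not taken from any in-tree target.
TargetTransformInfo::MemCmpExpansionOptions
MyTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TargetTransformInfo::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};             // widest loads first
  Options.MaxNumLoads = OptSize ? 4 : 8;        // invented caps on total loads
  Options.NumLoadsPerBlock = IsZeroCmp ? 2 : 1; // more loads per block for ==0
  return Options;
}
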
diff --git a/llvm/lib/Transforms/Scalar/Scalar.cpp b/llvm/lib/Transforms/Scalar/Scalar.cpp
index 3e92ade883f..869cf00e0a8 100644
--- a/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -84,7 +84,6 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeLowerWidenableConditionLegacyPassPass(Registry);
initializeMemCpyOptLegacyPassPass(Registry);
initializeMergeICmpsLegacyPassPass(Registry);
- initializeExpandMemCmpPassPass(Registry);
initializeMergedLoadStoreMotionLegacyPassPass(Registry);
initializeNaryReassociateLegacyPassPass(Registry);
initializePartiallyInlineLibCallsLegacyPassPass(Registry);