-rw-r--r--  llvm/examples/ExceptionDemo/ExceptionDemo.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter4/toy.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter5/toy.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter6/toy.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter7/toy.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Chapter8/toy.cpp  1
-rw-r--r--  llvm/examples/Kaleidoscope/Orc/fully_lazy/toy.cpp  2
-rw-r--r--  llvm/examples/Kaleidoscope/Orc/initial/toy.cpp  2
-rw-r--r--  llvm/examples/Kaleidoscope/Orc/lazy_codegen/toy.cpp  2
-rw-r--r--  llvm/examples/Kaleidoscope/Orc/lazy_irgen/toy.cpp  2
-rw-r--r--  llvm/include/llvm/Analysis/AliasAnalysis.h  2
-rw-r--r--  llvm/include/llvm/Analysis/LibCallAliasAnalysis.h  8
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h  2
-rw-r--r--  llvm/include/llvm/IR/DataLayout.h  28
-rw-r--r--  llvm/include/llvm/IR/Module.h  17
-rw-r--r--  llvm/include/llvm/Transforms/Utils/Cloning.h  4
-rw-r--r--  llvm/lib/Analysis/AliasAnalysis.cpp  5
-rw-r--r--  llvm/lib/Analysis/AliasAnalysisCounter.cpp  3
-rw-r--r--  llvm/lib/Analysis/AliasDebugger.cpp  2
-rw-r--r--  llvm/lib/Analysis/BasicAliasAnalysis.cpp  9
-rw-r--r--  llvm/lib/Analysis/CFLAliasAnalysis.cpp  7
-rw-r--r--  llvm/lib/Analysis/IPA/GlobalsModRef.cpp  2
-rw-r--r--  llvm/lib/Analysis/IPA/InlineCost.cpp  10
-rw-r--r--  llvm/lib/Analysis/IVUsers.cpp  4
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp  3
-rw-r--r--  llvm/lib/Analysis/LibCallAliasAnalysis.cpp  6
-rw-r--r--  llvm/lib/Analysis/Lint.cpp  3
-rw-r--r--  llvm/lib/Analysis/Loads.cpp  9
-rw-r--r--  llvm/lib/Analysis/LoopAccessAnalysis.cpp  2
-rw-r--r--  llvm/lib/Analysis/MemDerefPrinter.cpp  7
-rw-r--r--  llvm/lib/Analysis/MemoryDependenceAnalysis.cpp  3
-rw-r--r--  llvm/lib/Analysis/NoAliasAnalysis.cpp  7
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp  3
-rw-r--r--  llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp  3
-rw-r--r--  llvm/lib/Analysis/ScopedNoAliasAA.cpp  7
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp  2
-rw-r--r--  llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp  9
-rw-r--r--  llvm/lib/CodeGen/WinEHPrepare.cpp  4
-rw-r--r--  llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp  3
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h  4
-rw-r--r--  llvm/lib/IR/DataLayout.cpp  93
-rw-r--r--  llvm/lib/IR/Module.cpp  24
-rw-r--r--  llvm/lib/LTO/LTOCodeGenerator.cpp  5
-rw-r--r--  llvm/lib/LTO/LTOModule.cpp  2
-rw-r--r--  llvm/lib/Linker/LinkModules.cpp  18
-rw-r--r--  llvm/lib/Object/IRObjectFile.cpp  9
-rw-r--r--  llvm/lib/Target/CppBackend/CPPBackend.cpp  3
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h  1
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp  9
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h  1
-rw-r--r--  llvm/lib/Target/PowerPC/PPCCTRLoops.cpp  3
-rw-r--r--  llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp  8
-rw-r--r--  llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp  6
-rw-r--r--  llvm/lib/Target/Target.cpp  4
-rw-r--r--  llvm/lib/Target/TargetMachineC.cpp  3
-rw-r--r--  llvm/lib/Transforms/IPO/ArgumentPromotion.cpp  5
-rw-r--r--  llvm/lib/Transforms/IPO/ConstantMerge.cpp  3
-rw-r--r--  llvm/lib/Transforms/IPO/GlobalOpt.cpp  109
-rw-r--r--  llvm/lib/Transforms/IPO/Inliner.cpp  18
-rw-r--r--  llvm/lib/Transforms/IPO/LowerBitSets.cpp  5
-rw-r--r--  llvm/lib/Transforms/IPO/MergeFunctions.cpp  3
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  23
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  9
-rw-r--r--  llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp  3
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp  7
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp  5
-rw-r--r--  llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp  8
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp  5
-rw-r--r--  llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp  5
-rw-r--r--  llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h  4
-rw-r--r--  llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp  4
-rw-r--r--  llvm/lib/Transforms/Scalar/BDCE.cpp  2
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstantProp.cpp  5
-rw-r--r--  llvm/lib/Transforms/Scalar/EarlyCSE.cpp  9
-rw-r--r--  llvm/lib/Transforms/Scalar/GVN.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/IndVarSimplify.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpThreading.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp  4
-rw-r--r--  llvm/lib/Transforms/Scalar/LoadCombine.cpp  6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp  33
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopRerollPass.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/SCCP.cpp  14
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp  7
-rw-r--r--  llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/Scalarizer.cpp  3
-rw-r--r--  llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp  7
-rw-r--r--  llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp  9
-rw-r--r--  llvm/lib/Transforms/Scalar/Sink.cpp  4
-rw-r--r--  llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp  2
-rw-r--r--  llvm/lib/Transforms/Utils/InlineFunction.cpp  60
-rw-r--r--  llvm/lib/Transforms/Utils/LoopSimplify.cpp  4
-rw-r--r--  llvm/lib/Transforms/Utils/LoopUnroll.cpp  5
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyInstructions.cpp  5
-rw-r--r--  llvm/lib/Transforms/Vectorize/BBVectorize.cpp  7
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp  3
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp  3
-rw-r--r--  llvm/test/Analysis/Delinearization/a.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/himeno_1.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/himeno_2.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll  4
-rw-r--r--  llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll  2
-rw-r--r--  llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll  6
-rw-r--r--  llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll  2
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/max-trip-count.ll  14
-rw-r--r--  llvm/test/Bitcode/highLevelStructure.3.2.ll  2
-rw-r--r--  llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll  2
-rw-r--r--  llvm/test/Linker/comdat3.ll  5
-rw-r--r--  llvm/test/Linker/datalayout.ll  5
-rw-r--r--  llvm/test/Other/constant-fold-gep.ll  120
-rw-r--r--  llvm/test/Transforms/ConstantMerge/merge-both.ll  7
-rw-r--r--  llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll  2
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/iv-widen.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/lftr-extend-const.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/lftr_simple.ll  6
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/pr20680.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll  7
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/signed-trip-count.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll  4
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll  4
-rw-r--r--  llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll  88
-rw-r--r--  llvm/test/Transforms/Inline/alloca-merge-align.ll  18
-rw-r--r--  llvm/test/Transforms/Inline/lifetime-no-datalayout.ll  6
-rw-r--r--  llvm/test/Transforms/InstCombine/2012-09-24-MemcpyFromGlobalCrash.ll  19
-rw-r--r--  llvm/test/Transforms/InstCombine/extractvalue.ll  6
-rw-r--r--  llvm/test/Transforms/InstCombine/gc.relocate.ll  3
-rw-r--r--  llvm/test/Transforms/InstCombine/load-cmp.ll  251
-rw-r--r--  llvm/test/Transforms/InstCombine/overflow-mul.ll  11
-rw-r--r--  llvm/test/Transforms/InstCombine/pr21651.ll  4
-rw-r--r--  llvm/test/Transforms/InstCombine/simplify-libcalls.ll  5
-rw-r--r--  llvm/test/Transforms/InstCombine/store.ll  2
-rw-r--r--  llvm/test/Transforms/InstCombine/type_pun.ll  4
-rw-r--r--  llvm/test/Transforms/LoopSimplify/preserve-scev.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll  12
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll  7
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/2011-07-20-DoubleIV.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/count-to-zero.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/ivchain.ll  4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/nested-reduce.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/pr12691.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/remove_indvar.ll  3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/variable_stride.ll  3
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/scev.ll  4
-rw-r--r--  llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll  6
-rw-r--r--  llvm/test/Transforms/Scalarizer/no-data-layout.ll  25
-rw-r--r--  llvm/tools/llc/llc.cpp  3
-rw-r--r--  llvm/tools/llvm-extract/llvm-extract.cpp  1
-rw-r--r--  llvm/tools/opt/opt.cpp  10
-rw-r--r--  llvm/unittests/IR/IRBuilderTest.cpp  4
-rw-r--r--  llvm/unittests/IR/LegacyPassManagerTest.cpp  10
163 files changed, 617 insertions, 973 deletions
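
The pattern behind most of the changes below: DataLayoutPass is deleted, the Module owns a DataLayout unconditionally, and consumers fetch it by reference from the IR they already hold. For orientation, a minimal sketch of the resulting access pattern, assuming the in-tree headers at this revision ("SomeFunctionPass" is a hypothetical pass, not part of the patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct SomeFunctionPass : FunctionPass {
      static char ID;
      SomeFunctionPass() : FunctionPass(ID) {}
      bool runOnFunction(Function &F) override {
        // No DataLayoutPass lookup and no null check: the module always
        // carries a layout (possibly the default one).
        const DataLayout &DL = F.getParent()->getDataLayout();
        (void)DL.getPointerSizeInBits();
        return false;
      }
    };
    char SomeFunctionPass::ID = 0;
    }
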
diff --git a/llvm/examples/ExceptionDemo/ExceptionDemo.cpp b/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
index 84cff0dd4c8..f6fc03de912 100644
--- a/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
+++ b/llvm/examples/ExceptionDemo/ExceptionDemo.cpp
@@ -1973,7 +1973,6 @@ int main(int argc, char *argv[]) {
// Start with registering info about how the
// target lays out data structures.
module->setDataLayout(executionEngine->getDataLayout());
- fpm.add(new llvm::DataLayoutPass());
// Optimizations turned on
#ifdef ADD_OPT_PASSES
diff --git a/llvm/examples/Kaleidoscope/Chapter4/toy.cpp b/llvm/examples/Kaleidoscope/Chapter4/toy.cpp
index 70fe57f4776..22e6856784f 100644
--- a/llvm/examples/Kaleidoscope/Chapter4/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter4/toy.cpp
@@ -561,7 +561,6 @@ void *MCJITHelper::getPointerToFunction(Function *F) {
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
OpenModule->setDataLayout(NewEngine->getDataLayout());
- FPM->add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
FPM->add(createBasicAliasAnalysisPass());
// Promote allocas to registers.
diff --git a/llvm/examples/Kaleidoscope/Chapter5/toy.cpp b/llvm/examples/Kaleidoscope/Chapter5/toy.cpp
index 6821429dc0a..e7cab0597d0 100644
--- a/llvm/examples/Kaleidoscope/Chapter5/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter5/toy.cpp
@@ -914,7 +914,6 @@ int main() {
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Do simple "peephole" optimizations and bit-twiddling optzns.
diff --git a/llvm/examples/Kaleidoscope/Chapter6/toy.cpp b/llvm/examples/Kaleidoscope/Chapter6/toy.cpp
index d6b9661d21f..eda39e1b290 100644
--- a/llvm/examples/Kaleidoscope/Chapter6/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter6/toy.cpp
@@ -1035,7 +1035,6 @@ int main() {
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Do simple "peephole" optimizations and bit-twiddling optzns.
diff --git a/llvm/examples/Kaleidoscope/Chapter7/toy.cpp b/llvm/examples/Kaleidoscope/Chapter7/toy.cpp
index 7a71d015ffd..932290dc596 100644
--- a/llvm/examples/Kaleidoscope/Chapter7/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter7/toy.cpp
@@ -1209,7 +1209,6 @@ int main() {
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Promote allocas to registers.
diff --git a/llvm/examples/Kaleidoscope/Chapter8/toy.cpp b/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
index 1b6e5ed6b98..3a582e9d1c2 100644
--- a/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Chapter8/toy.cpp
@@ -1460,7 +1460,6 @@ int main() {
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
#if 0
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
diff --git a/llvm/examples/Kaleidoscope/Orc/fully_lazy/toy.cpp b/llvm/examples/Kaleidoscope/Orc/fully_lazy/toy.cpp
index 9c93d128cc8..d101a29424d 100644
--- a/llvm/examples/Kaleidoscope/Orc/fully_lazy/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Orc/fully_lazy/toy.cpp
@@ -716,7 +716,7 @@ public:
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
diff --git a/llvm/examples/Kaleidoscope/Orc/initial/toy.cpp b/llvm/examples/Kaleidoscope/Orc/initial/toy.cpp
index 1b65e8c2a05..cc6fb8e0a01 100644
--- a/llvm/examples/Kaleidoscope/Orc/initial/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Orc/initial/toy.cpp
@@ -715,7 +715,7 @@ public:
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
diff --git a/llvm/examples/Kaleidoscope/Orc/lazy_codegen/toy.cpp b/llvm/examples/Kaleidoscope/Orc/lazy_codegen/toy.cpp
index 1ed267d5d68..6e2ec2723a4 100644
--- a/llvm/examples/Kaleidoscope/Orc/lazy_codegen/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Orc/lazy_codegen/toy.cpp
@@ -715,7 +715,7 @@ public:
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
diff --git a/llvm/examples/Kaleidoscope/Orc/lazy_irgen/toy.cpp b/llvm/examples/Kaleidoscope/Orc/lazy_irgen/toy.cpp
index 388c3f9b7bd..19801e12030 100644
--- a/llvm/examples/Kaleidoscope/Orc/lazy_irgen/toy.cpp
+++ b/llvm/examples/Kaleidoscope/Orc/lazy_irgen/toy.cpp
@@ -715,7 +715,7 @@ public:
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 763f3729881..84f1354b1fc 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -68,7 +68,7 @@ protected:
/// typically called by the run* methods of these subclasses. This may be
/// called multiple times.
///
- void InitializeAliasAnalysis(Pass *P);
+ void InitializeAliasAnalysis(Pass *P, const DataLayout *DL);
/// getAnalysisUsage - All alias analysis implementations should invoke this
/// directly (using AliasAnalysis::getAnalysisUsage(AU)).
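
With the extra parameter, every AliasAnalysis implementation now hands the layout to its superclass explicitly; the doInitialization()/runOnFunction() bodies added throughout this patch reduce to the same two lines. A hedged sketch of that shape ("MyAA" is illustrative, not a pass in this patch):

    // Module-level AA implementations:
    bool MyAA::doInitialization(Module &M) {
      InitializeAliasAnalysis(this, &M.getDataLayout()); // set up super class
      return true;
    }

    // Function-level AA implementations:
    bool MyAA::runOnFunction(Function &F) {
      InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
      return false;
    }
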
diff --git a/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h b/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
index 4c03c922447..49e0dc86564 100644
--- a/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
@@ -15,6 +15,7 @@
#define LLVM_ANALYSIS_LIBCALLALIASANALYSIS_H
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
namespace llvm {
@@ -48,11 +49,8 @@ namespace llvm {
void getAnalysisUsage(AnalysisUsage &AU) const override;
- bool runOnFunction(Function &F) override {
- InitializeAliasAnalysis(this); // set up super class
- return false;
- }
-
+ bool runOnFunction(Function &F) override;
+
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
index 2a94abe33fc..94722212556 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -183,7 +183,7 @@ private:
auto Names = llvm::make_unique<StringMap<bool>>();
for (const auto &M : Ms) {
- Mangler Mang(M->getDataLayout());
+ Mangler Mang(&M->getDataLayout());
for (const auto &GV : M->globals())
if (addGlobalValue(*Names, GV, Mang, SearchName, ExportedSymbolsOnly))
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index 9479ba487c2..deda8779569 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -116,6 +116,9 @@ private:
/// \brief Primitive type alignment data.
SmallVector<LayoutAlignElem, 16> Alignments;
+ /// \brief The string representation used to create this DataLayout
+ std::string StringRepresentation;
+
typedef SmallVector<PointerAlignElem, 8> PointersTy;
PointersTy Pointers;
@@ -185,6 +188,7 @@ public:
DataLayout &operator=(const DataLayout &DL) {
clear();
+ StringRepresentation = DL.StringRepresentation;
BigEndian = DL.isBigEndian();
StackNaturalAlign = DL.StackNaturalAlign;
ManglingMode = DL.ManglingMode;
@@ -209,8 +213,12 @@ public:
/// \brief Returns the string representation of the DataLayout.
///
/// This representation is in the same format accepted by the string
- /// constructor above.
- std::string getStringRepresentation() const;
+ /// constructor above. This should not be used to compare two DataLayout as
+ /// different string can represent the same layout.
+ std::string getStringRepresentation() const { return StringRepresentation; }
+
+ /// \brief Test if the DataLayout was constructed from an empty string.
+ bool isDefault() const { return StringRepresentation.empty(); }
/// \brief Returns true if the specified type is known to be a native integer
/// type supported by the CPU.
@@ -451,22 +459,6 @@ inline LLVMTargetDataRef wrap(const DataLayout *P) {
return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
}
-class DataLayoutPass : public ImmutablePass {
- DataLayout DL;
-
-public:
- /// This has to exist, because this is a pass, but it should never be used.
- DataLayoutPass();
- ~DataLayoutPass();
-
- const DataLayout &getDataLayout() const { return DL; }
-
- static char ID; // Pass identification, replacement for typeid
-
- bool doFinalization(Module &M) override;
- bool doInitialization(Module &M) override;
-};
-
/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
class StructLayout {
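
DataLayout now caches the string it was built from, which makes getStringRepresentation() a cheap accessor and adds isDefault() for "no layout was ever specified". Because the string is not canonicalized, layouts should be compared with operator==, not by string. A small sketch, assuming this revision's headers:

    #include "llvm/IR/DataLayout.h"
    #include <cassert>
    #include <string>
    using namespace llvm;

    void dataLayoutStringDemo() {
      DataLayout Explicit("e-m:e-i64:64-n32:64");
      assert(!Explicit.isDefault());
      // Returns the constructor string verbatim, not a canonical form.
      std::string Repr = Explicit.getStringRepresentation();
      (void)Repr;

      DataLayout Default("");
      assert(Default.isDefault());
    }
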
diff --git a/llvm/include/llvm/IR/Module.h b/llvm/include/llvm/IR/Module.h
index 62f41943be8..932acb52b5e 100644
--- a/llvm/include/llvm/IR/Module.h
+++ b/llvm/include/llvm/IR/Module.h
@@ -219,14 +219,7 @@ private:
std::string TargetTriple; ///< Platform target triple Module compiled on
///< Format: (arch)(sub)-(vendor)-(sys0-(abi)
void *NamedMDSymTab; ///< NamedMDNode names.
-
- // We need to keep the string because the C API expects us to own the string
- // representation.
- // Since we have it, we also use an empty string to represent a module without
- // a DataLayout. If it has a DataLayout, these variables are in sync and the
- // string is just a cache of getDataLayout()->getStringRepresentation().
- std::string DataLayoutStr;
- DataLayout DL;
+ DataLayout DL; ///< DataLayout associated with the module
friend class Constant;
@@ -256,10 +249,12 @@ public:
/// Get the data layout string for the module's target platform. This is
/// equivalent to getDataLayout()->getStringRepresentation().
- const std::string &getDataLayoutStr() const { return DataLayoutStr; }
+ const std::string getDataLayoutStr() const {
+ return DL.getStringRepresentation();
+ }
/// Get the data layout for the module's target platform.
- const DataLayout *getDataLayout() const;
+ const DataLayout &getDataLayout() const;
/// Get the target triple which is a string describing the target host.
/// @returns a string containing the target triple.
@@ -293,7 +288,7 @@ public:
/// Set the data layout
void setDataLayout(StringRef Desc);
- void setDataLayout(const DataLayout *Other);
+ void setDataLayout(const DataLayout &Other);
/// Set the target triple.
void setTargetTriple(StringRef T) { TargetTriple = T; }
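
On the Module side the layout is now stored by value: getDataLayout() returns a reference that is always valid, getDataLayoutStr() forwards to it, and setDataLayout() takes either a string or a const DataLayout&. A usage sketch under those assumptions:

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void configureModules(LLVMContext &Ctx) {
      Module M("example", Ctx);
      M.setDataLayout("e-m:e-i64:64-n32:64");   // StringRef overload
      const DataLayout &DL = M.getDataLayout(); // reference, never null

      Module Other("copy", Ctx);
      Other.setDataLayout(DL);                  // const DataLayout & overload
      (void)Other.getDataLayoutStr();           // same string as M's
    }
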
diff --git a/llvm/include/llvm/Transforms/Utils/Cloning.h b/llvm/include/llvm/Transforms/Utils/Cloning.h
index d1563efb09e..7fac6615d97 100644
--- a/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -192,15 +192,13 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
class InlineFunctionInfo {
public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
- const DataLayout *DL = nullptr,
AliasAnalysis *AA = nullptr,
AssumptionCacheTracker *ACT = nullptr)
- : CG(cg), DL(DL), AA(AA), ACT(ACT) {}
+ : CG(cg), AA(AA), ACT(ACT) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
- const DataLayout *DL;
AliasAnalysis *AA;
AssumptionCacheTracker *ACT;
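
Callers of InlineFunction simply drop the DataLayout argument when building the info object; the layout is taken from the IR being inlined instead. An illustrative construction (parameter names are placeholders, the signature is the one introduced above):

    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    void buildInlineInfo(CallGraph *CG, AliasAnalysis *AA,
                         AssumptionCacheTracker *ACT) {
      // Previously: InlineFunctionInfo IFI(CG, DL, AA, ACT);
      InlineFunctionInfo IFI(CG, AA, ACT);
      (void)IFI;
    }
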
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 4e95aa0bfd1..fb162d2699a 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -462,9 +462,8 @@ AliasAnalysis::~AliasAnalysis() {}
/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
/// AliasAnalysis interface before any other methods are called.
///
-void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
+ DL = NewDL;
auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &P->getAnalysis<AliasAnalysis>();
diff --git a/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index b8609142fa2..5865259ad9a 100644
--- a/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -14,6 +14,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -76,7 +77,7 @@ namespace {
bool runOnModule(Module &M) override {
this->M = &M;
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &M.getDataLayout());
return false;
}
diff --git a/llvm/lib/Analysis/AliasDebugger.cpp b/llvm/lib/Analysis/AliasDebugger.cpp
index 5d61cf9752e..f98b5781960 100644
--- a/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/llvm/lib/Analysis/AliasDebugger.cpp
@@ -44,7 +44,7 @@ namespace {
}
bool runOnModule(Module &M) override {
- InitializeAliasAnalysis(this); // set up super class
+ InitializeAliasAnalysis(this, &M.getDataLayout()); // set up super class
for(Module::global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I) {
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 46ca6ee91f5..4a15adfc2e1 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -461,9 +461,7 @@ namespace {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
@@ -815,6 +813,11 @@ static bool isAssumeIntrinsic(ImmutableCallSite CS) {
return false;
}
+bool BasicAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
diff --git a/llvm/lib/Analysis/CFLAliasAnalysis.cpp b/llvm/lib/Analysis/CFLAliasAnalysis.cpp
index 82fbfe06aee..c2c054f3f2e 100644
--- a/llvm/lib/Analysis/CFLAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/CFLAliasAnalysis.cpp
@@ -240,7 +240,7 @@ public:
return QueryResult;
}
- void initializePass() override { InitializeAliasAnalysis(this); }
+ bool doInitialization(Module &M) override;
};
void FunctionHandle::removeSelfFromCache() {
@@ -1034,3 +1034,8 @@ CFLAliasAnalysis::query(const AliasAnalysis::Location &LocA,
return AliasAnalysis::NoAlias;
}
+
+bool CFLAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
diff --git a/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 607c0681012..a16a8b979b7 100644
--- a/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -96,7 +96,7 @@ namespace {
}
bool runOnModule(Module &M) override {
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &M.getDataLayout());
// Find non-addr taken globals.
AnalyzeGlobals(M);
diff --git a/llvm/lib/Analysis/IPA/InlineCost.cpp b/llvm/lib/Analysis/IPA/InlineCost.cpp
index eb02b80c94a..d556700f536 100644
--- a/llvm/lib/Analysis/IPA/InlineCost.cpp
+++ b/llvm/lib/Analysis/IPA/InlineCost.cpp
@@ -396,7 +396,6 @@ bool CallAnalyzer::visitBitCast(BitCastInst &I) {
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
- const DataLayout *DL = I.getModule()->getDataLayout();
// Propagate constants through ptrtoint.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
@@ -410,7 +409,8 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ if (IntegerSize >= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
@@ -433,7 +433,6 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
- const DataLayout *DL = I.getModule()->getDataLayout();
// Propagate constants through ptrtoint.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
@@ -448,7 +447,8 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ if (IntegerSize <= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -1333,7 +1333,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
+ CallAnalyzer CA(&Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
ACT, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index 140753c9041..e3ea78311b8 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -253,8 +254,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index d4580dcee84..56e9a0c17e8 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1117,8 +1117,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
diff --git a/llvm/lib/Analysis/LibCallAliasAnalysis.cpp b/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
index 016f8c5cc79..f6025e3252e 100644
--- a/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -36,7 +36,11 @@ void LibCallAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll(); // Does not transform code
}
-
+bool LibCallAliasAnalysis::runOnFunction(Function &F) {
+ // set up super class
+ InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
+ return false;
+}
/// AnalyzeLibCallDetails - Given a call to a function with the specified
/// LibCallFunctionInfo, see if we can improve the mod/ref footprint of the call
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 874ed0abb99..1635888a400 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -184,8 +184,7 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
visit(F);
dbgs() << MessagesStr.str();
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index aaaf75f3015..315a42ef376 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -179,11 +179,10 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// Try to get the DataLayout for this module. This may be null, in which case
// the optimizations will be limited.
- const DataLayout *DL = ScanBB->getModule()->getDataLayout();
+ const DataLayout &DL = ScanBB->getModule()->getDataLayout();
// Try to get the store size for the type.
- uint64_t AccessSize = DL ? DL->getTypeStoreSize(AccessTy)
- : AA ? AA->getTypeStoreSize(AccessTy) : 0;
+ uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);
Value *StrippedPtr = Ptr->stripPointerCasts();
@@ -208,7 +207,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
if (AreEquivalentAddressValues(
LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
- CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
+ CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, &DL)) {
if (AATags)
LI->getAAMetadata(*AATags);
return LI;
@@ -221,7 +220,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// those cases are unlikely.)
if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
- AccessTy, DL)) {
+ AccessTy, &DL)) {
if (AATags)
SI->getAAMetadata(*AATags);
return SI->getOperand(0);
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 7bedd40432b..790ec5df047 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1360,7 +1360,7 @@ void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
bool LoopAccessAnalysis::runOnFunction(Function &F) {
SE = &getAnalysis<ScalarEvolution>();
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
diff --git a/llvm/lib/Analysis/MemDerefPrinter.cpp b/llvm/lib/Analysis/MemDerefPrinter.cpp
index 531d75e730d..5b74f5c1fab 100644
--- a/llvm/lib/Analysis/MemDerefPrinter.cpp
+++ b/llvm/lib/Analysis/MemDerefPrinter.cpp
@@ -14,6 +14,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -27,7 +28,6 @@ namespace {
initializeMemDerefPrinterPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.setPreservesAll();
}
bool runOnFunction(Function &F) override;
@@ -41,7 +41,6 @@ namespace {
char MemDerefPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDerefPrinter, "print-memderefs",
"Memory Dereferenciblity of pointers in function", false, true)
-INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(MemDerefPrinter, "print-memderefs",
"Memory Dereferenciblity of pointers in function", false, true)
@@ -50,11 +49,11 @@ FunctionPass *llvm::createMemDerefPrinter() {
}
bool MemDerefPrinter::runOnFunction(Function &F) {
- const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (auto &I: inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
- if (PO->isDereferenceablePointer(DL))
+ if (PO->isDereferenceablePointer(&DL))
Vec.push_back(PO);
}
}
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 6d38863db69..ddb7b5f4081 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -93,8 +93,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
diff --git a/llvm/lib/Analysis/NoAliasAnalysis.cpp b/llvm/lib/Analysis/NoAliasAnalysis.cpp
index c214d3cdf17..203e1daf7a0 100644
--- a/llvm/lib/Analysis/NoAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/NoAliasAnalysis.cpp
@@ -16,6 +16,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
@@ -33,11 +34,11 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const override {}
- void initializePass() override {
+ bool doInitialization(Module &M) override {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
+ return true;
}
AliasResult alias(const Location &LocA, const Location &LocB) override {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 9e4eb111c2e..8e0bffa52fd 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -7956,8 +7956,7 @@ bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
diff --git a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 5c339eecdd6..ccec0a877f5 100644
--- a/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -22,6 +22,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
@@ -79,7 +80,7 @@ ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool
ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
SE = &getAnalysis<ScalarEvolution>();
return false;
}
diff --git a/llvm/lib/Analysis/ScopedNoAliasAA.cpp b/llvm/lib/Analysis/ScopedNoAliasAA.cpp
index c6ea3af3d0c..02f8b0b1384 100644
--- a/llvm/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/llvm/lib/Analysis/ScopedNoAliasAA.cpp
@@ -80,7 +80,7 @@ public:
initializeScopedNoAliasAAPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override { InitializeAliasAnalysis(this); }
+ bool doInitialization(Module &M) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -119,6 +119,11 @@ ImmutablePass *llvm::createScopedNoAliasAAPass() {
return new ScopedNoAliasAA();
}
+bool ScopedNoAliasAA::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
ScopedNoAliasAA::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 7ff29b028ae..4d336363c5f 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -277,7 +277,7 @@ TargetIRAnalysis::Result TargetIRAnalysis::run(Function &F) {
char TargetIRAnalysis::PassID;
TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(Function &F) {
- return Result(F.getParent()->getDataLayout());
+ return Result(&F.getParent()->getDataLayout());
}
// Register the basic pass.
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index ff8955870cb..1ed2653bee9 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -282,9 +282,7 @@ namespace {
initializeTypeBasedAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
@@ -321,6 +319,11 @@ ImmutablePass *llvm::createTypeBasedAliasAnalysisPass() {
return new TypeBasedAliasAnalysis();
}
+bool TypeBasedAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
index 148c0a42175..5a13e556d54 100644
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -369,7 +369,7 @@ bool WinEHPrepare::prepareCPPEHHandlers(
Builder.SetInsertPoint(Entry->getFirstInsertionPt());
Function *FrameAllocFn =
Intrinsic::getDeclaration(M, Intrinsic::frameallocate);
- uint64_t EHAllocSize = M->getDataLayout()->getTypeAllocSize(EHDataStructTy);
+ uint64_t EHAllocSize = M->getDataLayout().getTypeAllocSize(EHDataStructTy);
Value *FrameAllocArgs[] = {
ConstantInt::get(Type::getInt32Ty(Context), EHAllocSize)};
CallInst *FrameAlloc =
@@ -538,7 +538,7 @@ bool WinEHPrepare::outlineHandler(HandlerType CatchOrCleanup, Function *SrcFn,
CloneAndPruneIntoFromInst(
Handler, SrcFn, ++II, VMap,
/*ModuleLevelChanges=*/false, Returns, "", &InlinedFunctionInfo,
- SrcFn->getParent()->getDataLayout(), Director.get());
+ &SrcFn->getParent()->getDataLayout(), Director.get());
// Move all the instructions in the first cloned block into our entry block.
BasicBlock *FirstClonedBB = std::next(Function::iterator(Entry));
diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index e500d3daa1d..527941b59a8 100644
--- a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -137,8 +137,7 @@ std::unique_ptr<MemoryBuffer> MCJIT::emitObject(Module *M) {
legacy::PassManager PM;
- M->setDataLayout(TM->getDataLayout());
- PM.add(new DataLayoutPass());
+ M->setDataLayout(*TM->getDataLayout());
// The RuntimeDyld will take ownership of this shortly
SmallVector<char, 4096> ObjBufferSV;
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
index 1b7b16187c8..00e39bb73c1 100644
--- a/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
+++ b/llvm/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
@@ -133,8 +133,8 @@ public:
// If this module doesn't have a DataLayout attached then attach the
// default.
- if (!M->getDataLayout())
- M->setDataLayout(getDataLayout());
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(*getDataLayout());
Modules.push_back(std::move(M));
std::vector<Module *> Ms;
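
The old "no layout attached" test was a null-pointer check; its replacement asks the always-present layout whether it is the default one. The same guard as a standalone helper (TargetMachine::getDataLayout() still returns a pointer at this revision, hence the dereference):

    #include "llvm/IR/Module.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    void ensureModuleLayout(Module &M, TargetMachine &TM) {
      if (M.getDataLayout().isDefault())
        M.setDataLayout(*TM.getDataLayout());
    }
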
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 573e904f2ee..c70d7c68a91 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -33,11 +33,6 @@
#include <cstdlib>
using namespace llvm;
-// Handle the Pass registration stuff necessary to use DataLayout's.
-
-INITIALIZE_PASS(DataLayoutPass, "datalayout", "Data Layout", false, true)
-char DataLayoutPass::ID = 0;
-
//===----------------------------------------------------------------------===//
// Support for StructLayout
//===----------------------------------------------------------------------===//
@@ -221,6 +216,7 @@ static unsigned inBytes(unsigned Bits) {
}
void DataLayout::parseSpecifier(StringRef Desc) {
+ StringRepresentation = Desc;
while (!Desc.empty()) {
// Split at '-'.
std::pair<StringRef, StringRef> Split = split(Desc, '-');
@@ -378,13 +374,7 @@ DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
init(M);
}
-void DataLayout::init(const Module *M) {
- const DataLayout *Other = M->getDataLayout();
- if (Other)
- *this = *Other;
- else
- reset("");
-}
+void DataLayout::init(const Module *M) { *this = M->getDataLayout(); }
bool DataLayout::operator==(const DataLayout &Other) const {
bool Ret = BigEndian == Other.BigEndian &&
@@ -392,7 +382,7 @@ bool DataLayout::operator==(const DataLayout &Other) const {
ManglingMode == Other.ManglingMode &&
LegalIntWidths == Other.LegalIntWidths &&
Alignments == Other.Alignments && Pointers == Other.Pointers;
- assert(Ret == (getStringRepresentation() == Other.getStringRepresentation()));
+ // Note: getStringRepresentation() might differ, as it is not canonicalized
return Ret;
}
@@ -567,68 +557,6 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-std::string DataLayout::getStringRepresentation() const {
- std::string Result;
- raw_string_ostream OS(Result);
-
- OS << (BigEndian ? "E" : "e");
-
- switch (ManglingMode) {
- case MM_None:
- break;
- case MM_ELF:
- OS << "-m:e";
- break;
- case MM_MachO:
- OS << "-m:o";
- break;
- case MM_WINCOFF:
- OS << "-m:w";
- break;
- case MM_Mips:
- OS << "-m:m";
- break;
- }
-
- for (const PointerAlignElem &PI : Pointers) {
- // Skip default.
- if (PI.AddressSpace == 0 && PI.ABIAlign == 8 && PI.PrefAlign == 8 &&
- PI.TypeByteWidth == 8)
- continue;
-
- OS << "-p";
- if (PI.AddressSpace) {
- OS << PI.AddressSpace;
- }
- OS << ":" << PI.TypeByteWidth*8 << ':' << PI.ABIAlign*8;
- if (PI.PrefAlign != PI.ABIAlign)
- OS << ':' << PI.PrefAlign*8;
- }
-
- for (const LayoutAlignElem &AI : Alignments) {
- if (std::find(std::begin(DefaultAlignments), std::end(DefaultAlignments),
- AI) != std::end(DefaultAlignments))
- continue;
- OS << '-' << (char)AI.AlignType;
- if (AI.TypeBitWidth)
- OS << AI.TypeBitWidth;
- OS << ':' << AI.ABIAlign*8;
- if (AI.ABIAlign != AI.PrefAlign)
- OS << ':' << AI.PrefAlign*8;
- }
-
- if (!LegalIntWidths.empty()) {
- OS << "-n" << (unsigned)LegalIntWidths[0];
-
- for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
- OS << ':' << (unsigned)LegalIntWidths[i];
- }
-
- if (StackNaturalAlign)
- OS << "-S" << StackNaturalAlign*8;
-
- return OS.str();
-}
unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
@@ -844,18 +772,3 @@ unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
-DataLayoutPass::DataLayoutPass() : ImmutablePass(ID), DL("") {
- initializeDataLayoutPassPass(*PassRegistry::getPassRegistry());
-}
-
-DataLayoutPass::~DataLayoutPass() {}
-
-bool DataLayoutPass::doInitialization(Module &M) {
- DL.init(&M);
- return false;
-}
-
-bool DataLayoutPass::doFinalization(Module &M) {
- DL.reset("");
- return false;
-}
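
The dropped assert in DataLayout::operator== reflects that equality is decided component by component while the stored strings stay as written, so two different strings can describe the same layout. A hedged illustration (it relies on "e", little-endian, matching the default):

    #include "llvm/IR/DataLayout.h"
    #include <cassert>
    using namespace llvm;

    void equalLayoutsDifferentStrings() {
      DataLayout A("");
      DataLayout B("e"); // restates the little-endian default
      assert(A == B);    // identical components
      assert(A.getStringRepresentation() != B.getStringRepresentation());
    }
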
diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp
index b0abe8cbe11..3a88db9754a 100644
--- a/llvm/lib/IR/Module.cpp
+++ b/llvm/lib/IR/Module.cpp
@@ -365,31 +365,11 @@ void Module::addModuleFlag(MDNode *Node) {
void Module::setDataLayout(StringRef Desc) {
DL.reset(Desc);
-
- if (Desc.empty()) {
- DataLayoutStr = "";
- } else {
- DataLayoutStr = DL.getStringRepresentation();
- // DataLayoutStr is now equivalent to Desc, but since the representation
- // is not unique, they may not be identical.
- }
}
-void Module::setDataLayout(const DataLayout *Other) {
- if (!Other) {
- DataLayoutStr = "";
- DL.reset("");
- } else {
- DL = *Other;
- DataLayoutStr = DL.getStringRepresentation();
- }
-}
+void Module::setDataLayout(const DataLayout &Other) { DL = Other; }
-const DataLayout *Module::getDataLayout() const {
- if (DataLayoutStr.empty())
- return nullptr;
- return &DL;
-}
+const DataLayout &Module::getDataLayout() const { return DL; }
//===----------------------------------------------------------------------===//
// Methods to control the materialization of GlobalValues in the Module.
diff --git a/llvm/lib/LTO/LTOCodeGenerator.cpp b/llvm/lib/LTO/LTOCodeGenerator.cpp
index 61c27491657..990b578acde 100644
--- a/llvm/lib/LTO/LTOCodeGenerator.cpp
+++ b/llvm/lib/LTO/LTOCodeGenerator.cpp
@@ -529,9 +529,8 @@ bool LTOCodeGenerator::optimize(bool DisableOpt,
legacy::PassManager passes;
// Add an appropriate DataLayout instance for this module...
- mergedModule->setDataLayout(TargetMach->getDataLayout());
+ mergedModule->setDataLayout(*TargetMach->getDataLayout());
- passes.add(new DataLayoutPass());
passes.add(
createTargetTransformInfoWrapperPass(TargetMach->getTargetIRAnalysis()));
@@ -567,8 +566,6 @@ bool LTOCodeGenerator::compileOptimized(raw_ostream &out, std::string &errMsg) {
legacy::PassManager codeGenPasses;
- codeGenPasses.add(new DataLayoutPass());
-
formatted_raw_ostream Out(out);
// If the bitcode files contain ARC code and were compiled with optimization,
diff --git a/llvm/lib/LTO/LTOModule.cpp b/llvm/lib/LTO/LTOModule.cpp
index 0d07791b381..087c735cd77 100644
--- a/llvm/lib/LTO/LTOModule.cpp
+++ b/llvm/lib/LTO/LTOModule.cpp
@@ -229,7 +229,7 @@ LTOModule *LTOModule::makeLTOModule(MemoryBufferRef Buffer,
TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr,
options);
- M->setDataLayout(target->getDataLayout());
+ M->setDataLayout(*target->getDataLayout());
std::unique_ptr<object::IRObjectFile> IRObj(
new object::IRObjectFile(Buffer, std::move(M)));
diff --git a/llvm/lib/Linker/LinkModules.cpp b/llvm/lib/Linker/LinkModules.cpp
index e09f02e7797..7f0fdb32f7b 100644
--- a/llvm/lib/Linker/LinkModules.cpp
+++ b/llvm/lib/Linker/LinkModules.cpp
@@ -672,17 +672,12 @@ bool ModuleLinker::computeResultingSelectionKind(StringRef ComdatName,
getComdatLeader(SrcM, ComdatName, SrcGV))
return true;
- const DataLayout *DstDL = DstM->getDataLayout();
- const DataLayout *SrcDL = SrcM->getDataLayout();
- if (!DstDL || !SrcDL) {
- return emitError(
- "Linking COMDATs named '" + ComdatName +
- "': can't do size dependent selection without DataLayout!");
- }
+ const DataLayout &DstDL = DstM->getDataLayout();
+ const DataLayout &SrcDL = SrcM->getDataLayout();
uint64_t DstSize =
- DstDL->getTypeAllocSize(DstGV->getType()->getPointerElementType());
+ DstDL.getTypeAllocSize(DstGV->getType()->getPointerElementType());
uint64_t SrcSize =
- SrcDL->getTypeAllocSize(SrcGV->getType()->getPointerElementType());
+ SrcDL.getTypeAllocSize(SrcGV->getType()->getPointerElementType());
if (Result == Comdat::SelectionKind::ExactMatch) {
if (SrcGV->getInitializer() != DstGV->getInitializer())
return emitError("Linking COMDATs named '" + ComdatName +
@@ -1482,11 +1477,10 @@ bool ModuleLinker::run() {
// Inherit the target data from the source module if the destination module
// doesn't have one already.
- if (!DstM->getDataLayout() && SrcM->getDataLayout())
+ if (DstM->getDataLayout().isDefault())
DstM->setDataLayout(SrcM->getDataLayout());
- if (SrcM->getDataLayout() && DstM->getDataLayout() &&
- *SrcM->getDataLayout() != *DstM->getDataLayout()) {
+ if (SrcM->getDataLayout() != DstM->getDataLayout()) {
emitWarning("Linking two modules of different data layouts: '" +
SrcM->getModuleIdentifier() + "' is '" +
SrcM->getDataLayoutStr() + "' whereas '" +
diff --git a/llvm/lib/Object/IRObjectFile.cpp b/llvm/lib/Object/IRObjectFile.cpp
index db304f86852..6369d8befd1 100644
--- a/llvm/lib/Object/IRObjectFile.cpp
+++ b/llvm/lib/Object/IRObjectFile.cpp
@@ -36,12 +36,9 @@ using namespace object;
IRObjectFile::IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> Mod)
: SymbolicFile(Binary::ID_IR, Object), M(std::move(Mod)) {
- // If we have a DataLayout, setup a mangler.
- const DataLayout *DL = M->getDataLayout();
- if (!DL)
- return;
-
- Mang.reset(new Mangler(DL));
+ // Setup a mangler with the DataLayout.
+ const DataLayout &DL = M->getDataLayout();
+ Mang.reset(new Mangler(&DL));
const std::string &InlineAsm = M->getModuleInlineAsm();
if (InlineAsm.empty())
diff --git a/llvm/lib/Target/CppBackend/CPPBackend.cpp b/llvm/lib/Target/CppBackend/CPPBackend.cpp
index c7fec52e077..d0e2010abfd 100644
--- a/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -1981,7 +1981,8 @@ void CppWriter::printModule(const std::string& fname,
printEscapedString(mName);
Out << "\", getGlobalContext());";
if (!TheModule->getTargetTriple().empty()) {
- nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
+ nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayoutStr()
+ << "\");";
}
if (!TheModule->getTargetTriple().empty()) {
nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
diff --git a/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
index c343980c501..91d3315b981 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
+++ b/llvm/lib/Target/NVPTX/NVPTXAllocaHoisting.h
@@ -32,7 +32,6 @@ public:
NVPTXAllocaHoisting() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addPreserved<MachineFunctionAnalysis>();
AU.addPreserved<StackProtector>();
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index f0c3663551b..f6b62b762cf 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -22,6 +22,9 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "nvptx"
using namespace llvm;
@@ -104,7 +107,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
SmallVector<MemTransferInst *, 4> aggrMemcpys;
SmallVector<MemSetInst *, 4> aggrMemsets;
- const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
LLVMContext &Context = F.getParent()->getContext();
//
@@ -120,7 +123,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
if (load->hasOneUse() == false)
continue;
- if (DL->getTypeStoreSize(load->getType()) < MaxAggrCopySize)
+ if (DL.getTypeStoreSize(load->getType()) < MaxAggrCopySize)
continue;
User *use = load->user_back();
@@ -166,7 +169,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
StoreInst *store = dyn_cast<StoreInst>(*load->user_begin());
Value *srcAddr = load->getOperand(0);
Value *dstAddr = store->getOperand(1);
- unsigned numLoads = DL->getTypeStoreSize(load->getType());
+ unsigned numLoads = DL.getTypeStoreSize(load->getType());
Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
index da301d5de62..7b0a4aed522 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.h
@@ -29,7 +29,6 @@ struct NVPTXLowerAggrCopies : public FunctionPass {
NVPTXLowerAggrCopies() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addPreserved<MachineFunctionAnalysis>();
AU.addPreserved<StackProtector>();
}
diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
index 5af8aab036c..c078f30dcf1 100644
--- a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -171,8 +171,7 @@ bool PPCCTRLoops::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
diff --git a/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
index efd2d92b928..c1f204fcfcd 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
@@ -104,7 +104,7 @@ FunctionPass *llvm::createPPCLoopDataPrefetchPass() { return new PPCLoopDataPref
bool PPCLoopDataPrefetch::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
diff --git a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index df65227233e..388effd2f8b 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -36,6 +36,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
@@ -84,7 +85,6 @@ namespace {
PPCTargetMachine *TM;
LoopInfo *LI;
ScalarEvolution *SE;
- const DataLayout *DL;
};
}
@@ -141,9 +141,6 @@ bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
-
bool MadeChange = false;
for (LoopInfo::iterator I = LI->begin(), E = LI->end();
@@ -158,9 +155,6 @@ bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
bool MadeChange = false;
- if (!DL)
- return MadeChange;
-
// Only prep. the inner-most loop
if (!L->empty())
return MadeChange;
diff --git a/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp
index b81fef47d55..7eb0e150991 100644
--- a/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/R600/AMDGPUPromoteAlloca.cpp
@@ -87,7 +87,7 @@ bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
continue;
if (Use->getParent()->getParent() == &F)
LocalMemAvailable -=
- Mod->getDataLayout()->getTypeAllocSize(GVTy->getElementType());
+ Mod->getDataLayout().getTypeAllocSize(GVTy->getElementType());
}
}
}
@@ -276,8 +276,8 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
// value from the reqd_work_group_size function attribute if it is
// available.
unsigned WorkGroupSize = 256;
- int AllocaSize = WorkGroupSize *
- Mod->getDataLayout()->getTypeAllocSize(AllocaTy);
+ int AllocaSize =
+ WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);
if (AllocaSize > LocalMemAvailable) {
DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
diff --git a/llvm/lib/Target/Target.cpp b/llvm/lib/Target/Target.cpp
index 5b7953d1a6f..1b74e8cba4f 100644
--- a/llvm/lib/Target/Target.cpp
+++ b/llvm/lib/Target/Target.cpp
@@ -34,7 +34,6 @@ inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfoImpl *P) {
}
void llvm::initializeTarget(PassRegistry &Registry) {
- initializeDataLayoutPassPass(Registry);
initializeTargetLibraryInfoWrapperPassPass(Registry);
initializeTargetTransformInfoWrapperPassPass(Registry);
}
@@ -48,9 +47,6 @@ LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
}
void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
- // The DataLayoutPass must now be in sync with the module. Unfortunatelly we
- // cannot enforce that from the C api.
- unwrap(PM)->add(new DataLayoutPass());
}
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
diff --git a/llvm/lib/Target/TargetMachineC.cpp b/llvm/lib/Target/TargetMachineC.cpp
index c7838a98099..236cb1bed96 100644
--- a/llvm/lib/Target/TargetMachineC.cpp
+++ b/llvm/lib/Target/TargetMachineC.cpp
@@ -198,8 +198,7 @@ static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
*ErrorMessage = strdup(error.c_str());
return true;
}
- Mod->setDataLayout(td);
- pass.add(new DataLayoutPass());
+ Mod->setDataLayout(*td);
TargetMachine::CodeGenFileType ft;
switch (codegen) {
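
On the C API side, LLVMAddTargetData is reduced to a no-op and TargetMachineC now copies the layout onto the module via setDataLayout(*td); the way to get a correct layout from C is therefore to stamp the target-data string onto the module itself. A small usage sketch against the C API of this era; the layout string below is purely illustrative:

#include "llvm-c/Core.h"
#include "llvm-c/Target.h"

// Copy a TargetData's string representation onto the module. The module now
// owns its DataLayout, so no pass has to be scheduled for it.
static void applyTargetData(LLVMModuleRef M, LLVMTargetDataRef TD) {
  char *Rep = LLVMCopyStringRepOfTargetData(TD);
  LLVMSetDataLayout(M, Rep); // replaces the old LLVMAddTargetData route
  LLVMDisposeMessage(Rep);
}

int main() {
  LLVMModuleRef M = LLVMModuleCreateWithName("demo");
  LLVMTargetDataRef TD =
      LLVMCreateTargetData("e-m:e-i64:64-n32:64"); // illustrative layout string
  applyTargetData(M, TD);
  LLVMDisposeTargetData(TD);
  LLVMDisposeModule(M);
  return 0;
}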
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 52b800da4f2..52cca0f2a2e 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -109,9 +109,6 @@ Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false, LocalChange;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
-
do { // Iterate until we stop promoting from this SCC.
LocalChange = false;
// Attempt to promote arguments from all functions in this SCC.
@@ -210,6 +207,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// Make sure that it is local to this module.
if (!F || !F->hasLocalLinkage()) return nullptr;
+ DL = &F->getParent()->getDataLayout();
+
// First check: see if there are any pointer arguments! If not, quick exit.
SmallVector<Argument*, 16> PointerArgs;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
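
ArgumentPromotion illustrates the CallGraphSCC case: runOnSCC has no single function to hang the layout on, so the lookup moves down to the point where a concrete Function, and therefore a Module, is in hand. A sketch of that placement only, with hypothetical names and the real promotion logic elided:

#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

using namespace llvm;

namespace {
struct SCCDemo : public CallGraphSCCPass {
  static char ID;
  SCCDemo() : CallGraphSCCPass(ID) {}

  const DataLayout *DL = nullptr;

  bool runOnSCC(CallGraphSCC &SCC) override {
    bool Changed = false;
    for (CallGraphNode *CGN : SCC) {
      Function *F = CGN->getFunction();
      if (!F || F->isDeclaration())
        continue;
      // Bind DL once a function (and therefore a module) is known.
      DL = &F->getParent()->getDataLayout();
      Changed |= visit(*F);
    }
    return Changed;
  }

  bool visit(Function &) { return false; } // placeholder for the real work
};
}

char SCCDemo::ID = 0;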
diff --git a/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index 0b6ade9eb53..98077af2f54 100644
--- a/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -103,8 +103,7 @@ unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
}
bool ConstantMerge::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 45e04f10459..8f6f999e393 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -86,7 +86,7 @@ namespace {
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- const DataLayout *DL;
+ // const DataLayout *DL;
TargetLibraryInfo *TLI;
SmallSet<const Comdat *, 8> NotDiscardableComdats;
};
@@ -269,7 +269,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
// Note that we need to use a weak value handle for the worklist items. When
@@ -318,8 +318,8 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// and will invalidate our notion of what Init is.
Constant *SubInit = nullptr;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
- ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
+ ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
+ ConstantFoldInstruction(GEP, &DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@@ -739,7 +739,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
@@ -802,11 +802,11 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
TargetLibraryInfo *TLI) {
for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, &DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -822,12 +822,10 @@ static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
/// the specified malloc. Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc. Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
-static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
- CallInst *CI,
- Type *AllocTy,
- ConstantInt *NElements,
- const DataLayout *DL,
- TargetLibraryInfo *TLI) {
+static GlobalVariable *
+OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
+ ConstantInt *NElements, const DataLayout &DL,
+ TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
Type *GlobalType;
@@ -1271,7 +1269,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, const DataLayout *DL,
+ Value *NElems, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1301,10 +1299,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
- unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
+ unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
- TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = DL->getIntPtrType(CI->getType());
+ TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
+ Type *IntPtrTy = DL.getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, nullptr,
@@ -1459,16 +1457,12 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored it that is a malloc or
/// cast of malloc.
-static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
- CallInst *CI,
+static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
- if (!DL)
- return false;
-
// If this is a malloc of an abstract type, don't touch it.
if (!AllocTy->isSized())
return false;
@@ -1496,7 +1490,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, DL, TLI, true);
+ Value *NElems = getMallocArraySize(CI, &DL, TLI, true);
if (!NElems)
return false;
@@ -1504,7 +1498,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// Restrict this transformation to only working on small allocations
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
- if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
+ if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
return true;
}
@@ -1534,8 +1528,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = DL->getIntPtrType(CI->getType());
- unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
+ Type *IntPtrTy = DL.getIntPtrType(CI->getType());
+ unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
@@ -1550,7 +1544,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, &DL, TLI, true),
DL, TLI);
return true;
}
@@ -1563,7 +1557,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1733,6 +1727,7 @@ bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Module::global_iterator &GVI,
const GlobalStatus &GS) {
+ auto &DL = GV->getParent()->getDataLayout();
// If this is a first class global and has only one accessing function
// and this function is main (which we know is not recursive), we replace
// the global with a local alloca in this function.
@@ -1804,12 +1799,10 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) {
- const DataLayout &DL = DLP->getDataLayout();
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
- GVI = FirstNewGV; // Don't skip the newly produced globals!
- return true;
- }
+ const DataLayout &DL = GV->getParent()->getDataLayout();
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
+ GVI = FirstNewGV; // Don't skip the newly produced globals!
+ return true;
}
} else if (GS.StoredType == GlobalStatus::StoredOnce) {
// If the initial value for the global was an undef value, and if only
@@ -1954,7 +1947,8 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
+ auto &DL = M.getDataLayout();
+ Constant *New = ConstantFoldConstantExpression(CE, &DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@@ -1971,9 +1965,8 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL);
-
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
/// handled by the code generator. We don't want to generate something like:
@@ -1983,9 +1976,10 @@ isSimpleEnoughValueToCommit(Constant *C,
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
-static bool isSimpleEnoughValueToCommitHelper(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL) {
+static bool
+isSimpleEnoughValueToCommitHelper(Constant *C,
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL) {
// Simple global addresses are supported, do not allow dllimport or
// thread-local globals.
if (auto *GV = dyn_cast<GlobalValue>(C))
@@ -2019,8 +2013,8 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
case Instruction::PtrToInt:
// int <=> ptr is fine if the int type is the same size as the
// pointer type.
- if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
- DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ if (DL.getTypeSizeInBits(CE->getType()) !=
+ DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
return false;
return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
@@ -2042,8 +2036,8 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL) {
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C).second)
return true;
@@ -2174,8 +2168,8 @@ namespace {
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
- : DL(DL), TLI(TLI) {
+ Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
ValueStack.emplace_back();
}
@@ -2249,7 +2243,7 @@ private:
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
};
@@ -2302,7 +2296,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
@@ -2347,7 +2341,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
@@ -2422,7 +2416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
@@ -2498,9 +2492,9 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
- if (DL && !Size->isAllOnesValue() &&
+ if (!Size->isAllOnesValue() &&
Size->getValue().getLimitedValue() >=
- DL->getTypeStoreSize(ElemTy)) {
+ DL.getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
<< "\n");
@@ -2606,7 +2600,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
+ InstResult = ConstantFoldConstantExpression(CE, &DL, TLI);
setVal(CurInst, InstResult);
}
@@ -2689,7 +2683,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Call the function.
Evaluator Eval(DL, TLI);
@@ -3040,8 +3034,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ auto &DL = M.getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
bool LocalChange = true;
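
Most of the GlobalOpt churn is mechanical: static helpers now take const DataLayout & instead of a nullable pointer, and &DL only appears at the boundary with folding utilities that still expect a pointer in this snapshot, such as ConstantFoldInstruction. A compressed sketch of that calling convention:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Helpers take the layout by reference: it can no longer be absent.
static bool foldInst(Instruction *I, const DataLayout &DL,
                     TargetLibraryInfo *TLI) {
  // The folding API of this era still takes a pointer, hence &DL.
  if (Constant *C = ConstantFoldInstruction(I, &DL, TLI)) {
    I->replaceAllUsesWith(C);
    I->eraseFromParent();
    return true;
  }
  return false;
}

static bool runOverModule(Module &M, TargetLibraryInfo *TLI) {
  const DataLayout &DL = M.getDataLayout(); // single source of truth
  bool Changed = false;
  for (Function &F : M)
    for (BasicBlock &BB : F)
      for (auto It = BB.begin(); It != BB.end();) {
        Instruction *I = &*It++;            // advance before a possible erase
        Changed |= foldInst(I, DL, TLI);
      }
  return Changed;
}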
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
index 305ad7a84ab..fe1261275e9 100644
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -121,8 +121,7 @@ static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
- int InlineHistory, bool InsertLifetime,
- const DataLayout *DL) {
+ int InlineHistory, bool InsertLifetime) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
@@ -198,11 +197,6 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
unsigned Align1 = AI->getAlignment(),
Align2 = AvailableAlloca->getAlignment();
- // If we don't have data layout information, and only one alloca is using
- // the target default, then we can't safely merge them because we can't
- // pick the greater alignment.
- if (!DL && (!Align1 || !Align2) && Align1 != Align2)
- continue;
// The available alloca has to be in the right function, not in some other
// function in this SCC.
@@ -223,8 +217,8 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
if (Align1 != Align2) {
if (!Align1 || !Align2) {
- assert(DL && "DataLayout required to compare default alignments");
- unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());
+ const DataLayout &DL = Caller->getParent()->getDataLayout();
+ unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());
Align1 = Align1 ? Align1 : TypeAlign;
Align2 = Align2 ? Align2 : TypeAlign;
@@ -432,8 +426,6 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
@@ -495,7 +487,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
InlinedArrayAllocasTy InlinedArrayAllocas;
- InlineFunctionInfo InlineInfo(&CG, DL, AA, ACT);
+ InlineFunctionInfo InlineInfo(&CG, AA, ACT);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
@@ -553,7 +545,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID, InsertLifetime, DL)) {
+ InlineHistoryID, InsertLifetime)) {
emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
Twine(Callee->getName() +
" will not be inlined into " +
diff --git a/llvm/lib/Transforms/IPO/LowerBitSets.cpp b/llvm/lib/Transforms/IPO/LowerBitSets.cpp
index f397c38a996..85a2ced85c5 100644
--- a/llvm/lib/Transforms/IPO/LowerBitSets.cpp
+++ b/llvm/lib/Transforms/IPO/LowerBitSets.cpp
@@ -234,10 +234,7 @@ ModulePass *llvm::createLowerBitSetsPass() { return new LowerBitSets; }
bool LowerBitSets::doInitialization(Module &Mod) {
M = &Mod;
-
- DL = M->getDataLayout();
- if (!DL)
- report_fatal_error("Data layout required");
+ DL = &Mod.getDataLayout();
Int1Ty = Type::getInt1Ty(M->getContext());
Int8Ty = Type::getInt8Ty(M->getContext());
diff --git a/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index b91ebf2b96b..71c53efb584 100644
--- a/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -1212,8 +1212,7 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 88fcd53b289..0b8b074a589 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2910,17 +2910,18 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout *DL,
return MadeIRChange;
}
-static bool combineInstructionsOverFunction(
- Function &F, InstCombineWorklist &Worklist, AssumptionCache &AC,
- TargetLibraryInfo &TLI, DominatorTree &DT, const DataLayout *DL = nullptr,
- LoopInfo *LI = nullptr) {
+static bool
+combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
+ AssumptionCache &AC, TargetLibraryInfo &TLI,
+ DominatorTree &DT, LoopInfo *LI = nullptr) {
// Minimizing size?
bool MinimizeSize = F.hasFnAttribute(Attribute::MinSize);
+ const DataLayout &DL = F.getParent()->getDataLayout();
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
IRBuilder<true, TargetFolder, InstCombineIRInserter> Builder(
- F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
+ F.getContext(), TargetFolder(&DL), InstCombineIRInserter(Worklist, &AC));
// Lower dbg.declare intrinsics otherwise their value may be clobbered
// by instcombiner.
@@ -2934,10 +2935,10 @@ static bool combineInstructionsOverFunction(
<< F.getName() << "\n");
bool Changed = false;
- if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))
+ if (prepareICWorklistFromFunction(F, &DL, &TLI, Worklist))
Changed = true;
- InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, DL, LI);
+ InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, &DL, LI);
if (IC.run())
Changed = true;
@@ -2950,15 +2951,13 @@ static bool combineInstructionsOverFunction(
PreservedAnalyses InstCombinePass::run(Function &F,
AnalysisManager<Function> *AM) {
- auto *DL = F.getParent()->getDataLayout();
-
auto &AC = AM->getResult<AssumptionAnalysis>(F);
auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
auto *LI = AM->getCachedResult<LoopAnalysis>(F);
- if (!combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, DL, LI))
+ if (!combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, LI))
// No changes, all analyses are preserved.
return PreservedAnalyses::all();
@@ -3007,12 +3006,10 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
// Optional analyses.
- auto *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
- return combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, DL, LI);
+ return combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, LI);
}
char InstructionCombiningPass::ID = 0;
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 9cfcdfef292..6dc621a8806 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -392,7 +392,6 @@ struct AddressSanitizer : public FunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
@@ -1321,9 +1320,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
}
bool AddressSanitizerModule::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) return false;
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
C = &(M.getContext());
int LongSize = DL->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
@@ -1399,9 +1396,7 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
GlobalsMD.init(M);
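
AddressSanitizer's two initializers above set the pattern that the other sanitizers in this patch repeat: doInitialization asks the module for its layout directly, so the DataLayoutPass lookups and the "data layout missing" fatal errors go away. A compact sketch of that initialization in a hypothetical FunctionPass-style instrumentation pass:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
// Hypothetical instrumentation pass, shaped like the sanitizers above.
struct InstrDemo : public FunctionPass {
  static char ID;
  InstrDemo() : FunctionPass(ID) {}

  const DataLayout *DL = nullptr;
  Type *IntptrTy = nullptr;

  bool doInitialization(Module &M) override {
    // No DataLayoutPass lookup and no report_fatal_error: the layout is a
    // property of the module itself.
    DL = &M.getDataLayout();
    IntptrTy = Type::getIntNTy(M.getContext(), DL->getPointerSizeInBits());
    return false; // this sketch does not modify the module
  }

  bool runOnFunction(Function &) override { return false; }
};
}

char InstrDemo::ID = 0;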
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 2b5f39c9d43..241e172bb69 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -49,7 +49,6 @@ namespace {
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
@@ -165,7 +164,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TrapBB = nullptr;
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 6adf0d24f8e..1171d902d32 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -422,10 +422,7 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
Mod = &M;
Ctx = &M.getContext();
@@ -593,8 +590,6 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
}
bool DataFlowSanitizer::runOnModule(Module &M) {
- if (!DL)
- return false;
if (ABIList.isIn(M, "skip"))
return false;
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 41526797094..7ec6695a35f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -449,10 +449,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 8f0bb460502..e7a0934f4a2 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -104,10 +104,6 @@ class SanitizerCoverageModule : public ModulePass {
return "SanitizerCoverageModule";
}
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
- }
-
private:
void InjectCoverageForIndirectCalls(Function &F,
ArrayRef<Instruction *> IndirCalls);
@@ -144,8 +140,8 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
bool SanitizerCoverageModule::runOnModule(Module &M) {
if (!CoverageLevel) return false;
C = &(M.getContext());
- DataLayoutPass *DLP = &getAnalysis<DataLayoutPass>();
- IntptrTy = Type::getIntNTy(*C, DLP->getDataLayout().getPointerSizeInBits());
+ auto &DL = M.getDataLayout();
+ IntptrTy = Type::getIntNTy(*C, DL.getPointerSizeInBits());
Type *VoidTy = Type::getVoidTy(*C);
IRBuilder<> IRB(*C);
Type *Int8PtrTy = PointerType::getUnqual(IRB.getInt8Ty());
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index e4a49112144..6b50ce9513b 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -230,10 +230,7 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
index be291a09755..5388c2c3713 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
@@ -46,6 +46,11 @@ ImmutablePass *llvm::createObjCARCAliasAnalysisPass() {
return new ObjCARCAliasAnalysis();
}
+bool ObjCARCAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
ObjCARCAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h b/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
index 3fcea4e9b86..3c5a021de26 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
@@ -44,9 +44,7 @@ namespace objcarc {
}
private:
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
/// This method is used when a pass implements an analysis interface through
/// multiple inheritance. If needed, it should override this to adjust the
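
The ObjCARC change is representative of the ImmutablePass-based alias analyses: initializePass() had no Module to consult, so initialization moves to doInitialization(Module &), which forwards the module's layout through InitializeAliasAnalysis, now taking the DataLayout pointer as a second argument. A sketch of the override pair only, with a hypothetical AA name and the real query logic omitted:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
// Hypothetical alias-analysis wrapper showing only the initialization plumbing.
struct DemoAA : public ImmutablePass, public AliasAnalysis {
  static char ID;
  DemoAA() : ImmutablePass(ID) {}

  // Replaces the old initializePass() / InitializeAliasAnalysis(this) pair.
  bool doInitialization(Module &M) override {
    InitializeAliasAnalysis(this, &M.getDataLayout());
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AliasAnalysis::getAnalysisUsage(AU); // keep the base requirements
  }
};
}

char DemoAA::ID = 0;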
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 5c74885a45c..64e2c2b6281 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -32,6 +32,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -413,8 +414,7 @@ bool AlignmentFromAssumptions::runOnFunction(Function &F) {
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
NewDestAlignments.clear();
NewSrcAlignments.clear();
diff --git a/llvm/lib/Transforms/Scalar/BDCE.cpp b/llvm/lib/Transforms/Scalar/BDCE.cpp
index c7bd79d9abb..3ae05be3dda 100644
--- a/llvm/lib/Transforms/Scalar/BDCE.cpp
+++ b/llvm/lib/Transforms/Scalar/BDCE.cpp
@@ -263,7 +263,7 @@ bool BDCE::runOnFunction(Function& F) {
return false;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DenseMap<Instruction *, APInt> AliveBits;
diff --git a/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index 29d4e05d482..9355444547b 100644
--- a/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -68,8 +68,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
@@ -78,7 +77,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
- if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, &DL, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (User *U : I->users())
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 9309623380f..3f8089c5bbf 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -685,14 +685,14 @@ bool EarlyCSE::run() {
PreservedAnalyses EarlyCSEPass::run(Function &F,
AnalysisManager<Function> *AM) {
- const DataLayout *DL = F.getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
auto &TTI = AM->getResult<TargetIRAnalysis>(F);
auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
auto &AC = AM->getResult<AssumptionAnalysis>(F);
- EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);
+ EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
if (!CSE.run())
return PreservedAnalyses::all();
@@ -724,14 +724,13 @@ public:
if (skipOptnoneFunction(F))
return false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ auto &DL = F.getParent()->getDataLayout();
auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);
+ EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
return CSE.run();
}
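
EarlyCSE shows the same lookup working under both pass managers: the new-style run() and the legacy wrapper each take the layout from the function's parent module and hand the implementation &DL, since the utility's constructor still expects a pointer here. A sketch of a new-pass-manager pass written against the run() signature used in this snapshot; the pass itself is hypothetical:

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Hypothetical new-pass-manager pass: no analysis is consulted for the
// DataLayout; it comes straight off the IR unit being transformed.
class DemoPass {
public:
  static StringRef name() { return "DemoPass"; }

  PreservedAnalyses run(Function &F, AnalysisManager<Function> *) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    bool Changed = (DL.getPointerSizeInBits() == 0); // stand-in for real work
    return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
  }
};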
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 73a1f259b8b..2c00d694ab7 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -2357,8 +2357,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 462c2b6f90d..38519ba717c 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1896,8 +1896,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 8b54abdcd5c..db4174d7083 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -159,8 +159,7 @@ bool JumpThreading::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
LVI = &getAnalysis<LazyValueInfo>();
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 14af38b9491..45bd122e072 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -48,6 +48,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -181,8 +182,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
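
LICM and the other loop passes in this patch reach the layout through the loop's header block, which is also why llvm/IR/Module.h starts appearing in their include lists. A minimal sketch of that access path in a hypothetical LoopPass:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h" // needed to call getDataLayout() on the Module

using namespace llvm;

namespace {
struct LoopDemo : public LoopPass { // hypothetical loop pass
  static char ID;
  LoopDemo() : LoopPass(ID) {}

  bool runOnLoop(Loop *L, LPPassManager &) override {
    // A Loop has no direct module link: go header block -> module -> layout.
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    (void)DL.getPointerSizeInBits(); // stand-in for the real queries
    return false;
  }
};
}

char LoopDemo::ID = 0;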
diff --git a/llvm/lib/Transforms/Scalar/LoadCombine.cpp b/llvm/lib/Transforms/Scalar/LoadCombine.cpp
index 11e4d7606d9..9d543baf401 100644
--- a/llvm/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/llvm/lib/Transforms/Scalar/LoadCombine.cpp
@@ -23,6 +23,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -85,12 +86,11 @@ private:
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
+ DL = &F.getParent()->getDataLayout();
+ if (!DL) {
DEBUG(dbgs() << " Skipping LoadCombine -- no target data!\n");
return false;
}
- DL = &DLP->getDataLayout();
return true;
}
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index d1f46ddf169..cb20c033484 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -130,7 +130,6 @@ namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -139,7 +138,10 @@ namespace {
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- DL = nullptr; DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr;
+ DT = nullptr;
+ SE = nullptr;
+ TLI = nullptr;
+ TTI = nullptr;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
@@ -179,14 +181,6 @@ namespace {
AU.addRequired<TargetTransformInfoWrapperPass>();
}
- const DataLayout *getDataLayout() {
- if (DL)
- return DL;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- return DL;
- }
-
DominatorTree *getDominatorTree() {
return DT ? DT
: (DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree());
@@ -625,10 +619,6 @@ bool LoopIdiomRecognize::runOnCountableLoop() {
if (BECst->getValue()->getValue() == 0)
return false;
- // We require target data for now.
- if (!getDataLayout())
- return false;
-
// set DT
(void)getDominatorTree();
@@ -742,7 +732,8 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
@@ -917,7 +908,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
-
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
// If we're allowed to form a memset, and the stored value would be acceptable
@@ -928,9 +919,8 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
CurLoop->isLoopInvariant(SplatValue)) {
// Keep and use SplatValue.
PatternValue = nullptr;
- } else if (DestAS == 0 &&
- TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
+ } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
+ (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
SplatValue = nullptr;
@@ -971,7 +961,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
+ Type *IntPtr = Builder.getIntPtrTy(&DL, DestAS);
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -1085,7 +1075,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ Type *IntPtrTy = Builder.getIntPtrTy(&DL, SI->getPointerAddressSpace());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),
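
LoopIdiomRecognize goes one step further and deletes its lazily cached getDataLayout() helper: the layout is looked up on demand at the few places that need a size or pointer width, and &DL is passed where IRBuilder's helper still wants a pointer. A sketch of one such use site; the function name and surrounding logic are illustrative only:

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Compute the pointer-sized integer type for a store's address space,
// querying the module's DataLayout on demand instead of caching a pointer.
static Type *intPtrTypeFor(Loop *CurLoop, StoreInst *SI, IRBuilder<> &Builder) {
  auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
  // getIntPtrTy still takes a DataLayout pointer in this snapshot.
  return Builder.getIntPtrTy(&DL, SI->getPointerAddressSpace());
}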
diff --git a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index 6dc600e6614..6fce2ac27c1 100644
--- a/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -77,8 +77,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
diff --git a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
index fdf7e3b1b19..300b168cea8 100644
--- a/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -1477,8 +1477,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock *Header = L->getHeader();
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 006b885bb30..a4d4652e0c0 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1077,8 +1077,7 @@ bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If we don't have at least memset and memcpy, there is little point of doing
diff --git a/llvm/lib/Transforms/Scalar/SCCP.cpp b/llvm/lib/Transforms/Scalar/SCCP.cpp
index 05b9608f8a6..91fd09d3b4e 100644
--- a/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -154,7 +154,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -206,8 +206,8 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
- : DL(DL), TLI(tli) {}
+ SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli)
+ : DL(DL), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
@@ -1070,7 +1070,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
}
// Transform load from a constant into a constant if possible.
- if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, &DL))
return markConstant(IV, &I, C);
// Otherwise we cannot say for certain what value this load will produce.
@@ -1561,8 +1561,7 @@ bool SCCP::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
@@ -1691,8 +1690,7 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = M.getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
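
SCCPSolver, like GlobalOpt's Evaluator earlier in the patch, turns its layout member from a nullable pointer into a reference bound in the constructor; every downstream null check disappears, at the cost of requiring the layout at construction time, which the module can now always supply. A minimal sketch of that class shape:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"

using namespace llvm;

namespace {
// Hypothetical solver-style class: the layout is a reference member, so it
// must be supplied up front and can never be null afterwards.
class DemoSolver {
  const DataLayout &DL;

public:
  explicit DemoSolver(const DataLayout &DL) : DL(DL) {}

  unsigned pointerBits() const { return DL.getPointerSizeInBits(); }
};
}

// Typical construction site inside a module pass:
//   const DataLayout &DL = M.getDataLayout();
//   DemoSolver Solver(DL);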
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 3ab5973643c..09670c63080 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -4423,12 +4423,7 @@ bool SROA::runOnFunction(Function &F) {
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
- DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
- return false;
- }
- DL = &DLP->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
diff --git a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 5c49a5504b4..d89a5bcd63a 100644
--- a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1032,8 +1032,7 @@ bool SROA::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
bool Changed = performPromotion(F);
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 6036c099be0..5638d20075d 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -248,8 +248,7 @@ bool Scalarizer::doInitialization(Module &M) {
}
bool Scalarizer::runOnFunction(Function &F) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index bffe8dff4b6..3aa57711416 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -312,16 +312,12 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.setPreservesCFG();
}
bool doInitialization(Module &M) override {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (DLP == nullptr)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
return false;
}
@@ -386,7 +382,6 @@ INITIALIZE_PASS_BEGIN(
"Split GEPs to a variadic base and a constant offset for better CSE", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
"Split GEPs to a variadic base and a constant offset for better CSE", false,
diff --git a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index fb8fe38c8d7..ac932b659ab 100644
--- a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -181,11 +181,11 @@ SimplifyCFGPass::SimplifyCFGPass(int BonusInstThreshold)
PreservedAnalyses SimplifyCFGPass::run(Function &F,
AnalysisManager<Function> *AM) {
- auto *DL = F.getParent()->getDataLayout();
+ auto &DL = F.getParent()->getDataLayout();
auto &TTI = AM->getResult<TargetIRAnalysis>(F);
auto &AC = AM->getResult<AssumptionAnalysis>(F);
- if (!simplifyFunctionCFG(F, TTI, DL, &AC, BonusInstThreshold))
+ if (!simplifyFunctionCFG(F, TTI, &DL, &AC, BonusInstThreshold))
return PreservedAnalyses::none();
return PreservedAnalyses::all();
@@ -207,9 +207,8 @@ struct CFGSimplifyPass : public FunctionPass {
&getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const TargetTransformInfo &TTI =
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- return simplifyFunctionCFG(F, TTI, DL, AC, BonusInstThreshold);
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ return simplifyFunctionCFG(F, TTI, &DL, AC, BonusInstThreshold);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
diff --git a/llvm/lib/Transforms/Scalar/Sink.cpp b/llvm/lib/Transforms/Scalar/Sink.cpp
index d0ee0a679b3..4ba3f1949db 100644
--- a/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -21,6 +21,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -100,8 +101,7 @@ bool Sinking::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
AA = &getAnalysis<AliasAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
bool MadeChange, EverMadeChange = false;
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 79a96b512bf..bd6d925e8d5 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -159,7 +159,7 @@ bool TailCallElim::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
bool AllCallsAreTailCalls = false;
bool Modified = markTails(F, AllCallsAreTailCalls);
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index c2ef1ace967..83f96ee7527 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -622,8 +622,9 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
- if (!PreserveAlignmentAssumptions || !IFI.DL)
+ if (!PreserveAlignmentAssumptions)
return;
+ auto &DL = CS.getCaller()->getParent()->getDataLayout();
// To avoid inserting redundant assumptions, we should check for assumptions
// already in the caller. To do this, we might need a DT of the caller.
@@ -645,13 +646,12 @@ static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
// If we can already prove the asserted alignment in the context of the
// caller, then don't bother inserting the assumption.
Value *Arg = CS.getArgument(I->getArgNo());
- if (getKnownAlignment(Arg, IFI.DL,
- &IFI.ACT->getAssumptionCache(*CalledFunc),
+ if (getKnownAlignment(Arg, &DL, &IFI.ACT->getAssumptionCache(*CalledFunc),
CS.getInstruction(), &DT) >= Align)
continue;
- IRBuilder<>(CS.getInstruction()).CreateAlignmentAssumption(*IFI.DL, Arg,
- Align);
+ IRBuilder<>(CS.getInstruction())
+ .CreateAlignmentAssumption(DL, Arg, Align);
}
}
}
@@ -726,11 +726,7 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
IRBuilder<> Builder(InsertBlock->begin());
- Value *Size;
- if (IFI.DL == nullptr)
- Size = ConstantExpr::getSizeOf(AggTy);
- else
- Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));
+ Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
@@ -761,7 +757,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
// If the pointer is already known to be sufficiently aligned, or if we can
// round it up to a larger alignment, then we don't need a temporary.
- if (getOrEnforceKnownAlignment(Arg, ByValAlignment, IFI.DL,
+ auto &DL = Caller->getParent()->getDataLayout();
+ if (getOrEnforceKnownAlignment(Arg, ByValAlignment, &DL,
&IFI.ACT->getAssumptionCache(*Caller),
TheCall) >= ByValAlignment)
return Arg;
@@ -771,10 +768,9 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
}
// Create the alloca. If we have DataLayout, use nice alignment.
- unsigned Align = 1;
- if (IFI.DL)
- Align = IFI.DL->getPrefTypeAlignment(AggTy);
-
+ unsigned Align =
+ Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
+
// If the byval had an alignment specified, we *must* use at least that
// alignment, as it is required by the byval argument (and uses of the
// pointer inside the callee).
@@ -1008,6 +1004,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// Keep a list of pair (dst, src) to emit byval initializations.
SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
+ auto &DL = Caller->getParent()->getDataLayout();
+
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -1042,9 +1040,9 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
/*ModuleLevelChanges=*/false, Returns, ".i",
- &InlinedFunctionInfo, IFI.DL, TheCall);
+ &InlinedFunctionInfo, &DL, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
@@ -1065,7 +1063,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
CloneAliasScopeMetadata(CS, VMap);
// Add noalias metadata if necessary.
- AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);
+ AddAliasScopeMetadata(CS, VMap, &DL, IFI.AA);
// FIXME: We could register any cloned assumptions instead of clearing the
// whole function's cache.
@@ -1173,18 +1171,17 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
ConstantInt *AllocaSize = nullptr;
if (ConstantInt *AIArraySize =
dyn_cast<ConstantInt>(AI->getArraySize())) {
- if (IFI.DL) {
- Type *AllocaType = AI->getAllocatedType();
- uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
- uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
- assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
- // Check that array size doesn't saturate uint64_t and doesn't
- // overflow when it's multiplied by type size.
- if (AllocaArraySize != ~0ULL &&
- UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
- AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
- AllocaArraySize * AllocaTypeSize);
- }
+ auto &DL = Caller->getParent()->getDataLayout();
+ Type *AllocaType = AI->getAllocatedType();
+ uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
+ uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
+ assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
+ // Check that array size doesn't saturate uint64_t and doesn't
+ // overflow when it's multiplied by type size.
+ if (AllocaArraySize != ~0ULL &&
+ UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
+ AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
+ AllocaArraySize * AllocaTypeSize);
}
}
@@ -1445,7 +1442,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
if (PHI) {
- if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr,
+ auto &DL = Caller->getParent()->getDataLayout();
+ if (Value *V = SimplifyInstruction(PHI, &DL, nullptr, nullptr,
&IFI.ACT->getAssumptionCache(*Caller))) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
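
A minimal sketch (not from this patch) of the now-unconditional alloca-size computation that the InlineFunction.cpp hunk above performs once IFI.DL is gone; the helper name staticAllocaSize and its parameters are hypothetical, but the overflow guard mirrors the one the hunk keeps.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include <cstdint>

// Returns the byte size of a constant-sized alloca in Caller, or 0 if the
// array size is unknown or the multiplication would overflow uint64_t.
static uint64_t staticAllocaSize(const llvm::Function &Caller,
                                 const llvm::AllocaInst &AI) {
  const llvm::DataLayout &DL = Caller.getParent()->getDataLayout();
  uint64_t TypeSize = DL.getTypeAllocSize(AI.getAllocatedType());
  auto *ArraySizeCI = llvm::dyn_cast<llvm::ConstantInt>(AI.getArraySize());
  if (!ArraySizeCI)
    return 0;
  uint64_t ArraySize = ArraySizeCI->getLimitedValue();
  // Reject a saturated array size and products that would overflow,
  // just as the patched code does before forming AllocaSize.
  if (ArraySize == ~0ULL || UINT64_MAX / ArraySize < TypeSize)
    return 0;
  return ArraySize * TypeSize;
}
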
diff --git a/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index a0f82684c9a..d4667f7b34b 100644
--- a/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -57,6 +57,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -797,8 +798,7 @@ bool LoopSimplify::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = getAnalysisIfAvailable<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
// Simplify each loop nest in the function.
diff --git a/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index accb7316cab..67e564187d7 100644
--- a/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -531,9 +531,8 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
if (!OuterL && !CompletelyUnroll)
OuterL = L;
if (OuterL) {
- DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL, AC);
+ const DataLayout &DL = F->getParent()->getDataLayout();
+ simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, &DL, AC);
// LCSSA must be performed on the outermost affected loop. The unrolled
// loop's last loop latch is guaranteed to be in the outermost loop after
diff --git a/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
index 55a4455b985..1b4104a4fba 100644
--- a/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -51,8 +51,7 @@ namespace {
const DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
AssumptionCache *AC =
@@ -73,7 +72,7 @@ namespace {
continue;
// Don't waste time simplifying unused instructions.
if (!I->use_empty())
- if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
+ if (Value *V = SimplifyInstruction(I, &DL, TLI, DT, AC)) {
// Mark all uses for resimplification next time round the loop.
for (User *U : I->users())
Next->insert(cast<Instruction>(U));
diff --git a/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
index 525c050cf28..ec10bc36401 100644
--- a/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -39,6 +39,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
@@ -206,8 +207,7 @@ namespace {
AA = &P->getAnalysis<AliasAnalysis>();
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &P->getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TTI = IgnoreTargetInfo
? nullptr
: &P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
@@ -442,8 +442,7 @@ namespace {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &BB.getModule()->getDataLayout();
TTI = IgnoreTargetInfo
? nullptr
: &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 9124272aa82..686b8995a31 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1274,8 +1274,7 @@ struct LoopVectorize : public FunctionPass {
bool runOnFunction(Function &F) override {
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index f47d0089b1d..4302070fb7c 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3064,8 +3064,7 @@ struct SLPVectorizer : public FunctionPass {
return false;
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
diff --git a/llvm/test/Analysis/Delinearization/a.ll b/llvm/test/Analysis/Delinearization/a.ll
index 956c19bc1d4..78bbfcf7de6 100644
--- a/llvm/test/Analysis/Delinearization/a.ll
+++ b/llvm/test/Analysis/Delinearization/a.ll
@@ -9,7 +9,7 @@
; AddRec: {{{(28 + (4 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(12 * %o)}<%for.j>,+,20}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(i32) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes.
; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, i32* nocapture %A) #0 {
diff --git a/llvm/test/Analysis/Delinearization/himeno_1.ll b/llvm/test/Analysis/Delinearization/himeno_1.ll
index bba7b4cb0a1..5c86f57f779 100644
--- a/llvm/test/Analysis/Delinearization/himeno_1.ll
+++ b/llvm/test/Analysis/Delinearization/himeno_1.ll
@@ -28,7 +28,7 @@
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
diff --git a/llvm/test/Analysis/Delinearization/himeno_2.ll b/llvm/test/Analysis/Delinearization/himeno_2.ll
index 2cf8ebc2839..e1e75609502 100644
--- a/llvm/test/Analysis/Delinearization/himeno_2.ll
+++ b/llvm/test/Analysis/Delinearization/himeno_2.ll
@@ -28,7 +28,7 @@
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
diff --git a/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll b/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
index 97617b8cd98..0c893bf1137 100644
--- a/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
+++ b/llvm/test/Analysis/Delinearization/iv_times_constant_in_subscript.ll
@@ -8,9 +8,9 @@
; A[2i+b][2j] = 1.0;
; }
-; AddRec: {{((%m * %b * sizeof(double)) + %A),+,(2 * %m * sizeof(double))}<%for.i>,+,(2 * sizeof(double))}<%for.j>
+; AddRec: {{((%m * %b * 8) + %A),+,(2 * %m * 8)}<%for.i>,+,(2 * 8)}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>]
diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
index 88f139f0024..317e62c8ef9 100644
--- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
+++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.i>][{-4,+,1}<nw><%for.j>][{7,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
index 364e5ee964d..ada7758b21b 100644
--- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
+++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_integer_offsets_nts_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * (%o + %p)) + %A),+,(8 * (%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>,+,(8 * (%o + %p))}<%for.body6.lr.ph.us.us>,+,8}<%for.body6.us.us>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.cond4.preheader.lr.ph.us>][{-4,+,1}<nw><%for.body6.lr.ph.us.us>][{7,+,1}<nw><%for.body6.us.us>]
define void @foo(i64 %n, i64 %m, i64 %o, i64 %p, double* nocapture %A) nounwind uwtable {
diff --git a/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll b/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
index 4cbac339023..9e37b76e59b 100644
--- a/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
+++ b/llvm/test/Analysis/Delinearization/multidim_ivs_and_parameteric_offsets_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{((8 * ((((%m * %p) + %q) * %o) + %r)) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{%p,+,1}<nw><%for.i>][{%q,+,1}<nw><%for.j>][{%r,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A, i64 %p, i64 %q, i64 %r) {
diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
index 9df71093279..66e2348a927 100644
--- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
+++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
@@ -10,16 +10,16 @@
; Inst: %val = load double, double* %arrayidx
; In Loop with Header: for.j
-; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
+; AddRec: {{0,+,(%m * 8)}<%for.i>,+,8}<%for.j>
; Base offset: %A
-; ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
; Inst: store double %val, double* %arrayidx
; In Loop with Header: for.j
; AddRec: {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
define void @foo(i64 %n, i64 %m, double* %A) {
diff --git a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
index 6120293bef0..8cb7ad56749 100644
--- a/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
+++ b/llvm/test/Analysis/Delinearization/multidim_only_ivs_3d.ll
@@ -10,7 +10,7 @@
; AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>][{0,+,1}<nuw><nsw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
diff --git a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
index abebea7d74c..dde44ba8257 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-trip-count.ll
@@ -2,7 +2,7 @@
; ScalarEvolution should be able to understand the loop and eliminate the casts.
-; CHECK: {%d,+,sizeof(i32)}
+; CHECK: {%d,+,4}
define void @foo(i32* nocapture %d, i32 %n) nounwind {
entry:
@@ -39,7 +39,7 @@ return: ; preds = %bb1.return_crit_edge, %entry
; count, it should say so.
; PR7845
-; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.cond: max backedge-taken count is 5
@.str = private constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=2]
@@ -101,7 +101,7 @@ for.end: ; preds = %for.cond.for.end_cr
; PR19799: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr19799
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
@a = common global i32 0, align 4
@@ -127,7 +127,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
; PR18886: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr18886
-; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body: max backedge-taken count is 3
@aa = global i64 0, align 8
@@ -157,8 +157,8 @@ return:
; before the check is forever skipped.
;
; CHECK-LABEL: @cannot_compute_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
-; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
@b = common global i32 0, align 4
define i32 @cannot_compute_mustexit() {
@@ -186,7 +186,7 @@ bar.exit: ; preds = %for.cond.i, %for.bo
; MaxBECount should be the minimum of them.
;
; CHECK-LABEL: @two_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
define i32 @two_mustexit() {
entry:
diff --git a/llvm/test/Bitcode/highLevelStructure.3.2.ll b/llvm/test/Bitcode/highLevelStructure.3.2.ll
index 88fb3406d82..54356b9fb13 100644
--- a/llvm/test/Bitcode/highLevelStructure.3.2.ll
+++ b/llvm/test/Bitcode/highLevelStructure.3.2.ll
@@ -5,7 +5,7 @@
; older bitcode files.
; Data Layout Test
-; CHECK: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-f80:32-n8:16:32-S32"
+; CHECK: target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
; Module-Level Inline Assembly Test
diff --git a/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
index bb4bf52e33c..aa61c283556 100644
--- a/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ b/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
@@ -1,6 +1,8 @@
; RUN: opt < %s -O3 | \
; RUN: llc -mtriple=thumbv7-apple-darwin10 -mattr=+neon | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
+
define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
entry:
; -- The loop following the load should only use a single add-literation
diff --git a/llvm/test/Linker/comdat3.ll b/llvm/test/Linker/comdat3.ll
deleted file mode 100644
index 3b5db0a39a0..00000000000
--- a/llvm/test/Linker/comdat3.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-link %s %p/Inputs/comdat2.ll -S -o - 2>&1 | FileCheck %s
-
-$foo = comdat largest
-@foo = global i32 43, comdat($foo)
-; CHECK: Linking COMDATs named 'foo': can't do size dependent selection without DataLayout!
diff --git a/llvm/test/Linker/datalayout.ll b/llvm/test/Linker/datalayout.ll
index 8cbfc198a68..cba28776eb5 100644
--- a/llvm/test/Linker/datalayout.ll
+++ b/llvm/test/Linker/datalayout.ll
@@ -1,13 +1,12 @@
; REQUIRES: shell
-; RUN: llvm-link %s %S/Inputs/datalayout-a.ll -S -o - 2>%t.a.err | FileCheck %s
+; RUN: llvm-link %s %S/Inputs/datalayout-a.ll -S -o - 2>%t.a.err
; RUN: (echo foo ;cat %t.a.err) | FileCheck --check-prefix=WARN-A %s
-; RUN: llvm-link %s %S/Inputs/datalayout-b.ll -S -o - 2>%t.b.err | FileCheck %s
+; RUN: llvm-link %s %S/Inputs/datalayout-b.ll -S -o - 2>%t.b.err
; RUN: cat %t.b.err | FileCheck --check-prefix=WARN-B %s
target datalayout = "e"
-; CHECK: target datalayout = "e"
; WARN-A-NOT: WARNING
diff --git a/llvm/test/Other/constant-fold-gep.ll b/llvm/test/Other/constant-fold-gep.ll
index c09ab4d87f6..c5cd9d987d7 100644
--- a/llvm/test/Other/constant-fold-gep.ll
+++ b/llvm/test/Other/constant-fold-gep.ll
@@ -1,8 +1,8 @@
-; "PLAIN" - No optimizations. This tests the target-independent
+; "PLAIN" - No optimizations. This tests the default target layout
; constant folder.
; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
-; "OPT" - Optimizations but no targetdata. This tests target-independent
+; "OPT" - Optimizations but no targetdata. This tests default target layout
; folding in the optimizers.
; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s
@@ -10,7 +10,7 @@
; folding in the optimizers.
; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
-; "SCEV" - ScalarEvolution but no targetdata.
+; "SCEV" - ScalarEvolution with default target layout
; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
@@ -24,12 +24,12 @@
; PLAIN: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
; PLAIN: @H8 = global i8* getelementptr (i8* null, i32 -1)
; PLAIN: @H1 = global i1* getelementptr (i1* null, i32 -1)
-; OPT: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
-; OPT: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
-; OPT: @F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
-; OPT: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
-; OPT: @H8 = global i8* getelementptr (i8* null, i32 -1)
-; OPT: @H1 = global i1* getelementptr (i1* null, i32 -1)
+; OPT: @G8 = global i8* null
+; OPT: @G1 = global i1* null
+; OPT: @F8 = global i8* inttoptr (i64 -1 to i8*)
+; OPT: @F1 = global i1* inttoptr (i64 -1 to i1*)
+; OPT: @H8 = global i8* inttoptr (i64 -1 to i8*)
+; OPT: @H1 = global i1* inttoptr (i64 -1 to i1*)
; TO: @G8 = global i8* null
; TO: @G1 = global i1* null
; TO: @F8 = global i8* inttoptr (i64 -1 to i8*)
@@ -57,15 +57,15 @@
; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
-; OPT: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
-; OPT: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-; OPT: @c = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
-; OPT: @d = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
-; OPT: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
+; OPT: @a = constant i64 18480
+; OPT: @b = constant i64 8
+; OPT: @c = constant i64 16
+; OPT: @d = constant i64 88
+; OPT: @e = constant i64 16
; OPT: @f = constant i64 1
-; OPT: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-; OPT: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
-; OPT: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
+; OPT: @g = constant i64 8
+; OPT: @h = constant i64 8
+; OPT: @i = constant i64 8
; TO: @a = constant i64 18480
; TO: @b = constant i64 8
; TO: @c = constant i64 16
@@ -91,9 +91,9 @@
; PLAIN: @M = constant i64* getelementptr (i64* null, i32 1)
; PLAIN: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
; PLAIN: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
-; OPT: @M = constant i64* getelementptr (i64* null, i32 1)
-; OPT: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
-; OPT: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: @M = constant i64* inttoptr (i64 8 to i64*)
+; OPT: @N = constant i64* inttoptr (i64 8 to i64*)
+; OPT: @O = constant i64* inttoptr (i64 8 to i64*)
; TO: @M = constant i64* inttoptr (i64 8 to i64*)
; TO: @N = constant i64* inttoptr (i64 8 to i64*)
; TO: @O = constant i64* inttoptr (i64 8 to i64*)
@@ -107,7 +107,7 @@
; PLAIN: @Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 2)
; PLAIN: @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; OPT: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
-; OPT: @Z = global i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
; TO: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
@@ -143,22 +143,22 @@
; PLAIN: ret i1* %t
; PLAIN: }
; OPT: define i8* @goo8() #0 {
-; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
+; OPT: ret i8* null
; OPT: }
; OPT: define i1* @goo1() #0 {
-; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
+; OPT: ret i1* null
; OPT: }
; OPT: define i8* @foo8() #0 {
-; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
+; OPT: ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @foo1() #0 {
-; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
+; OPT: ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; OPT: define i8* @hoo8() #0 {
-; OPT: ret i8* getelementptr (i8* null, i32 -1)
+; OPT: ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @hoo1() #0 {
-; OPT: ret i1* getelementptr (i1* null, i32 -1)
+; OPT: ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; TO: define i8* @goo8() #0 {
; TO: ret i8* null
@@ -180,20 +180,20 @@
; TO: }
; SCEV: Classifying expressions for: @goo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
-; SCEV: --> ((-1 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: --> (-1 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @goo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
-; SCEV: --> ((-1 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: --> (-1 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @foo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
-; SCEV: --> ((-2 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: --> (-2 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @foo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
-; SCEV: --> ((-2 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: --> (-2 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @hoo8
-; SCEV: --> (-1 * sizeof(i8))
+; SCEV: --> -1
; SCEV: Classifying expressions for: @hoo1
-; SCEV: --> (-1 * sizeof(i1))
+; SCEV: --> -1
define i8* @goo8() nounwind {
%t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
@@ -257,31 +257,31 @@ define i1* @hoo1() nounwind {
; PLAIN: ret i64 %t
; PLAIN: }
; OPT: define i64 @fa() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
+; OPT: ret i64 18480
; OPT: }
; OPT: define i64 @fb() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fc() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
+; OPT: ret i64 16
; OPT: }
; OPT: define i64 @fd() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
+; OPT: ret i64 88
; OPT: }
; OPT: define i64 @fe() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
+; OPT: ret i64 16
; OPT: }
; OPT: define i64 @ff() #0 {
; OPT: ret i64 1
; OPT: }
; OPT: define i64 @fg() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fh() #0 {
-; OPT: ret i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fi() #0 {
-; OPT: ret i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; TO: define i64 @fa() #0 {
; TO: ret i64 18480
@@ -311,22 +311,22 @@ define i1* @hoo1() nounwind {
; TO: ret i64 8
; TO: }
; SCEV: Classifying expressions for: @fa
-; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
+; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
; SCEV: --> (2310 * sizeof(double))
; SCEV: Classifying expressions for: @fb
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV: --> alignof(double)
; SCEV: Classifying expressions for: @fc
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
; SCEV: --> (2 * sizeof(double))
; SCEV: Classifying expressions for: @fd
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
; SCEV: --> (11 * sizeof(double))
; SCEV: Classifying expressions for: @fe
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; SCEV: --> offsetof({ double, float, double, double }, 2)
; SCEV: Classifying expressions for: @ff
-; SCEV: %t = bitcast i64 1 to i64
+; SCEV: %t = bitcast i64 1 to i64
; SCEV: --> 1
; SCEV: Classifying expressions for: @fg
; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
@@ -388,13 +388,13 @@ define i64 @fi() nounwind {
; PLAIN: ret i64* %t
; PLAIN: }
; OPT: define i64* @fM() #0 {
-; OPT: ret i64* getelementptr (i64* null, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fN() #0 {
-; OPT: ret i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fO() #0 {
-; OPT: ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; TO: define i64* @fM() #0 {
; TO: ret i64* inttoptr (i64 8 to i64*)
@@ -406,14 +406,14 @@ define i64 @fi() nounwind {
; TO: ret i64* inttoptr (i64 8 to i64*)
; TO: }
; SCEV: Classifying expressions for: @fM
-; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+; SCEV: --> 8
; SCEV: Classifying expressions for: @fN
-; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
+; SCEV: --> 8
; SCEV: Classifying expressions for: @fO
-; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+; SCEV: --> 8
define i64* @fM() nounwind {
%t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
@@ -433,14 +433,14 @@ define i64* @fO() nounwind {
; PLAIN: ret i32* %t
; PLAIN: }
; OPT: define i32* @fZ() #0 {
-; OPT: ret i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT: ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; OPT: }
; TO: define i32* @fZ() #0 {
; TO: ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: }
; SCEV: Classifying expressions for: @fZ
; SCEV: %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
-; SCEV: --> ((3 * sizeof(i32)) + @ext)
+; SCEV: --> (12 + @ext)
define i32* @fZ() nounwind {
%t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
@@ -457,14 +457,14 @@ define i8* @different_addrspace() nounwind noinline {
%p = getelementptr inbounds i8, i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
i32 2
ret i8* %p
-; OPT: ret i8* getelementptr (i8* addrspacecast (i8 addrspace(12)* getelementptr inbounds ([4 x i8] addrspace(12)* @p12, i32 0, i32 0) to i8*), i32 2)
+; OPT: ret i8* getelementptr ([4 x i8]* addrspacecast ([4 x i8] addrspace(12)* @p12 to [4 x i8]*), i64 0, i64 2)
}
define i8* @same_addrspace() nounwind noinline {
; OPT: same_addrspace
%p = getelementptr inbounds i8, i8* bitcast ([4 x i8] * @p0 to i8*), i32 2
ret i8* %p
-; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i32 0, i32 2)
+; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i64 0, i64 2)
}
@gv1 = internal global i32 1
diff --git a/llvm/test/Transforms/ConstantMerge/merge-both.ll b/llvm/test/Transforms/ConstantMerge/merge-both.ll
index 316267648f1..11b0621d42d 100644
--- a/llvm/test/Transforms/ConstantMerge/merge-both.ll
+++ b/llvm/test/Transforms/ConstantMerge/merge-both.ll
@@ -1,7 +1,6 @@
; RUN: opt -constmerge -S < %s | FileCheck %s
; Test that in one run var3 is merged into var2 and var1 into var4.
-; Test that we merge @var5 and @var6 into one with the higher alignment, and
-; don't merge var7/var8 into var5/var6.
+; Test that we merge @var5 and @var6 into one with the higher alignment
declare void @zed(%struct.foobar*, %struct.foobar*)
@@ -23,8 +22,8 @@ declare void @helper([16 x i8]*)
@var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
@var8 = private unnamed_addr constant [16 x i8] c"foo1bar2foo3bar\00"
-; CHECK-NEXT: @var6 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
-; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00"
+; CHECK-NEXT: @var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
+; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
@var4a = alias %struct.foobar* @var4
@llvm.used = appending global [1 x %struct.foobar*] [%struct.foobar* @var4a], section "llvm.metadata"
diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
index d4bf8032cda..7da65092dff 100644
--- a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
+++ b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
@@ -26,7 +26,7 @@ define internal void @_GLOBAL__I_a() section "__TEXT,__StaticInit,regular,pure_i
declare void @test(i8*)
define void @print() {
-; CHECK: %1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
+; CHECK: %1 = load i8*, i8** @_ZL14buttonInitData.0.0, align 4
%1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
call void @test(i8* %1)
ret void
diff --git a/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll b/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
index 4c0c3279232..58934f5ef84 100644
--- a/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
@@ -1,6 +1,10 @@
; RUN: opt < %s -indvars -S | grep sext | count 1
; ModuleID = '<stdin>'
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
%struct.App1Marker = type <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }>
%struct.ComponentInstanceRecord = type <{ [1 x i32] }>
%struct.DCPredictors = type { [5 x i16] }
diff --git a/llvm/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll b/llvm/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
index 47164d86047..44c43a3a043 100644
--- a/llvm/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2009-04-27-Floating.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -indvars -S | FileCheck %s
; PR4086
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare void @foo()
define void @test() {
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
index 2b69cb151d2..464b03ce559 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -indvars -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
target triple = "x86_64-apple-darwin"
; CHECK-LABEL: @sloop
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-extend-const.ll b/llvm/test/Transforms/IndVarSimplify/lftr-extend-const.ll
index f12c68cacb6..fa3166d4a3e 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr-extend-const.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-extend-const.ll
@@ -1,5 +1,9 @@
;RUN: opt -S %s -indvars | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; CHECK-LABEL: @foo(
; CHECK-NOT: %lftr.wideiv = trunc i32 %indvars.iv.next to i16
; CHECK: %exitcond = icmp ne i32 %indvars.iv.next, 512
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr_simple.ll b/llvm/test/Transforms/IndVarSimplify/lftr_simple.ll
index e373013609b..6a8d9372183 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr_simple.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr_simple.ll
@@ -1,7 +1,11 @@
-; LFTR should eliminate the need for the computation of i*i completely. It
+; LFTR should eliminate the need for the computation of i*i completely. It
; is only used to compute the exit value.
; RUN: opt < %s -indvars -dce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
@A = external global i32 ; <i32*> [#uses=1]
define i32 @quadratic_setlt() {
diff --git a/llvm/test/Transforms/IndVarSimplify/pr20680.ll b/llvm/test/Transforms/IndVarSimplify/pr20680.ll
index 0713f31783a..2c9eb54a789 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr20680.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr20680.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -indvars -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
@a = common global i32 0, align 4
@c = common global i32 0, align 4
@b = common global i32 0, align 4
diff --git a/llvm/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll b/llvm/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll
index 1fdbc63762c..c8d34acb24a 100644
--- a/llvm/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll
+++ b/llvm/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll
@@ -4,6 +4,10 @@
; sext for the addressing, however it shouldn't eliminate the sext
; on the other phi, since that value undergoes signed wrapping.
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foo(i32* nocapture %d, i32 %n) nounwind {
entry:
%0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll b/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
index b6765011336..f2afaf42bc9 100644
--- a/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
+++ b/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
@@ -1,5 +1,8 @@
-; RUN: opt < %s -indvars -S > %t
-; RUN: not grep sext %t
+; RUN: opt < %s -indvars -S | not grep sext
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
define i64 @test(i64* nocapture %first, i32 %count) nounwind readonly {
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/signed-trip-count.ll b/llvm/test/Transforms/IndVarSimplify/signed-trip-count.ll
index bbcc9c51141..992e22e2a96 100644
--- a/llvm/test/Transforms/IndVarSimplify/signed-trip-count.ll
+++ b/llvm/test/Transforms/IndVarSimplify/signed-trip-count.ll
@@ -2,6 +2,10 @@
; RUN: not grep sext %t
; RUN: grep phi %t | count 1
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foo(i64* nocapture %x, i32 %n) nounwind {
entry:
%tmp102 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll b/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
index 642d1ba205f..7272ef1ec13 100644
--- a/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
@@ -1,5 +1,9 @@
; RUN: opt -S -indvars < %s | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @test1(float* nocapture %autoc, float* nocapture %data, float %d, i32 %data_len, i32 %sample) nounwind {
entry:
%sub = sub i32 %data_len, %sample
diff --git a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
index 6be2238ce9a..eb81ceb700b 100644
--- a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
+++ b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
@@ -1,6 +1,10 @@
; RUN: opt < %s -indvars -S | FileCheck %s
target triple = "aarch64--linux-gnu"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; Check the loop exit i32 compare instruction and operand are widened to i64
; instead of truncating IV before its use in the i32 compare instruction.
diff --git a/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll b/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll
deleted file mode 100644
index d51c5626747..00000000000
--- a/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll
+++ /dev/null
@@ -1,88 +0,0 @@
-; RUN: opt < %s -inline -S | FileCheck %s
-; This variant of the test has no data layout information.
-target triple = "powerpc64-unknown-linux-gnu"
-
-%struct.s = type { i32, i32 }
-
-define void @foo(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32], align 4
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-define void @foo0(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32]
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-declare void @bar(i32*) #1
-
-define void @goo(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-; CHECK-LABEL: @main
-; CHECK: alloca [2 x i32], align 32
-; CHECK-NOT: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @main() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
- ret i32 0
-}
-
-; CHECK-LABEL: @test0
-; CHECK: alloca [2 x i32], align 32
-; CHECK: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @test0() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo0(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
- ret i32 0
-}
diff --git a/llvm/test/Transforms/Inline/alloca-merge-align.ll b/llvm/test/Transforms/Inline/alloca-merge-align.ll
index ef053a7ed76..70b94f7b69a 100644
--- a/llvm/test/Transforms/Inline/alloca-merge-align.ll
+++ b/llvm/test/Transforms/Inline/alloca-merge-align.ll
@@ -102,21 +102,3 @@ entry:
call void @goo(%struct.s* byval %tmpcast)
ret i32 0
}
-
-; CHECK-LABEL: @test1
-; CHECK: {{alloca \[2 x i32\]$}}
-; CHECK-NOT: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @test1() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo0(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @foo1(%struct.s* byval %tmpcast)
- ret i32 0
-}
diff --git a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
index 5abb77f2c3f..4307064e194 100644
--- a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
+++ b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
@@ -8,13 +8,13 @@ define void @helper() {
ret void
}
-; Size in llvm.lifetime.X should be -1 (unknown).
+; Size in llvm.lifetime.X should be 1 (default for i8).
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 -1
+; CHECK: llvm.lifetime.start(i64 1
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 -1
+; CHECK: llvm.lifetime.end(i64 1
call void @helper()
; CHECK-NOT: lifetime
; CHECK: ret void
diff --git a/llvm/test/Transforms/InstCombine/2012-09-24-MemcpyFromGlobalCrash.ll b/llvm/test/Transforms/InstCombine/2012-09-24-MemcpyFromGlobalCrash.ll
deleted file mode 100644
index 570da9be394..00000000000
--- a/llvm/test/Transforms/InstCombine/2012-09-24-MemcpyFromGlobalCrash.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-; Check we don't crash due to lack of target data.
-
-@G = constant [100 x i8] zeroinitializer
-
-declare void @bar(i8*)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-
-define void @test() {
-; CHECK-LABEL: @test(
-; CHECK: llvm.memcpy
-; CHECK: ret void
- %A = alloca [100 x i8]
- %a = getelementptr inbounds [100 x i8], [100 x i8]* %A, i64 0, i64 0
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* getelementptr inbounds ([100 x i8]* @G, i64 0, i32 0), i64 100, i32 4, i1 false)
- call void @bar(i8* %a) readonly
- ret void
-}
diff --git a/llvm/test/Transforms/InstCombine/extractvalue.ll b/llvm/test/Transforms/InstCombine/extractvalue.ll
index 4dc1545cb41..6319590873a 100644
--- a/llvm/test/Transforms/InstCombine/extractvalue.ll
+++ b/llvm/test/Transforms/InstCombine/extractvalue.ll
@@ -13,7 +13,7 @@ define i32 @foo(i32 %a, i32 %b) {
%s1 = insertvalue {i32, i32} %s1.1, i32 %b, 1
%v1 = extractvalue {i32, i32} %s1, 0
%v2 = extractvalue {i32, i32} %s1, 1
-
+
; Build a nested struct and pull a sub struct out of it
; This requires instcombine to insert a few insertvalue instructions
%ns1.1 = insertvalue {i32, {i32, i32}} undef, i32 %v1, 0
@@ -40,7 +40,7 @@ define i32 @foo(i32 %a, i32 %b) {
}
; CHECK-LABEL: define i32 @extract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i32 0, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i64 0, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: store
; CHECK-NEXT: br label %loop
@@ -68,7 +68,7 @@ end:
}
; CHECK-LABEL: define i32 @doubleextract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i32 0, i32 1, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i64 0, i32 1, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: ret i32 [[LOAD]]
define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
diff --git a/llvm/test/Transforms/InstCombine/gc.relocate.ll b/llvm/test/Transforms/InstCombine/gc.relocate.ll
index 8fbb752f891..4a7ea2c4234 100644
--- a/llvm/test/Transforms/InstCombine/gc.relocate.ll
+++ b/llvm/test/Transforms/InstCombine/gc.relocate.ll
@@ -1,4 +1,5 @@
-; RUN: opt < %s -datalayout -instcombine -S | FileCheck %s
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
; Uses InstCombine with DataLayout to propagate dereferenceable
; attribute via gc.relocate: if the derived ptr is dereferenceable(N),
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 7f4e2806c80..b0bfdc4c4c5 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -1,5 +1,4 @@
-; RUN: opt -instcombine -S < %s | FileCheck -check-prefix=NODL %s
-; RUN: opt -instcombine -S -default-data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck -check-prefix=P32 %s
+; RUN: opt -instcombine -S -default-data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck %s
@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
i16 73, i16 82, i16 69, i16 68, i16 0]
@@ -25,13 +24,9 @@ define i1 @test1(i32 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1(
-; NODL-NEXT: %R = icmp eq i32 %X, 9
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test1(
-; P32-NEXT: %R = icmp eq i32 %X, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds(i32 %X) {
@@ -39,12 +34,9 @@ define i1 @test1_noinbounds(i32 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1_noinbounds(
-; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
-
-; P32-LABEL: @test1_noinbounds(
-; P32-NEXT: %R = icmp eq i32 %X, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1_noinbounds(
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds_i64(i64 %X) {
@@ -52,12 +44,9 @@ define i1 @test1_noinbounds_i64(i64 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1_noinbounds_i64(
-; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
-
-; P32-LABEL: @test1_noinbounds_i64(
-; P32: %R = icmp eq i32 %1, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1_noinbounds_i64(
+; CHECK: %R = icmp eq i32 %1, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds_as1(i32 %x) {
@@ -66,10 +55,10 @@ define i1 @test1_noinbounds_as1(i32 %x) {
%r = icmp eq i16 %q, 0
ret i1 %r
-; P32-LABEL: @test1_noinbounds_as1(
-; P32-NEXT: trunc i32 %x to i16
-; P32-NEXT: %r = icmp eq i16 %1, 9
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test1_noinbounds_as1(
+; CHECK-NEXT: trunc i32 %x to i16
+; CHECK-NEXT: %r = icmp eq i16 %1, 9
+; CHECK-NEXT: ret i1 %r
}
define i1 @test2(i32 %X) {
@@ -77,9 +66,9 @@ define i1 @test2(i32 %X) {
%Q = load i16, i16* %P
%R = icmp slt i16 %Q, 85
ret i1 %R
-; NODL-LABEL: @test2(
-; NODL-NEXT: %R = icmp ne i32 %X, 4
-; NODL-NEXT: ret i1 %R
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: %R = icmp ne i32 %X, 4
+; CHECK-NEXT: ret i1 %R
}
define i1 @test3(i32 %X) {
@@ -87,13 +76,9 @@ define i1 @test3(i32 %X) {
%Q = load double, double* %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
-; NODL-LABEL: @test3(
-; NODL-NEXT: %R = icmp eq i32 %X, 1
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test3(
-; P32-NEXT: %R = icmp eq i32 %X, 1
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: %R = icmp eq i32 %X, 1
+; CHECK-NEXT: ret i1 %R
}
@@ -102,17 +87,11 @@ define i1 @test4(i32 %X) {
%Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
-; NODL-LABEL: @test4(
-; NODL-NEXT: lshr i32 933, %X
-; NODL-NEXT: and i32 {{.*}}, 1
-; NODL-NEXT: %R = icmp ne i32 {{.*}}, 0
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test4(
-; P32-NEXT: lshr i32 933, %X
-; P32-NEXT: and i32 {{.*}}, 1
-; P32-NEXT: %R = icmp ne i32 {{.*}}, 0
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: lshr i32 933, %X
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
}
define i1 @test4_i16(i16 %X) {
@@ -120,19 +99,12 @@ define i1 @test4_i16(i16 %X) {
%Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
-
-; NODL-LABEL: @test4_i16(
-; NODL-NEXT: lshr i16 933, %X
-; NODL-NEXT: and i16 {{.*}}, 1
-; NODL-NEXT: %R = icmp ne i16 {{.*}}, 0
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test4_i16(
-; P32-NEXT: sext i16 %X to i32
-; P32-NEXT: lshr i32 933, %1
-; P32-NEXT: and i32 {{.*}}, 1
-; P32-NEXT: %R = icmp ne i32 {{.*}}, 0
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test4_i16(
+; CHECK-NEXT: sext i16 %X to i32
+; CHECK-NEXT: lshr i32 933, %1
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
}
define i1 @test5(i32 %X) {
@@ -140,17 +112,11 @@ define i1 @test5(i32 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 69
ret i1 %R
-; NODL-LABEL: @test5(
-; NODL-NEXT: icmp eq i32 %X, 2
-; NODL-NEXT: icmp eq i32 %X, 7
-; NODL-NEXT: %R = or i1
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test5(
-; P32-NEXT: icmp eq i32 %X, 2
-; P32-NEXT: icmp eq i32 %X, 7
-; P32-NEXT: %R = or i1
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: icmp eq i32 %X, 2
+; CHECK-NEXT: icmp eq i32 %X, 7
+; CHECK-NEXT: %R = or i1
+; CHECK-NEXT: ret i1 %R
}
define i1 @test6(i32 %X) {
@@ -158,15 +124,10 @@ define i1 @test6(i32 %X) {
%Q = load double, double* %P
%R = fcmp ogt double %Q, 0.0
ret i1 %R
-; NODL-LABEL: @test6(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: %R = icmp ult i32 {{.*}}, 3
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test6(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: %R = icmp ult i32 {{.*}}, 3
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 3
+; CHECK-NEXT: ret i1 %R
}
define i1 @test7(i32 %X) {
@@ -174,15 +135,10 @@ define i1 @test7(i32 %X) {
%Q = load double, double* %P
%R = fcmp olt double %Q, 0.0
ret i1 %R
-; NODL-LABEL: @test7(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: %R = icmp ugt i32 {{.*}}, 2
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test7(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: %R = icmp ugt i32 {{.*}}, 2
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ugt i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
}
define i1 @test8(i32 %X) {
@@ -191,15 +147,10 @@ define i1 @test8(i32 %X) {
%R = and i16 %Q, 3
%S = icmp eq i16 %R, 0
ret i1 %S
-; NODL-LABEL: @test8(
-; NODL-NEXT: and i32 %X, -2
-; NODL-NEXT: icmp eq i32 {{.*}}, 8
-; NODL-NEXT: ret i1
-
-; P32-LABEL: @test8(
-; P32-NEXT: and i32 %X, -2
-; P32-NEXT: icmp eq i32 {{.*}}, 8
-; P32-NEXT: ret i1
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: and i32 %X, -2
+; CHECK-NEXT: icmp eq i32 {{.*}}, 8
+; CHECK-NEXT: ret i1
}
@GA = internal constant [4 x { i32, i32 } ] [
@@ -214,23 +165,15 @@ define i1 @test9(i32 %X) {
%Q = load i32, i32* %P
%R = icmp eq i32 %Q, 1
ret i1 %R
-; NODL-LABEL: @test9(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: icmp ult i32 {{.*}}, 2
-; NODL-NEXT: ret i1
-
-; P32-LABEL: @test9(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: icmp ult i32 {{.*}}, 2
-; P32-NEXT: ret i1
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1
}
define i1 @test10_struct(i32 %x) {
-; NODL-LABEL: @test10_struct(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
-
-; P32-LABEL: @test10_struct(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -238,11 +181,8 @@ define i1 @test10_struct(i32 %x) {
}
define i1 @test10_struct_noinbounds(i32 %x) {
-; NODL-LABEL: @test10_struct_noinbounds(
-; NODL: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
-
-; P32-LABEL: @test10_struct_noinbounds(
-; P32: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
+; CHECK-LABEL: @test10_struct_noinbounds(
+; CHECK: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%p = getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -252,11 +192,8 @@ define i1 @test10_struct_noinbounds(i32 %x) {
; Test that the GEP indices are converted before we ever get here
; Index < ptr size
define i1 @test10_struct_i16(i16 %x){
-; NODL-LABEL: @test10_struct_i16(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
-
-; P32-LABEL: @test10_struct_i16(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct_i16(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
@@ -266,11 +203,8 @@ define i1 @test10_struct_i16(i16 %x){
; Test that the GEP indices are converted before we ever get here
; Index > ptr size
define i1 @test10_struct_i64(i64 %x){
-; NODL-LABEL: @test10_struct_i64(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
-
-; P32-LABEL: @test10_struct_i64(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct_i64(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
@@ -278,12 +212,9 @@ define i1 @test10_struct_i64(i64 %x){
}
define i1 @test10_struct_noinbounds_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_noinbounds_i16(
-; NODL: getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
-
-; P32-LABEL: @test10_struct_noinbounds_i16(
-; P32: %1 = sext i16 %x to i32
-; P32: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
+; CHECK-LABEL: @test10_struct_noinbounds_i16(
+; CHECK: %1 = sext i16 %x to i32
+; CHECK: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
%p = getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
@@ -291,13 +222,9 @@ define i1 @test10_struct_noinbounds_i16(i16 %x) {
}
define i1 @test10_struct_arr(i32 %x) {
-; NODL-LABEL: @test10_struct_arr(
-; NODL-NEXT: %r = icmp ne i32 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr(
-; P32-NEXT: %r = icmp ne i32 %x, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr(
+; CHECK-NEXT: %r = icmp ne i32 %x, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -305,11 +232,8 @@ define i1 @test10_struct_arr(i32 %x) {
}
define i1 @test10_struct_arr_noinbounds(i32 %x) {
-; NODL-LABEL: @test10_struct_arr_noinbounds(
-; NODL-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds(
-; P32-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+; CHECK-LABEL: @test10_struct_arr_noinbounds(
+; CHECK-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -317,13 +241,9 @@ define i1 @test10_struct_arr_noinbounds(i32 %x) {
}
define i1 @test10_struct_arr_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_arr_i16(
-; NODL-NEXT: %r = icmp ne i16 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr_i16(
-; P32-NEXT: %r = icmp ne i16 %x, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_i16(
+; CHECK-NEXT: %r = icmp ne i16 %x, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -331,14 +251,10 @@ define i1 @test10_struct_arr_i16(i16 %x) {
}
define i1 @test10_struct_arr_i64(i64 %x) {
-; NODL-LABEL: @test10_struct_arr_i64(
-; NODL-NEXT: %r = icmp ne i64 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr_i64(
-; P32-NEXT: trunc i64 %x to i32
-; P32-NEXT: %r = icmp ne i32 %1, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_i64(
+; CHECK-NEXT: trunc i64 %x to i32
+; CHECK-NEXT: %r = icmp ne i32 %1, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -346,11 +262,8 @@ define i1 @test10_struct_arr_i64(i64 %x) {
}
define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_arr_noinbounds_i16(
-; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds_i16(
-; P32-NEXT: %r = icmp ne i16 %x, 1
+; CHECK-LABEL: @test10_struct_arr_noinbounds_i16(
+; CHECK-NEXT: %r = icmp ne i16 %x, 1
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
@@ -358,13 +271,9 @@ define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
}
define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
-; FIXME: Should be no trunc?
-; NODL-LABEL: @test10_struct_arr_noinbounds_i64(
-; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds_i64(
-; P32: %r = icmp ne i32 %1, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_noinbounds_i64(
+; CHECK: %r = icmp ne i32 %1, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
diff --git a/llvm/test/Transforms/InstCombine/overflow-mul.ll b/llvm/test/Transforms/InstCombine/overflow-mul.ll
index 6d8d40bcac3..bc0504b0a96 100644
--- a/llvm/test/Transforms/InstCombine/overflow-mul.ll
+++ b/llvm/test/Transforms/InstCombine/overflow-mul.ll
@@ -174,6 +174,17 @@ define <4 x i32> @pr20113(<4 x i16> %a, <4 x i16> %b) {
ret <4 x i32> %vcgez.i
}
+
+; The last test needs this weird datalayout.
+target datalayout = "i32:8:8"
+; Without it, InstCombine aligns the pointee to 4 bytes.
+; The known-zero bits that result from that alignment allow it to
+; turn:
+; and i32 %mul, 255
+; into:
+; and i32 %mul, 252
+; The mask is then no longer of the form 2^n-1, which prevents the transformation.
+
@pr21445_data = external global i32
define i1 @pr21445(i8 %a) {
; CHECK-LABEL: @pr21445(
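The comment in the overflow-mul.ll hunk above hinges on DataLayout's ABI alignment rules: under the default layout an i32 object is 4-byte aligned, so InstCombine can treat the low two bits of its address as known zero, while the "i32:8:8" override drops that alignment to one byte. A minimal C++ sketch of the difference, assuming a translation unit linked against LLVM (the standalone program and its asserts are illustrative, not part of this patch):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  // Default layout: i32 gets 4-byte ABI alignment, so the two low bits of an
  // i32 object's address can be assumed zero, which is what rewrites the mask.
  assert(llvm::DataLayout("").getABITypeAlignment(I32) == 4);
  // The test's override: only 1-byte alignment, so no address bits are known
  // zero and the 255 mask survives for the overflow-mul pattern to match.
  assert(llvm::DataLayout("i32:8:8").getABITypeAlignment(I32) == 1);
  return 0;
}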
diff --git a/llvm/test/Transforms/InstCombine/pr21651.ll b/llvm/test/Transforms/InstCombine/pr21651.ll
index 914785f329a..d2b8d312d8c 100644
--- a/llvm/test/Transforms/InstCombine/pr21651.ll
+++ b/llvm/test/Transforms/InstCombine/pr21651.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -instcombine -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @PR21651() {
switch i2 0, label %out [
i2 0, label %out
diff --git a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
index 6eb474449c9..e264c2c7240 100644
--- a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
@@ -1,4 +1,5 @@
; RUN: opt -S < %s -instcombine | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32"
@G = constant [3 x i8] c"%s\00" ; <[3 x i8]*> [#uses=1]
@@ -19,7 +20,7 @@ define i8* @test1() {
ret i8* %tmp3
; CHECK-LABEL: @test1(
-; CHECK: ret i8* getelementptr inbounds ([5 x i8]* @str, i32 0, i64 3)
+; CHECK: ret i8* getelementptr inbounds ([5 x i8]* @str, i32 0, i32 3)
}
declare i8* @strchr(i8*, i32)
@@ -29,7 +30,7 @@ define i8* @test2() {
ret i8* %tmp3
; CHECK-LABEL: @test2(
-; CHECK: ret i8* getelementptr inbounds ([8 x i8]* @str1, i32 0, i64 7)
+; CHECK: ret i8* getelementptr inbounds ([8 x i8]* @str1, i32 0, i32 7)
}
define i8* @test3() {
diff --git a/llvm/test/Transforms/InstCombine/store.ll b/llvm/test/Transforms/InstCombine/store.ll
index c087a733185..5dfbd714090 100644
--- a/llvm/test/Transforms/InstCombine/store.ll
+++ b/llvm/test/Transforms/InstCombine/store.ll
@@ -41,7 +41,7 @@ Cont:
; CHECK-LABEL: @test3(
; CHECK-NOT: alloca
; CHECK: Cont:
-; CHECK-NEXT: %storemerge = phi i32 [ 47, %Cond2 ], [ -987654321, %Cond ]
+; CHECK-NEXT: %storemerge = phi i32 [ -987654321, %Cond ], [ 47, %Cond2 ]
; CHECK-NEXT: ret i32 %storemerge
}
diff --git a/llvm/test/Transforms/InstCombine/type_pun.ll b/llvm/test/Transforms/InstCombine/type_pun.ll
index 33143ef561b..098164cd029 100644
--- a/llvm/test/Transforms/InstCombine/type_pun.ll
+++ b/llvm/test/Transforms/InstCombine/type_pun.ll
@@ -10,6 +10,10 @@
; cleaning up the alloca/store/GEP/load.
+; Provide legal integer types.
+target datalayout = "p:32:32"
+
+
; Extracting the zeroth element in an i32 array.
define i32 @type_pun_zeroth(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_zeroth(
diff --git a/llvm/test/Transforms/LoopSimplify/preserve-scev.ll b/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
index bc6d35c3d72..f6fa8afc56b 100644
--- a/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
+++ b/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
@@ -1,6 +1,9 @@
; RUN: opt -S < %s -indvars | opt -analyze -iv-users | grep "%cmp = icmp slt i32" | grep "= {%\.ph,+,1}<%for.cond>"
; PR8079
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; LoopSimplify should invalidate indvars when splitting out the
; inner loop.
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll b/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
index 11b3171886e..1e57aee4889 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -loop-reduce -S | grep add | count 2
; PR 2662
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@g_3 = common global i16 0 ; <i16*> [#uses=2]
@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll b/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
index 5fb157b2070..ee21ea4b1c2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -loop-reduce -S | grep phi | count 2
; PR 2779
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@g_19 = common global i32 0 ; <i32*> [#uses=3]
@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll
index 3ea4abfe486..5588995a8ce 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll
@@ -4,6 +4,10 @@
target triple = "x86-apple-darwin"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; Verify that identical edges are merged. rdar://problem/6453893
; CHECK-LABEL: @test1(
; CHECK: bb89:
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
index 4388a334ddb..cc8226e1258 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
@@ -5,6 +5,10 @@
target triple = "x86-apple-darwin"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; CHECK-LABEL: @test(
; multiplies are hoisted out of the loop
; CHECK: while.body.lr.ph:
@@ -17,10 +21,10 @@ target triple = "x86-apple-darwin"
; CHECK: phi
; CHECK: phi
; CHECK-NOT: phi
-; CHECK: bitcast float* {{.*}} to i8*
-; CHECK: bitcast float* {{.*}} to i8*
-; CHECK: getelementptr i8, i8*
-; CHECK: getelementptr i8, i8*
+; CHECK: bitcast float* {{.*}} to i1*
+; CHECK: bitcast float* {{.*}} to i1*
+; CHECK: getelementptr i1, i1*
+; CHECK: getelementptr i1, i1*
define float @test(float* nocapture %A, float* nocapture %B, i32 %N, i32 %IA, i32 %IB) nounwind uwtable readonly ssp {
entry:
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll b/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
index 7cac15f0ec7..edd333b2336 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
@@ -1,5 +1,4 @@
-; RUN: opt -loop-reduce -disable-output -debug-only=loop-reduce < %s 2> %t
-; RUN: FileCheck %s < %t
+; RUN: opt -loop-reduce -disable-output -debug-only=loop-reduce < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
; PR13361: LSR + SCEV "hangs" on reasonably sized test with sequence of loops
@@ -18,6 +17,10 @@
; CHECK-NOT:reg
; CHECK: Filtering for use
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
%struct.snork = type { %struct.fuga, i32, i32, i32, i32, i32, i32 }
%struct.fuga = type { %struct.gork, i64 }
%struct.gork = type { i8*, i32, i32, %struct.noot* }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll
index 9a7f4865c59..ba03597c525 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2008-08-14-ShadowIV.ll
@@ -1,5 +1,9 @@
; RUN: opt < %s -loop-reduce -S -mtriple=x86_64-unknown-unknown | grep "phi double" | count 1
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foobar(i32 %n) nounwind {
entry:
icmp eq i32 %n, 0 ; <i1>:0 [#uses=2]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-07-20-DoubleIV.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-07-20-DoubleIV.ll
index 2fe62e39fc9..0fc928ca9b2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-07-20-DoubleIV.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-07-20-DoubleIV.ll
@@ -4,6 +4,10 @@
; nonzero initial value.
; rdar://9786536
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; First, make sure LSR doesn't crash on an empty IVUsers list.
; CHECK-LABEL: @dummyIV(
; CHECK-NOT: phi
diff --git a/llvm/test/Transforms/LoopStrengthReduce/count-to-zero.ll b/llvm/test/Transforms/LoopStrengthReduce/count-to-zero.ll
index 0e96f02904d..ca93e38ab64 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/count-to-zero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/count-to-zero.ll
@@ -1,6 +1,9 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
; rdar://7382068
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
define void @t(i32 %c) nounwind optsize {
entry:
br label %bb6
diff --git a/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll b/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
index 4809def86ae..551bd03f0ae 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
@@ -4,6 +4,10 @@
; Don't reverse the iteration if the rhs of the compare is defined
; inside the loop.
+; Provide legal integer types.
+; Declare i2 as legal so that IVUsers is willing to consider %indvar3451.
+target datalayout = "n2:8:16:32:64"
+
define void @Fill_Buffer(i2* %p) nounwind {
entry:
br label %bb8
diff --git a/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll b/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
index f996f22625f..d95220d238a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ivchain.ll
@@ -4,6 +4,10 @@
; A sign extend feeds an IVUser and cannot be hoisted into the AddRec.
; CollectIVChains should bailout on this case.
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
%struct = type { i8*, i8*, i16, i64, i16, i16, i16, i64, i64, i16, i8*, i64, i64, i64 }
; CHECK-LABEL: @test(
diff --git a/llvm/test/Transforms/LoopStrengthReduce/nested-reduce.ll b/llvm/test/Transforms/LoopStrengthReduce/nested-reduce.ll
index 58b8d3eecd0..c05b19d8fdd 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/nested-reduce.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/nested-reduce.ll
@@ -1,5 +1,8 @@
; RUN: opt < %s -loop-reduce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; Make sure we don't get a multiply by 6 in this loop.
define i32 @foo(i32 %A, i32 %B, i32 %C, i32 %D) {
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll b/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
index dfc1343912c..e33e40503a3 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
@@ -1,5 +1,8 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@d = common global i32 0, align 4
define void @fn2(i32 %x) nounwind uwtable {
diff --git a/llvm/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll b/llvm/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
index 65aa61fb937..483becc0e7b 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
@@ -1,5 +1,8 @@
; RUN: opt < %s -analyze -iv-users | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; The value of %r is dependent on a polynomial iteration expression.
;
; CHECK-LABEL: IV Users for loop %foo.loop
diff --git a/llvm/test/Transforms/LoopStrengthReduce/remove_indvar.ll b/llvm/test/Transforms/LoopStrengthReduce/remove_indvar.ll
index fd0e9a5a808..3b92c25389b 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/remove_indvar.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/remove_indvar.ll
@@ -1,6 +1,9 @@
; Check that this test makes INDVAR and related stuff dead.
; RUN: opt < %s -loop-reduce -S | not grep INDVAR
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare i1 @pred()
define void @test(i32* %P) {
diff --git a/llvm/test/Transforms/LoopStrengthReduce/variable_stride.ll b/llvm/test/Transforms/LoopStrengthReduce/variable_stride.ll
index 7c0f053e4c3..f82b2fc29ac 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/variable_stride.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/variable_stride.ll
@@ -1,6 +1,9 @@
; Check that variable strides are reduced to adds instead of multiplies.
; RUN: opt < %s -loop-reduce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare i1 @pred(i32)
define void @test([10000 x i32]* %P, i32 %STRIDE) {
diff --git a/llvm/test/Transforms/PhaseOrdering/scev.ll b/llvm/test/Transforms/PhaseOrdering/scev.ll
index 914af360937..c616ca2d768 100644
--- a/llvm/test/Transforms/PhaseOrdering/scev.ll
+++ b/llvm/test/Transforms/PhaseOrdering/scev.ll
@@ -7,7 +7,7 @@
; CHECK: test1
; The loop body contains two increments by %div.
; Make sure that 2*%div is recognizable, and not expressed as a bit mask of %d.
-; CHECK: --> {%p,+,(2 * (%d /u 4) * sizeof(i32))}
+; CHECK: --> {%p,+,(8 * (%d /u 4))}
define void @test1(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
%div = udiv i64 %d, 4
@@ -36,7 +36,7 @@ for.end: ; preds = %for.cond
; CHECK: test1a
; Same thing as test1, but it is even more tempting to fold 2 * (%d /u 2)
-; CHECK: --> {%p,+,(2 * (%d /u 2) * sizeof(i32))}
+; CHECK: --> {%p,+,(8 * (%d /u 2))}
define void @test1a(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
%div = udiv i64 %d, 2
diff --git a/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll b/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
index 2de2f6717b5..28f503a210c 100644
--- a/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
+++ b/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
@@ -1,5 +1,11 @@
; RUN: opt < %s -scalarrepl -S | not grep shr
+; FIXME: I think this test is no longer valid.
+; It only passed because SROA aborted when
+; no datalayout was supplied.
+; XFAIL: *
+
+
%struct.S = type { i16 }
define zeroext i1 @f(i16 signext %b) {
diff --git a/llvm/test/Transforms/Scalarizer/no-data-layout.ll b/llvm/test/Transforms/Scalarizer/no-data-layout.ll
deleted file mode 100644
index c89c7868c57..00000000000
--- a/llvm/test/Transforms/Scalarizer/no-data-layout.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: opt %s -scalarizer -scalarize-load-store -S | FileCheck %s
-
-; Test the handling of loads and stores when no data layout is available.
-define void @f1(<4 x float> *%dest, <4 x float> *%src) {
-; CHECK: @f1(
-; CHECK: %val = load <4 x float>, <4 x float>* %src, align 4
-; CHECK: %val.i0 = extractelement <4 x float> %val, i32 0
-; CHECK: %add.i0 = fadd float %val.i0, %val.i0
-; CHECK: %val.i1 = extractelement <4 x float> %val, i32 1
-; CHECK: %add.i1 = fadd float %val.i1, %val.i1
-; CHECK: %val.i2 = extractelement <4 x float> %val, i32 2
-; CHECK: %add.i2 = fadd float %val.i2, %val.i2
-; CHECK: %val.i3 = extractelement <4 x float> %val, i32 3
-; CHECK: %add.i3 = fadd float %val.i3, %val.i3
-; CHECK: %add.upto0 = insertelement <4 x float> undef, float %add.i0, i32 0
-; CHECK: %add.upto1 = insertelement <4 x float> %add.upto0, float %add.i1, i32 1
-; CHECK: %add.upto2 = insertelement <4 x float> %add.upto1, float %add.i2, i32 2
-; CHECK: %add = insertelement <4 x float> %add.upto2, float %add.i3, i32 3
-; CHECK: store <4 x float> %add, <4 x float>* %dest, align 8
-; CHECK: ret void
- %val = load <4 x float> , <4 x float> *%src, align 4
- %add = fadd <4 x float> %val, %val
- store <4 x float> %add, <4 x float> *%dest, align 8
- ret void
-}
diff --git a/llvm/tools/llc/llc.cpp b/llvm/tools/llc/llc.cpp
index f9634a7104c..55a45dca2e8 100644
--- a/llvm/tools/llc/llc.cpp
+++ b/llvm/tools/llc/llc.cpp
@@ -306,8 +306,7 @@ static int compileModule(char **argv, LLVMContext &Context) {
// Add the target data from the target machine, if it exists, or the module.
if (const DataLayout *DL = Target->getDataLayout())
- M->setDataLayout(DL);
- PM.add(new DataLayoutPass());
+ M->setDataLayout(*DL);
if (RelaxAll.getNumOccurrences() > 0 &&
FileType != TargetMachine::CGFT_ObjectFile)
diff --git a/llvm/tools/llvm-extract/llvm-extract.cpp b/llvm/tools/llvm-extract/llvm-extract.cpp
index 8cbcdc50bce..8bfd3197fdc 100644
--- a/llvm/tools/llvm-extract/llvm-extract.cpp
+++ b/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -247,7 +247,6 @@ int main(int argc, char **argv) {
// In addition to deleting all other functions, we also want to spiff it
// up a little bit. Do this now.
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass()); // Use correct DataLayout
std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
diff --git a/llvm/tools/opt/opt.cpp b/llvm/tools/opt/opt.cpp
index d9525259ed2..58376ee7ee9 100644
--- a/llvm/tools/opt/opt.cpp
+++ b/llvm/tools/opt/opt.cpp
@@ -419,15 +419,11 @@ int main(int argc, char **argv) {
Passes.add(new TargetLibraryInfoWrapperPass(TLII));
// Add an appropriate DataLayout instance for this module.
- const DataLayout *DL = M->getDataLayout();
- if (!DL && !DefaultDataLayout.empty()) {
+ const DataLayout &DL = M->getDataLayout();
+ if (DL.isDefault() && !DefaultDataLayout.empty()) {
M->setDataLayout(DefaultDataLayout);
- DL = M->getDataLayout();
}
- if (DL)
- Passes.add(new DataLayoutPass());
-
// Add internal analysis passes from the target machine.
Passes.add(createTargetTransformInfoWrapperPass(TM ? TM->getTargetIRAnalysis()
: TargetIRAnalysis()));
@@ -435,8 +431,6 @@ int main(int argc, char **argv) {
std::unique_ptr<legacy::FunctionPassManager> FPasses;
if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses.reset(new legacy::FunctionPassManager(M.get()));
- if (DL)
- FPasses->add(new DataLayoutPass());
FPasses->add(createTargetTransformInfoWrapperPass(
TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis()));
}
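The opt.cpp hunk above shows the pattern this patch moves every consumer to: the Module owns its DataLayout by value, so callers read it by reference and install a fallback layout only when the module still carries the default (empty) one. A minimal sketch of that idiom; the helper name and fallback string are illustrative, not part of the patch:

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"

// Mirrors the opt.cpp logic: override the layout only if the module
// does not already specify one of its own.
static void applyFallbackDataLayout(llvm::Module &M, llvm::StringRef Fallback) {
  const llvm::DataLayout &DL = M.getDataLayout(); // a reference now, never null
  if (DL.isDefault() && !Fallback.empty())
    M.setDataLayout(Fallback); // parses the layout string into the module
}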
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index 08a729aba50..d47b5e9e6ab 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -111,9 +111,9 @@ TEST_F(IRBuilderTest, LandingPadName) {
TEST_F(IRBuilderTest, DataLayout) {
std::unique_ptr<Module> M(new Module("test", Ctx));
M->setDataLayout("e-n32");
- EXPECT_TRUE(M->getDataLayout()->isLegalInteger(32));
+ EXPECT_TRUE(M->getDataLayout().isLegalInteger(32));
M->setDataLayout("e");
- EXPECT_FALSE(M->getDataLayout()->isLegalInteger(32));
+ EXPECT_FALSE(M->getDataLayout().isLegalInteger(32));
}
TEST_F(IRBuilderTest, GetIntTy) {
diff --git a/llvm/unittests/IR/LegacyPassManagerTest.cpp b/llvm/unittests/IR/LegacyPassManagerTest.cpp
index 9cb94140766..66fd1ccd7f2 100644
--- a/llvm/unittests/IR/LegacyPassManagerTest.cpp
+++ b/llvm/unittests/IR/LegacyPassManagerTest.cpp
@@ -98,7 +98,6 @@ namespace llvm {
initializeModuleNDMPass(*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run++;
return false;
}
@@ -175,7 +174,6 @@ namespace llvm {
initializeCGPassPass(*PassRegistry::getPassRegistry());
}
bool runOnSCC(CallGraphSCC &SCMM) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
@@ -214,7 +212,6 @@ namespace llvm {
return false;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
@@ -251,7 +248,6 @@ namespace llvm {
return false;
}
bool runOnBasicBlock(BasicBlock &BB) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
@@ -276,7 +272,6 @@ namespace llvm {
initializeFPassPass(*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) {
Function &F = *I;
{
@@ -302,7 +297,6 @@ namespace llvm {
mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(mNDM2);
Passes.add(mNDM);
Passes.add(mNDNM);
@@ -326,7 +320,6 @@ namespace llvm {
mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(mNDM);
Passes.add(mNDNM);
Passes.add(mNDM2);// invalidates mNDM needed by mDNM
@@ -348,7 +341,6 @@ namespace llvm {
std::unique_ptr<Module> M(makeLLVMModule());
T *P = new T();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(P);
Passes.run(*M);
T::finishedOK(run);
@@ -359,7 +351,6 @@ namespace llvm {
Module *M = makeLLVMModule();
T *P = new T();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(P);
Passes.run(*M);
T::finishedOK(run, N);
@@ -397,7 +388,6 @@ namespace llvm {
SCOPED_TRACE("Running OnTheFlyTest");
struct OnTheFlyTest *O = new OnTheFlyTest();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(O);
Passes.run(*M);