67 files changed, 213 insertions, 196 deletions
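The source changes below all follow LLVM's usual pass-registration recipe: declare an initialize*Pass() hook next to the factory function in AArch64.h, call it from the pass constructor, expand INITIALIZE_PASS after the anonymous namespace, and run the initializer from LLVMInitializeAArch64Target(). A minimal sketch of that recipe follows, using a hypothetical AArch64FooBar pass that is not part of this patch; only the structure mirrors the hunks below.

// AArch64.h -- declare the initializer alongside the factory function.
namespace llvm {
FunctionPass *createAArch64FooBarPass();
void initializeAArch64FooBarPass(PassRegistry &);
} // end namespace llvm

// AArch64FooBar.cpp
#include "AArch64.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;

#define FOOBAR_PASS_NAME "AArch64 FooBar (hypothetical example)"

namespace {
struct AArch64FooBar : public MachineFunctionPass {
  static char ID;
  AArch64FooBar() : MachineFunctionPass(ID) {
    // Self-initialize so the pass is registered even when it is
    // constructed directly via createAArch64FooBarPass().
    initializeAArch64FooBarPass(*PassRegistry::getPassRegistry());
  }
  const char *getPassName() const override { return FOOBAR_PASS_NAME; }
  bool runOnMachineFunction(MachineFunction &MF) override { return false; }
};
char AArch64FooBar::ID = 0;
} // end anonymous namespace

// Gives the pass a stable command-line name for -run-pass, -print-after, etc.
INITIALIZE_PASS(AArch64FooBar, "aarch64-foobar", FOOBAR_PASS_NAME, false, false)

FunctionPass *llvm::createAArch64FooBarPass() { return new AArch64FooBar(); }

// AArch64TargetMachine.cpp -- run the initializer once at target startup.
extern "C" void LLVMInitializeAArch64Target() {
  // ... existing RegisterTargetMachine calls ...
  initializeAArch64FooBarPass(*PassRegistry::getPassRegistry());
}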
diff --git a/llvm/lib/Target/AArch64/AArch64.h b/llvm/lib/Target/AArch64/AArch64.h index e33e1232ada..9bc800e5f96 100644 --- a/llvm/lib/Target/AArch64/AArch64.h +++ b/llvm/lib/Target/AArch64/AArch64.h @@ -46,8 +46,21 @@ FunctionPass *createAArch64CleanupLocalDynamicTLSPass(); FunctionPass *createAArch64CollectLOHPass(); +void initializeAArch64A53Fix835769Pass(PassRegistry&); +void initializeAArch64A57FPLoadBalancingPass(PassRegistry&); +void initializeAArch64AddressTypePromotionPass(PassRegistry&); +void initializeAArch64AdvSIMDScalarPass(PassRegistry&); +void initializeAArch64BranchRelaxationPass(PassRegistry&); +void initializeAArch64CollectLOHPass(PassRegistry&); +void initializeAArch64ConditionalComparesPass(PassRegistry&); +void initializeAArch64ConditionOptimizerPass(PassRegistry&); +void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&); void initializeAArch64ExpandPseudoPass(PassRegistry&); void initializeAArch64LoadStoreOptPass(PassRegistry&); +void initializeAArch64PromoteConstantPass(PassRegistry&); +void initializeAArch64RedundantCopyEliminationPass(PassRegistry&); +void initializeAArch64StorePairSuppressPass(PassRegistry&); +void initializeLDTLSCleanupPass(PassRegistry&); } // end namespace llvm #endif diff --git a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp index c2cca63f497..510cdba5faf 100644 --- a/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp +++ b/llvm/lib/Target/AArch64/AArch64A53Fix835769.cpp @@ -82,7 +82,9 @@ class AArch64A53Fix835769 : public MachineFunctionPass { public: static char ID; - explicit AArch64A53Fix835769() : MachineFunctionPass(ID) {} + explicit AArch64A53Fix835769() : MachineFunctionPass(ID) { + initializeAArch64A53Fix835769Pass(*PassRegistry::getPassRegistry()); + } bool runOnMachineFunction(MachineFunction &F) override; @@ -107,6 +109,9 @@ char AArch64A53Fix835769::ID = 0; } // end anonymous namespace +INITIALIZE_PASS(AArch64A53Fix835769, "aarch64-fix-cortex-a53-835769-pass", + "AArch64 fix for A53 erratum 835769", false, false) + //===----------------------------------------------------------------------===// bool diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp index 0465e59dc54..4dbc1d77fe1 100644 --- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -95,10 +95,6 @@ static bool isMla(MachineInstr *MI) { } } -namespace llvm { -static void initializeAArch64A57FPLoadBalancingPass(PassRegistry &); -} - //===----------------------------------------------------------------------===// namespace { diff --git a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp index 4846ef08c98..b40f675e820 100644 --- a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp +++ b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp @@ -47,10 +47,6 @@ using namespace llvm; #define DEBUG_TYPE "aarch64-type-promotion" static cl::opt<bool> -EnableAddressTypePromotion("aarch64-type-promotion", cl::Hidden, - cl::desc("Enable the type promotion pass"), - cl::init(true)); -static cl::opt<bool> EnableMerge("aarch64-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), @@ -62,10 +58,6 @@ EnableMerge("aarch64-type-promotion-merge", cl::Hidden, // AArch64AddressTypePromotion 
//===----------------------------------------------------------------------===// -namespace llvm { -void initializeAArch64AddressTypePromotionPass(PassRegistry &); -} - namespace { class AArch64AddressTypePromotion : public FunctionPass { @@ -481,7 +473,7 @@ bool AArch64AddressTypePromotion::runOnFunction(Function &F) { if (skipFunction(F)) return false; - if (!EnableAddressTypePromotion || F.isDeclaration()) + if (F.isDeclaration()) return false; Func = &F; ConsideredSExtType = Type::getInt64Ty(Func->getContext()); diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp index d0a2dd3fa1f..65af1b2ba30 100644 --- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp @@ -61,10 +61,6 @@ STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used"); STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted"); STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted"); -namespace llvm { -void initializeAArch64AdvSIMDScalarPass(PassRegistry &); -} - #define AARCH64_ADVSIMD_NAME "AdvSIMD Scalar Operation Optimization" namespace { diff --git a/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp b/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp index f8dc03e0210..47df20fc3e9 100644 --- a/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp +++ b/llvm/lib/Target/AArch64/AArch64BranchRelaxation.cpp @@ -26,10 +26,6 @@ using namespace llvm; #define DEBUG_TYPE "aarch64-branch-relax" -static cl::opt<bool> -BranchRelaxation("aarch64-branch-relax", cl::Hidden, cl::init(true), - cl::desc("Relax out of range conditional branches")); - static cl::opt<unsigned> TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14), cl::desc("Restrict range of TB[N]Z instructions (DEBUG)")); @@ -480,10 +476,6 @@ bool AArch64BranchRelaxation::relaxBranchInstructions() { bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) { MF = &mf; - // If the pass is disabled, just bail early. 
- if (!BranchRelaxation) - return false; - DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n"); TII = (const AArch64InstrInfo *)MF->getSubtarget().getInstrInfo(); diff --git a/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp index 011a03622ba..7ba3021a07f 100644 --- a/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp @@ -33,10 +33,14 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" using namespace llvm; +#define TLSCLEANUP_PASS_NAME "AArch64 Local Dynamic TLS Access Clean-up" + namespace { struct LDTLSCleanup : public MachineFunctionPass { static char ID; - LDTLSCleanup() : MachineFunctionPass(ID) {} + LDTLSCleanup() : MachineFunctionPass(ID) { + initializeLDTLSCleanupPass(*PassRegistry::getPassRegistry()); + } bool runOnMachineFunction(MachineFunction &MF) override { if (skipFunction(*MF.getFunction())) @@ -128,9 +132,7 @@ struct LDTLSCleanup : public MachineFunctionPass { return Copy; } - const char *getPassName() const override { - return "Local Dynamic TLS Access Clean-up"; - } + const char *getPassName() const override { return TLSCLEANUP_PASS_NAME; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); @@ -140,6 +142,9 @@ struct LDTLSCleanup : public MachineFunctionPass { }; } +INITIALIZE_PASS(LDTLSCleanup, "aarch64-local-dynamic-tls-cleanup", + TLSCLEANUP_PASS_NAME, false, false) + char LDTLSCleanup::ID = 0; FunctionPass *llvm::createAArch64CleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); diff --git a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp index 5eecb3a8685..e19187e79d4 100644 --- a/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp +++ b/llvm/lib/Target/AArch64/AArch64CollectLOH.cpp @@ -164,10 +164,6 @@ STATISTIC(NumTooCplxLvl2, "Number of too complex case of level 2"); STATISTIC(NumADRSimpleCandidate, "Number of simplifiable ADRP + ADD"); STATISTIC(NumADRComplexCandidate, "Number of too complex ADRP + ADD"); -namespace llvm { -void initializeAArch64CollectLOHPass(PassRegistry &); -} - #define AARCH64_COLLECT_LOH_NAME "AArch64 Collect Linker Optimization Hint (LOH)" namespace { diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp index 8fff381d391..5eea0e09787 100644 --- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -95,7 +95,9 @@ public: typedef std::tuple<int, unsigned, AArch64CC::CondCode> CmpInfo; static char ID; - AArch64ConditionOptimizer() : MachineFunctionPass(ID) {} + AArch64ConditionOptimizer() : MachineFunctionPass(ID) { + initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry()); + } void getAnalysisUsage(AnalysisUsage &AU) const override; MachineInstr *findSuitableCompare(MachineBasicBlock *MBB); CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp); @@ -111,10 +113,6 @@ public: char AArch64ConditionOptimizer::ID = 0; -namespace llvm { -void initializeAArch64ConditionOptimizerPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt", "AArch64 CondOpt Pass", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp index e1b0dc724b3..6e9ca8428bb 100644 --- 
a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -732,7 +732,9 @@ class AArch64ConditionalCompares : public MachineFunctionPass { public: static char ID; - AArch64ConditionalCompares() : MachineFunctionPass(ID) {} + AArch64ConditionalCompares() : MachineFunctionPass(ID) { + initializeAArch64ConditionalComparesPass(*PassRegistry::getPassRegistry()); + } void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnMachineFunction(MachineFunction &MF) override; const char *getPassName() const override { @@ -750,10 +752,6 @@ private: char AArch64ConditionalCompares::ID = 0; -namespace llvm { -void initializeAArch64ConditionalComparesPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64ConditionalCompares, "aarch64-ccmp", "AArch64 CCMP Pass", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp index 7a6f7669db5..8681b7ca115 100644 --- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp +++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp @@ -26,10 +26,6 @@ using namespace llvm; STATISTIC(NumDeadDefsReplaced, "Number of dead definitions replaced"); -namespace llvm { -void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry &); -} - #define AARCH64_DEAD_REG_DEF_NAME "AArch64 Dead register definitions" namespace { diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp index b1e40510b2a..73932e7f35f 100644 --- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp +++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -101,7 +101,9 @@ public: }; static char ID; - AArch64PromoteConstant() : ModulePass(ID) {} + AArch64PromoteConstant() : ModulePass(ID) { + initializeAArch64PromoteConstantPass(*PassRegistry::getPassRegistry()); + } const char *getPassName() const override { return "AArch64 Promote Constant"; } @@ -214,10 +216,6 @@ private: char AArch64PromoteConstant::ID = 0; -namespace llvm { -void initializeAArch64PromoteConstantPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64PromoteConstant, "aarch64-promote-const", "AArch64 Promote Constant Pass", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) diff --git a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp index 60d8bbd260b..8d891caf166 100644 --- a/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ b/llvm/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -39,10 +39,6 @@ using namespace llvm; STATISTIC(NumCopiesRemoved, "Number of copies removed."); -namespace llvm { -void initializeAArch64RedundantCopyEliminationPass(PassRegistry &); -} - namespace { class AArch64RedundantCopyElimination : public MachineFunctionPass { const MachineRegisterInfo *MRI; @@ -50,7 +46,10 @@ class AArch64RedundantCopyElimination : public MachineFunctionPass { public: static char ID; - AArch64RedundantCopyElimination() : MachineFunctionPass(ID) {} + AArch64RedundantCopyElimination() : MachineFunctionPass(ID) { + initializeAArch64RedundantCopyEliminationPass( + *PassRegistry::getPassRegistry()); + } bool optimizeCopy(MachineBasicBlock *MBB); bool runOnMachineFunction(MachineFunction &MF) override; MachineFunctionProperties getRequiredProperties() const override { diff --git a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp 
b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp index f904b237941..564d4ef9980 100644 --- a/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp +++ b/llvm/lib/Target/AArch64/AArch64StorePairSuppress.cpp @@ -25,6 +25,8 @@ using namespace llvm; #define DEBUG_TYPE "aarch64-stp-suppress" +#define STPSUPPRESS_PASS_NAME "AArch64 Store Pair Suppression" + namespace { class AArch64StorePairSuppress : public MachineFunctionPass { const AArch64InstrInfo *TII; @@ -36,12 +38,12 @@ class AArch64StorePairSuppress : public MachineFunctionPass { public: static char ID; - AArch64StorePairSuppress() : MachineFunctionPass(ID) {} - - const char *getPassName() const override { - return "AArch64 Store Pair Suppression"; + AArch64StorePairSuppress() : MachineFunctionPass(ID) { + initializeAArch64StorePairSuppressPass(*PassRegistry::getPassRegistry()); } + const char *getPassName() const override { return STPSUPPRESS_PASS_NAME; } + bool runOnMachineFunction(MachineFunction &F) override; private: @@ -59,6 +61,9 @@ private: char AArch64StorePairSuppress::ID = 0; } // anonymous +INITIALIZE_PASS(AArch64StorePairSuppress, "aarch64-stp-suppress", + STPSUPPRESS_PASS_NAME, false, false) + FunctionPass *llvm::createAArch64StorePairSuppressPass() { return new AArch64StorePairSuppress(); } diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp index 7ee07efbc88..9faeb6feb6a 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -34,53 +34,56 @@ #include "llvm/Transforms/Scalar.h" using namespace llvm; -static cl::opt<bool> -EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"), - cl::init(true), cl::Hidden); +static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp", + cl::desc("Enable the CCMP formation pass"), + cl::init(true), cl::Hidden); -static cl::opt<bool> EnableMCR("aarch64-mcr", +static cl::opt<bool> EnableMCR("aarch64-enable-mcr", cl::desc("Enable the machine combiner pass"), cl::init(true), cl::Hidden); -static cl::opt<bool> -EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"), - cl::init(true), cl::Hidden); - -static cl::opt<bool> -EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar" - " integer instructions"), cl::init(false), cl::Hidden); - -static cl::opt<bool> -EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote " - "constant pass"), cl::init(true), cl::Hidden); +static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress", + cl::desc("Suppress STP for AArch64"), + cl::init(true), cl::Hidden); -static cl::opt<bool> -EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the" - " linker optimization hints (LOH)"), cl::init(true), - cl::Hidden); +static cl::opt<bool> EnableAdvSIMDScalar( + "aarch64-enable-simd-scalar", + cl::desc("Enable use of AdvSIMD scalar integer instructions"), + cl::init(false), cl::Hidden); static cl::opt<bool> -EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden, - cl::desc("Enable the pass that removes dead" - " definitons and replaces stores to" - " them with stores to the zero" - " register"), - cl::init(true)); + EnablePromoteConstant("aarch64-enable-promote-const", + cl::desc("Enable the promote constant pass"), + cl::init(true), cl::Hidden); -static cl::opt<bool> -EnableRedundantCopyElimination("aarch64-redundant-copy-elim", - cl::desc("Enable the redundant copy elimination pass"), - cl::init(true), 
cl::Hidden); +static cl::opt<bool> EnableCollectLOH( + "aarch64-enable-collect-loh", + cl::desc("Enable the pass that emits the linker optimization hints (LOH)"), + cl::init(true), cl::Hidden); static cl::opt<bool> -EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair" - " optimization pass"), cl::init(true), cl::Hidden); - -static cl::opt<bool> -EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden, - cl::desc("Run SimplifyCFG after expanding atomic operations" - " to make use of cmpxchg flow-based information"), - cl::init(true)); + EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden, + cl::desc("Enable the pass that removes dead" + " definitons and replaces stores to" + " them with stores to the zero" + " register"), + cl::init(true)); + +static cl::opt<bool> EnableRedundantCopyElimination( + "aarch64-enable-copyelim", + cl::desc("Enable the redundant copy elimination pass"), cl::init(true), + cl::Hidden); + +static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt", + cl::desc("Enable the load/store pair" + " optimization pass"), + cl::init(true), cl::Hidden); + +static cl::opt<bool> EnableAtomicTidy( + "aarch64-enable-atomic-cfg-tidy", cl::Hidden, + cl::desc("Run SimplifyCFG after expanding atomic operations" + " to make use of cmpxchg flow-based information"), + cl::init(true)); static cl::opt<bool> EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, @@ -88,9 +91,9 @@ EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::init(true)); static cl::opt<bool> -EnableCondOpt("aarch64-condopt", - cl::desc("Enable the condition optimizer pass"), - cl::init(true), cl::Hidden); + EnableCondOpt("aarch64-enable-condopt", + cl::desc("Enable the condition optimizer pass"), + cl::init(true), cl::Hidden); static cl::opt<bool> EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden, @@ -98,17 +101,26 @@ EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden, cl::init(false)); static cl::opt<bool> -EnableGEPOpt("aarch64-gep-opt", cl::Hidden, - cl::desc("Enable optimizations on complex GEPs"), - cl::init(false)); + EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden, + cl::desc("Enable the type promotion pass"), + cl::init(true)); + +static cl::opt<bool> + EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden, + cl::desc("Enable optimizations on complex GEPs"), + cl::init(false)); + +static cl::opt<bool> + BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true), + cl::desc("Relax out of range conditional branches")); // FIXME: Unify control over GlobalMerge. 
static cl::opt<cl::boolOrDefault> -EnableGlobalMerge("aarch64-global-merge", cl::Hidden, - cl::desc("Enable the global merge pass")); + EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden, + cl::desc("Enable the global merge pass")); static cl::opt<bool> - EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden, + EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden, cl::desc("Enable the loop data prefetch pass"), cl::init(true)); @@ -119,8 +131,21 @@ extern "C" void LLVMInitializeAArch64Target() { RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target); auto PR = PassRegistry::getPassRegistry(); initializeGlobalISel(*PR); + initializeAArch64A53Fix835769Pass(*PR); + initializeAArch64A57FPLoadBalancingPass(*PR); + initializeAArch64AddressTypePromotionPass(*PR); + initializeAArch64AdvSIMDScalarPass(*PR); + initializeAArch64BranchRelaxationPass(*PR); + initializeAArch64CollectLOHPass(*PR); + initializeAArch64ConditionalComparesPass(*PR); + initializeAArch64ConditionOptimizerPass(*PR); + initializeAArch64DeadRegisterDefinitionsPass(*PR); initializeAArch64ExpandPseudoPass(*PR); initializeAArch64LoadStoreOptPass(*PR); + initializeAArch64PromoteConstantPass(*PR); + initializeAArch64RedundantCopyEliminationPass(*PR); + initializeAArch64StorePairSuppressPass(*PR); + initializeLDTLSCleanupPass(*PR); } //===----------------------------------------------------------------------===// @@ -374,7 +399,7 @@ bool AArch64PassConfig::addPreISel() { addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize)); } - if (TM->getOptLevel() != CodeGenOpt::None) + if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion) addPass(createAArch64AddressTypePromotionPass()); return false; @@ -461,7 +486,8 @@ void AArch64PassConfig::addPreEmitPass() { addPass(createAArch64A53Fix835769()); // Relax conditional branch instructions if they're otherwise out of // range of their destination. 
- addPass(createAArch64BranchRelaxation()); + if (BranchRelaxation) + addPass(createAArch64BranchRelaxation()); if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && TM->getTargetTriple().isOSBinFormatMachO()) addPass(createAArch64CollectLOHPass()); diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll index cae00a9b1cb..6e4a47b0440 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll @@ -1,8 +1,8 @@ -; RUN: llc -O3 -aarch64-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s -; RUN: llc -O3 -aarch64-gep-opt=true -mattr=-use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -mattr=+use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -mattr=-use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -mattr=+use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" target triple = "aarch64-linux-gnueabi" diff --git a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll index 84277995ce5..1b2ed4b8952 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck <%t %s ; REQUIRES: asserts target triple = "aarch64--linux-android" diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll index 5cab38eafb5..fe5abbf15ef 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -1,5 +1,5 @@ ; REQUIRES: asserts -; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -aarch64-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s +; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -aarch64-enable-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: stp_i64_scale:BB#0 diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll index f30ab89f238..cfe0dfc956a 100644 --- a/llvm/test/CodeGen/AArch64/addsub_ext.ll +++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu 
-aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s @var8 = global i8 0 @var16 = global i16 0 diff --git a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll index 563a18bd59b..649bc25b726 100644 --- a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll +++ b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll @@ -1,7 +1,7 @@ -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone { ; CHECK-LABEL: bar: diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll index 3197f5bd27e..6eaf75c4fb9 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-gep-opt=false %s -o - | FileCheck %s +; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-enable-gep-opt=false %s -o - | FileCheck %s ; <rdar://problem/13621857> @block = common global i8* null, align 8 diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll index 0a3c4f728c2..c57be5684ad 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-collect-loh=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-enable-collect-loh=false | FileCheck %s ; rdar://13452552 ; Disable the collecting of LOH so that the labels do not get in the ; way of the NEXT patterns. 
diff --git a/llvm/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll b/llvm/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll index 38661a5f38f..87826fdbcb8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ands-bad-peephole.ll @@ -1,4 +1,4 @@ -; RUN: llc %s -o - -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc %s -o - -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; Check that ANDS (tst) is not merged with ADD when the immediate ; is not 0. ; <rdar://problem/16693089> diff --git a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll index 863f06c8664..85aa9c44305 100644 --- a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll +++ b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-dead-def-elimination=false < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-enable-dead-defs=false < %s | FileCheck %s target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll index 876a69193b4..6f88212cd39 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -O1 -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -O0 -fast-isel=true -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O1 -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O0 -fast-isel=true -o - | FileCheck %s ; CHECK-LABEL: test_i64_f64: define void @test_i64_f64(double* %p, i64* %q) { diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll index cc9badc5c55..52d269d3773 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -fast-isel=true -aarch64-load-store-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -fast-isel=true -aarch64-enable-ldst-opt=false -o - | FileCheck %s ; CHECK-LABEL: test_i64_f64: define i64 @test_i64_f64(double %p) { diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll index d08976788e9..a1dec896d34 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -fast-isel=true -O0 -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -fast-isel=true -O0 -o - | FileCheck %s ; Note, we split the functions in to multiple BBs below to isolate the call ; instruction we want to test, from fast-isel failing to select instructions diff 
--git a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll index 25d874e54cb..fa2343152f7 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp | FileCheck %s +; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp | FileCheck %s target triple = "arm64-apple-ios7.0.0" @channelColumns = external global i64 diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll index 748bbcca079..2682fa7dcce 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp -aarch64-stress-ccmp | FileCheck %s +; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp | FileCheck %s target triple = "arm64-apple-ios" ; CHECK: single_same diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll index e34ef39bcfe..4a3696501fd 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios -O3 -aarch64-collect-loh -aarch64-collect-loh-bb-only=true -aarch64-collect-loh-pre-collect-register=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios -O3 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=true -aarch64-collect-loh-pre-collect-register=false < %s -o - | FileCheck %s ; Check that the LOH analysis does not crash when the analysed chained ; contains instructions that are filtered out. ; diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll index 8889cb4bf52..e3df4182ddc 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s ; Test case for <rdar://problem/15942912>. ; AdrpAddStr cannot be used when the store uses same ; register as address and value. 
Indeed, the related diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll index 3fc0d45f065..b697b6eced3 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s -; RUN: llc -mtriple=arm64-linux-gnu -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s --check-prefix=CHECK-ELF +; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-linux-gnu -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s --check-prefix=CHECK-ELF ; CHECK-ELF-NOT: .loh ; CHECK-ELF-NOT: AdrpAdrp diff --git a/llvm/test/CodeGen/AArch64/arm64-cse.ll b/llvm/test/CodeGen/AArch64/arm64-cse.ll index 8d4bf5dbeb7..030857df777 100644 --- a/llvm/test/CodeGen/AArch64/arm64-cse.ll +++ b/llvm/test/CodeGen/AArch64/arm64-cse.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 -aarch64-gep-opt=false -verify-machineinstrs | FileCheck %s +; RUN: llc -O3 < %s -aarch64-enable-atomic-cfg-tidy=0 -aarch64-enable-gep-opt=false -verify-machineinstrs | FileCheck %s target triple = "arm64-apple-ios" ; rdar://12462006 diff --git a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll index 8164f46664b..388f50c3edb 100644 --- a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -stress-early-ifcvt -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc < %s -stress-early-ifcvt -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s target triple = "arm64-apple-macosx" ; CHECK: mm2 diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll index bcb196e4045..78ba9a01cbe 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s +; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-enable-atomic-cfg-tidy=0 < %s | FileCheck %s @lhs = global fp128 zeroinitializer, align 16 @rhs = global fp128 zeroinitializer, align 16 diff --git a/llvm/test/CodeGen/AArch64/arm64-frame-index.ll b/llvm/test/CodeGen/AArch64/arm64-frame-index.ll index 618bcabe399..0544eaebcc5 100644 --- a/llvm/test/CodeGen/AArch64/arm64-frame-index.ll +++ b/llvm/test/CodeGen/AArch64/arm64-frame-index.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; rdar://11935841 define void @t1() nounwind ssp { diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-add-sub.ll b/llvm/test/CodeGen/AArch64/arm64-neon-add-sub.ll index fbde606538c..40836a73e0c 100644 --- a/llvm/test/CodeGen/AArch64/arm64-neon-add-sub.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-add-sub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-simd-scalar| FileCheck %s +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-enable-simd-scalar| FileCheck %s define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) { ;CHECK: add 
{{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b diff --git a/llvm/test/CodeGen/AArch64/arm64-promote-const.ll b/llvm/test/CodeGen/AArch64/arm64-promote-const.ll index 0be2f5c08c0..2b7c782947f 100644 --- a/llvm/test/CodeGen/AArch64/arm64-promote-const.ll +++ b/llvm/test/CodeGen/AArch64/arm64-promote-const.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-stress-promote-const -mcpu=cyclone | FileCheck -check-prefix=PROMOTED %s ; The REGULAR run just checks that the inputs passed to promote const expose ; the appropriate patterns. -; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-promote-const=false -mcpu=cyclone | FileCheck -check-prefix=REGULAR %s +; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-enable-promote-const=false -mcpu=cyclone | FileCheck -check-prefix=REGULAR %s %struct.uint8x16x4_t = type { [4 x <16 x i8>] } diff --git a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll index ff77b19ccf7..5b34017cf36 100644 --- a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll +++ b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-enable-stp-suppress=false -verify-machineinstrs | FileCheck %s ; The next set of tests makes sure we can combine the second instruction into ; the first. diff --git a/llvm/test/CodeGen/AArch64/arm64-stp.ll b/llvm/test/CodeGen/AArch64/arm64-stp.ll index d0e1fbe3f2d..e5c512c40e8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-stp.ll +++ b/llvm/test/CodeGen/AArch64/arm64-stp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s ; CHECK-LABEL: stp_int ; CHECK: stp w0, w1, [x2] diff --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll index 6447a7477fc..8b212aa6c1d 100644 --- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll +++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s ; ; Get the actual value of the overflow bit. 
diff --git a/llvm/test/CodeGen/AArch64/blockaddress.ll b/llvm/test/CodeGen/AArch64/blockaddress.ll index e93c69fd3ea..7c0755a13d0 100644 --- a/llvm/test/CodeGen/AArch64/blockaddress.ll +++ b/llvm/test/CodeGen/AArch64/blockaddress.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s @addr = global i8* null diff --git a/llvm/test/CodeGen/AArch64/breg.ll b/llvm/test/CodeGen/AArch64/breg.ll index 42061a851db..311abcacd74 100644 --- a/llvm/test/CodeGen/AArch64/breg.ll +++ b/llvm/test/CodeGen/AArch64/breg.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s @stored_label = global i8* null diff --git a/llvm/test/CodeGen/AArch64/cmp-const-max.ll b/llvm/test/CodeGen/AArch64/cmp-const-max.ll index 0431e391a30..0d5846f0679 100644 --- a/llvm/test/CodeGen/AArch64/cmp-const-max.ll +++ b/llvm/test/CodeGen/AArch64/cmp-const-max.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -aarch64-atomic-cfg-tidy=0 < %s -mtriple=aarch64-none-eabihf -fast-isel=false | FileCheck %s +; RUN: llc -verify-machineinstrs -aarch64-enable-atomic-cfg-tidy=0 < %s -mtriple=aarch64-none-eabihf -fast-isel=false | FileCheck %s define i32 @ule_64_max(i64 %p) { diff --git a/llvm/test/CodeGen/AArch64/directcond.ll b/llvm/test/CodeGen/AArch64/directcond.ll index f89d7603fd3..4cba339ee4a 100644 --- a/llvm/test/CodeGen/AArch64/directcond.ll +++ b/llvm/test/CodeGen/AArch64/directcond.ll @@ -1,5 +1,5 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) { ; CHECK-LABEL: test_select_i32: diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll index ff57bbb33c4..c749e4d4041 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-darwin -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-darwin -aarch64-enable-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s ; Test if the 
BBs are reordred according to their branch weights. define i64 @branch_weights_test(i64 %a, i64 %b) { diff --git a/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll b/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll index a407b269dd8..45cc678a0a1 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll @@ -1,4 +1,4 @@ -; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s define i32 @icmp_eq_i1(i1 %a) { ; CHECK-LABEL: icmp_eq_i1 diff --git a/llvm/test/CodeGen/AArch64/fast-isel-cmp-branch.ll b/llvm/test/CodeGen/AArch64/fast-isel-cmp-branch.ll index 1ac358f37aa..ce47bc42453 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-cmp-branch.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-cmp-branch.ll @@ -1,5 +1,5 @@ -; RUN: llc -aarch64-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s -; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -aarch64-enable-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s define i32 @fcmp_oeq(float %x, float %y) { ; CHECK-LABEL: fcmp_oeq diff --git a/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll b/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll index 2a0139ed9b0..89b368fa19b 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-cmp-vec.ll @@ -1,5 +1,5 @@ ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -verify-machineinstrs \ -; RUN: -aarch64-atomic-cfg-tidy=0 -disable-cgp -disable-branch-fold \ +; RUN: -aarch64-enable-atomic-cfg-tidy=0 -disable-cgp -disable-branch-fold \ ; RUN: < %s | FileCheck %s ; diff --git a/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll b/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll index 93741d6c12d..b974f412d84 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=false -disable-cgp-branch-opts -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=false -disable-cgp-branch-opts -verify-machineinstrs < %s | FileCheck %s ; ; Test folding of the sign-/zero-extend into the load instruction. 
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-tbz.ll b/llvm/test/CodeGen/AArch64/fast-isel-tbz.ll index c35ae4230dd..af817777143 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-tbz.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-tbz.ll @@ -1,5 +1,5 @@ -; RUN: llc -disable-peephole -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s -; RUN: llc -disable-peephole -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK --check-prefix=FAST %s +; RUN: llc -disable-peephole -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -disable-peephole -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK --check-prefix=FAST %s define i32 @icmp_eq_i8(i8 zeroext %a) { ; CHECK-LABEL: icmp_eq_i8 diff --git a/llvm/test/CodeGen/AArch64/flags-multiuse.ll b/llvm/test/CodeGen/AArch64/flags-multiuse.ll index 77bbcddc492..0827fb8c9e8 100644 --- a/llvm/test/CodeGen/AArch64/flags-multiuse.ll +++ b/llvm/test/CodeGen/AArch64/flags-multiuse.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s ; LLVM should be able to cope with multiple uses of the same flag-setting ; instruction at different points of a routine. Either by rematerializing the diff --git a/llvm/test/CodeGen/AArch64/gep-nullptr.ll b/llvm/test/CodeGen/AArch64/gep-nullptr.ll index 4c2bc504cd0..e5e359c0b66 100644 --- a/llvm/test/CodeGen/AArch64/gep-nullptr.ll +++ b/llvm/test/CodeGen/AArch64/gep-nullptr.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -aarch64-gep-opt=true < %s |FileCheck %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true < %s |FileCheck %s target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128" target triple = "aarch64--linux-gnu" diff --git a/llvm/test/CodeGen/AArch64/global-merge-1.ll b/llvm/test/CodeGen/AArch64/global-merge-1.ll index b93f41c07df..6c9b3cbcd6d 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-1.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-1.ll @@ -1,11 +1,11 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -o - | FileCheck %s 
--check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS @m = internal global i32 0, align 4 @n = internal global i32 0, align 4 diff --git a/llvm/test/CodeGen/AArch64/global-merge-2.ll b/llvm/test/CodeGen/AArch64/global-merge-2.ll index 53bed1d9bc0..10f3a948dc7 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-2.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-2.ll @@ -1,6 +1,6 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS @x = global i32 0, align 4 @y = global i32 0, align 4 diff --git a/llvm/test/CodeGen/AArch64/global-merge-3.ll b/llvm/test/CodeGen/AArch64/global-merge-3.ll index 481be4017b0..0c208a16641 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-3.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-3.ll @@ -1,6 +1,6 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS @x = global [1000 x i32] zeroinitializer, align 1 @y = global [1000 x i32] zeroinitializer, align 1 diff --git a/llvm/test/CodeGen/AArch64/global-merge-4.ll b/llvm/test/CodeGen/AArch64/global-merge-4.ll index a5109f6e8ea..036b8910d66 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-4.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-4.ll @@ -1,4 +1,4 @@ -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -o - | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" target triple = "arm64-apple-ios7.0.0" diff --git a/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll b/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll index 434c787b28d..f000db8587b 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll @@ -1,6 +1,7 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: 
-aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use=false \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -aarch64-enable-global-merge \ +; RUN: -global-merge-group-by-use -global-merge-ignore-single-use=false %s \ +; RUN: -o - | FileCheck %s ; We assume that globals of the same size aren't reordered inside a set. diff --git a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll index 39943892577..28a32f148f1 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll @@ -1,6 +1,6 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: -O1 -global-merge-group-by-use -global-merge-ignore-single-use \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -O1 -global-merge-group-by-use \ +; RUN: -global-merge-ignore-single-use %s -o - | FileCheck %s ; Check that, at -O1, we only merge globals used in minsize functions. ; We assume that globals of the same size aren't reordered inside a set. diff --git a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll index c3756a85fef..bc7a8274cbf 100644 --- a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll +++ b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll @@ -1,6 +1,7 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: -aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -aarch64-enable-global-merge \ +; RUN: -global-merge-group-by-use -global-merge-ignore-single-use %s -o - \ +; RUN: | FileCheck %s ; We assume that globals of the same size aren't reordered inside a set. 
diff --git a/llvm/test/CodeGen/AArch64/jump-table.ll b/llvm/test/CodeGen/AArch64/jump-table.ll index 16682e92c17..d6a7fceac84 100644 --- a/llvm/test/CodeGen/AArch64/jump-table.ll +++ b/llvm/test/CodeGen/AArch64/jump-table.ll @@ -1,6 +1,6 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s -; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s -; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-enable-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s define i32 @test_jumptable(i32 %in) { ; CHECK: test_jumptable diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll index a47b013f7f7..35117a147ee 100644 --- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll +++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-enable-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s ; CHECK-LABEL: test_strd_sturd: ; CHECK-NEXT: stp d0, d1, [x0, #-8] diff --git a/llvm/test/CodeGen/AArch64/ldst-opt.ll b/llvm/test/CodeGen/AArch64/ldst-opt.ll index d2133213f18..b1cdf8cdb77 100644 --- a/llvm/test/CodeGen/AArch64/ldst-opt.ll +++ b/llvm/test/CodeGen/AArch64/ldst-opt.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s ; This file contains tests for the AArch64 load/store optimizer. 
diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll index 925d1881f56..9a44b43d14e 100644 --- a/llvm/test/CodeGen/AArch64/sibling-call.ll +++ b/llvm/test/CodeGen/AArch64/sibling-call.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-load-store-opt=0 | FileCheck %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-ldst-opt=0 | FileCheck %s declare void @callee_stack0() declare void @callee_stack8([8 x i32], i64) diff --git a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll index bcc8af8d069..c1579336189 100644 --- a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll +++ b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-load-store-opt=false -asm-verbose=false -disable-post-ra | FileCheck %s +; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-enable-ldst-opt=false -asm-verbose=false -disable-post-ra | FileCheck %s ; Disable the load/store optimizer to avoid having LDP/STPs and simplify checks. target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll b/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll index 3955877b09b..10c4ba4c31d 100644 --- a/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll +++ b/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-load-store-opt=false -disable-post-ra -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-enable-ldst-opt=false -disable-post-ra -asm-verbose=false | FileCheck %s ; Disable the load/store optimizer to avoid having LDP/STPs and simplify checks. target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/CodeGen/AArch64/tst-br.ll b/llvm/test/CodeGen/AArch64/tst-br.ll index 345c4d9ba95..2472bf45b6a 100644 --- a/llvm/test/CodeGen/AArch64/tst-br.ll +++ b/llvm/test/CodeGen/AArch64/tst-br.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; We've got the usual issues with LLVM reordering blocks here. The ; tests are correct for the current order, but who knows when that |
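The AArch64TargetMachine.cpp hunks earlier in the diff show the second half of the change: the per-pass cl::opt flags move out of the individual passes, get a uniform "aarch64-enable-*" prefix, and the decision to schedule a pass is made in the pass config rather than inside the pass itself; the test RUN lines above are updated to the new flag names accordingly. A condensed sketch of that gating pattern, again with a hypothetical flag and pass name:

// AArch64TargetMachine.cpp -- option renamed to the aarch64-enable-* scheme
// and owned by the target machine, not the pass.
static cl::opt<bool>
    EnableFooBar("aarch64-enable-foobar", cl::Hidden,
                 cl::desc("Enable the hypothetical FooBar pass"),
                 cl::init(true));

// The pass no longer consults the flag in its run method; the pass config
// decides whether the pass is added to the pipeline at all.
bool AArch64PassConfig::addPreISel() {
  // ... existing pre-ISel passes ...
  if (TM->getOptLevel() != CodeGenOpt::None && EnableFooBar)
    addPass(createAArch64FooBarPass());
  return false;
}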

