Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp    8
-rw-r--r--  llvm/lib/Target/ARM64/ARM64LoadStoreOptimizer.cpp   6
-rw-r--r--  llvm/lib/Target/ARM64/ARM64TargetMachine.cpp       47
3 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp b/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
index da280f8be08..5950a8f18e1 100644
--- a/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
@@ -49,10 +49,6 @@ using namespace llvm;
#define DEBUG_TYPE "arm64-simd-scalar"
-static cl::opt<bool>
-AdvSIMDScalar("arm64-simd-scalar",
- cl::desc("enable use of AdvSIMD scalar integer instructions"),
- cl::init(false), cl::Hidden);
// Allow forcing all i64 operations with equivalent SIMD instructions to use
// them. For stress-testing the transformation function.
static cl::opt<bool>
@@ -368,10 +364,6 @@ bool ARM64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
// runOnMachineFunction - Pass entry point from PassManager.
bool ARM64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
- // Early exit if pass disabled.
- if (!AdvSIMDScalar)
- return false;
-
bool Changed = false;
DEBUG(dbgs() << "***** ARM64AdvSIMDScalar *****\n");
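
(For context: the flag deleted above is not dropped from the backend; an identically spelled option reappears in ARM64TargetMachine.cpp further down, where it gates pass creation instead. A minimal sketch of the cl::opt idiom involved, compilable against LLVM's Support library; DemoScalar and the "demo-simd-scalar" string are invented placeholders, not symbols from this commit.)

// Sketch only: a hidden boolean flag with the same shape as the removed
// "arm64-simd-scalar" option. All names here are illustrative.
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool>
DemoScalar("demo-simd-scalar",
           cl::desc("enable use of AdvSIMD scalar integer instructions"),
           cl::init(false), cl::Hidden);

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  // cl::Hidden options parse normally; they are simply omitted from -help
  // and listed only by -help-hidden.
  return DemoScalar ? 0 : 1;
}
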
diff --git a/llvm/lib/Target/ARM64/ARM64LoadStoreOptimizer.cpp b/llvm/lib/Target/ARM64/ARM64LoadStoreOptimizer.cpp
index da7a8cd3d90..40b39126fad 100644
--- a/llvm/lib/Target/ARM64/ARM64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM64/ARM64LoadStoreOptimizer.cpp
@@ -40,8 +40,6 @@ STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
"Number of load/store from unscaled generated");
-static cl::opt<bool> DoLoadStoreOpt("arm64-load-store-opt", cl::init(true),
- cl::Hidden);
static cl::opt<unsigned> ScanLimit("arm64-load-store-scan-limit", cl::init(20),
cl::Hidden);
@@ -923,10 +921,6 @@ bool ARM64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
}
bool ARM64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- // Early exit if pass disabled.
- if (!DoLoadStoreOpt)
- return false;
-
const TargetMachine &TM = Fn.getTarget();
TII = static_cast<const ARM64InstrInfo *>(TM.getInstrInfo());
TRI = TM.getRegisterInfo();
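
(The deletion here follows the same pattern as the previous file: the pass no longer disables itself at run time; instead, the pipeline simply omits it at construction time. Below is a standalone sketch of that distinction with no LLVM dependency; Pipeline, OptLevel, and the flag are all invented stand-ins for the pass manager, TM->getOptLevel(), and the cl::opt.)

#include <functional>
#include <iostream>
#include <vector>

// Toy pass manager: each "pass" is a callable returning whether it changed
// anything, mirroring runOnMachineFunction's contract.
struct Pipeline {
  std::vector<std::function<bool()>> Passes;
  void addPass(std::function<bool()> P) { Passes.push_back(std::move(P)); }
  void run() {
    for (auto &P : Passes)
      P();
  }
};

int main() {
  bool EnableLoadStoreOpt = true; // stand-in for the cl::opt flag
  int OptLevel = 2;               // stand-in for TM->getOptLevel()

  Pipeline PM;
  // Post-commit style: the enable test happens once, when the pipeline is
  // built, so a disabled pass is never scheduled at all.
  if (OptLevel != 0 && EnableLoadStoreOpt)
    PM.addPass([] { std::cout << "load/store opt ran\n"; return true; });
  PM.run();
}
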
diff --git a/llvm/lib/Target/ARM64/ARM64TargetMachine.cpp b/llvm/lib/Target/ARM64/ARM64TargetMachine.cpp
index b7e1beb8580..f5c187ceb27 100644
--- a/llvm/lib/Target/ARM64/ARM64TargetMachine.cpp
+++ b/llvm/lib/Target/ARM64/ARM64TargetMachine.cpp
@@ -20,24 +20,30 @@
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
-static cl::opt<bool> EnableCCMP("arm64-ccmp",
- cl::desc("Enable the CCMP formation pass"),
- cl::init(true));
+static cl::opt<bool>
+EnableCCMP("arm64-ccmp", cl::desc("Enable the CCMP formation pass"),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableEarlyIfConvert("arm64-early-ifcvt", cl::desc("Enable the early if "
+ "converter pass"), cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+EnableStPairSuppress("arm64-stp-suppress", cl::desc("Suppress STP for ARM64"),
+ cl::init(true), cl::Hidden);
-static cl::opt<bool> EnableStPairSuppress("arm64-stp-suppress", cl::Hidden,
- cl::desc("Suppress STP for ARM64"),
- cl::init(true));
+static cl::opt<bool>
+EnableAdvSIMDScalar("arm64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
+ " integer instructions"), cl::init(false), cl::Hidden);
static cl::opt<bool>
-EnablePromoteConstant("arm64-promote-const", cl::Hidden,
- cl::desc("Enable the promote constant pass"),
- cl::init(true));
+EnablePromoteConstant("arm64-promote-const", cl::desc("Enable the promote "
+ "constant pass"), cl::init(true), cl::Hidden);
static cl::opt<bool>
-EnableCollectLOH("arm64-collect-loh", cl::Hidden,
- cl::desc("Enable the pass that emits the linker"
- " optimization hints (LOH)"),
- cl::init(true));
+EnableCollectLOH("arm64-collect-loh", cl::desc("Enable the pass that emits the"
+ " linker optimization hints (LOH)"), cl::init(true),
+ cl::Hidden);
static cl::opt<bool>
EnableDeadRegisterElimination("arm64-dead-def-elimination", cl::Hidden,
@@ -47,6 +53,10 @@ EnableDeadRegisterElimination("arm64-dead-def-elimination", cl::Hidden,
" register"),
cl::init(true));
+static cl::opt<bool>
+EnableLoadStoreOpt("arm64-load-store-opt", cl::desc("Enable the load/store pair"
+ " optimization pass"), cl::init(true), cl::Hidden);
+
extern "C" void LLVMInitializeARM64Target() {
// Register the target.
RegisterTargetMachine<ARM64leTargetMachine> X(TheARM64leTarget);
@@ -159,7 +169,8 @@ bool ARM64PassConfig::addInstSelector() {
bool ARM64PassConfig::addILPOpts() {
if (EnableCCMP)
addPass(createARM64ConditionalCompares());
- addPass(&EarlyIfConverterID);
+ if (EnableEarlyIfConvert)
+ addPass(&EarlyIfConverterID);
if (EnableStPairSuppress)
addPass(createARM64StorePairSuppressPass());
return true;
@@ -167,13 +178,14 @@ bool ARM64PassConfig::addILPOpts() {
bool ARM64PassConfig::addPreRegAlloc() {
// Use AdvSIMD scalar instructions whenever profitable.
- addPass(createARM64AdvSIMDScalar());
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar)
+ addPass(createARM64AdvSIMDScalar());
return true;
}
bool ARM64PassConfig::addPostRegAlloc() {
// Change dead register definitions to refer to the zero register.
- if (EnableDeadRegisterElimination)
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createARM64DeadRegisterDefinitions());
return true;
}
@@ -182,7 +194,8 @@ bool ARM64PassConfig::addPreSched2() {
// Expand some pseudo instructions to allow proper scheduling.
addPass(createARM64ExpandPseudoPass());
// Use load/store pair instructions when possible.
- addPass(createARM64LoadStoreOptimizationPass());
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
+ addPass(createARM64LoadStoreOptimizationPass());
return true;
}
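
(Taken together, the pipeline hunks converge on one gating pattern. For readability, here is the last function as it reads after the patch, assembled from the +/- lines above; every name in it appears in the diff itself.)

// addPreSched2 after this commit: mandatory lowering stays unconditional,
// while the optional optimization is skipped at -O0 or when its hidden
// flag is off.
bool ARM64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createARM64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createARM64LoadStoreOptimizationPass());
  return true;
}

With an llc built from this tree, each pass can then be toggled individually, e.g. -arm64-load-store-opt=false; since the options are now cl::Hidden, they show up under -help-hidden rather than -help.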