path: root/llvm/lib
author     Robin Morisset <morisset@google.com>  2014-08-21 21:50:01 +0000
committer  Robin Morisset <morisset@google.com>  2014-08-21 21:50:01 +0000
commit     59c23cd946e6df7bef50e02fa28797469fecd805 (patch)
tree       90c7d21dd14fd8eb2177c13bec57178f1cebfb33 /llvm/lib
parent     a21fee0ee403731a198926ce63e7b6958419f53a (diff)
Rename AtomicExpandLoadLinked to AtomicExpand
AtomicExpandLoadLinked is currently rather ARM-specific. This patch is the first of a group that aims at making it more target-independent; see http://lists.cs.uiuc.edu/pipermail/llvmdev/2014-August/075873.html for details. The command-line option is "atomic-expand".

llvm-svn: 216231
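In practice the rename only changes how targets create and gate the pass. A minimal sketch of a backend opting in after this patch (MyPassConfig is a hypothetical pass-config class; only createAtomicExpandPass and the enableAtomicExpand hook come from this commit):

// Hypothetical backend pass config (MyPassConfig is a placeholder);
// the pattern mirrors the ARM and AArch64 hunks below.
void MyPassConfig::addIRPasses() {
  // Schedule the renamed pass; it only does work when the subtarget's
  // enableAtomicExpand() hook returns true (see runOnFunction below).
  addPass(createAtomicExpandPass(TM));
  TargetPassConfig::addIRPasses();
}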
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp (renamed from llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp)  38
-rw-r--r--  llvm/lib/CodeGen/CMakeLists.txt  2
-rw-r--r--  llvm/lib/CodeGen/CodeGen.cpp  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetMachine.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.cpp  2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.h  4
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetMachine.cpp  2
-rw-r--r--  llvm/lib/Target/TargetSubtargetInfo.cpp  2
8 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 5c40069fd66..d2ed07775bc 100644
--- a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1,4 +1,4 @@
-//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
+//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,16 +25,16 @@
using namespace llvm;
-#define DEBUG_TYPE "arm-atomic-expand"
+#define DEBUG_TYPE "atomic-expand"
namespace {
- class AtomicExpandLoadLinked : public FunctionPass {
+ class AtomicExpand: public FunctionPass {
const TargetMachine *TM;
public:
static char ID; // Pass identification, replacement for typeid
- explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
+ explicit AtomicExpand(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM) {
- initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
+ initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@@ -50,18 +50,18 @@ namespace {
};
}
-char AtomicExpandLoadLinked::ID = 0;
-char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
-INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
- "Expand Atomic calls in terms of load-linked & store-conditional",
+char AtomicExpand::ID = 0;
+char &llvm::AtomicExpandID = AtomicExpand::ID;
+INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
+ "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
false, false)
-FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
- return new AtomicExpandLoadLinked(TM);
+FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
+ return new AtomicExpand(TM);
}
-bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
- if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
+bool AtomicExpand::runOnFunction(Function &F) {
+ if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
return false;
SmallVector<Instruction *, 1> AtomicInsts;
@@ -97,7 +97,7 @@ bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
return MadeChange;
}
-bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
+bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
// Load instructions don't actually need a leading fence, even in the
// SequentiallyConsistent case.
AtomicOrdering MemOpOrder =
@@ -119,7 +119,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
return true;
}
-bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
+bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
// The only atomic 64-bit store on ARM is an strexd that succeeds, which means
// we need a loop and the entire instruction is essentially an "atomicrmw
// xchg" that ignores the value loaded.
@@ -133,7 +133,7 @@ bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
return expandAtomicRMW(AI);
}
-bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
+bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
AtomicOrdering Order = AI->getOrdering();
Value *Addr = AI->getPointerOperand();
BasicBlock *BB = AI->getParent();
@@ -233,7 +233,7 @@ bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
return true;
}
-bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
+bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
Value *Addr = CI->getPointerOperand();
@@ -359,7 +359,7 @@ bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
return true;
}
-AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
+AtomicOrdering AtomicExpand::insertLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return Ord;
@@ -372,7 +372,7 @@ AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
return Monotonic;
}
-void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
+void AtomicExpand::insertTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord) {
if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
return;
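For readers following the mechanics rather than the rename: expandAtomicRMW (unchanged here apart from its class name) rewrites an atomicrmw into a load-linked/store-conditional retry loop. A condensed sketch of the loop it builds, assuming the pass's surrounding locals (Ctx, F, Builder, ExitBB, Addr, Incr, MemOpOrder, TLI) and the TargetLowering hooks emitLoadLinked/emitStoreConditional of this era; this is not the pass's verbatim code:

// Condensed sketch of the retry loop emitted for an "atomicrmw add".
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
Builder.CreateBr(LoopBB);                         // fall through into the loop
Builder.SetInsertPoint(LoopBB);
Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
Value *NewVal = Builder.CreateAdd(Loaded, Incr);  // the RMW operation itself
Value *StoreSuccess =
    TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
Value *TryAgain = Builder.CreateICmpNE(           // SC returns 0 on success
    StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);   // loop until the SC succeeds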
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index 2a247c12e64..07453d75e81 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -2,7 +2,7 @@ add_llvm_library(LLVMCodeGen
AggressiveAntiDepBreaker.cpp
AllocationOrder.cpp
Analysis.cpp
- AtomicExpandLoadLinkedPass.cpp
+ AtomicExpandPass.cpp
BasicTargetTransformInfo.cpp
BranchFolding.cpp
CalcSpillWeights.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 12b0411065a..307dec548fc 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
void llvm::initializeCodeGen(PassRegistry &Registry) {
- initializeAtomicExpandLoadLinkedPass(Registry);
+ initializeAtomicExpandPass(Registry);
initializeBasicTTIPass(Registry);
initializeBranchFolderPassPass(Registry);
initializeCodeGenPreparePass(Registry);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index f3172a62f03..e04fe1b5a97 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -144,7 +144,7 @@ TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
void AArch64PassConfig::addIRPasses() {
// Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
// ourselves.
- addPass(createAtomicExpandLoadLinkedPass(TM));
+ addPass(createAtomicExpandPass(TM));
// Cmpxchg instructions are often used with a subsequent comparison to
// determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 9c4f05283f2..25f2316a555 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -428,7 +428,7 @@ bool ARMSubtarget::enablePostMachineScheduler() const {
return (!isThumb() || hasThumb2());
}
-bool ARMSubtarget::enableAtomicExpandLoadLinked() const {
+bool ARMSubtarget::enableAtomicExpand() const {
return hasAnyDataBarrier() && !isThumb1Only();
}
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 1263e8b7121..8b40f2f1932 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -436,8 +436,8 @@ public:
/// True for some subtargets at > -O0.
bool enablePostMachineScheduler() const override;
- // enableAtomicExpandLoadLinked - True if we need to expand our atomics.
- bool enableAtomicExpandLoadLinked() const override;
+ // enableAtomicExpand - True if we need to expand our atomics.
+ bool enableAtomicExpand() const override;
/// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
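The override pattern generalizes beyond ARM. A hypothetical subtarget gating the pass after this rename (the class name and the HasLLSC flag are illustrative; only the enableAtomicExpand hook name comes from this patch):

// Illustrative only: a made-up subtarget showing where a backend hooks in.
class MySubtarget : public TargetSubtargetInfo {
public:
  // Expand atomics only when the ISA has LL/SC support (made-up flag,
  // analogous in spirit to ARM's hasAnyDataBarrier() && !isThumb1Only()).
  bool enableAtomicExpand() const override { return HasLLSC; }
private:
  bool HasLLSC = true;
};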
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index fc164ada35f..3e5840d51b6 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -161,7 +161,7 @@ void ARMPassConfig::addIRPasses() {
if (TM->Options.ThreadModel == ThreadModel::Single)
addPass(createLowerAtomicPass());
else
- addPass(createAtomicExpandLoadLinkedPass(TM));
+ addPass(createAtomicExpandPass(TM));
// Cmpxchg instructions are often used with a subsequent comparison to
// determine whether it succeeded. We can exploit existing control-flow in
diff --git a/llvm/lib/Target/TargetSubtargetInfo.cpp b/llvm/lib/Target/TargetSubtargetInfo.cpp
index 386a813b057..23c038fff66 100644
--- a/llvm/lib/Target/TargetSubtargetInfo.cpp
+++ b/llvm/lib/Target/TargetSubtargetInfo.cpp
@@ -39,7 +39,7 @@ bool TargetSubtargetInfo::useMachineScheduler() const {
return enableMachineScheduler();
}
-bool TargetSubtargetInfo::enableAtomicExpandLoadLinked() const {
+bool TargetSubtargetInfo::enableAtomicExpand() const {
return true;
}