author     JF Bastien <jfb@google.com>    2016-04-06 21:19:33 +0000
committer  JF Bastien <jfb@google.com>    2016-04-06 21:19:33 +0000
commit     800f87a871282713fc5f41d00692b51b2ea6c207 (patch)
tree       b7b4323ac6fb5b8737b63ed9efe8af168551dbf7 /llvm/lib/Target
parent     31994e2eb670e03ec826002df0ae330db8e6c72c (diff)
NFC: make AtomicOrdering an enum class
Summary:
In the context of http://wg21.link/lwg2445, C++ uses the concept of a
'stronger' ordering but doesn't define it properly. This should be fixed
in C++17, barring a small question that's still open.
The code currently plays fast and loose with the AtomicOrdering
enum. Using an enum class is one step towards tightening things. I later
also want to tighten related enums, such as clang's
AtomicOrderingKind (which should be shared with LLVM as a 'C++ ABI'
enum).
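As a minimal sketch of the looseness being tightened here (illustrative only; the enumerator lists below are made up rather than copied from LLVM's header): an unscoped enum converts implicitly to an integer, so ordering comparisons compile silently and depend on the numeric enumerator values, while a scoped enum forces qualification and rejects mixed comparisons.

// Old style: unscoped enum; enumerators leak into the enclosing scope and
// implicitly convert to int.
enum OldAtomicOrdering { NotAtomic, Unordered, Monotonic, Acquire };

// New style: scoped enum; enumerators must be qualified and do not convert.
enum class AtomicOrdering { NotAtomic, Unordered, Monotonic, Acquire };

bool oldCheck(OldAtomicOrdering Ord) {
  // Compiles, but silently relies on the numeric values of the enumerators.
  return Ord > Monotonic;
}

bool newCheck(AtomicOrdering Ord) {
  // return Ord > Monotonic;  // error: no conversion between the two enums
  // A comparison between two AtomicOrdering values still compiles by default;
  // the follow-up described below deletes those operators to close that hole.
  return Ord > AtomicOrdering::Monotonic;
}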
This change touches a few lines of code which can be improved later; I'd
like to keep it as NFC for now since it's already quite complex. I have
related changes for clang.
As a follow-up I'll add:
bool operator<(AtomicOrdering, AtomicOrdering) = delete;
bool operator>(AtomicOrdering, AtomicOrdering) = delete;
bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
bool operator>=(AtomicOrdering, AtomicOrdering) = delete;
This is separate so that clang and LLVM changes don't need to be in sync.
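For illustration, a rough sketch of where that leaves callers (simplified stand-ins, not LLVM's actual declarations): with the relational operators deleted, code has to go through named predicates such as isAcquireOrStronger, isReleaseOrStronger, and isStrongerThanMonotonic, which is the pattern the diff below switches the targets to.

enum class AtomicOrdering {
  NotAtomic, Unordered, Monotonic, Acquire, Release, AcquireRelease,
  SequentiallyConsistent
};

// Follow-up: raw comparisons between orderings no longer compile.
bool operator<(AtomicOrdering, AtomicOrdering) = delete;
bool operator>(AtomicOrdering, AtomicOrdering) = delete;
bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
bool operator>=(AtomicOrdering, AtomicOrdering) = delete;

// Simplified stand-ins for the predicates used in the diff below; they answer
// "is this ordering at least as strong as acquire/release/monotonic?".
inline bool isAcquireOrStronger(AtomicOrdering AO) {
  return AO == AtomicOrdering::Acquire ||
         AO == AtomicOrdering::AcquireRelease ||
         AO == AtomicOrdering::SequentiallyConsistent;
}

inline bool isReleaseOrStronger(AtomicOrdering AO) {
  return AO == AtomicOrdering::Release ||
         AO == AtomicOrdering::AcquireRelease ||
         AO == AtomicOrdering::SequentiallyConsistent;
}

inline bool isStrongerThanMonotonic(AtomicOrdering AO) {
  return isAcquireOrStronger(AO) || isReleaseOrStronger(AO);
}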
Reviewers: jyknight, reames
Subscribers: jyknight, llvm-commits
Differential Revision: http://reviews.llvm.org/D18775
llvm-svn: 265602
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--   llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp     2
-rw-r--r--   llvm/lib/Target/AArch64/AArch64ISelLowering.cpp     4
-rw-r--r--   llvm/lib/Target/AArch64/AArch64InstrAtomics.td     11
-rw-r--r--   llvm/lib/Target/ARM/ARMISelLowering.cpp            46
-rw-r--r--   llvm/lib/Target/ARM/ARMInstrInfo.td                 4
-rw-r--r--   llvm/lib/Target/CppBackend/CPPBackend.cpp          15
-rw-r--r--   llvm/lib/Target/PowerPC/PPCISelLowering.cpp         6
-rw-r--r--   llvm/lib/Target/Sparc/SparcISelLowering.cpp        10
-rw-r--r--   llvm/lib/Target/SystemZ/SystemZISelLowering.cpp     6
-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp             8
-rw-r--r--   llvm/lib/Target/XCore/XCoreISelLowering.cpp        10
11 files changed, 65 insertions, 57 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 134f107698d..e4391996041 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -608,7 +608,7 @@ static bool isWorthFoldingADDlow(SDValue N) {
     // ldar and stlr have much more restrictive addressing modes (just a
     // register).
-    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
+    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
       return false;
   }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2d3ddaa060f..2efe8473832 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10132,7 +10132,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                              AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
-  bool IsAcquire = isAtLeastAcquire(Ord);
+  bool IsAcquire = isAcquireOrStronger(Ord);

   // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
   // intrinsic must return {i64, i64} and we have to recombine them into a
@@ -10174,7 +10174,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
                                                    Value *Val, Value *Addr,
                                                    AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  bool IsRelease = isAtLeastRelease(Ord);
+  bool IsRelease = isReleaseOrStronger(Ord);

   // Since the intrinsics must have legal type, the i128 intrinsics take two
   // parameters: "i64, i64". We must marshal Val into the appropriate form
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index 4923a1161df..a88e7e8491a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -29,7 +29,7 @@ def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
 class acquiring_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastAcquire(Ordering);
+  return isAcquireOrStronger(Ordering);
 }]>;

 // An atomic load operation that does not need either acquire or release
@@ -37,7 +37,7 @@ class acquiring_load<PatFrag base>
 class relaxed_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return !isAtLeastAcquire(Ordering);
+  return !isAcquireOrStronger(Ordering);
 }]>;

 // 8-bit loads
@@ -112,15 +112,16 @@ def : Pat<(relaxed_load<atomic_load_64>
 class releasing_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  assert(Ordering != AcquireRelease && "unexpected store ordering");
-  return isAtLeastRelease(Ordering);
+  assert(Ordering != AtomicOrdering::AcquireRelease &&
+         "unexpected store ordering");
+  return isReleaseOrStronger(Ordering);
 }]>;

 // An atomic store operation that doesn't actually need to be atomic on AArch64.
 class relaxed_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return !isAtLeastRelease(Ordering);
+  return !isReleaseOrStronger(Ordering);
 }]>;

 // 8-bit stores
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 00446b61ed7..4c006d862fa 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -3011,7 +3011,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
   if (Subtarget->isMClass()) {
     // Only a full system barrier exists in the M-class architectures.
     Domain = ARM_MB::SY;
-  } else if (Subtarget->isSwift() && Ord == Release) {
+  } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
     // Swift happens to implement ISHST barriers in a way that's compatible with
     // Release semantics but weaker than ISH so we'd be fools not to use
     // it. Beware: other processors probably don't!
@@ -6932,13 +6932,13 @@ void ARMTargetLowering::ExpandDIV_Windows(
 }

 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
-  // Monotonic load/store is legal for all targets
-  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
-    return Op;
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+    // Acquire/Release load/store is not legal for targets without a dmb or
+    // equivalent available.
+    return SDValue();

-  // Acquire/Release load/store is not legal for targets without a
-  // dmb or equivalent available.
-  return SDValue();
+  // Monotonic load/store is legal for all targets.
+  return Op;
 }

 static void ReplaceREADCYCLECOUNTER(SDNode *N,
@@ -12076,18 +12076,18 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
   switch (Ord) {
-  case NotAtomic:
-  case Unordered:
+  case AtomicOrdering::NotAtomic:
+  case AtomicOrdering::Unordered:
     llvm_unreachable("Invalid fence: unordered/non-atomic");
-  case Monotonic:
-  case Acquire:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Acquire:
     return nullptr; // Nothing to do
-  case SequentiallyConsistent:
+  case AtomicOrdering::SequentiallyConsistent:
     if (!IsStore)
       return nullptr; // Nothing to do
     /*FALLTHROUGH*/
-  case Release:
-  case AcquireRelease:
+  case AtomicOrdering::Release:
+  case AtomicOrdering::AcquireRelease:
     if (Subtarget->isSwift())
       return makeDMB(Builder, ARM_MB::ISHST);
     // FIXME: add a comment with a link to documentation justifying this.
@@ -12101,15 +12101,15 @@ Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                   AtomicOrdering Ord, bool IsStore,
                                                   bool IsLoad) const {
   switch (Ord) {
-  case NotAtomic:
-  case Unordered:
+  case AtomicOrdering::NotAtomic:
+  case AtomicOrdering::Unordered:
     llvm_unreachable("Invalid fence: unordered/not-atomic");
-  case Monotonic:
-  case Release:
+  case AtomicOrdering::Monotonic:
+  case AtomicOrdering::Release:
     return nullptr; // Nothing to do
-  case Acquire:
-  case AcquireRelease:
-  case SequentiallyConsistent:
+  case AtomicOrdering::Acquire:
+  case AtomicOrdering::AcquireRelease:
+  case AtomicOrdering::SequentiallyConsistent:
     return makeDMB(Builder, ARM_MB::ISH);
   }
   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
@@ -12204,7 +12204,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                          AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
-  bool IsAcquire = isAtLeastAcquire(Ord);
+  bool IsAcquire = isAcquireOrStronger(Ord);

   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
   // intrinsic must return {i32, i32} and we have to recombine them into a
@@ -12248,7 +12248,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                                Value *Addr,
                                                AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
-  bool IsRelease = isAtLeastRelease(Ord);
+  bool IsRelease = isReleaseOrStronger(Ord);

   // Since the intrinsics must have legal type, the i64 intrinsics take two
   // parameters: "i32, i32". We must marshal Val into the appropriate form
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index ca3b9d95f45..d116e9d08a3 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -4761,7 +4761,7 @@ def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
 class acquiring_load<PatFrag base>
   : PatFrag<(ops node:$ptr), (base node:$ptr), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastAcquire(Ordering);
+  return isAcquireOrStronger(Ordering);
 }]>;

 def atomic_load_acquire_8  : acquiring_load<atomic_load_8>;
@@ -4771,7 +4771,7 @@ def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
 class releasing_store<PatFrag base>
   : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
   AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
-  return isAtLeastRelease(Ordering);
+  return isReleaseOrStronger(Ordering);
 }]>;

 def atomic_store_release_8 : releasing_store<atomic_store_8>;
diff --git a/llvm/lib/Target/CppBackend/CPPBackend.cpp b/llvm/lib/Target/CppBackend/CPPBackend.cpp
index cfa1f2ce619..a086e710d16 100644
--- a/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -1091,13 +1091,14 @@ std::string CppWriter::getOpName(const Value* V) {

 static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
   switch (Ordering) {
-    case NotAtomic: return "NotAtomic";
-    case Unordered: return "Unordered";
-    case Monotonic: return "Monotonic";
-    case Acquire: return "Acquire";
-    case Release: return "Release";
-    case AcquireRelease: return "AcquireRelease";
-    case SequentiallyConsistent: return "SequentiallyConsistent";
+    case AtomicOrdering::NotAtomic: return "NotAtomic";
+    case AtomicOrdering::Unordered: return "Unordered";
+    case AtomicOrdering::Monotonic: return "Monotonic";
+    case AtomicOrdering::Acquire: return "Acquire";
+    case AtomicOrdering::Release: return "Release";
+    case AtomicOrdering::AcquireRelease: return "AcquireRelease";
+    case AtomicOrdering::SequentiallyConsistent:
+      return "SequentiallyConsistent";
   }
   llvm_unreachable("Unknown ordering");
 }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 70d00c2682a..0aedb419201 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8323,9 +8323,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
-  if (Ord == SequentiallyConsistent)
+  if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
-  if (isAtLeastRelease(Ord))
+  if (isReleaseOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   return nullptr;
 }
@@ -8333,7 +8333,7 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                   AtomicOrdering Ord, bool IsStore,
                                                   bool IsLoad) const {
-  if (IsLoad && isAtLeastAcquire(Ord))
+  if (IsLoad && isAcquireOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   // FIXME: this is too conservative, a dependent branch + isync is enough.
   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index ebb77924324..b0fa6a19a9a 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2929,12 +2929,12 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
 }

 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
-  // Monotonic load/stores are legal.
-  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
-    return Op;
-
-  // Otherwise, expand with a fence.
+  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+    // Expand with a fence.
     return SDValue();
+
+  // Monotonic load/stores are legal.
+  return Op;
 }

 SDValue SparcTargetLowering::
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 9848c2a571a..31011063247 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3130,9 +3130,11 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
-                                      Op.getOperand(0)), 0);
+                                      Op.getOperand(0)),
+                   0);
   }

   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index eb3f0350846..8ff00194b25 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -20464,7 +20464,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
   // lowered to just a load without a fence. A mfence flushes the store buffer,
   // making the optimization clearly correct.
-  // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+  // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
   // otherwise, we might be able to be more aggressive on relaxed idempotent
   // rmw. In practice, they do not look useful, so we don't try to be
   // especially clever.
@@ -20503,7 +20503,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     if (Subtarget.hasMFence())
       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
@@ -20986,7 +20987,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
   // FIXME: On 32-bit, store -> fist or movq would be more efficient
   //        (The only way to get a 16-byte store is cmpxchg16b)
   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
-  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+  if (cast<AtomicSDNode>(Node)->getOrdering() ==
+          AtomicOrdering::SequentiallyConsistent ||
       !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
     SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                  cast<AtomicSDNode>(Node)->getMemoryVT(),
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 90da23b8cdb..6dbaa84a680 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -970,8 +970,9 @@ SDValue XCoreTargetLowering::
 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic load must be aligned");
@@ -1000,8 +1001,9 @@ SDValue XCoreTargetLowering::
 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast<AtomicSDNode>(Op);
   assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic store must be aligned");