Diffstat (limited to 'llvm/lib/Target')
 llvm/lib/Target/ARM/ARMISelLowering.cpp         | 14 +++++++-------
 llvm/lib/Target/ARM/ARMISelLowering.h           |  5 +++++
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp |  1 -
 llvm/lib/Target/Mips/MipsISelLowering.cpp       |  1 -
 llvm/lib/Target/Mips/MipsISelLowering.h         |  4 ++++
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp     |  1 -
 llvm/lib/Target/PowerPC/PPCISelLowering.h       |  4 ++++
 llvm/lib/Target/Sparc/SparcISelLowering.cpp     |  4 ----
 llvm/lib/Target/Sparc/SparcISelLowering.h       |  7 +++++++
 llvm/lib/Target/XCore/XCoreISelLowering.cpp     |  1 -
 llvm/lib/Target/XCore/XCoreISelLowering.h       |  3 +++
 11 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 7df5519d94f..de18bafd382 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -840,6 +840,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// the default expansion. If we are targeting a single threaded system,
// then set them all for expand so we can lower them later into their
// non-atomic form.
+ InsertFencesForAtomic = false;
if (TM.Options.ThreadModel == ThreadModel::Single)
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
else if (Subtarget->hasAnyDataBarrier() && (!Subtarget->isThumb() ||
@@ -852,7 +853,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// if they can be combined with nearby atomic loads and stores.
if (!Subtarget->hasV8Ops()) {
// Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
- setInsertFencesForAtomic(true);
+ InsertFencesForAtomic = true;
}
} else {
// If there's anything we can use as a barrier, go through custom lowering
@@ -11997,9 +11998,6 @@ Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (!getInsertFencesForAtomic())
- return nullptr;
-
switch (Ord) {
case NotAtomic:
case Unordered:
@@ -12025,9 +12023,6 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (!getInsertFencesForAtomic())
- return nullptr;
-
switch (Ord) {
case NotAtomic:
case Unordered:
@@ -12081,6 +12076,11 @@ bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
return true;
}
+bool ARMTargetLowering::shouldInsertFencesForAtomic(
+ const Instruction *I) const {
+ return InsertFencesForAtomic;
+}
+
// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
return Subtarget->isTargetMachO();
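With the early getInsertFencesForAtomic() returns deleted from emitLeadingFence/emitTrailingFence, the check moves to the caller. In the IR-level atomic expansion pass, the consuming pattern then looks roughly like the following sketch (simplified, not the verbatim pass; AI, Ord, IsStore, and IsLoad stand for whatever the caller has in hand):

  // Sketch: the expansion pass asks the target per instruction, then
  // brackets the atomic with target-emitted fences (dmb ish on ARM).
  IRBuilder<> Builder(AI);
  if (TLI->shouldInsertFencesForAtomic(AI)) {
    TLI->emitLeadingFence(Builder, Ord, IsStore, IsLoad);
    // ... lower the atomic operation itself, now as monotonic ...
    Builder.SetInsertPoint(AI->getNextNode());
    TLI->emitTrailingFence(Builder, Ord, IsStore, IsLoad);
  }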
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index a502ca12fa8..205efb294f4 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -453,6 +453,7 @@ namespace llvm {
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
unsigned Factor) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override;
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
@@ -486,6 +487,10 @@ namespace llvm {
///
unsigned ARMPCLabelIndex;
+ // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
+ // check.
+ bool InsertFencesForAtomic;
+
void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
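The TODO attached to the new InsertFencesForAtomic member points at the intended end state: compute the answer inside the hook rather than caching a flag in the constructor. A hypothetical follow-up (not part of this patch, and ignoring the single-threaded and Thumb special cases the constructor handles) might read:

  bool ARMTargetLowering::shouldInsertFencesForAtomic(
      const Instruction *I) const {
    // Hypothetical: pre-v8 cores with a data barrier want explicit dmb
    // fences around atomics; v8 can use acquire/release instructions
    // instead and combine them with nearby atomic loads and stores.
    return Subtarget->hasAnyDataBarrier() && !Subtarget->hasV8Ops();
  }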
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 3740e7f809e..75244391fdd 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1724,7 +1724,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setPrefLoopAlignment(4);
setPrefFunctionAlignment(4);
setMinFunctionAlignment(2);
- setInsertFencesForAtomic(false);
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
if (EnableHexSDNodeSched)
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 2b3912ff02d..2d9667102c6 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -396,7 +396,6 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
}
- setInsertFencesForAtomic(true);
if (!Subtarget.hasMips32r2()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 0dc683e3df2..f1b31243623 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -561,6 +561,10 @@ namespace llvm {
unsigned getJumpTableEncoding() const override;
bool useSoftFloat() const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
+
/// Emit a sign-extension using sll/sra, seb, or seh appropriately.
MachineBasicBlock *emitSignExtendToI32InReg(MachineInstr *MI,
MachineBasicBlock *BB,
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 7eab758180f..9d1eee0b9f5 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -916,7 +916,6 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
break;
}
- setInsertFencesForAtomic(true);
if (Subtarget.enableMachineScheduler())
setSchedulingPreference(Sched::Source);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 44bcb8942cf..c83c86f16ed 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -508,6 +508,10 @@ namespace llvm {
unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
+
Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
bool IsStore, bool IsLoad) const override;
Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 37613126d6f..d08d30aa82f 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1603,10 +1603,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
}
// ATOMICs.
- // FIXME: We insert fences for each atomics and generate sub-optimal code
- // for PSO/TSO. Also, implement other atomicrmw operations.
-
- setInsertFencesForAtomic(true);
setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 4e46709cfc0..c470fa8d661 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -180,6 +180,13 @@ namespace llvm {
return VT != MVT::f128;
}
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ // FIXME: We insert fences for each atomic and generate
+ // sub-optimal code for PSO/TSO. (Approximately nobody uses any
+ // mode but TSO, which makes this even more silly.)
+ return true;
+ }
+
void ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>& Results,
SelectionDAG &DAG) const override;
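The FIXME leaves the door open to a memory-model-aware answer. Purely to illustrate the direction it gestures at (hypothetical, not in this patch): TSO already orders everything except store->load, so a refined hook could skip fences where the hardware guarantee suffices, e.g.:

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    // Hypothetical TSO refinement: atomic loads need no fence under
    // TSO; stores and read-modify-write operations still need the
    // StoreLoad barrier that TSO does not provide.
    return !isa<LoadInst>(I);
  }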
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index ef572b7dc51..90da23b8cdb 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -156,7 +156,6 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
// Atomic operations
// We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
// As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
- setInsertFencesForAtomic(true);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.h b/llvm/lib/Target/XCore/XCoreISelLowering.h
index b6f09ff418b..287a77e2653 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -229,6 +229,9 @@ namespace llvm {
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
};
}
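Taken together, the migration for any remaining (e.g. out-of-tree) target is mechanical. A minimal sketch, assuming a hypothetical FooTargetLowering that previously called setInsertFencesForAtomic(true) in its constructor:

  class FooTargetLowering : public TargetLowering {
    // Replaces the old constructor call setInsertFencesForAtomic(true).
    // The hook may now also inspect the individual instruction.
    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }
  };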