author | Sanjay Patel <spatel@rotateright.com> | 2016-02-13 17:26:29 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2016-02-13 17:26:29 +0000 |
commit | e9bf993cee2bd386bcf2b8a1ced27ebf46a6ad92 (patch) | |
tree | 877ff68a253d83cfb2360ee335390058029694c9 /llvm/lib/Target | |
parent | 4b36616af32c838eb37dd6bdc9125069423889e5 (diff) | |
download | bcm5719-llvm-e9bf993cee2bd386bcf2b8a1ced27ebf46a6ad92.tar.gz bcm5719-llvm-e9bf993cee2bd386bcf2b8a1ced27ebf46a6ad92.zip |
[x86-64] allow mfence even with -mno-sse (PR23203)
As shown in:
https://llvm.org/bugs/show_bug.cgi?id=23203
...we currently die because lowering believes that mfence is allowed without SSE2 on x86-64,
but the instruction def doesn't know that.
I don't know if allowing mfence without SSE is right, but if not, at least now it's consistently wrong. :)
Differential Revision: http://reviews.llvm.org/D17219
llvm-svn: 260828
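For readers trying to picture the failure mode: a sequentially consistent fence on an x86-64 target with SSE disabled is lowered to X86ISD::MFENCE, but before this patch the MFENCE instruction definition still required SSE2, so instruction selection died. A minimal sketch of that scenario (my own illustration, not the commit's test case; the function and file names are made up) looks like:

```llvm
; Hypothetical reproducer for the PR23203 scenario -- not the commit's test file.
; Build sketch: llc -mtriple=x86_64-unknown-unknown -mattr=-sse fence.ll -o -
; The seq_cst fence is lowered to X86ISD::MFENCE; with this patch the MFENCE
; def is gated on HasMFence (SSE2 or 64-bit), so selection should succeed even
; though SSE is disabled.
define void @seq_cst_fence() {
  fence seq_cst
  ret void
}
```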
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 11 |
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.td | 1 |
-rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 4 |
-rw-r--r-- | llvm/lib/Target/X86/X86Subtarget.h | 5 |
4 files changed, 11 insertions, 10 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 277fb815382..a1050381cfd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19717,13 +19717,6 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   }
 }
 
-static bool hasMFENCE(const X86Subtarget &Subtarget) {
-  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
-  // no-sse2). There isn't any reason to disable it if the target processor
-  // supports it.
-  return Subtarget.hasSSE2() || Subtarget.is64Bit();
-}
-
 LoadInst *
 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
@@ -19763,7 +19756,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
     // the IR level, so we must wrap it in an intrinsic.
     return nullptr;
 
-  if (!hasMFENCE(Subtarget))
+  if (!Subtarget.hasMFence())
     // FIXME: it might make sense to use a locked operation here but on a
     // different cache-line to prevent cache-line bouncing. In practice it
     // is probably a small win, and x86 processors without mfence are rare
@@ -19794,7 +19787,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
   if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
-    if (hasMFENCE(Subtarget))
+    if (Subtarget.hasMFence())
       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
 
     SDValue Chain = Op.getOperand(0);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 7178f1f6014..712711bea3e 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -845,6 +845,7 @@ def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
 def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
 def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
+def HasMFence : Predicate<"Subtarget->hasMFence()">;
 
 //===----------------------------------------------------------------------===//
 // X86 Instruction Format Definitions.
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 499c4c11c59..9c127ff5650 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -3762,6 +3762,8 @@ def PAUSE : I<0x90, RawFrm, (outs), (ins),
 
 let SchedRW = [WriteFence] in {
 // Load, store, and memory fence
+// TODO: As with mfence, we may want to ease the availablity of sfence/lfence
+// to include any 64-bit target.
 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
                "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
                PS, Requires<[HasSSE1]>;
@@ -3770,7 +3772,7 @@ def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
                TB, Requires<[HasSSE2]>;
 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
                "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
-               TB, Requires<[HasSSE2]>;
+               TB, Requires<[HasMFence]>;
 } // SchedRW
 
 def : Pat<(X86SFence), (SFENCE)>;
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 501770ca0d0..86f25408650 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -446,6 +446,11 @@ public:
   bool isSLM() const { return X86ProcFamily == IntelSLM; }
   bool useSoftFloat() const { return UseSoftFloat; }
 
+  /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
+  /// no-sse2). There isn't any reason to disable it if the target processor
+  /// supports it.
+  bool hasMFence() const { return hasSSE2() || is64Bit(); }
+
   const Triple &getTargetTriple() const { return TargetTriple; }
 
   bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
```
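As a usage note on the new Subtarget.hasMFence() predicate (a sketch of my own, not part of the commit): the helper deliberately stays false for 32-bit targets without SSE2, so a sequentially consistent fence there still avoids the mfence path in LowerATOMIC_FENCE and takes its alternative expansion instead:

```llvm
; Illustrative counterpart only -- on a 32-bit target without SSE2,
; hasMFence() returns false, so LowerATOMIC_FENCE cannot emit 'mfence'
; and must fall back to its alternative expansion for the seq_cst fence.
; Build sketch: llc -mtriple=i386-unknown-unknown -mattr=-sse2 fence32.ll -o -
define void @seq_cst_fence_32() {
  fence seq_cst
  ret void
}
```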