Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp |  6 +++---
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 28 ++++++++++++++--------------
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.td     |  6 +++---
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 64309bcac4a..8e85cd4f24a 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -752,7 +752,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
     return false;
   LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
   if (!LD ||
-      LD->isVolatile() ||
+      !LD->isSimple() ||
       LD->getAddressingMode() != ISD::UNINDEXED ||
       LD->getExtensionType() != ISD::NON_EXTLOAD)
     return false;
@@ -2311,10 +2311,10 @@ bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
     return false;
 
   // We can allow a full vector load here since narrowing a load is ok unless
-  // it's volatile.
+  // it's volatile or atomic.
   if (ISD::isNON_EXTLoad(N.getNode())) {
     LoadSDNode *LD = cast<LoadSDNode>(N);
-    if (!LD->isVolatile() &&
+    if (LD->isSimple() &&
         IsProfitableToFold(N, LD, Root) &&
         IsLegalToFold(N, Parent, Root, OptLevel)) {
       PatternNodeWithChain = N;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f96eaf9cd43..1e3127f1d12 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7569,7 +7569,7 @@ static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
   // the shuffle mask.
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
     SDValue Ptr = LD->getBasePtr();
-    if (!ISD::isNormalLoad(LD) || LD->isVolatile())
+    if (!ISD::isNormalLoad(LD) || !LD->isSimple())
       return SDValue();
     EVT PVT = LD->getValueType(0);
     if (PVT != MVT::i32 && PVT != MVT::f32)
@@ -12512,7 +12512,7 @@ static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
     // If we can't broadcast from a register, check that the input is a load.
     if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
       return SDValue();
-  } else if (MayFoldLoad(V) && !cast<LoadSDNode>(V)->isVolatile()) {
+  } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
     // 32-bit targets need to load i64 as a f64 and then bitcast the result.
     if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
       BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
@@ -21561,7 +21561,7 @@ static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
   // Splitting volatile memory ops is not allowed unless the operation was not
   // legal to begin with. We are assuming the input op is legal (this transform
   // is only used for targets with AVX).
-  if (Store->isVolatile())
+  if (!Store->isSimple())
     return SDValue();
 
   MVT StoreVT = StoredVal.getSimpleValueType();
@@ -21597,7 +21597,7 @@ static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
   // Splitting volatile memory ops is not allowed unless the operation was not
   // legal to begin with. We are assuming the input op is legal (this transform
   // is only used for targets with AVX).
-  if (Store->isVolatile())
+  if (!Store->isSimple())
     return SDValue();
 
   MVT StoreSVT = StoreVT.getScalarType();
@@ -34317,7 +34317,7 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
   if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
       ISD::isNormalLoad(N->getOperand(0).getNode())) {
     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
-    if (!LN->isVolatile()) {
+    if (LN->isSimple()) {
       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
       SDValue VZLoad =
@@ -35238,7 +35238,7 @@ XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
 
   LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
 
-  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
+  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || !LN0->isSimple())
     return SDValue();
 
   // If there's a bitcast before the shuffle, check if the load type and
@@ -40877,8 +40877,8 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
         !ExperimentalVectorWideningLegalization) ||
        (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
       isa<LoadSDNode>(St->getValue()) &&
-      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
-      St->getChain().hasOneUse() && !St->isVolatile()) {
+      cast<LoadSDNode>(St->getValue())->isSimple() &&
+      St->getChain().hasOneUse() && St->isSimple()) {
     LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
 
     SmallVector<SDValue, 8> Ops;
@@ -42044,8 +42044,8 @@ static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
-    // Unless the load is volatile.
-    if (!LN->isVolatile()) {
+    // Unless the load is volatile or atomic.
+    if (LN->isSimple()) {
       SDLoc dl(N);
       unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
       MVT MemVT = MVT::getIntegerVT(NumBits);
@@ -42079,8 +42079,8 @@ static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
-    // Unless the load is volatile.
-    if (!LN->isVolatile()) {
+    // Unless the load is volatile or atomic.
+    if (LN->isSimple()) {
       SDLoc dl(N);
       unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
       MVT MemVT = MVT::getFloatingPointVT(NumBits);
@@ -43346,7 +43346,7 @@ static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
     if (Subtarget.hasDQI() && VT != MVT::f80)
       return SDValue();
 
-    if (!Ld->isVolatile() && !VT.isVector() &&
+    if (Ld->isSimple() && !VT.isVector() &&
         ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
         !Subtarget.is64Bit() && LdVT == MVT::i64) {
       SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
@@ -44873,7 +44873,7 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
   if (!DCI.isBeforeLegalizeOps() &&
       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
     auto *Ld = cast<LoadSDNode>(In);
-    if (!Ld->isVolatile()) {
+    if (Ld->isSimple()) {
       MVT SVT = In.getSimpleValueType().getVectorElementType();
       ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ?
                                ISD::SEXTLOAD : ISD::ZEXTLOAD;
       EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 75958dd6c9e..a492d57fb8c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1117,7 +1117,7 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 2 && !LD->isVolatile();
+    return LD->getAlignment() >= 2 && LD->isSimple();
   return false;
 }]>;
 
@@ -1127,7 +1127,7 @@ def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 4 && !LD->isVolatile();
+    return LD->getAlignment() >= 4 && LD->isSimple();
   return false;
 }]>;
 
@@ -1184,7 +1184,7 @@ def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [
   if (LD->getMemoryVT() == MVT::i32)
     return true;
 
-  return LD->getAlignment() >= 4 && !LD->isVolatile();
+  return LD->getAlignment() >= 4 && LD->isSimple();
 }]>;
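
Every hunk above replaces a !isVolatile() guard with isSimple(). In SelectionDAG, MemSDNode::isSimple() is true only for memory operations that are neither volatile nor atomic, so the new guards also keep these narrowing, splitting, and load-folding transforms away from atomic loads and stores, which the old volatile-only checks let through. A minimal sketch of that predicate relationship; MemOp below is a hypothetical self-contained stand-in for illustration, not LLVM's actual MemSDNode class:

#include <cassert>

// Hypothetical stand-in for llvm::MemSDNode, illustrating the predicate
// this patch switches to: "simple" means neither volatile nor atomic.
struct MemOp {
  bool Volatile = false;
  bool Atomic = false;

  bool isVolatile() const { return Volatile; }
  bool isAtomic() const { return Atomic; }
  // Only simple operations may be narrowed, split, or folded freely.
  bool isSimple() const { return !isVolatile() && !isAtomic(); }
};

int main() {
  MemOp Plain;    // ordinary load/store
  MemOp AtomicLd; // atomic, but not volatile
  AtomicLd.Atomic = true;

  // The old guard (!isVolatile()) lets the atomic op through...
  assert(!Plain.isVolatile() && !AtomicLd.isVolatile());
  // ...while the new guard (isSimple()) correctly rejects it.
  assert(Plain.isSimple() && !AtomicLd.isSimple());
  return 0;
}

Tampering with an atomic access (for example, splitting a 128-bit atomic-adjacent store or narrowing an atomic load) can break its single-operation guarantee, which is why isSimple() is the right gate for all of these folds.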