diff options
author | Eugene Zelenko <eugene.zelenko@gmail.com> | 2016-04-05 20:19:49 +0000 |
---|---|---|
committer | Eugene Zelenko <eugene.zelenko@gmail.com> | 2016-04-05 20:19:49 +0000 |
commit | 1760dc2a232bde2175606ba737938d3032f1e49d (patch) | |
tree | 3859bc6b8b21c8d5073ded16260d7a5b3ff99b8f /llvm/lib/Target/X86 | |
parent | f2fdd013a29b26791490e3a33beda1bacfeec182 (diff) | |
download | bcm5719-llvm-1760dc2a232bde2175606ba737938d3032f1e49d.tar.gz bcm5719-llvm-1760dc2a232bde2175606ba737938d3032f1e49d.zip |
Fix Clang-tidy modernize-deprecated-headers warnings in remaining files; other minor fixes.
Some Include What You Use suggestions were used too.
Use anonymous namespaces in source files.
Differential revision: http://reviews.llvm.org/D18778
llvm-svn: 265454
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 71 |
1 file changed, 38 insertions, 33 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 4105191147a..16f6367f41a 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -34,7 +34,8 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" -#include <stdint.h> +#include <cstdint> + using namespace llvm; #define DEBUG_TYPE "x86-isel" @@ -141,7 +142,7 @@ namespace { } #endif }; -} +} // end anonymous namespace namespace { //===--------------------------------------------------------------------===// @@ -301,7 +302,6 @@ namespace { // Walk all the users of the immediate. for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) { - SDNode *User = *UI; // This user is already selected. Count it as a legitimate use and @@ -393,8 +393,7 @@ namespace { return true; } }; -} - +} // end anonymous namespace bool X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const { @@ -459,10 +458,12 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const { return true; } +namespace { + /// Replace the original chain operand of the call with /// load's chain operand and move load below the call's chain operand. -static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, - SDValue Call, SDValue OrigChain) { +void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, SDValue Call, + SDValue OrigChain) { SmallVector<SDValue, 8> Ops; SDValue Chain = OrigChain.getOperand(0); if (Chain.getNode() == Load.getNode()) @@ -496,7 +497,7 @@ static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, /// Return the CALLSEQ_START by reference as a second output. /// In the case of a tail call, there isn't a callseq node between the call /// chain and the load. 
-static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { +bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { // The transformation is somewhat dangerous if the call's chain was glued to // the call. After MoveBelowOrigChain the load is moved between the call and // the chain, this can create a cycle if the load is not folded. So it is @@ -533,6 +534,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { return false; } +} // end anonymous namespace + void X86DAGToDAGISel::PreprocessISelDAG() { // OptFor[Min]Size are used in pattern predicates that isel is matching. OptForSize = MF->getFunction()->optForSize(); @@ -651,7 +654,6 @@ void X86DAGToDAGISel::PreprocessISelDAG() { } } - /// Emit any code that needs to be executed only in the main function. void X86DAGToDAGISel::emitSpecialCodeForMain() { if (Subtarget->isTargetCygMing()) { @@ -676,7 +678,9 @@ void X86DAGToDAGISel::EmitFunctionEntryCode() { emitSpecialCodeForMain(); } -static bool isDispSafeForFrameIndex(int64_t Val) { +namespace { + +bool isDispSafeForFrameIndex(int64_t Val) { // On 64-bit platforms, we can run into an issue where a frame index // includes a displacement that, when added to the explicit displacement, // will overflow the displacement field. Assuming that the frame index @@ -686,6 +690,8 @@ static bool isDispSafeForFrameIndex(int64_t Val) { return isInt<31>(Val); } +} // end anonymous namespace + bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM) { // Cannot combine ExternalSymbol displacements with integer offsets. @@ -705,7 +711,6 @@ bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset, } AM.Disp = Val; return false; - } bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){ @@ -896,12 +901,14 @@ bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM, return true; } +namespace { + // Insert a node into the DAG at least before the Pos node's position. 
This // will reposition the node as needed, and will assign it a node ID that is <= // the Pos node's ID. Note that this does *not* preserve the uniqueness of node // IDs! The selection DAG must no longer depend on their uniqueness when this // is used. -static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) { +void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) { if (N.getNode()->getNodeId() == -1 || N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) { DAG.RepositionNode(Pos.getNode()->getIterator(), N.getNode()); @@ -913,10 +920,9 @@ static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) { // safe. This allows us to convert the shift and and into an h-register // extract and a scaled index. Returns false if the simplification is // performed. -static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, - uint64_t Mask, - SDValue Shift, SDValue X, - X86ISelAddressMode &AM) { +bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, uint64_t Mask, + SDValue Shift, SDValue X, + X86ISelAddressMode &AM) { if (Shift.getOpcode() != ISD::SRL || !isa<ConstantSDNode>(Shift.getOperand(1)) || !Shift.hasOneUse()) @@ -956,10 +962,9 @@ static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this // allows us to fold the shift into this addressing mode. Returns false if the // transform succeeded. 
-static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, - uint64_t Mask, - SDValue Shift, SDValue X, - X86ISelAddressMode &AM) { +bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, uint64_t Mask, + SDValue Shift, SDValue X, + X86ISelAddressMode &AM) { if (Shift.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shift.getOperand(1))) return true; @@ -1023,10 +1028,8 @@ static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, // Note that this function assumes the mask is provided as a mask *after* the // value is shifted. The input chain may or may not match that, but computing // such a mask is trivial. -static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, - uint64_t Mask, - SDValue Shift, SDValue X, - X86ISelAddressMode &AM) { +bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, uint64_t Mask, + SDValue Shift, SDValue X, X86ISelAddressMode &AM) { if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() || !isa<ConstantSDNode>(Shift.getOperand(1))) return true; @@ -1104,6 +1107,8 @@ static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, return false; } +} // end anonymous namespace + bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM, unsigned Depth) { SDLoc dl(N); @@ -1418,7 +1423,6 @@ bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) { bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment) { - MaskedGatherScatterSDNode *Mgs = dyn_cast<MaskedGatherScatterSDNode>(Parent); if (!Mgs) return false; @@ -1541,7 +1545,6 @@ bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, return false; } - bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) { if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { uint64_t ImmVal = CN->getZExtValue(); @@ -1695,7 +1698,6 @@ bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base, return true; } - 
bool X86DAGToDAGISel::tryFoldLoad(SDNode *P, SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, @@ -1718,9 +1720,11 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() { return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode(); } +namespace { + /// Test whether the given X86ISD::CMP node has any uses which require the SF /// or OF bits to be accurate. -static bool hasNoSignedComparisonUses(SDNode *N) { +bool hasNoSignedComparisonUses(SDNode *N) { // Examine each user of the node. for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE; ++UI) { @@ -1782,10 +1786,9 @@ static bool hasNoSignedComparisonUses(SDNode *N) { /// Check whether or not the chain ending in StoreNode is suitable for doing /// the {load; increment or decrement; store} to modify transformation. -static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, - SDValue StoredVal, SelectionDAG *CurDAG, - LoadSDNode* &LoadNode, SDValue &InputChain) { - +bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, + SDValue StoredVal, SelectionDAG *CurDAG, + LoadSDNode* &LoadNode, SDValue &InputChain) { // is the value stored the result of a DEC or INC? if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false; @@ -1867,7 +1870,7 @@ static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, /// Get the appropriate X86 opcode for an in-memory increment or decrement. /// Opc should be X86ISD::DEC or X86ISD::INC. -static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) { +unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) { if (Opc == X86ISD::DEC) { if (LdVT == MVT::i64) return X86::DEC64m; if (LdVT == MVT::i32) return X86::DEC32m; @@ -1883,6 +1886,8 @@ static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) { llvm_unreachable("unrecognized size for LdVT"); } +} // end anonymous namespace + /// Customized ISel for GATHER operations. 
SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) { // Operands of Gather: VSrc, Base, VIdx, VMask, Scale |