Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86FixupBWInsts.cpp       2
-rw-r--r--  llvm/lib/Target/X86/X86FixupLEAs.cpp          2
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp      2
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp       4
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp      22
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h         2
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp         10
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.td          10
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp       2
-rw-r--r--  llvm/lib/Target/X86/X86PadShortFunction.cpp   2
-rw-r--r--  llvm/lib/Target/X86/X86SelectionDAGInfo.cpp   2
11 files changed, 30 insertions, 30 deletions
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index be4479f871e..a0282ffd5f2 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -150,7 +150,7 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
   this->MF = &MF;
   TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
-  OptForSize = MF.getFunction().optForSize();
+  OptForSize = MF.getFunction().hasOptSize();
   MLI = &getAnalysis<MachineLoopInfo>();
   LiveRegs.init(TII->getRegisterInfo());
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index 9a227311f32..311957a656c 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -200,7 +200,7 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
   bool IsSlowLEA = ST.slowLEA();
   bool IsSlow3OpsLEA = ST.slow3OpsLEA();
-  OptIncDec = !ST.slowIncDec() || Func.getFunction().optForSize();
+  OptIncDec = !ST.slowIncDec() || Func.getFunction().hasOptSize();
   OptLEA = ST.LEAusesAG() || IsSlowLEA || IsSlow3OpsLEA;
   if (!OptLEA && !OptIncDec)
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index ebe739257a3..6d8bcd7cd01 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -2810,7 +2810,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
     StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
     if (StackAdjustment) {
-      if (!(F.optForMinSize() &&
+      if (!(F.hasMinSize() &&
             adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
         BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
                              /*InEpilogue=*/false);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 77c3aa73b7e..3a9ef4189ff 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -183,8 +183,8 @@ namespace {
                                                "indirect-tls-seg-refs");
       // OptFor[Min]Size are used in pattern predicates that isel is matching.
-      OptForSize = MF.getFunction().optForSize();
-      OptForMinSize = MF.getFunction().optForMinSize();
+      OptForSize = MF.getFunction().hasOptSize();
+      OptForMinSize = MF.getFunction().hasMinSize();
       assert((!OptForMinSize || OptForSize) &&
              "OptForMinSize implies OptForSize");
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d599a61e70d..71ea61b677b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7759,7 +7759,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
   // TODO: If multiple splats are generated to load the same constant,
   // it may be detrimental to overall size. There needs to be a way to detect
   // that condition to know if this is truly a size win.
-  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
   // Handle broadcasting a single constant scalar from the constant pool
   // into a vector.
@@ -10666,7 +10666,7 @@ static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
   case MVT::v32i16:
   case MVT::v64i8: {
     // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
-    bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+    bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
     if (!OptForSize) {
       if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                                  Subtarget, DAG))
@@ -16982,7 +16982,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
     // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
     // combine either bitwise AND or insert of float 0.0 to set these bits.
-    bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
+    bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
     if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
       // If this is an insertion of 32-bits into the low 32-bits of
       // a vector, we prefer to generate a blend with immediate rather
@@ -17636,7 +17636,7 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
          "Unexpected funnel shift type!");
   // Expand slow SHLD/SHRD cases if we are not optimizing for size.
-  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
   if (!OptForSize && Subtarget.isSHLDSlow())
     return SDValue();
@@ -18895,7 +18895,7 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
 /// implementation, and likely shuffle complexity of the alternate sequence.
 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
-  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
   bool HasFastHOps = Subtarget.hasFastHorizontalOps();
   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
 }
@@ -19376,7 +19376,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
        !cast<ConstantSDNode>(Op0)->getAPIntValue().isSignedIntN(8)) ||
       (isa<ConstantSDNode>(Op1) &&
        !cast<ConstantSDNode>(Op1)->getAPIntValue().isSignedIntN(8))) &&
-      !DAG.getMachineFunction().getFunction().optForMinSize() &&
+      !DAG.getMachineFunction().getFunction().hasMinSize() &&
       !Subtarget.isAtom()) {
     unsigned ExtendOp = isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19550,7 +19550,7 @@ static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
   } else {
     // Use BT if the immediate can't be encoded in a TEST instruction or we
     // are optimizing for size and the immedaite won't fit in a byte.
-    bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+    bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
     if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
         isPowerOf2_64(AndRHSVal)) {
       Src = AndLHS;
@@ -35932,7 +35932,7 @@ static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
   // pmulld is supported since SSE41. It is better to use pmulld
   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
   // the expansion.
-  bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
+  bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
     return SDValue();
@@ -36240,7 +36240,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
   if (!MulConstantOptimization)
     return SDValue();
   // An imul is usually smaller than the alternative sequence.
-  if (DAG.getMachineFunction().getFunction().optForMinSize())
+  if (DAG.getMachineFunction().getFunction().hasMinSize())
     return SDValue();
   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -37659,7 +37659,7 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
     return SDValue();
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
-  bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
   unsigned Bits = VT.getScalarSizeInBits();
   // SHLD/SHRD instructions have lower register pressure, but on some
@@ -39938,7 +39938,7 @@ static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
   // If we have to respect NaN inputs, this takes at least 3 instructions.
   // Favor a library call when operating on a scalar and minimizing code size.
-  if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
+  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
     return SDValue();
   EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 7042a2981ec..c481de02421 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -829,7 +829,7 @@ namespace llvm {
     }
     bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
-      if (DAG.getMachineFunction().getFunction().optForMinSize())
+      if (DAG.getMachineFunction().getFunction().hasMinSize())
         return false;
       return true;
     }
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 4aa365cf8fe..3ed88d9840b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1453,7 +1453,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
   case X86::VBLENDPDrri:
   case X86::VBLENDPSrri:
     // If we're optimizing for size, try to use MOVSD/MOVSS.
-    if (MI.getParent()->getParent()->getFunction().optForSize()) {
+    if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
       unsigned Mask, Opc;
       switch (MI.getOpcode()) {
       default: llvm_unreachable("Unreachable!");
@@ -4820,14 +4820,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   // For CPUs that favor the register form of a call or push,
   // do not fold loads into calls or pushes, unless optimizing for size
   // aggressively.
-  if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
+  if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
       (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
        MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
        MI.getOpcode() == X86::PUSH64r))
     return nullptr;
   // Avoid partial and undef register update stalls unless optimizing for size.
-  if (!MF.getFunction().optForSize() &&
+  if (!MF.getFunction().hasOptSize() &&
       (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
        shouldPreventUndefRegUpdateMemFold(MF, MI)))
     return nullptr;
@@ -4995,7 +4995,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
     return nullptr;
   // Avoid partial and undef register update stalls unless optimizing for size.
-  if (!MF.getFunction().optForSize() &&
+  if (!MF.getFunction().hasOptSize() &&
      (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
       shouldPreventUndefRegUpdateMemFold(MF, MI)))
    return nullptr;
@@ -5195,7 +5195,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   if (NoFusing)
     return nullptr;
   // Avoid partial and undef register update stalls unless optimizing for size.
-  if (!MF.getFunction().optForSize() &&
+  if (!MF.getFunction().hasOptSize() &&
      (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
       shouldPreventUndefRegUpdateMemFold(MF, MI)))
    return nullptr;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index e07553f10d6..f5ff8d24303 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -925,12 +925,12 @@ def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
 // the Function object through the <Target>Subtarget and objections were raised
 // to that (see post-commit review comments for r301750).
 let RecomputePerFunction = 1 in {
-  def OptForSize : Predicate<"MF->getFunction().optForSize()">;
-  def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
-  def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
+  def OptForSize : Predicate<"MF->getFunction().hasOptSize()">;
+  def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
+  def OptForSpeed : Predicate<"!MF->getFunction().hasOptSize()">;
   def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
-                            "MF->getFunction().optForSize()">;
-  def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || "
+                            "MF->getFunction().hasOptSize()">;
+  def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().hasOptSize() || "
                                         "!Subtarget->hasSSE41()">;
 }
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 6cec0b80604..d415197b55c 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -700,7 +700,7 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
     // Remove redundant address calculations. Do it only for -Os/-Oz since only
     // a code size gain is expected from this part of the pass.
-    if (MF.getFunction().optForSize())
+    if (MF.getFunction().hasOptSize())
       Changed |= removeRedundantAddrCalc(LEAs);
   }
diff --git a/llvm/lib/Target/X86/X86PadShortFunction.cpp b/llvm/lib/Target/X86/X86PadShortFunction.cpp
index 99df3d60d98..fb8b0af5fcf 100644
--- a/llvm/lib/Target/X86/X86PadShortFunction.cpp
+++ b/llvm/lib/Target/X86/X86PadShortFunction.cpp
@@ -97,7 +97,7 @@ bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
   if (skipFunction(MF.getFunction()))
     return false;
-  if (MF.getFunction().optForSize())
+  if (MF.getFunction().hasOptSize())
     return false;
   if (!MF.getSubtarget<X86Subtarget>().padShortFunctions())
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index fea4d84e98c..fb18525b592 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -248,7 +248,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
     Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
     if (Repeats.BytesLeft() > 0 &&
-        DAG.getMachineFunction().getFunction().optForMinSize()) {
+        DAG.getMachineFunction().getFunction().hasMinSize()) {
      // When aggressively optimizing for size, avoid generating the code to
      // handle BytesLeft.
      Repeats.AVT = MVT::i8;
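
Every hunk above is the same mechanical rename on llvm::Function: optForSize() becomes hasOptSize() and optForMinSize() becomes hasMinSize(), with minsize still implying optsize (see the assert kept in X86ISelDAGToDAG.cpp). A minimal sketch of the post-rename query pattern in a machine-function pass follows; the helper name shouldFavorCodeSize is hypothetical and not part of this commit:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/Function.h"

    // Hypothetical helper illustrating the renamed accessors.
    static bool shouldFavorCodeSize(const llvm::MachineFunction &MF) {
      const llvm::Function &F = MF.getFunction();
      // hasMinSize() implies hasOptSize(), so this single check covers both
      // the optsize and minsize function attributes.
      return F.hasOptSize();
    }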

