Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86CallFrameOptimization.cpp  6
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp           1
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp           8
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp              5
-rw-r--r--  llvm/lib/Target/X86/X86PadShortFunction.cpp       3
5 files changed, 10 insertions(+), 13 deletions(-)
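
The change replaces open-coded attribute checks with the Function::optForSize() and Function::optForMinSize() helpers. The helper definitions are not part of this diff; the following is a minimal sketch of their intended semantics, inferred from the checks they replace:

// Sketch only: the real definitions live in llvm/IR/Function.h, not in this diff.
bool Function::optForMinSize() const {
  // minsize: optimize aggressively for size.
  return hasFnAttribute(Attribute::MinSize);
}

bool Function::optForSize() const {
  // Any size-optimization request; minsize implies optsize.
  return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
}

In particular, optForSize() covers both attributes, which is why the OptimizeForSize-or-MinSize disjunctions in X86CallFrameOptimization.cpp and X86PadShortFunction.cpp collapse to a single call below.
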
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 323c5bfd408..a7e4ad9cf5a 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -170,11 +170,7 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
return true;
// Don't do this when not optimizing for size.
- bool OptForSize =
- MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
- MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-
- if (!OptForSize)
+ if (!MF.getFunction()->optForSize())
return false;
unsigned StackAlign = TFL->getStackAlignment();
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index d5351d25d6e..d7be9fe238c 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -462,6 +462,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
+ // FIXME: Use Function::optForSize().
OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 08f7bd11b9c..c9244efdc39 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5189,6 +5189,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
const Function *F = DAG.getMachineFunction().getFunction();
+ // FIXME: Use Function::optForSize().
bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// Handle broadcasting a single constant scalar from the constant pool
@@ -11118,8 +11119,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- const Function *F = DAG.getMachineFunction().getFunction();
- bool MinSize = F->hasFnAttribute(Attribute::MinSize);
+ bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
@@ -13195,8 +13195,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
// if we're optimizing for size, however, as that'll allow better folding
// of memory operations.
if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
- !DAG.getMachineFunction().getFunction()->hasFnAttribute(
- Attribute::MinSize) &&
+ !DAG.getMachineFunction().getFunction()->optForMinSize() &&
!Subtarget->isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -23962,6 +23961,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
MachineFunction &MF = DAG.getMachineFunction();
+ // FIXME: Use Function::optForSize().
bool OptForSize =
MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 00fa6e2f425..810fdb77a0d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4875,8 +4875,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isCallRegIndirect &&
- !MF.getFunction()->hasFnAttribute(Attribute::MinSize) &&
+ if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
(MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
MI->getOpcode() == X86::PUSH64r))
@@ -5242,6 +5241,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
+ // FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
@@ -5351,6 +5351,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
+ // FIXME: Use Function::optForSize().
if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return nullptr;
diff --git a/llvm/lib/Target/X86/X86PadShortFunction.cpp b/llvm/lib/Target/X86/X86PadShortFunction.cpp
index 143e70bda9e..0466175ef71 100644
--- a/llvm/lib/Target/X86/X86PadShortFunction.cpp
+++ b/llvm/lib/Target/X86/X86PadShortFunction.cpp
@@ -93,8 +93,7 @@ FunctionPass *llvm::createX86PadShortFunctions() {
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
- MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
+ if (MF.getFunction()->optForSize()) {
return false;
}
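
For reference, a small standalone check of the equivalence the updated call sites rely on (a hypothetical test, assuming the helper semantics sketched above; it is not part of this change): a function carrying only the minsize attribute satisfies both predicates, so replacing the two-attribute checks with optForSize() does not change behavior for minsize functions.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("size-attrs", Ctx);
  llvm::FunctionType *FT =
      llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
  llvm::Function *F =
      llvm::Function::Create(FT, llvm::Function::ExternalLinkage, "f", &M);

  // minsize alone should imply both predicates, matching the old
  // OptimizeForSize-or-MinSize checks replaced in this diff.
  F->addFnAttr(llvm::Attribute::MinSize);
  assert(F->optForMinSize() && F->optForSize());
  return 0;
}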