Diffstat (limited to 'llvm/lib')
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 5
 llvm/lib/Target/ARM/ARMISelLowering.h         | 4
 llvm/lib/Target/Mips/MipsISelLowering.cpp     | 4
 llvm/lib/Target/Mips/MipsISelLowering.h       | 4
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 2
 llvm/lib/Target/X86/X86ISelLowering.h         | 2
 7 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b03c65ccf2c..51f7d731a48 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4677,7 +4677,7 @@ SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
SDValue N1 = N->getOperand(1);
// Do we actually prefer shifts over mask?
- if (!TLI.preferShiftsToClearExtremeBits(N0))
+ if (!TLI.shouldFoldMaskToVariableShiftPair(N0))
return SDValue();
// Try to match (-1 '[outer] logical shift' y)
@@ -6850,7 +6850,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// Only fold this if the inner shift has no other uses -- if it does, folding
// this will increase the total number of instructions.
if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
- TLI.shouldFoldShiftPairToMask(N, Level)) {
+ TLI.shouldFoldConstantShiftPairToMask(N, Level)) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
if (N0C1->getAPIntValue().ult(OpSizeInBits)) {
uint64_t c1 = N0C1->getZExtValue();
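For context (not part of the patch): the visitSHL fold gated by the renamed shouldFoldConstantShiftPairToMask hook rewrites a constant shift pair into a single shift plus an AND mask. A minimal standalone sketch of that scalar equivalence, with c1 = 4 and c2 = 6 chosen here purely for illustration:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Constant shift pair: (x >> c1) << c2, with c1 = 4, c2 = 6.
uint32_t shiftPair(uint32_t x) { return (x >> 4) << 6; }

// Equivalent single shift plus mask: shift left by c2 - c1 = 2,
// then AND with -1 << c2 (0xFFFFFFC0), clearing the low 6 bits.
uint32_t shiftPlusMask(uint32_t x) { return (x << 2) & ~63u; }

int main() {
  for (uint32_t x : {0u, 1u, 0x12345678u, 0xFFFFFFFFu})
    assert(shiftPair(x) == shiftPlusMask(x));
  return 0;
}

Whether the mask form is actually a win is what the hook lets each target decide; the ARM override below, for instance, accepts the fold unconditionally outside Thumb1 and only restricts it for Thumb1.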
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index cbf47965a43..0281d68fcfd 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -10483,9 +10483,8 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
return false;
}
-bool
-ARMTargetLowering::shouldFoldShiftPairToMask(const SDNode *N,
- CombineLevel Level) const {
+bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
+ const SDNode *N, CombineLevel Level) const {
if (!Subtarget->isThumb1Only())
return true;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 97283831913..911db14e52e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -598,8 +598,8 @@ class VectorType;
bool isDesirableToCommuteWithShift(const SDNode *N,
CombineLevel Level) const override;
- bool shouldFoldShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+ CombineLevel Level) const override;
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
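The ARM overrides above (and the Mips and X86 ones below) are declared with override, so the base-class virtuals in llvm/include/llvm/CodeGen/TargetLowering.h must carry the new names as well; that header is outside this llvm/lib-limited diffstat. A sketch of what the renamed declarations presumably look like there; the comments and default return values are assumptions inferred from the call sites, not taken from the patch:

// Assumed fragment of llvm/include/llvm/CodeGen/TargetLowering.h.
// Consulted by the visitSHL hunk above before turning
// (shl (srl x, c1), c2) into one shift plus a mask;
// returning false keeps the constant shift pair.
virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                               CombineLevel Level) const {
  return true;   // assumed default: the mask form is generally fine
}

// Consulted by unfoldExtremeBitClearingToShifts above before rewriting
// 'x & (-1 logically shifted by y)' into two variable shifts;
// returning true requests the two-shift form.
virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
  return false;  // assumed default: keep the mask
}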
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index f942d3f97ae..99de079788e 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -1190,8 +1190,8 @@ bool MipsTargetLowering::isCheapToSpeculateCtlz() const {
return Subtarget.hasMips32();
}
-bool MipsTargetLowering::shouldFoldShiftPairToMask(const SDNode *N,
- CombineLevel Level) const {
+bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
+ const SDNode *N, CombineLevel Level) const {
if (N->getOperand(0).getValueType().isVector())
return false;
return true;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 431387b5ed0..b93e2c31acb 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -284,8 +284,8 @@ class TargetRegisterClass;
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
- bool shouldFoldShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+ CombineLevel Level) const override;
/// Return the register type for a given MVT, ensuring vectors are treated
/// as a series of gpr sized integers.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e379c350ee2..f51d3570405 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5007,7 +5007,7 @@ bool X86TargetLowering::hasAndNot(SDValue Y) const {
return Subtarget.hasSSE2();
}
-bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const {
+bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
EVT VT = Y.getValueType();
// For vectors, we don't have a preference, but we probably want a mask.
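For context (not part of the patch): shouldFoldMaskToVariableShiftPair is the hook consulted by unfoldExtremeBitClearingToShifts in the DAGCombiner hunk at the top of this diff. A minimal standalone sketch of the scalar equivalence it arbitrates for the low-bit case (the high-bit case uses the opposite shift directions):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Mask form: x & (-1 << y) clears the low y bits.
uint32_t clearLowWithMask(uint32_t x, unsigned y) { return x & (~0u << y); }

// Variable shift pair the combiner can unfold that into when the
// hook returns true: shift the low bits out, then back in.
uint32_t clearLowWithShifts(uint32_t x, unsigned y) { return (x >> y) << y; }

int main() {
  for (uint32_t x : {0u, 0x12345678u, 0xFFFFFFFFu})
    for (unsigned y = 0; y < 32; ++y)   // y == 32 would be UB in both forms
      assert(clearLowWithMask(x, y) == clearLowWithShifts(x, y));
  return 0;
}

The context line above suggests that for vector types X86 keeps the mask; the scalar policy lives in the part of the function not shown in this hunk.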
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 193d04094e2..1742e743f05 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -814,7 +814,7 @@ namespace llvm {
bool hasAndNot(SDValue Y) const override;
- bool preferShiftsToClearExtremeBits(SDValue Y) const override;
+ bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;
bool
shouldTransformSignedTruncationCheck(EVT XVT,