Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 35
1 file changed, 15 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 771dfdbc9df..2e6612127a3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4821,7 +4821,7 @@ static bool getTargetShuffleMaskIndices(SDValue MaskNode,
// We can always decode if the buildvector is all zero constants,
// but can't use isBuildVectorAllZeros as it might contain UNDEFs.
- if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
+ if (all_of(MaskNode->ops(), X86::isZeroNode)) {
RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
return true;
}
@@ -5087,7 +5087,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
// Check if we're getting a shuffle mask with zero'd elements.
if (!AllowSentinelZero)
- if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
+ if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
@@ -5197,11 +5197,10 @@ static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
return false;
int NumElts = Mask.size();
- bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
+ bool Op0InUse = any_of(Mask, [NumElts](int Idx) {
return 0 <= Idx && Idx < NumElts;
});
- bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
- [NumElts](int Idx) { return NumElts <= Idx; });
+ bool Op1InUse = any_of(Mask, [NumElts](int Idx) { return NumElts <= Idx; });
Op0 = Op0InUse ? Ops[0] : SDValue();
Op1 = Op1InUse ? Ops[1] : SDValue();
@@ -10352,8 +10351,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
// with a pack.
SDValue V = V1;
- int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
- int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
+ std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0)
(i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
@@ -10364,10 +10363,8 @@ static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Check if any of the odd lanes in the v16i8 are used. If not, we can mask
// them out and avoid using UNPCK{L,H} to extract the elements of V as
// i16s.
- if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
- [](int M) { return M >= 0 && M % 2 == 1; }) &&
- std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
- [](int M) { return M >= 0 && M % 2 == 1; })) {
+ if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
+ none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
@@ -14338,9 +14335,8 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
Opcode == X86ISD::CMPP);
};
- if (IsPackableComparison(In) ||
- (In.getOpcode() == ISD::CONCAT_VECTORS &&
- std::all_of(In->op_begin(), In->op_end(), IsPackableComparison))) {
+ if (IsPackableComparison(In) || (In.getOpcode() == ISD::CONCAT_VECTORS &&
+ all_of(In->ops(), IsPackableComparison))) {
if (SDValue V = truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget))
return V;
}
@@ -25282,7 +25278,7 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
return false;
bool MaskContainsZeros =
- llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
+ any_of(Mask, [](int M) { return M == SM_SentinelZero; });
// If we have a single input shuffle with different shuffle patterns in the
// the 128-bit lanes use the variable mask to VPERMILPS.
@@ -25578,11 +25574,11 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
}
// Handle the all undef/zero cases early.
- if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
+ if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
return true;
}
- if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
+ if (all_of(Mask, [](int Idx) { return Idx < 0; })) {
// TODO - should we handle the mixed zero/undef case as well? Just returning
// a zero mask will lose information on undef elements possibly reducing
// future combine possibilities.
@@ -25596,8 +25592,7 @@ static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
for (int i = 0, e = Ops.size(); i < e; ++i) {
int lo = UsedOps.size() * MaskWidth;
int hi = lo + MaskWidth;
- if (std::any_of(Mask.begin(), Mask.end(),
- [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
+ if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
UsedOps.push_back(Ops[i]);
continue;
}
@@ -30124,7 +30119,7 @@ static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
MVT InSVT = InVT.getScalarType();
assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
- llvm::TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
+ TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
"Expected comparison result to be zero/all bits");
// Check we have a truncation suited for PACKSS.