summary refs log tree commit diff stats
path: root/llvm/lib/Target/X86
diff options
context:
space:
mode:
authorSimon Pilgrim <llvm-dev@redking.me.uk>2018-10-13 14:28:40 +0000
committerSimon Pilgrim <llvm-dev@redking.me.uk>2018-10-13 14:28:40 +0000
commita03379527af1c017d19bb0a232c928d8e675ec0b (patch)
treebc385820d8c26f6f6b81c855e01ba73ea9b08866 /llvm/lib/Target/X86
parent10434cbae131c770a4e794b14c8e72005539aec9 (diff)
downloadbcm5719-llvm-a03379527af1c017d19bb0a232c928d8e675ec0b.tar.gz
bcm5719-llvm-a03379527af1c017d19bb0a232c928d8e675ec0b.zip
[X86] Pull out target constant splat helper function. NFCI.
The code in LowerScalarImmediateShift is just a more powerful version of ISD::isConstantSplatVector. llvm-svn: 344451
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r-- llvm/lib/Target/X86/X86ISelLowering.cpp | 44
1 file changed, 27 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 835e272f52b..d6699c6e678 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5830,6 +5830,30 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
return false;
}
+static bool isConstantSplat(SDValue Op, APInt &SplatVal) {
+ APInt UndefElts;
+ SmallVector<APInt, 16> EltBits;
+ if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
+ UndefElts, EltBits, true, false)) {
+ int SplatIndex = -1;
+ for (int i = 0, e = EltBits.size(); i != e; ++i) {
+ if (UndefElts[i])
+ continue;
+ if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
+ SplatIndex = -1;
+ break;
+ }
+ SplatIndex = i;
+ }
+ if (0 <= SplatIndex) {
+ SplatVal = EltBits[SplatIndex];
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool getTargetShuffleMaskIndices(SDValue MaskNode,
unsigned MaskEltSizeInBits,
SmallVectorImpl<uint64_t> &RawMask) {
@@ -23600,7 +23624,6 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
- unsigned EltSizeInBits = VT.getScalarSizeInBits();
unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
@@ -23644,24 +23667,11 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
};
// Optimize shl/srl/sra with constant shift amount.
- APInt UndefElts;
- SmallVector<APInt, 8> EltBits;
- if (!getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits,
- true, false))
- return SDValue();
-
- int SplatIndex = -1;
- for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
- if (UndefElts[i])
- continue;
- if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex])
- return SDValue();
- SplatIndex = i;
- }
- if (SplatIndex < 0)
+ APInt APIntShiftAmt;
+ if (!isConstantSplat(Amt, APIntShiftAmt))
return SDValue();
+ uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
- uint64_t ShiftAmt = EltBits[SplatIndex].getZExtValue();
if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
OpenPOWER on IntegriCloud