| field | value | date |
|---|---|---|
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-07-03 15:46:08 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-07-03 15:46:08 +0000 |
| commit | 783dbe402f1ff8b3d37577a95b92ab68956b7dc1 (patch) | |
| tree | 568b08a8989d5f38e84e1f7cdd0ed0e796288ad9 /llvm/lib | |
| parent | 4e225deab4de4c0c9ca353e611b1a5b1c7d89ec2 (diff) | |
| download | bcm5719-llvm-783dbe402f1ff8b3d37577a95b92ab68956b7dc1.tar.gz bcm5719-llvm-783dbe402f1ff8b3d37577a95b92ab68956b7dc1.zip | |
[X86][AVX] combineX86ShufflesRecursively - peek through extract_subvector
If we have more than 2 shuffle ops to combine, try to use combineX86ShuffleChainWithExtract to see if some are from the same super vector.
llvm-svn: 365050
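The idea behind peeking through extract_subvector: when the accumulated shuffle has more than two operands but several of them are subvector extracts of the same wider vector, the shuffle can be re-expressed as a single mask over that wide source instead of giving up. The standalone C++ sketch below illustrates that remapping on plain index vectors; the `Extract` struct and `foldExtractsIntoWideMask` helper are illustrative names only, not LLVM's API.

```cpp
// Minimal sketch: remap a shuffle mask over several narrow operands, each an
// extract of the same wide source, into one mask over that wide source.
#include <cassert>
#include <cstdio>
#include <vector>

struct Extract {
  int SourceId; // which wide source vector this operand was extracted from
  int Offset;   // first element of the extracted subvector
};

static std::vector<int>
foldExtractsIntoWideMask(const std::vector<int> &Mask,
                         const std::vector<Extract> &Ops, int NarrowNumElts) {
  std::vector<int> WideMask;
  WideMask.reserve(Mask.size());
  for (int M : Mask) {
    if (M < 0) { // undef lane stays undef
      WideMask.push_back(-1);
      continue;
    }
    int OpIdx = M / NarrowNumElts; // which narrow operand the lane reads
    int Lane = M % NarrowNumElts;  // lane inside that operand
    assert(Ops[OpIdx].SourceId == Ops[0].SourceId &&
           "all operands must come from the same wide source");
    WideMask.push_back(Ops[OpIdx].Offset + Lane); // lane in the wide source
  }
  return WideMask;
}

int main() {
  // Two v4 operands, both extracted from the same v8 source, at offsets 4 and 0.
  std::vector<Extract> Ops = {{/*SourceId=*/0, /*Offset=*/4},
                              {/*SourceId=*/0, /*Offset=*/0}};
  // Narrow mask mixing lanes of both operands: <0, 5, 2, 7>.
  std::vector<int> Mask = {0, 5, 2, 7};
  std::vector<int> Wide = foldExtractsIntoWideMask(Mask, Ops, /*NarrowNumElts=*/4);
  for (int M : Wide)
    std::printf("%d ", M); // prints: 4 1 6 3 (a single shuffle of the v8 source)
  std::printf("\n");
  return 0;
}
```

In the patch itself, that retry is what the new fallback to combineX86ShuffleChainWithExtract provides once the plain chain combine has declined the more-than-two-operand case.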
Diffstat (limited to 'llvm/lib')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 45 |

1 file changed, 25 insertions(+), 20 deletions(-)
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3e75756cffd..fc264a74975 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -32738,29 +32738,34 @@ static SDValue combineX86ShufflesRecursively(
     return Cst;
 
   // We can only combine unary and binary shuffle mask cases.
-  if (Ops.size() > 2)
-    return SDValue();
-
-  // Minor canonicalization of the accumulated shuffle mask to make it easier
-  // to match below. All this does is detect masks with sequential pairs of
-  // elements, and shrink them to the half-width mask. It does this in a loop
-  // so it will reduce the size of the mask to the minimal width mask which
-  // performs an equivalent shuffle.
-  SmallVector<int, 64> WidenedMask;
-  while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
-    Mask = std::move(WidenedMask);
-  }
+  if (Ops.size() <= 2) {
+    // Minor canonicalization of the accumulated shuffle mask to make it easier
+    // to match below. All this does is detect masks with sequential pairs of
+    // elements, and shrink them to the half-width mask. It does this in a loop
+    // so it will reduce the size of the mask to the minimal width mask which
+    // performs an equivalent shuffle.
+    SmallVector<int, 64> WidenedMask;
+    while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
+      Mask = std::move(WidenedMask);
+    }
+
+    // Canonicalization of binary shuffle masks to improve pattern matching by
+    // commuting the inputs.
+    if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
+      ShuffleVectorSDNode::commuteMask(Mask);
+      std::swap(Ops[0], Ops[1]);
+    }
 
-  // Canonicalization of binary shuffle masks to improve pattern matching by
-  // commuting the inputs.
-  if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
-    ShuffleVectorSDNode::commuteMask(Mask);
-    std::swap(Ops[0], Ops[1]);
+    // Finally, try to combine into a single shuffle instruction.
+    return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
+                                  AllowVariableMask, DAG, Subtarget);
   }
 
-  // Finally, try to combine into a single shuffle instruction.
-  return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
-                                AllowVariableMask, DAG, Subtarget);
+  // If that failed and any input is extracted then try to combine as a
+  // shuffle with the larger type.
+  return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
+                                           HasVariableMask, AllowVariableMask,
+                                           DAG, Subtarget);
 }
 
 /// Helper entry wrapper to combineX86ShufflesRecursively.
```
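For reference, here is a minimal standalone sketch of the mask canonicalization the moved comment describes: masks made of sequential pairs of elements are shrunk to half width, in a loop, until no further widening is possible. This is a simplified model, not LLVM's canWidenShuffleElements; the `widenShuffleMask` helper is a hypothetical name, and it only accepts fully-defined or fully-undef pairs, which is stricter than the real helper.

```cpp
// Sketch of the "shrink sequential pairs to half width" canonicalization.
#include <cstdio>
#include <vector>

// Try to turn a mask of N elements into an equivalent mask of N/2 wider
// elements. Succeeds only if every pair (2*i, 2*i+1) selects an even-aligned,
// consecutive pair from the source, or is entirely undef.
static bool widenShuffleMask(const std::vector<int> &Mask,
                             std::vector<int> &Widened) {
  if (Mask.size() % 2 != 0)
    return false;
  Widened.clear();
  for (size_t I = 0; I + 1 < Mask.size(); I += 2) {
    int Lo = Mask[I], Hi = Mask[I + 1];
    if (Lo < 0 && Hi < 0) { // a fully undef pair widens to an undef lane
      Widened.push_back(-1);
      continue;
    }
    // Require an even-aligned consecutive pair, e.g. <2,3> but not <1,2>.
    if (Lo < 0 || Hi != Lo + 1 || (Lo % 2) != 0)
      return false;
    Widened.push_back(Lo / 2);
  }
  return true;
}

int main() {
  // <4,5,6,7,0,1,2,3> widens to <2,3,0,1>, then again to <1,0>, then stops.
  std::vector<int> Mask = {4, 5, 6, 7, 0, 1, 2, 3};
  std::vector<int> Widened;
  while (Mask.size() > 1 && widenShuffleMask(Mask, Widened))
    Mask = std::move(Widened);
  for (int M : Mask)
    std::printf("%d ", M); // prints: 1 0
  std::printf("\n");
  return 0;
}
```

The loop structure mirrors the one moved inside the `Ops.size() <= 2` block above: keep rewriting the mask at a wider element width until the widening check fails, leaving the minimal-width mask for the chain combine to match.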

