diff options
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 20
-rw-r--r-- | llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll | 16
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index b3ae9d767ef..8be2ff4c246 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -5769,6 +5769,26 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask, Ops.push_back(IsAndN ? N1 : N0); return true; } + case X86ISD::PINSRW: { + // Attempt to recognise a PINSRW(ASSERTZEXT(PEXTRW)) shuffle pattern. + // TODO: Expand this to support PINSRB/INSERT_VECTOR_ELT/etc. + SDValue InVec = N.getOperand(0); + SDValue InScl = N.getOperand(1); + uint64_t InIdx = N.getConstantOperandVal(2); + assert(0 <= InIdx && InIdx < NumElts && "Illegal insertion index"); + if (InScl.getOpcode() != ISD::AssertZext || + InScl.getOperand(0).getOpcode() != X86ISD::PEXTRW) + return false; + + SDValue ExVec = InScl.getOperand(0).getOperand(0); + uint64_t ExIdx = InScl.getOperand(0).getConstantOperandVal(1); + assert(0 <= ExIdx && ExIdx < NumElts && "Illegal extraction index"); + Ops.push_back(InVec); + Ops.push_back(ExVec); + for (unsigned i = 0; i != NumElts; ++i) + Mask.push_back(i == InIdx ? NumElts + ExIdx : i); + return true; + } case X86ISD::VSHLI: case X86ISD::VSRLI: { uint64_t ShiftVal = N.getConstantOperandVal(1); diff --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll index 156348f37dc..9256717f155 100644 --- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll +++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll @@ -94,21 +94,7 @@ define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind { ; ; AVX-LABEL: _clearupper8xi16a: ; AVX: # BB#0: -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: vpextrw $2, %xmm0, %ecx -; AVX-NEXT: vpextrw $3, %xmm0, %edx -; AVX-NEXT: vpextrw $4, %xmm0, %esi -; AVX-NEXT: vpextrw $5, %xmm0, %edi -; AVX-NEXT: vpextrw $6, %xmm0, %r8d -; AVX-NEXT: vpextrw $7, %xmm0, %r9d -; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $3, %edx, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $4, %esi, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $6, %r8d, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: retq %x0 = extractelement <8 x i16> %0, i32 0 %x1 = extractelement <8 x i16> %0, i32 1