author | Craig Topper <craig.topper@gmail.com> | 2017-02-11 22:57:12 +0000
---|---|---
committer | Craig Topper <craig.topper@gmail.com> | 2017-02-11 22:57:12 +0000
commit | 1c37e991e66592f31338b7b59f76ca7652da2aa3 |
tree | dcb40b7a65e08b6afd5ee4f126dffba503314816 /llvm/lib |
parent | b633adedc75c0e450cef812dabe55dd0ee19040b |
[X86] Move code for using blendi for insert_subvector out to an isel pattern. This gives the DAG combiner more opportunity to optimize without needing to dig through the blend.
llvm-svn: 294876
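
As a hedged illustration (not part of the commit), the transform in question fires on an insert_subvector of a 128-bit value into the low half of a 256-bit vector. The IR below is a hypothetical example with invented function and value names; with the new TableGen patterns, isel can select a vblendps with immediate 0xf for it instead of a vinsertf128:

```llvm
; Hypothetical example (not from the commit): insert a <4 x float> into the
; low half of an <8 x float>. All names here are invented for illustration.
define <8 x float> @insert_low_half(<8 x float> %a, <4 x float> %b) {
  ; Widen %b to 256 bits, leaving the upper half undef.
  %wide = shufflevector <4 x float> %b, <4 x float> undef,
          <8 x i32> <i32 0, i32 1, i32 2, i32 3,
                     i32 undef, i32 undef, i32 undef, i32 undef>
  ; Low four lanes from %wide (concat indices 8-11), high four from %a (4-7).
  %r = shufflevector <8 x float> %a, <8 x float> %wide,
       <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %r
}
```

Matching this at isel time rather than building an X86ISD::BLENDI node during DAG combine leaves the plain INSERT_SUBVECTOR visible to later combines, which is the motivation stated in the commit message above.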
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 27
-rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 53

2 files changed, 53 insertions(+), 27 deletions(-)
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e7417b75ae7..0de7d1c173c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34152,33 +34152,6 @@ static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
   MVT OpVT = N->getSimpleValueType(0);
   MVT SubVecVT = SubVec.getSimpleValueType();
 
-  // For insertion into the zero index (low half) of a 256-bit vector, it is
-  // more efficient to generate a blend with immediate instead of an insert*128.
-  // We are still creating an INSERT_SUBVECTOR below with an undef node to
-  // extend the subvector to the size of the result vector. Make sure that
-  // we are not recursing on that node by checking for undef here.
-  if (IdxVal == 0 && OpVT.is256BitVector() && !Vec.isUndef()) {
-    SDValue Vec256 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
-                                 DAG.getUNDEF(OpVT), SubVec, N->getOperand(2));
-
-    // Integers must be cast to 32-bit because there is only vpblendd;
-    // vpblendw can't be used for this because it has a handicapped mask.
-    // If we don't have AVX2, then cast to float. Using a wrong domain blend
-    // is still more efficient than using the wrong domain vinsertf128 that
-    // will be created by InsertSubVector().
-    MVT CastVT = OpVT;
-    if (OpVT.isInteger())
-      CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
-
-    // The blend instruction, and therefore its mask, depend on the data type.
-    unsigned MaskVal = CastVT.getScalarSizeInBits() == 64 ? 0x03 : 0x0f;
-    SDValue Mask = DAG.getConstant(MaskVal, dl, MVT::i8);
-    Vec = DAG.getBitcast(CastVT, Vec);
-    Vec256 = DAG.getBitcast(CastVT, Vec256);
-    Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Vec, Vec256, Mask);
-    return DAG.getBitcast(OpVT, Vec256);
-  }
-
   // If we're inserting into the upper half of a 256-bit vector with a vector
   // that was extracted from the upper half of a 256-bit vector, we should
   // use a blend instead.
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index ddd7b808450..e1191309219 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -6984,6 +6984,19 @@ let Constraints = "$src1 = $dst" in {
                                   SSE_DPPD_ITINS>;
 }
 
+// For insertion into the zero index (low half) of a 256-bit vector, it is
+// more efficient to generate a blend with immediate instead of an insert*128.
+let Predicates = [HasAVX] in {
+def : Pat<(insert_subvector (v4f64 VR256:$src1), (v2f64 VR128:$src2), (iPTR 0)),
+          (VBLENDPDYrri VR256:$src1,
+                        (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0x3)>;
+def : Pat<(insert_subvector (v8f32 VR256:$src1), (v4f32 VR128:$src2), (iPTR 0)),
+          (VBLENDPSYrri VR256:$src1,
+                        (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+}
+
 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                     RegisterClass RC, X86MemOperand x86memop,
@@ -8178,6 +8191,46 @@ defm VPBLENDD : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v4i32,
 defm VPBLENDDY : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v8i32,
                                 VR256, loadv4i64, i256mem>, VEX_L;
 
+// For insertion into the zero index (low half) of a 256-bit vector, it is
+// more efficient to generate a blend with immediate instead of an insert*128.
+let Predicates = [HasAVX2] in {
+def : Pat<(insert_subvector (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR 0)),
+          (VPBLENDDYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR 0)),
+          (VPBLENDDYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR 0)),
+          (VPBLENDDYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR 0)),
+          (VPBLENDDYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+}
+
+let Predicates = [HasAVX1Only] in {
+def : Pat<(insert_subvector (v8i32 VR256:$src1), (v4i32 VR128:$src2), (iPTR 0)),
+          (VBLENDPSYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v4i64 VR256:$src1), (v2i64 VR128:$src2), (iPTR 0)),
+          (VBLENDPSYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v16i16 VR256:$src1), (v8i16 VR128:$src2), (iPTR 0)),
+          (VBLENDPSYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+def : Pat<(insert_subvector (v32i8 VR256:$src1), (v16i8 VR128:$src2), (iPTR 0)),
+          (VBLENDPSYrri VR256:$src1,
+                        (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
+                                       VR128:$src2, sub_xmm), 0xf)>;
+}
+
 //===----------------------------------------------------------------------===//
 // VPBROADCAST - Load from memory and broadcast to all elements of the
 // destination operand
```
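
The removed C++ comment explains the shape of the integer patterns above: there is no 64-bit-element integer blend, and vpblendw's immediate repeats across the two 128-bit halves, so every integer type is blended as v8i32 via vpblendd under HasAVX2; under HasAVX1Only the float-domain vblendps is used on integer data, since a wrong-domain blend is still cheaper than the wrong-domain vinsertf128 that would otherwise be emitted. A hypothetical example (names invented) under those assumptions:

```llvm
; Hypothetical IR (not from the commit): insert a <2 x i64> into the low
; half of a <4 x i64>. Under the HasAVX2 patterns this can select to
; vpblendd with mask 0xf (four dwords = the low 128 bits); under
; HasAVX1Only it falls back to vblendps, accepting the domain crossing.
define <4 x i64> @insert_low_i64(<4 x i64> %a, <2 x i64> %b) {
  ; Widen %b to 256 bits, leaving the upper half undef.
  %wide = shufflevector <2 x i64> %b, <2 x i64> undef,
          <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ; Low two lanes from %wide (concat indices 4-5), high two from %a (2-3).
  %r = shufflevector <4 x i64> %a, <4 x i64> %wide,
       <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x i64> %r
}
```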