Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  11
1 file changed, 0 insertions, 11 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 73976f30374..7b4ce08b578 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1287,7 +1287,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
-      setOperationAction(ISD::STORE, VT, Custom);
     }
 
     if (HasInt256)
@@ -21081,17 +21080,7 @@ static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
   if (St->isTruncatingStore())
     return SDValue();
 
-  // If this is a 256-bit store of concatenated ops, we are better off splitting
-  // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
-  // and each half can execute independently. Some cores would split the op into
-  // halves anyway, so the concat (vinsertf128) is purely an extra op.
   MVT StoreVT = StoredVal.getSimpleValueType();
-  if (StoreVT.is256BitVector()) {
-    if (StoredVal.getOpcode() != ISD::CONCAT_VECTORS || !StoredVal.hasOneUse())
-      return SDValue();
-    return split256BitStore(St, DAG);
-  }
-
   assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
          "Unexpected VT");
   if (DAG.getTargetLoweringInfo().getTypeAction(*DAG.getContext(), StoreVT) !=
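For context, the heuristic removed here applied to 256-bit stores whose stored value was a one-use ISD::CONCAT_VECTORS; LowerStore rewrote such a store as two 128-bit stores via split256BitStore, which is defined elsewhere in this file and is not shown in the diff. The following is a minimal, hypothetical sketch of what a splitting helper of this kind could look like in SelectionDAG terms, assuming the concatenated value has exactly two 128-bit operands; the function name splitConcatStoreSketch and the exact getStore/getAlignment signatures are illustrative approximations, not the code this commit touched.

// Hypothetical sketch only: split a 256-bit store of a two-operand
// ISD::CONCAT_VECTORS into two 128-bit stores. Names and exact overloads
// are assumptions for illustration.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static SDValue splitConcatStoreSketch(StoreSDNode *St, SelectionDAG &DAG) {
  SDValue Val = St->getValue();
  assert(Val.getOpcode() == ISD::CONCAT_VECTORS && Val.getNumOperands() == 2 &&
         "Sketch assumes a concat of two 128-bit halves");
  SDLoc DL(St);

  // Reuse the concat operands directly instead of re-extracting subvectors.
  SDValue Lo = Val.getOperand(0);
  SDValue Hi = Val.getOperand(1);

  // The high half is stored at Base + 16 bytes.
  SDValue Ptr = St->getBasePtr();
  EVT PtrVT = Ptr.getValueType();
  SDValue HiPtr =
      DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(16, DL, PtrVT));

  unsigned Alignment = St->getAlignment();
  auto Flags = St->getMemOperand()->getFlags();
  SDValue Ch0 = DAG.getStore(St->getChain(), DL, Lo, Ptr,
                             St->getPointerInfo(), Alignment, Flags);
  SDValue Ch1 = DAG.getStore(St->getChain(), DL, Hi, HiPtr,
                             St->getPointerInfo().getWithOffset(16),
                             MinAlign(Alignment, 16), Flags);
  // Join the two store chains so both stores stay ordered against later users.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
}

Per the deleted comment, the motivation for such a split was that the two 128-bit halves can execute independently and the vinsertf128 that would otherwise materialize the 256-bit value becomes an unnecessary extra op; this commit removes that special case (and the corresponding Custom action for ISD::STORE on 256-bit types) from LowerStore.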

