author     Simon Pilgrim <llvm-dev@redking.me.uk>    2016-04-10 17:02:48 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2016-04-10 17:02:48 +0000
commit     d263fdc51230e7f5de74aecc6caa17f4a7b128dd (patch)
tree       9c4acfa9c049fb9266348647e38ecb136a0db729 /llvm/lib
parent     751ed0a06a9b5f667d12365c72acb2c67de16a3b (diff)
[X86][AVX512BW] Add support for v64i8 multiplies
Extend the existing lowering of vXi8 multiplies to support v64i8 on avx512bw targets.

I added the Lower512IntArith helper function to help with this - not sure how often this could be used in the future, but it seemed better than putting all that logic inside LowerMUL.

Differential Revision: http://reviews.llvm.org/D18937

llvm-svn: 265902
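For illustration only (the IR below is not taken from this commit, and the function name @mul_v64i8 is made up for the example), this is the kind of v64i8 multiply that the new lowering handles on AVX512BW targets:

define <64 x i8> @mul_v64i8(<64 x i8> %a, <64 x i8> %b) {
  %r = mul <64 x i8> %a, %b
  ret <64 x i8> %r
}

With this patch, such a multiply is split by Lower512IntArith into two v32i8 halves; each half is sign-extended to v32i16, multiplied with the legal v32i16 MUL, truncated back to v32i8, and the halves are concatenated into the v64i8 result.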
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  41
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index dc46d0e499e..0bc6de0af79 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1547,6 +1547,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SUB, MVT::v32i16, Legal);
setOperationAction(ISD::SUB, MVT::v64i8, Legal);
setOperationAction(ISD::MUL, MVT::v32i16, Legal);
+ setOperationAction(ISD::MUL, MVT::v64i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
@@ -18967,6 +18968,35 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
+/// Break a 512-bit integer operation into two new 256-bit ones and then
+/// concatenate the result back.
+static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+
+ assert(VT.is512BitVector() && VT.isInteger() &&
+ "Unsupported value type for operation");
+
+ unsigned NumElems = VT.getVectorNumElements();
+ SDLoc dl(Op);
+
+ // Extract the LHS vectors
+ SDValue LHS = Op.getOperand(0);
+ SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
+
+ // Extract the RHS vectors
+ SDValue RHS = Op.getOperand(1);
+ SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
+}
+
static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
if (Op.getValueType() == MVT::i1)
return DAG.getNode(ISD::XOR, SDLoc(Op), Op.getValueType(),
@@ -19009,10 +19039,15 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
- // Lower v16i8/v32i8 mul as sign-extension to v8i16/v16i16 vector
- // pairs, multiply and truncate.
- if (VT == MVT::v16i8 || VT == MVT::v32i8) {
+ // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
+ // vector pairs, multiply and truncate.
+ if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
if (Subtarget.hasInt256()) {
+ // For 512-bit vectors, split into 256-bit vectors to allow the
+ // sign-extension to occur.
+ if (VT == MVT::v64i8)
+ return Lower512IntArith(Op, DAG);
+
// For 256-bit vectors, split into 128-bit vectors to allow the
// sign-extension to occur. We don't need this on AVX512BW as we can
// safely sign-extend to v32i16.