Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp     |  6
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp     | 50
-rw-r--r--  llvm/lib/Target/X86/X86InstrVecCompiler.td  | 17
3 files changed, 58 insertions, 15 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index df302cadd52..bcac241bb3a 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -457,7 +457,7 @@ namespace {
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
unsigned Opcode = N->getOpcode();
if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMU ||
- Opcode == X86ISD::CMPM_RND) {
+ Opcode == X86ISD::CMPM_RND || Opcode == X86ISD::VFPCLASS) {
// We can get 256-bit 8 element types here without VLX being enabled. When
// this happens we will use 512-bit operations and the mask will not be
// zero extended.
@@ -467,6 +467,10 @@ static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
return true;
}
+ // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
+ if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
+ Opcode == X86ISD::FSETCCM_RND)
+ return true;
return false;
}
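
Taken together, the two hunks above extend isLegalMaskCompare so that VFPCLASS results are handled like the other vector mask compares (still subject to the VLX check for 256/512-bit sources), while the scalar mask producers VFPCLASSS, FSETCCM and FSETCCM_RND are always accepted. A sketch of how the predicate reads after the patch; the 256/512-bit VLX legality check that falls outside the hunk context is summarized as a comment, not reproduced:

    static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
      unsigned Opcode = N->getOpcode();
      if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMU ||
          Opcode == X86ISD::CMPM_RND || Opcode == X86ISD::VFPCLASS) {
        // ... existing 256/512-bit VLX legality check (outside the hunk
        // context above) runs here ...
        return true;
      }
      // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
      if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
          Opcode == X86ISD::FSETCCM_RND)
        return true;
      return false;
    }
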
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9225881a01f..3a89c195907 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19948,6 +19948,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
+ assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
SDValue IMask = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Mask);
if (Op.getOpcode() == X86ISD::FSETCCM ||
Op.getOpcode() == X86ISD::FSETCCM_RND ||
@@ -20417,9 +20418,11 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
SDValue FPclassMask = getVectorMaskingNode(FPclass, Mask, SDValue(),
Subtarget, DAG);
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits in the v2i1/v4i1 case.
SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
- DAG.getUNDEF(BitcastVT), FPclassMask,
- DAG.getIntPtrConstant(0, dl));
+ DAG.getConstant(0, dl, BitcastVT),
+ FPclassMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(Op.getValueType(), Res);
}
case FPCLASSS: {
@@ -20429,8 +20432,12 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
Subtarget, DAG);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, FPclassMask,
- DAG.getIntPtrConstant(0, dl));
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
+ SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
+ DAG.getConstant(0, dl, MVT::v8i1),
+ FPclassMask, DAG.getIntPtrConstant(0, dl));
+ return DAG.getBitcast(MVT::i8, Ins);
}
case CMP_MASK: {
// Comparison intrinsics with masks.
@@ -20438,7 +20445,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// (i8 (int_x86_avx512_mask_pcmpeq_q_128
// (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
// (i8 (bitcast
- // (v8i1 (insert_subvector undef,
+ // (v8i1 (insert_subvector zero,
// (v2i1 (and (PCMPEQM %a, %b),
// (extract_subvector
// (v8i1 (bitcast %mask)), 0))), 0))))
@@ -20451,9 +20458,11 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getOperand(2));
SDValue CmpMask = getVectorMaskingNode(Cmp, Mask, SDValue(),
Subtarget, DAG);
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits in the v2i1/v4i1 case.
SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
- DAG.getUNDEF(BitcastVT), CmpMask,
- DAG.getIntPtrConstant(0, dl));
+ DAG.getConstant(0, dl, BitcastVT),
+ CmpMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(Op.getValueType(), Res);
}
@@ -20497,8 +20506,12 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
Subtarget, DAG);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CmpMask,
- DAG.getIntPtrConstant(0, dl));
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
+ SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
+ DAG.getConstant(0, dl, MVT::v8i1),
+ CmpMask, DAG.getIntPtrConstant(0, dl));
+ return DAG.getBitcast(MVT::i8, Ins);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
@@ -20551,8 +20564,13 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
else
FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS,
DAG.getConstant(CondVal, dl, MVT::i8), Sae);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, FCmp,
- DAG.getIntPtrConstant(0, dl));
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
+ SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
+ DAG.getConstant(0, dl, MVT::v16i1),
+ FCmp, DAG.getIntPtrConstant(0, dl));
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
+ DAG.getBitcast(MVT::i16, Ins));
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
@@ -33382,9 +33400,13 @@ static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
SDValue FSetCC =
DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
DAG.getConstant(x86cc, DL, MVT::i8));
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
- N->getSimpleValueType(0), FSetCC,
- DAG.getIntPtrConstant(0, DL));
+ // Need to fill with zeros to ensure the bitcast will produce zeroes
+ // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
+ SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
+ DAG.getConstant(0, DL, MVT::v16i1),
+ FSetCC, DAG.getIntPtrConstant(0, DL));
+ return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
+ N->getSimpleValueType(0));
}
SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
CMP00.getValueType(), CMP00, CMP01,
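
Every hunk in this file replaces the same pattern: instead of extracting element 0 of a v1i1 result (or bitcasting a mask vector whose upper lanes are undef), the scalar mask is first inserted into an all-zeros vector, so the following bitcast to i8/i16/i32 has well-defined zero upper bits. A minimal sketch of that idiom, factored into a hypothetical helper (the name ZeroExtendScalarMask is illustrative and not part of the patch; the DAG calls are the same ones used in the hunks above):

    // Widen a v1i1 mask to WideVT by inserting it at index 0 of an all-zeros
    // vector, then bitcast the result to the integer type IntVT. Inserting
    // into zeros rather than undef is what makes the upper bits of the
    // bitcast result well defined.
    static SDValue ZeroExtendScalarMask(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Mask, MVT WideVT, MVT IntVT) {
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT,
                                DAG.getConstant(0, dl, WideVT), Mask,
                                DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(IntVT, Ins);
    }
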
diff --git a/llvm/lib/Target/X86/X86InstrVecCompiler.td b/llvm/lib/Target/X86/X86InstrVecCompiler.td
index 1aef98ba49d..db3dfe56531 100644
--- a/llvm/lib/Target/X86/X86InstrVecCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrVecCompiler.td
@@ -427,6 +427,7 @@ class maskzeroupper<ValueType vt, RegisterClass RC> :
return isMaskZeroExtended(N);
}]>;
+def maskzeroupperv1i1 : maskzeroupper<v1i1, VK1>;
def maskzeroupperv2i1 : maskzeroupper<v2i1, VK2>;
def maskzeroupperv4i1 : maskzeroupper<v4i1, VK4>;
def maskzeroupperv8i1 : maskzeroupper<v8i1, VK8>;
@@ -438,11 +439,18 @@ def maskzeroupperv32i1 : maskzeroupper<v32i1, VK32>;
// zeroing.
let Predicates = [HasBWI] in {
def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
+ maskzeroupperv1i1:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK1:$src, VK32)>;
+ def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
maskzeroupperv8i1:$src, (iPTR 0))),
(COPY_TO_REGCLASS VK8:$src, VK32)>;
def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
maskzeroupperv16i1:$src, (iPTR 0))),
(COPY_TO_REGCLASS VK16:$src, VK32)>;
+
+ def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
+ maskzeroupperv1i1:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK1:$src, VK64)>;
def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
maskzeroupperv8i1:$src, (iPTR 0))),
(COPY_TO_REGCLASS VK8:$src, VK64)>;
@@ -456,10 +464,19 @@ let Predicates = [HasBWI] in {
let Predicates = [HasAVX512] in {
def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
+ maskzeroupperv1i1:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK1:$src, VK16)>;
+ def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
maskzeroupperv8i1:$src, (iPTR 0))),
(COPY_TO_REGCLASS VK8:$src, VK16)>;
}
+let Predicates = [HasDQI] in {
+ def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
+ maskzeroupperv1i1:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS VK1:$src, VK8)>;
+}
+
let Predicates = [HasVLX, HasDQI] in {
def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
maskzeroupperv2i1:$src, (iPTR 0))),
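
The new maskzeroupperv1i1 patterns let the insert_subvector-into-zeros nodes produced by the lowering changes fold away: when isMaskZeroExtended proves that the producer (FSETCCM, FSETCCM_RND or VFPCLASSS, per the X86ISelDAGToDAG.cpp hunk) already zero-extends its k-register result, the widening is selected as a plain COPY_TO_REGCLASS. A small user-level example that exercises the FPCLASSS path; the intrinsic and immediate come from AVX-512DQ (build with something like -mavx512dq), and the exact generated assembly is not asserted here:

    #include <immintrin.h>

    // Classify the low double of x. Bits of the immediate select categories;
    // 0x18 tests for positive or negative infinity. The result is an 8-bit
    // mask whose upper 7 bits the lowering above arranges to be zero without
    // any extra masking instructions.
    __mmask8 low_is_infinite(__m128d x) {
      return _mm_fpclass_sd_mask(x, 0x18);
    }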