-rw-r--r--  llvm/lib/Target/X86/X86CallingConv.td | 42
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 7
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 115
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 132
-rw-r--r--  llvm/lib/Target/X86/X86InstrFragmentsSIMD.td | 6
-rw-r--r--  llvm/lib/Target/X86/X86RegisterInfo.td | 4
-rw-r--r--  llvm/test/CodeGen/X86/avx512-cmp.ll | 81
-rw-r--r--  llvm/test/CodeGen/X86/avx512-cvt.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/avx512-ext.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/avx512-fsel.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/avx512-i1test.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/avx512-insert-extract.ll | 220
-rw-r--r--  llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 72
-rw-r--r--  llvm/test/CodeGen/X86/avx512-intrinsics.ll | 281
-rw-r--r--  llvm/test/CodeGen/X86/avx512-load-store.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/avx512-mask-bugfix.ll | 57
-rw-r--r--  llvm/test/CodeGen/X86/avx512-mask-op.ll | 151
-rw-r--r--  llvm/test/CodeGen/X86/avx512-memfold.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/avx512-scalar_mask.ll | 14
-rw-r--r--  llvm/test/CodeGen/X86/avx512-select.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll | 7
-rw-r--r--  llvm/test/CodeGen/X86/avx512dq-intrinsics.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/avx512er-intrinsics.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-load-i1.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/fma-fneg-combine.ll | 5
-rw-r--r--  llvm/test/CodeGen/X86/masked_gather_scatter.ll | 34
-rw-r--r--  llvm/test/CodeGen/X86/pr27591.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/pr28173.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/pr32241.ll | 68
-rw-r--r--  llvm/test/CodeGen/X86/pr32256.ll | 36
-rw-r--r--  llvm/test/CodeGen/X86/pr32284.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/pr32451.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/xmulo.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/xor-select-i1-combine.ll | 6
37 files changed, 608 insertions, 933 deletions
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 6781d761a1c..7d146d050a5 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -73,8 +73,8 @@ def CC_#NAME : CallingConv<[
CCIfSubtarget<"is64Bit()", CCIfByVal<CCPassByVal<8, 8>>>,
CCIfByVal<CCPassByVal<4, 4>>,
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// Promote v8i1/v16i1/v32i1 arguments to i32.
CCIfType<[v8i1, v16i1, v32i1], CCPromoteToType<i32>>,
@@ -146,8 +146,8 @@ def CC_#NAME : CallingConv<[
]>;
def RetCC_#NAME : CallingConv<[
- // Promote i1, v8i1 arguments to i8.
- CCIfType<[i1, v8i1], CCPromoteToType<i8>>,
+ // Promote i1, v1i1, v8i1 arguments to i8.
+ CCIfType<[i1, v1i1, v8i1], CCPromoteToType<i8>>,
// Promote v16i1 arguments to i16.
CCIfType<[v16i1], CCPromoteToType<i16>>,
@@ -207,6 +207,7 @@ def RetCC_X86Common : CallingConv<[
//
// For code that doesn't care about the ABI, we allow returning more than two
// integer values in registers.
+ CCIfType<[v1i1], CCPromoteToType<i8>>,
CCIfType<[i1], CCPromoteToType<i8>>,
CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
@@ -375,6 +376,7 @@ def RetCC_X86_64_Swift : CallingConv<[
CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
// For integers, ECX, R8D can be used as extra return registers.
+ CCIfType<[v1i1], CCPromoteToType<i8>>,
CCIfType<[i1], CCPromoteToType<i8>>,
CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
@@ -485,8 +487,8 @@ def CC_X86_64_C : CallingConv<[
// Handles byval parameters.
CCIfByVal<CCPassByVal<8, 8>>,
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// The 'nest' parameter, if any, is passed in R10.
CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
@@ -584,8 +586,8 @@ def CC_X86_Win64_C : CallingConv<[
// FIXME: Handle byval stuff.
// FIXME: Handle varargs.
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// The 'nest' parameter, if any, is passed in R10.
CCIfNest<CCAssignToReg<[R10]>>,
@@ -796,8 +798,8 @@ def CC_X86_32_Common : CallingConv<[
]>;
def CC_X86_32_C : CallingConv<[
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// The 'nest' parameter, if any, is passed in ECX.
CCIfNest<CCAssignToReg<[ECX]>>,
@@ -816,8 +818,8 @@ def CC_X86_32_MCU : CallingConv<[
// puts arguments in registers.
CCIfByVal<CCPassByVal<4, 4>>,
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// If the call is not a vararg call, some arguments may be passed
// in integer registers.
@@ -828,8 +830,8 @@ def CC_X86_32_MCU : CallingConv<[
]>;
def CC_X86_32_FastCall : CallingConv<[
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// The 'nest' parameter, if any, is passed in EAX.
CCIfNest<CCAssignToReg<[EAX]>>,
@@ -858,15 +860,15 @@ def CC_X86_32_ThisCall_Common : CallingConv<[
]>;
def CC_X86_32_ThisCall_Mingw : CallingConv<[
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;
def CC_X86_32_ThisCall_Win : CallingConv<[
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// Pass sret arguments indirectly through stack.
CCIfSRet<CCAssignToStack<4, 4>>,
@@ -885,8 +887,8 @@ def CC_X86_32_FastCC : CallingConv<[
// puts arguments in registers.
CCIfByVal<CCPassByVal<4, 4>>,
- // Promote i1/i8/i16 arguments to i32.
- CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Promote i1/i8/i16/v1i1 arguments to i32.
+ CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
// The 'nest' parameter, if any, is passed in EAX.
CCIfNest<CCAssignToReg<[EAX]>>,
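
Taken together, the hunks above fold v1i1 into the existing i1/i8/i16 promotion rule, so a one-element mask argument occupies a full 32-bit register slot like any other small integer. A minimal standalone sketch of the resulting contract (plain C++, not LLVM code; the helper name is hypothetical):

    #include <cassert>
    #include <cstdint>

    // Sketch of the promote-to-i32 contract: the caller widens the 1-bit
    // value into a 32-bit register slot, and the callee may only rely on
    // bit 0 (the upper bits are whatever the extension left there).
    static bool recoverPromotedBool(uint32_t regSlot) {
      return (regSlot & 1u) != 0;
    }

    int main() {
      assert(recoverPromotedBool(1u));           // true, zero-extended
      assert(!recoverPromotedBool(0xFFFFFFFEu)); // bit 0 clear, rest ignored
      return 0;
    }
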
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index fc3b4836c17..3cfb924abd0 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3647,13 +3647,6 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
- if (Subtarget->hasAVX512()) {
- // Need to copy to a VK1 register.
- unsigned ResultReg = createResultReg(&X86::VK1RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), ResultReg).addReg(SrcReg);
- return ResultReg;
- }
case MVT::i8:
return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
X86::sub_8bit);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 157616f6aff..37b248416e4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1140,7 +1140,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
- addRegisterClass(MVT::i1, &X86::VK1RegClass);
+ addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
@@ -1155,16 +1155,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
}
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::SETCC, MVT::i1, Custom);
- setOperationAction(ISD::SETCCE, MVT::i1, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
- setOperationAction(ISD::XOR, MVT::i1, Legal);
- setOperationAction(ISD::OR, MVT::i1, Legal);
- setOperationAction(ISD::AND, MVT::i1, Legal);
- setOperationAction(ISD::SUB, MVT::i1, Custom);
- setOperationAction(ISD::ADD, MVT::i1, Custom);
- setOperationAction(ISD::MUL, MVT::i1, Custom);
for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16,
MVT::v16i8, MVT::v16i16, MVT::v32i8, MVT::v16i32,
@@ -1233,7 +1223,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MSTORE, VT, Custom);
}
}
- setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
@@ -1311,7 +1300,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
@@ -1699,7 +1690,7 @@ EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext& Context,
EVT VT) const {
if (!VT.isVector())
- return Subtarget.hasAVX512() ? MVT::i1: MVT::i8;
+ return MVT::i8;
if (VT.isSimple()) {
MVT VVT = VT.getSimpleVT();
@@ -2480,6 +2471,9 @@ static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
SelectionDAG &DAG) {
SDValue ValReturned = ValArg;
+ if (ValVT == MVT::v1i1)
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
+
if (ValVT == MVT::v64i1) {
// In 32 bit machine, this case is handled by getv64i1Argument
assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
@@ -2502,7 +2496,6 @@ static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
}
-
return DAG.getBitcast(ValVT, ValReturned);
}
@@ -2809,8 +2802,11 @@ X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
- return ExtendedInMem ? DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val)
- : Val;
+ return ExtendedInMem
+ ? (VA.getValVT().isVector()
+ ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
+ : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
+ : Val;
}
// FIXME: Get this from tablegen.
@@ -2960,7 +2956,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
RC = &X86::VR64RegClass;
- else if (RegVT == MVT::i1)
+ else if (RegVT == MVT::v1i1)
RC = &X86::VK1RegClass;
else if (RegVT == MVT::v8i1)
RC = &X86::VK8RegClass;
@@ -6871,7 +6867,7 @@ static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (!In.isUndef())
- Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
+ Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
}
SDLoc dl(Op);
MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
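
This hunk and the next one add the `& 0x1` mask because SelectionDAG may materialize an i1 "true" as a sign-extended all-ones constant; shifting the raw value would set bits beyond the element's own position in the packed immediate. A standalone C++ model of the corrected packing loop (names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Packs n boolean elements into an immediate. Each element is reduced
    // to its low bit first; a "true" stored as -1 would otherwise smear
    // set bits across every higher position after the shift.
    static uint64_t packMaskBits(const int64_t *elts, unsigned n) {
      uint64_t imm = 0;
      for (unsigned i = 0; i < n; ++i)
        imm |= (static_cast<uint64_t>(elts[i]) & 0x1) << i;
      return imm;
    }

    int main() {
      int64_t v[4] = {-1, 0, -1, 1};     // i1 true may be materialized as -1
      assert(packMaskBits(v, 4) == 0xD); // 0b1101, one bit per element
      return 0;
    }
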
@@ -6914,7 +6910,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
if (!isa<ConstantSDNode>(In))
NonConstIdx.push_back(idx);
else {
- Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
+ Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
HasConstElts = true;
}
if (SplatIdx < 0)
@@ -13946,7 +13942,6 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
SDValue Idx = Op.getOperand(1);
MVT EltVT = Op.getSimpleValueType();
- assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
"Unexpected vector type in ExtractBitFromMaskVector");
@@ -13980,8 +13975,8 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
DAG.getConstant(MaxSift, dl, MVT::i8));
- return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
- DAG.getIntPtrConstant(0, dl));
+ return DAG.getNode(X86ISD::VEXTRACT, dl, Op.getSimpleValueType(), Vec,
+ DAG.getIntPtrConstant(0, dl));
}
SDValue
@@ -13992,7 +13987,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
- if (Op.getSimpleValueType() == MVT::i1)
+ if (VecVT.getVectorElementType() == MVT::i1)
return ExtractBitFromMaskVector(Op, DAG);
if (!isa<ConstantSDNode>(Idx)) {
@@ -14163,10 +14158,13 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
return EltInVec;
}
- // Insertion of one bit into first or last position
- // can be done with two SHIFTs + OR.
+ // Insertion of one bit into first position
if (IdxVal == 0 ) {
- // EltInVec already at correct index and other bits are 0.
+ // Clean top bits of vector.
+ EltInVec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, EltInVec,
+ DAG.getConstant(NumElems - 1, dl, MVT::i8));
+ EltInVec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, EltInVec,
+ DAG.getConstant(NumElems - 1, dl, MVT::i8));
// Clean the first bit in source vector.
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
DAG.getConstant(1 , dl, MVT::i8));
@@ -14175,6 +14173,7 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
+ // Insertion of one bit into last position
if (IdxVal == NumElems -1) {
// Move the bit to the last position inside the vector.
EltInVec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, EltInVec,
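
The rewritten IdxVal == 0 path no longer assumes the incoming element vector is already zero above bit 0; it scrubs the high bits with a KSHIFTL/KSHIFTR pair before merging. The same arithmetic on a plain 16-bit value (a sketch; the mask width is fixed at 16 here purely for illustration):

    #include <cassert>
    #include <cstdint>

    // Models the insert-at-index-0 sequence: shifting left then right by
    // NumElems-1 clears every bit of the element vector except bit 0, and
    // the same trick on the destination clears its bit 0 before the OR.
    static uint16_t insertBitAtZero(uint16_t vec, uint16_t eltInVec) {
      const unsigned NumElems = 16;
      eltInVec = static_cast<uint16_t>(eltInVec << (NumElems - 1)); // keep bit 0
      eltInVec = static_cast<uint16_t>(eltInVec >> (NumElems - 1));
      vec = static_cast<uint16_t>(vec >> 1); // drop the old bit 0 ...
      vec = static_cast<uint16_t>(vec << 1); // ... leaving position 0 clear
      return vec | eltInVec;
    }

    int main() {
      // Only bit 0 of the element operand is used, even if it is "dirty".
      assert(insertBitAtZero(0b1010, 0b0111) == 0b1011);
      return 0;
    }
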
@@ -17322,8 +17321,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
- assert(((!Subtarget.hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
- && "SetCC type must be 8-bit or 1-bit integer");
+ assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDLoc dl(Op);
@@ -17457,7 +17455,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (SSECC != 8) {
if (Subtarget.hasAVX512()) {
- SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::i1, CondOp0,
+ SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
return DAG.getNode(VT.isVector() ? X86ISD::SELECT : X86ISD::SELECTS,
DL, VT, Cmp, Op1, Op2);
@@ -17505,9 +17503,10 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
// AVX512 fallback is to lower selects of scalar floats to masked moves.
- if (Cond.getValueType() == MVT::i1 && (VT == MVT::f64 || VT == MVT::f32) &&
- Subtarget.hasAVX512())
- return DAG.getNode(X86ISD::SELECTS, DL, VT, Cond, Op1, Op2);
+ if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
+ SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
+ return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
+ }
if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
SDValue Op1Scalar;
@@ -19048,8 +19047,8 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
/// \brief Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
-/// The mask is coming as MVT::i8 and it should be truncated
-/// to MVT::i1 while lowering masking intrinsics.
+/// The mask is coming as MVT::i8 and it should be transformed
+/// to MVT::v1i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
/// "X86select" instead of "vselect". We just can't create the "vselect" node
/// for a scalar instruction.
@@ -19064,11 +19063,10 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
- // The mask should be of type MVT::i1
- SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
+ SDValue IMask = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Mask);
if (Op.getOpcode() == X86ISD::FSETCCM ||
- Op.getOpcode() == X86ISD::FSETCCM_RND)
+ Op.getOpcode() == X86ISD::FSETCCM_RND)
return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
if (Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
@@ -19507,10 +19505,11 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
SDValue Src1 = Op.getOperand(1);
SDValue Imm = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
- SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Imm);
+ SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask,
DAG.getTargetConstant(0, dl, MVT::i1), Subtarget, DAG);
- return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, FPclassMask);
+ return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i8, FPclassMask,
+ DAG.getIntPtrConstant(0, dl));
}
case CMP_MASK:
case CMP_MASK_CC: {
@@ -19570,18 +19569,18 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
if (IntrData->Opc1 != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
- Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::i1, Src1, Src2, CC, Rnd);
+ Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Rnd);
}
//default rounding mode
if(!Cmp.getNode())
- Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Src2, CC);
+ Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
SDValue CmpMask = getScalarMaskingNode(Cmp, Mask,
DAG.getTargetConstant(0, dl,
MVT::i1),
Subtarget, DAG);
-
- return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, CmpMask);
+ return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i8, CmpMask,
+ DAG.getIntPtrConstant(0, dl));
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
@@ -19629,13 +19628,13 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
SDValue FCmp;
if (isRoundModeCurDirection(Sae))
- FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::i1, LHS, RHS,
- DAG.getConstant(CondVal, dl, MVT::i8));
+ FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8));
else
- FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::i1, LHS, RHS,
- DAG.getConstant(CondVal, dl, MVT::i8), Sae);
- // AnyExt just uses KMOVW %kreg, %r32; ZeroExt emits "and $1, %reg"
- return DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, FCmp);
+ FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8), Sae);
+ return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i32, FCmp,
+ DAG.getIntPtrConstant(0, dl));
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
@@ -23385,8 +23384,6 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
"Unexpected request for vector widening");
- EVT EltVT = NVT.getVectorElementType();
-
SDLoc dl(InOp);
if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
InOp.getNumOperands() == 2) {
@@ -23404,6 +23401,8 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
for (unsigned i = 0; i < InNumElts; ++i)
Ops.push_back(InOp.getOperand(i));
+ EVT EltVT = InOp.getOperand(0).getValueType();
+
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
@@ -29595,7 +29594,7 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
if (FValIsAllZeros && Subtarget.hasAVX512() && Cond.hasOneUse() &&
CondVT.getVectorElementType() == MVT::i1) {
// Invert the cond to not(cond) : xor(op,allones)=not(op)
- SDValue CondNew = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
+ SDValue CondNew = DAG.getNode(ISD::XOR, DL, CondVT, Cond,
DAG.getAllOnesConstant(DL, CondVT));
// Vselect cond, op1, op2 = Vselect not(cond), op2, op1
return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
@@ -31342,13 +31341,11 @@ static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
if (Subtarget.hasAVX512()) {
- SDValue FSetCC = DAG.getNode(X86ISD::FSETCCM, DL, MVT::i1, CMP00,
- CMP01,
- DAG.getConstant(x86cc, DL, MVT::i8));
- if (N->getValueType(0) != MVT::i1)
- return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
- FSetCC);
- return FSetCC;
+ SDValue FSetCC =
+ DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
+ DAG.getConstant(x86cc, DL, MVT::i8));
+ return DAG.getNode(X86ISD::VEXTRACT, DL, N->getSimpleValueType(0),
+ FSetCC, DAG.getIntPtrConstant(0, DL));
}
SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
CMP00.getValueType(), CMP00, CMP01,
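
A pattern that recurs throughout this file: scalar FP compares (FSETCCM and FSETCCM_RND) now yield a v1i1 node, and the scalar integer result is recovered with an X86ISD::VEXTRACT of element 0 rather than a zero- or any-extend of i1. In the emitted code the extract becomes a kmov from the mask register followed by isolating the low bit; modeled on plain integers (the widths are an assumption for illustration):

    #include <cassert>
    #include <cstdint>

    // A vcmpeqss-style compare leaves one valid bit in the k-register;
    // kmov copies the whole register to a GPR, so extracting element 0
    // amounts to reading bit 0.
    static uint32_t extractElt0(uint16_t kreg) {
      return static_cast<uint32_t>(kreg) & 1u;
    }

    int main() {
      assert(extractElt0(0x0001) == 1);
      assert(extractElt0(0xFFFE) == 0); // higher mask bits are not element 0
      return 0;
    }
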
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 71d395244b4..f9344413bbc 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -31,8 +31,7 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
// The mask VT.
- ValueType KVT = !cast<ValueType>(!if (!eq (NumElts, 1), "i1",
- "v" # NumElts # "i1"));
+ ValueType KVT = !cast<ValueType>("v" # NumElts # "i1");
// Suffix used in the instruction mnemonic.
string Suffix = suffix;
@@ -2263,7 +2262,7 @@ let Predicates = [HasAVX512, NoDQI] in {
let Predicates = [HasAVX512] in {
def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
(KMOVWmk addr:$dst, VK16:$src)>;
- def : Pat<(i1 (load addr:$src)),
+ def : Pat<(v1i1 (load addr:$src)),
(COPY_TO_REGCLASS (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), VK1)>;
def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
(KMOVWkm addr:$src)>;
@@ -2280,77 +2279,45 @@ let Predicates = [HasBWI] in {
}
let Predicates = [HasAVX512] in {
- def : Pat<(i1 (trunc (i64 GR64:$src))),
- (COPY_TO_REGCLASS (AND32ri8 (EXTRACT_SUBREG $src, sub_32bit),
- (i32 1)), VK1)>;
+ multiclass operation_gpr_mask_copy_lowering<RegisterClass maskRC, ValueType maskVT> {
+ def : Pat<(maskVT (scalar_to_vector GR32:$src)),
+ (COPY_TO_REGCLASS GR32:$src, maskRC)>;
- def : Pat<(i1 (trunc (i32 GR32:$src))),
- (COPY_TO_REGCLASS (AND32ri8 $src, (i32 1)), VK1)>;
+ def : Pat<(i32 (X86Vextract maskRC:$src, (iPTR 0))),
+ (COPY_TO_REGCLASS maskRC:$src, GR32)>;
- def : Pat<(i1 (trunc (i32 (assertzext_i1 GR32:$src)))),
- (COPY_TO_REGCLASS GR32:$src, VK1)>;
+ def : Pat<(maskVT (scalar_to_vector GR8:$src)),
+ (COPY_TO_REGCLASS (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, sub_8bit), maskRC)>;
- def : Pat<(i1 (trunc (i8 GR8:$src))),
- (COPY_TO_REGCLASS
- (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
- GR8:$src, sub_8bit), (i32 1)), VK1)>;
+ def : Pat<(i8 (X86Vextract maskRC:$src, (iPTR 0))),
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS maskRC:$src, GR32)), sub_8bit)>;
- def : Pat<(i1 (trunc (i16 GR16:$src))),
- (COPY_TO_REGCLASS
- (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
- GR16:$src, sub_16bit), (i32 1)), VK1)>;
-
- def : Pat<(i32 (zext VK1:$src)),
- (AND32ri8 (COPY_TO_REGCLASS VK1:$src, GR32), (i32 1))>;
-
- def : Pat<(i32 (anyext VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, GR32)>;
-
- def : Pat<(i8 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri8 (COPY_TO_REGCLASS VK1:$src, GR32), (i32 1)), sub_8bit)>;
-
- def : Pat<(i8 (anyext VK1:$src)),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_8bit)>;
+ def : Pat<(i32 (anyext (i8 (X86Vextract maskRC:$src, (iPTR 0))))),
+ (COPY_TO_REGCLASS maskRC:$src, GR32)>;
+ }
- def : Pat<(i64 (zext VK1:$src)),
- (SUBREG_TO_REG (i64 0),
- (AND32ri8 (COPY_TO_REGCLASS VK1:$src, GR32), (i32 1)), sub_32bit)>;
+ defm : operation_gpr_mask_copy_lowering<VK1, v1i1>;
+ defm : operation_gpr_mask_copy_lowering<VK2, v2i1>;
+ defm : operation_gpr_mask_copy_lowering<VK4, v4i1>;
+ defm : operation_gpr_mask_copy_lowering<VK8, v8i1>;
+ defm : operation_gpr_mask_copy_lowering<VK16, v16i1>;
+ defm : operation_gpr_mask_copy_lowering<VK32, v32i1>;
+ defm : operation_gpr_mask_copy_lowering<VK64, v64i1>;
- def : Pat<(i64 (anyext VK1:$src)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_32bit)>;
+ def : Pat<(X86kshiftr (X86kshiftl (v1i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
+ (COPY_TO_REGCLASS
+ (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+ GR8:$src, sub_8bit), (i32 1))), VK1)>;
+ def : Pat<(X86kshiftr (X86kshiftl (v16i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
+ (COPY_TO_REGCLASS
+ (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+ GR8:$src, sub_8bit), (i32 1))), VK16)>;
+ def : Pat<(X86kshiftr (X86kshiftl (v8i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
+ (COPY_TO_REGCLASS
+ (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
+ GR8:$src, sub_8bit), (i32 1))), VK8)>;
- def : Pat<(i16 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri8 (COPY_TO_REGCLASS VK1:$src, GR32), (i32 1)), sub_16bit)>;
-
- def : Pat<(i16 (anyext VK1:$src)),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_16bit)>;
-}
-def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK16)>;
-def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK8)>;
-def : Pat<(v4i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK4)>;
-def : Pat<(v2i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK2)>;
-def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK32)>;
-def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
- (COPY_TO_REGCLASS VK1:$src, VK64)>;
-
-def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
-
-def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))), (COPY_TO_REGCLASS VK64:$src, VK1)>;
-def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))), (COPY_TO_REGCLASS VK32:$src, VK1)>;
-def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))), (COPY_TO_REGCLASS VK16:$src, VK1)>;
-def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))), (COPY_TO_REGCLASS VK8:$src, VK1)>;
-def : Pat<(i1 (X86Vextract VK4:$src, (iPTR 0))), (COPY_TO_REGCLASS VK4:$src, VK1)>;
-def : Pat<(i1 (X86Vextract VK2:$src, (iPTR 0))), (COPY_TO_REGCLASS VK2:$src, VK1)>;
+}
// Mask unary operation
// - KNOT
@@ -2551,14 +2518,11 @@ let Predicates = [HasAVX512] in {
def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
def : Pat<(v4i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK4)>;
def : Pat<(v2i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK2)>;
+ def : Pat<(v1i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK1)>;
def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
- let AddedComplexity = 10 in { // To optimize isel table.
- def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
- def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
- def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
- }
+ def : Pat<(v1i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}
// Patterns for kmask insert_subvector/extract_subvector to/from index=0
@@ -2570,6 +2534,12 @@ multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subV
def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
(VT (COPY_TO_REGCLASS subRC:$src, RC))>;
}
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK2, v2i1>;
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK4, v4i1>;
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK1, v1i1, VK64, v64i1>;
defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
@@ -3249,7 +3219,7 @@ multiclass avx512_move_scalar_lowering<string InstrStr, SDNode OpNode,
def : Pat<(_.VT (OpNode _.RC:$src0,
(_.VT (scalar_to_vector
- (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
+ (_.EltVT (X86selects (scalar_to_vector (and (i8 (trunc GR32:$mask)), (i8 1))),
(_.EltVT _.FRC:$src1),
(_.EltVT _.FRC:$src2))))))),
(COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrk)
@@ -3260,7 +3230,7 @@ def : Pat<(_.VT (OpNode _.RC:$src0,
def : Pat<(_.VT (OpNode _.RC:$src0,
(_.VT (scalar_to_vector
- (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
+ (_.EltVT (X86selects (scalar_to_vector (and (i8 (trunc GR32:$mask)), (i8 1))),
(_.EltVT _.FRC:$src1),
(_.EltVT ZeroFP))))))),
(COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrkz)
@@ -3279,7 +3249,7 @@ def : Pat<(masked_store addr:$dst, Mask,
(iPTR 0))),
(iPTR 0)))),
(!cast<Instruction>(InstrStr#mrk) addr:$dst,
- (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+ (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
(COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
}
@@ -3296,7 +3266,7 @@ def : Pat<(masked_store addr:$dst, Mask,
(iPTR 0))),
(iPTR 0)))),
(!cast<Instruction>(InstrStr#mrk) addr:$dst,
- (i1 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM)),
+ (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
(COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
}
@@ -3310,7 +3280,7 @@ def : Pat<(_.info128.VT (extract_subvector
(v16i32 immAllZerosV))))),
(iPTR 0))),
(!cast<Instruction>(InstrStr#rmkz)
- (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+ (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
addr:$srcAddr)>;
def : Pat<(_.info128.VT (extract_subvector
@@ -3322,7 +3292,7 @@ def : Pat<(_.info128.VT (extract_subvector
(iPTR 0))))),
(iPTR 0))),
(!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
- (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+ (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
addr:$srcAddr)>;
}
@@ -3338,7 +3308,7 @@ def : Pat<(_.info128.VT (extract_subvector
(v16i32 immAllZerosV))))),
(iPTR 0))),
(!cast<Instruction>(InstrStr#rmkz)
- (i1 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM)),
+ (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
addr:$srcAddr)>;
def : Pat<(_.info128.VT (extract_subvector
@@ -3350,7 +3320,7 @@ def : Pat<(_.info128.VT (extract_subvector
(iPTR 0))))),
(iPTR 0))),
(!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
- (i1 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM)),
+ (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
addr:$srcAddr)>;
}
@@ -3381,7 +3351,7 @@ def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
- (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), GR8:$mask, sub_8bit)), VK1WM)),
+ (VMOVSSZmrk addr:$dst, (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), GR8:$mask, sub_8bit)), VK1WM),
(COPY_TO_REGCLASS VR128X:$src, FR32X))>;
let hasSideEffects = 0 in
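
The new operation_gpr_mask_copy_lowering multiclass replaces the old per-width trunc/zext/anyext patterns with two simple directions per mask type: scalar_to_vector of a GPR copies into a k-register, and X86Vextract of element 0 copies back out. Neither direction masks to one bit anymore; that job now belongs to the explicit kshift pattern matched just below the multiclass. A standalone C++ sanity check of that division of labor (a sketch; the 16-bit width is illustrative):

    #include <cassert>
    #include <cstdint>

    // GPR -> k-register -> GPR is now a plain bit-preserving copy; isolating
    // bit 0 happens separately via the kshiftl/kshiftr pair (or a caller AND).
    static uint16_t copyToMask(uint32_t gpr) { return static_cast<uint16_t>(gpr); }
    static uint32_t copyFromMask(uint16_t k) { return k; }
    static uint16_t sanitizeBit0(uint16_t k) {
      return static_cast<uint16_t>(static_cast<uint16_t>(k << 15) >> 15);
    }

    int main() {
      assert(copyFromMask(copyToMask(0x1234u)) == 0x1234u); // copy preserves bits
      assert(sanitizeBit0(copyToMask(0x1235u)) == 1);       // low bit survives
      assert(sanitizeBit0(copyToMask(0x1234u)) == 0);
      return 0;
    }
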
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 9867ba84bb9..e2e228f5544 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -274,7 +274,7 @@ def X86select : SDNode<"X86ISD::SELECT",
SDTCisSameNumEltsAs<0, 1>]>>;
def X86selects : SDNode<"X86ISD::SELECTS",
- SDTypeProfile<1, 3, [SDTCisVT<1, i1>,
+ SDTypeProfile<1, 3, [SDTCisVT<1, v1i1>,
SDTCisSameAs<0, 2>,
SDTCisSameAs<2, 3>]>>;
@@ -441,7 +441,7 @@ def X86Vfpclass : SDNode<"X86ISD::VFPCLASS",
SDTCisSameNumEltsAs<0,1>,
SDTCisVT<2, i32>]>, []>;
def X86Vfpclasss : SDNode<"X86ISD::VFPCLASSS",
- SDTypeProfile<1, 2, [SDTCisVT<0, i1>,
+ SDTypeProfile<1, 2, [SDTCisVT<0, v1i1>,
SDTCisFP<1>, SDTCisVT<2, i32>]>,[]>;
def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
@@ -451,7 +451,7 @@ def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
def X86Vextract : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2,
- [SDTCisEltOfVec<0, 1>, SDTCisVec<1>,
+ [SDTCisVec<1>,
SDTCisPtrTy<2>]>, []>;
def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.td b/llvm/lib/Target/X86/X86RegisterInfo.td
index d235d2b40b1..3a61a7247c7 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -511,7 +511,7 @@ def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 31)>;
// Mask registers
-def VK1 : RegisterClass<"X86", [i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
+def VK1 : RegisterClass<"X86", [v1i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
@@ -519,7 +519,7 @@ def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
-def VK1WM : RegisterClass<"X86", [i1], 16, (sub VK1, K0)> {let Size = 16;}
+def VK1WM : RegisterClass<"X86", [v1i1], 16, (sub VK1, K0)> {let Size = 16;}
def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
diff --git a/llvm/test/CodeGen/X86/avx512-cmp.ll b/llvm/test/CodeGen/X86/avx512-cmp.ll
index c1b64743f89..eae7b94f513 100644
--- a/llvm/test/CodeGen/X86/avx512-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-cmp.ll
@@ -47,16 +47,20 @@ l2:
ret float %c1
}
-; FIXME: Can use vcmpeqss and extract from the mask here in AVX512.
define i32 @test3(float %a, float %b) {
-; ALL-LABEL: test3:
-; ALL: ## BB#0:
-; ALL-NEXT: vucomiss %xmm1, %xmm0
-; ALL-NEXT: setnp %al
-; ALL-NEXT: sete %cl
-; ALL-NEXT: andb %al, %cl
-; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: test3:
+; KNL: ## BB#0:
+; KNL-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test3:
+; SKX: ## BB#0:
+; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
%cmp10.i = fcmp oeq float %a, %b
%conv11.i = zext i1 %cmp10.i to i32
@@ -69,7 +73,7 @@ define float @test5(float %p) #0 {
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
-; ALL-NEXT: jp LBB3_1
+; ALL-NEXT: jp LBB3_1
; ALL-NEXT: ## BB#2: ## %return
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
@@ -158,47 +162,22 @@ B:
}
define i32 @test10(i64 %b, i64 %c, i1 %d) {
-; KNL-LABEL: test10:
-; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edx
-; KNL-NEXT: kmovw %edx, %k0
-; KNL-NEXT: cmpq %rsi, %rdi
-; KNL-NEXT: sete %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
-; KNL-NEXT: je LBB8_1
-; KNL-NEXT: ## BB#2: ## %if.end.i
-; KNL-NEXT: movl $6, %eax
-; KNL-NEXT: retq
-; KNL-NEXT: LBB8_1: ## %if.then.i
-; KNL-NEXT: movl $5, %eax
-; KNL-NEXT: retq
-;
-; SKX-LABEL: test10:
-; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edx
-; SKX-NEXT: kmovd %edx, %k0
-; SKX-NEXT: cmpq %rsi, %rdi
-; SKX-NEXT: sete %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: korw %k1, %k0, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
-; SKX-NEXT: je LBB8_1
-; SKX-NEXT: ## BB#2: ## %if.end.i
-; SKX-NEXT: movl $6, %eax
-; SKX-NEXT: retq
-; SKX-NEXT: LBB8_1: ## %if.then.i
-; SKX-NEXT: movl $5, %eax
-; SKX-NEXT: retq
+; ALL-LABEL: test10:
+; ALL: ## BB#0:
+; ALL-NEXT: movl %edx, %eax
+; ALL-NEXT: andb $1, %al
+; ALL-NEXT: cmpq %rsi, %rdi
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %dl, %cl
+; ALL-NEXT: andb $1, %cl
+; ALL-NEXT: cmpb %cl, %al
+; ALL-NEXT: je LBB8_1
+; ALL-NEXT: ## BB#2: ## %if.end.i
+; ALL-NEXT: movl $6, %eax
+; ALL-NEXT: retq
+; ALL-NEXT: LBB8_1: ## %if.then.i
+; ALL-NEXT: movl $5, %eax
+; ALL-NEXT: retq
%cmp8.i = icmp eq i64 %b, %c
%or1 = or i1 %d, %cmp8.i
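
In test10 the old sequence built the predicate in k-registers with korw/kxorw, while the new one stays in GPR byte ops, but both compute d XOR (d OR cmp), which reduces to (NOT d) AND cmp. A quick exhaustive check of that identity in standalone C++:

    #include <cassert>

    // The predicate both code sequences compute is d ^ (d | c), the same
    // boolean as (!d && c). Verified over all four input combinations:
    int main() {
      for (int d = 0; d <= 1; ++d)
        for (int c = 0; c <= 1; ++c)
          assert(((d ^ (d | c)) != 0) == (!d && c));
      return 0;
    }
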
diff --git a/llvm/test/CodeGen/X86/avx512-cvt.ll b/llvm/test/CodeGen/X86/avx512-cvt.ll
index 2b55372f306..33ac15de9de 100644
--- a/llvm/test/CodeGen/X86/avx512-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx512-cvt.ll
@@ -1552,10 +1552,10 @@ define <2 x float> @uitofp_2i1_float(<2 x i32> %a) {
; NOVL-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NOVL-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: vpextrq $1, %xmm0, %rax
+; NOVL-NEXT: vpextrb $8, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm1
-; NOVL-NEXT: vmovq %xmm0, %rax
+; NOVL-NEXT: vpextrb $0, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; NOVL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
diff --git a/llvm/test/CodeGen/X86/avx512-ext.ll b/llvm/test/CodeGen/X86/avx512-ext.ll
index b31b00e54e8..2145f5fb09a 100644
--- a/llvm/test/CodeGen/X86/avx512-ext.ll
+++ b/llvm/test/CodeGen/X86/avx512-ext.ll
@@ -1434,26 +1434,26 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-LABEL: trunc_i32_to_i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
-; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: movw $-4, %ax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovd %eax, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-fsel.ll b/llvm/test/CodeGen/X86/avx512-fsel.ll
index a9b8914ee1f..7777ba79541 100644
--- a/llvm/test/CodeGen/X86/avx512-fsel.ll
+++ b/llvm/test/CodeGen/X86/avx512-fsel.ll
@@ -10,25 +10,11 @@ define i32 @test(float %a, float %b) {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: vucomiss %xmm1, %xmm0
-; CHECK-NEXT: setp %al
-; CHECK-NEXT: setne %cl
-; CHECK-NEXT: setnp %dl
-; CHECK-NEXT: sete %sil
-; CHECK-NEXT: andb %dl, %sil
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %sil, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %cl, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kmovw %k1, %edi
-; CHECK-NEXT: movb %dil, %al
-; CHECK-NEXT: testb $1, %al
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: xorb $-1, %cl
+; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: jne LBB0_1
; CHECK-NEXT: jmp LBB0_2
; CHECK-NEXT: LBB0_1: ## %L_0
diff --git a/llvm/test/CodeGen/X86/avx512-i1test.ll b/llvm/test/CodeGen/X86/avx512-i1test.ll
index 69fafdfff9a..321f26674e1 100644
--- a/llvm/test/CodeGen/X86/avx512-i1test.ll
+++ b/llvm/test/CodeGen/X86/avx512-i1test.ll
@@ -66,14 +66,13 @@ L_30: ; preds = %bb51, %L_10
define i64 @func2(i1 zeroext %i, i32 %j) {
; CHECK-LABEL: func2:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB1_1
; CHECK-NEXT: # BB#2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
-; CHECK-NEXT: orq $-2, %rdi
-; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
entry:
%tobool = icmp eq i32 %j, 0
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 87928348a85..29a5325a0ae 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -260,8 +260,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: je LBB10_2
; KNL-NEXT: ## BB#1: ## %A
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -276,8 +275,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: je LBB10_2
; SKX-NEXT: ## BB#1: ## %A
; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -299,13 +297,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-LABEL: test12:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; KNL-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -313,13 +308,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; SKX-LABEL: test12:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; SKX-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; SKX-NEXT: kunpckbw %k0, %k1, %k0
-; SKX-NEXT: kshiftlw $15, %k0, %k0
-; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kshiftlb $7, %k0, %k0
+; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -335,13 +327,13 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
+; KNL-NEXT: movw $-4, %cx
+; KNL-NEXT: kmovw %ecx, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
@@ -350,13 +342,13 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
+; SKX-NEXT: movw $-4, %cx
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -373,8 +365,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -385,8 +376,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -424,14 +414,13 @@ define i16 @test15(i1 *%addr) {
define i16 @test16(i1 *%addr, i16 %a) {
; KNL-LABEL: test16:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; KNL-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -440,14 +429,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
;
; SKX-LABEL: test16:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2d %k1, %zmm0
; SKX-NEXT: vpmovm2d %k0, %zmm1
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; SKX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -463,14 +451,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
define i8 @test17(i1 *%addr, i8 %a) {
; KNL-LABEL: test17:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -479,14 +466,13 @@ define i8 @test17(i1 *%addr, i8 %a) {
;
; SKX-LABEL: test17:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2q %k1, %zmm0
; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1283,12 +1269,11 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k0
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k1
+; SKX-NEXT: kunpckwd %k0, %k1, %k0
+; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
-; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
-; SKX-NEXT: kunpckwd %k1, %k2, %k1
-; SKX-NEXT: vpmovm2w %k1, %zmm0
; SKX-NEXT: vpmovm2w %k0, %zmm1
; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; SKX-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
@@ -1308,33 +1293,29 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vpextrd $1, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovd %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vpextrb $4, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
-; KNL-NEXT: vptestmq %zmm1, %zmm1, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; KNL-NEXT: vpextrd $3, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: vpextrb $12, %xmm0, %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
@@ -1349,10 +1330,9 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
-; SKX-NEXT: vpmovm2d %k1, %xmm0
; SKX-NEXT: vpmovm2d %k0, %xmm1
; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
@@ -1373,16 +1353,14 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vmovq %xmm0, %rax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
@@ -1396,13 +1374,12 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
; SKX-NEXT: kshiftlw $1, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -1422,8 +1399,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v2i1:
@@ -1432,11 +1411,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1452,8 +1430,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v2i1_alt:
@@ -1462,11 +1442,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1535,8 +1514,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v64i1:
@@ -1544,11 +1525,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -1566,8 +1546,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v64i1_alt:
@@ -1575,11 +1557,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -2332,7 +2313,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
@@ -2362,7 +2343,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $3, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <4 x i32> %a, %b
@@ -2391,7 +2372,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
; KNL-NEXT: andl $7, %edi
-; KNL-NEXT: movl (%rsp,%rdi,8), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,8), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2414,7 +2395,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
; SKX-NEXT: andl $7, %edi
-; SKX-NEXT: movl (%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2444,7 +2425,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
; KNL-NEXT: andl $15, %edi
-; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,4), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2467,7 +2448,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; SKX-NEXT: andl $15, %edi
-; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2500,9 +2481,8 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; KNL-NEXT: vmovdqa %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
; KNL-NEXT: movq %rsp, %rax
-; KNL-NEXT: movb (%rdi,%rax), %al
-; KNL-NEXT: andb $1, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: movzbl (%rdi,%rax), %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -2524,7 +2504,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: vmovdqu16 %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
-; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,2), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll b/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
index a1d1a7dae19..a099b80898e 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -22,9 +22,8 @@ define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b,
; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movq %rsp, %rax
-; SKX-NEXT: movb (%rdi,%rax), %al
-; SKX-NEXT: andb $1, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: movzbl (%rdi,%rax), %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 0e7a8d25c56..32da0a70218 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -2881,23 +2881,23 @@ define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8
; CHECK-LABEL: test_mask_vextractf32x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm1
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm2
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
; CHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -2911,23 +2911,23 @@ define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask)
; CHECK-LABEL: test_mask_vextracti64x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm2
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
; CHECK-NEXT: vpmovsxdq %xmm2, %ymm2
; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
@@ -2942,23 +2942,23 @@ define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: test_maskz_vextracti32x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm1
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index cc5e9e038e0..563cad04b8c 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -112,6 +112,8 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) {
}
declare i16 @llvm.x86.avx512.kxnor.w(i16, i16) nounwind readnone
+; TODO: the two kxnor instructions here are a no-op and should be eliminated,
+; probably by FoldConstantArithmetic in SelectionDAG.
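+; (Each kxnorw computes ~(src1 ^ src2), so the first, with both sources
+; equal, sets %k1 to all ones, and xnor against all ones is the identity;
+; the pair therefore leaves the mask value in %k0 unchanged.)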
define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kxnor:
; CHECK: ## BB#0:
@@ -121,6 +123,8 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-NEXT: kmovw %eax, %k2
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: kxnorw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -269,7 +273,6 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
@@ -296,7 +299,6 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <
define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
@@ -2214,7 +2216,6 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2226,7 +2227,6 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2238,7 +2238,6 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_ru:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2250,7 +2249,6 @@ define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2262,7 +2260,6 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2274,7 +2271,6 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <
define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2294,7 +2290,6 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -2311,7 +2306,6 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1
define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2329,7 +2323,6 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2341,7 +2334,6 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2353,7 +2345,6 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_ru:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2365,7 +2356,6 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2377,7 +2367,6 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2389,7 +2378,6 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1
define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2409,7 +2397,6 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2424,7 +2411,6 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double*
define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2440,7 +2426,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2452,7 +2437,6 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x
define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2472,7 +2456,6 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2484,7 +2467,6 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2504,7 +2486,6 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -2521,7 +2502,6 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x f
define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2538,7 +2518,6 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2550,7 +2529,6 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2570,7 +2548,6 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2582,7 +2559,6 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2602,7 +2578,6 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2617,7 +2592,6 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2
define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -3651,16 +3625,15 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4
define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm1
-; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm5, %xmm4, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
@@ -3678,16 +3651,15 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>,
define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z}
; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm1
-; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
@@ -3705,11 +3677,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32
define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3720,18 +3690,18 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
; CHECK: ## BB#0:
+; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0
-; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
-; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k1
-; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k2
-; CHECK-NEXT: korw %k1, %k2, %k1
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k2
-; CHECK-NEXT: kandw %k2, %k1, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %edx
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %esi
+; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: orb %cl, %dl
+; CHECK-NEXT: orb %sil, %al
+; CHECK-NEXT: orb %dl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3751,11 +3721,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3767,17 +3735,17 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k1
-; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %edx
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k2 {%k1}
-; CHECK-NEXT: kmovw %k2, %ecx
-; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
-; CHECK-NEXT: kmovw %k1, %edx
-; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %esi
+; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andb %cl, %al
+; CHECK-NEXT: andb %cl, %dl
+; CHECK-NEXT: andb %sil, %al
; CHECK-NEXT: andb %dl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3898,15 +3866,14 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z}
; CHECK-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vaddpd %xmm5, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4)
@@ -3924,14 +3891,13 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
; CHECK-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm1
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm1
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4)
@@ -4056,7 +4022,6 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo
define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
@@ -4073,7 +4038,6 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl
define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -4596,7 +4560,6 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
@@ -4620,16 +4583,15 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
+; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4690,16 +4652,15 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
-; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm4
-; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4714,7 +4675,6 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4815,17 +4775,16 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
-; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vmovapd %xmm0, %xmm5
-; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5 {%k1}
-; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm1
-; CHECK-NEXT: vaddpd %xmm5, %xmm0, %xmm0
+; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5
+; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm1
+; CHECK-NEXT: vaddpd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
@@ -4843,17 +4802,16 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vmovaps %xmm0, %xmm5
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5 {%k1}
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm1
-; CHECK-NEXT: vaddps %xmm5, %xmm0, %xmm0
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm1
+; CHECK-NEXT: vaddps %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
@@ -4871,7 +4829,6 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4889,7 +4846,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4903,17 +4859,16 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
@@ -4931,17 +4886,16 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
@@ -4958,7 +4912,6 @@ define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -4986,7 +4939,6 @@ define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -5014,7 +4966,6 @@ define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -5038,7 +4989,6 @@ define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -5063,17 +5013,16 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
@@ -5091,17 +5040,16 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
@@ -5119,17 +5067,16 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm5
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddpd %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
@@ -5147,17 +5094,16 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm5
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5 {%k1}
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm0
-; CHECK-NEXT: vaddps %xmm5, %xmm2, %xmm1
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
@@ -5173,7 +5119,6 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -5187,7 +5132,6 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -5201,7 +5145,8 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load float, float* %ptr_b
diff --git a/llvm/test/CodeGen/X86/avx512-load-store.ll b/llvm/test/CodeGen/X86/avx512-load-store.ll
index 3295c66c6d4..4fd985bf24c 100644
--- a/llvm/test/CodeGen/X86/avx512-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx512-load-store.ll
@@ -12,7 +12,7 @@ define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x
; CHECK32-LABEL: test_mm_mask_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -37,7 +37,7 @@ define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4
; CHECK32-LABEL: test_mm_maskz_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -62,7 +62,7 @@ define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2
; CHECK32-LABEL: test_mm_mask_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -87,7 +87,7 @@ define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <
; CHECK32-LABEL: test_mm_maskz_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
diff --git a/llvm/test/CodeGen/X86/avx512-mask-bugfix.ll b/llvm/test/CodeGen/X86/avx512-mask-bugfix.ll
deleted file mode 100644
index 1940680f1c1..00000000000
--- a/llvm/test/CodeGen/X86/avx512-mask-bugfix.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
-
-; ModuleID = 'foo.ll'
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) #0
-
-; Function Attrs: nounwind readnone
-declare i64 @llvm.cttz.i64(i64, i1) #0
-
-; Function Attrs: nounwind
-define void @foo(float* noalias %aFOO, float %b, i32 %a) {
-allocas:
- %full_mask_memory.i57 = alloca <8 x float>
- %return_value_memory.i60 = alloca i1
- %cmp.i = icmp eq i32 %a, 65535
- br i1 %cmp.i, label %all_on, label %some_on
-
-all_on:
- %mask0 = load <8 x float>, <8 x float>* %full_mask_memory.i57
- %v0.i.i.i70 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %allon.i.i76 = icmp eq i32 %v0.i.i.i70, 65535
- br i1 %allon.i.i76, label %check_neighbors.i.i121, label %domixed.i.i100
-
-domixed.i.i100:
- br label %check_neighbors.i.i121
-
-check_neighbors.i.i121:
- %v1.i5.i.i116 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %alleq.i.i120 = icmp eq i32 %v1.i5.i.i116, 65535
- br i1 %alleq.i.i120, label %all_equal.i.i123, label %not_all_equal.i.i124
-
-; CHECK: kxnorw %k0, %k0, %k0
-; CHECK: kshiftrw $15, %k0, %k0
-; CHECK: jmp
-; CHECK: kxorw %k0, %k0, %k0
-
-all_equal.i.i123:
- br label %reduce_equal___vyi.exit128
-
-not_all_equal.i.i124:
- br label %reduce_equal___vyi.exit128
-
-reduce_equal___vyi.exit128:
- %calltmp2.i125 = phi i1 [ true, %all_equal.i.i123 ], [ false, %not_all_equal.i.i124 ]
- store i1 %calltmp2.i125, i1* %return_value_memory.i60
- %return_value.i126 = load i1, i1* %return_value_memory.i60
- %. = select i1 %return_value.i126, i32 1, i32 0
- %select_to_float = sitofp i32 %. to float
- ret void
-
-some_on:
- ret void
-}
-
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index 7103efe050a..01153a9e45f 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -418,7 +418,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: andb $1, %al
; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
@@ -428,7 +428,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: andb $1, %al
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -439,7 +439,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: andb $1, %al
; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -450,7 +450,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: andb $1, %al
; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -965,8 +965,8 @@ define <64 x i8> @test16(i64 %x) {
; SKX-LABEL: test16:
; SKX: ## BB#0:
; SKX-NEXT: kmovq %rdi, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: movb $1, %al
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm1
@@ -981,8 +981,8 @@ define <64 x i8> @test16(i64 %x) {
; AVX512BW-LABEL: test16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovq %rdi, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: movb $1, %al
+; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
@@ -1085,7 +1085,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
-; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1103,7 +1102,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: cmpl %edx, %esi
; AVX512BW-NEXT: setg %al
-; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1166,21 +1164,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-LABEL: test18:
; KNL: ## BB#0:
; KNL-NEXT: kmovw %edi, %k1
-; KNL-NEXT: kmovw %esi, %k2
-; KNL-NEXT: kshiftlw $7, %k2, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: kshiftlw $6, %k2, %k2
+; KNL-NEXT: kmovw %esi, %k0
+; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
+; KNL-NEXT: kmovw %k2, %eax
+; KNL-NEXT: kshiftlw $6, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
-; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $7, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k1
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftlw $7, %k1, %k1
+; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
@@ -1191,16 +1193,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kshiftlb $7, %k2, %k1
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftlb $7, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1209,21 +1215,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-LABEL: test18:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovd %edi, %k1
-; AVX512BW-NEXT: kmovd %esi, %k2
-; AVX512BW-NEXT: kshiftlw $7, %k2, %k0
-; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
-; AVX512BW-NEXT: kshiftlw $6, %k2, %k2
+; AVX512BW-NEXT: kmovd %esi, %k0
+; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: kmovd %k2, %eax
+; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %ecx
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsllq $63, %zmm2, %zmm0
-; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
-; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftrw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
-; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: vzeroupper
@@ -1235,16 +1245,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -1383,10 +1397,8 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; KNL-LABEL: store_v1i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movb %al, (%rsi)
@@ -1394,20 +1406,16 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; SKX-LABEL: store_v1i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovb %k0, (%rsi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v1i1:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: andl $1, %edi
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, (%rsi)
@@ -1415,10 +1423,8 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; AVX512DQ-LABEL: store_v1i1:
; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: andl $1, %edi
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rsi)
; AVX512DQ-NEXT: retq
@@ -1613,59 +1619,14 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
@f1.v = internal unnamed_addr global i1 false, align 4
define void @f1(i32 %c) {
-; KNL-LABEL: f1:
-; KNL: ## BB#0: ## %entry
-; KNL-NEXT: movzbl {{.*}}(%rip), %edi
-; KNL-NEXT: movl %edi, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: movb %al, {{.*}}(%rip)
-; KNL-NEXT: xorl $1, %edi
-; KNL-NEXT: jmp _f2 ## TAILCALL
-;
-; SKX-LABEL: f1:
-; SKX: ## BB#0: ## %entry
-; SKX-NEXT: movzbl {{.*}}(%rip), %edi
-; SKX-NEXT: movl %edi, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovb %k0, {{.*}}(%rip)
-; SKX-NEXT: xorl $1, %edi
-; SKX-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512BW-LABEL: f1:
-; AVX512BW: ## BB#0: ## %entry
-; AVX512BW-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512BW-NEXT: movl %edi, %eax
-; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: kmovd %eax, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
-; AVX512BW-NEXT: kxorw %k1, %k0, %k0
-; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: movb %al, {{.*}}(%rip)
-; AVX512BW-NEXT: xorl $1, %edi
-; AVX512BW-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512DQ-LABEL: f1:
-; AVX512DQ: ## BB#0: ## %entry
-; AVX512DQ-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512DQ-NEXT: movl %edi, %eax
-; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: kmovw %eax, %k0
-; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
-; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
-; AVX512DQ-NEXT: kmovb %k0, {{.*}}(%rip)
-; AVX512DQ-NEXT: xorl $1, %edi
-; AVX512DQ-NEXT: jmp _f2 ## TAILCALL
+; CHECK-LABEL: f1:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movzbl {{.*}}(%rip), %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: movb %al, {{.*}}(%rip)
+; CHECK-NEXT: xorl $1, %edi
+; CHECK-NEXT: jmp _f2 ## TAILCALL
entry:
%.b1 = load i1, i1* @f1.v, align 4
%not..b1 = xor i1 %.b1, true
diff --git a/llvm/test/CodeGen/X86/avx512-memfold.ll b/llvm/test/CodeGen/X86/avx512-memfold.ll
index d754b2b78f6..17cb30255f7 100644
--- a/llvm/test/CodeGen/X86/avx512-memfold.ll
+++ b/llvm/test/CodeGen/X86/avx512-memfold.ll
@@ -4,11 +4,9 @@
define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%b.val = load float, float* %b
@@ -24,7 +22,6 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -41,7 +38,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -61,7 +57,6 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 33409791785..f43d5b3e11d 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -1,16 +1,10 @@
-; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=X32 %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=WIN64 %s
+; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=WIN64 %s
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=LINUXOSX64 %s
-; X32-LABEL: test_argReti1:
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_argReti1:
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_argReti1:
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when receiving/returning i1
define x86_regcallcc i1 @test_argReti1(i1 %a) {
@@ -18,17 +12,11 @@ define x86_regcallcc i1 @test_argReti1(i1 %a) {
ret i1 %add
}
-; X32-LABEL: test_CallargReti1:
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: call{{.*}} {{.*}}test_argReti1
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_CallargReti1:
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: call{{.*}} {{.*}}test_argReti1
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_CallargReti1:
+; ALL: movzbl %al, %eax
+; ALL: call{{.*}}test_argReti1
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when passing/retrieving i1
define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
diff --git a/llvm/test/CodeGen/X86/avx512-scalar_mask.ll b/llvm/test/CodeGen/X86/avx512-scalar_mask.ll
index 47c6813fa8d..f6ee8ff4c0f 100644
--- a/llvm/test/CodeGen/X86/avx512-scalar_mask.ll
+++ b/llvm/test/CodeGen/X86/avx512-scalar_mask.ll
@@ -7,7 +7,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -18,7 +17,6 @@ define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %
define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -30,7 +28,8 @@ define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float>
define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -41,7 +40,8 @@ define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -52,7 +52,8 @@ define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x floa
define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
@@ -63,7 +64,8 @@ define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
diff --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll
index 1859b1bcfaf..e81f983d9fe 100644
--- a/llvm/test/CodeGen/X86/avx512-select.ll
+++ b/llvm/test/CodeGen/X86/avx512-select.ll
@@ -161,7 +161,7 @@ define i64 @pr30249() {
define double @pr30561_f64(double %b, double %a, i1 %c) {
; CHECK-LABEL: pr30561_f64:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -172,7 +172,7 @@ define double @pr30561_f64(double %b, double %a, i1 %c) {
define float @pr30561_f32(float %b, float %a, i1 %c) {
; CHECK-LABEL: pr30561_f32:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index c5478dad422..cf79819734a 100644
--- a/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -13,10 +13,9 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0,
; CHECK-NEXT: kshiftlb $6, %k0, %k0
; CHECK-NEXT: kshiftrb $7, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vmovq %rax, %xmm2
-; CHECK-NEXT: kmovw %k1, %eax
-; CHECK-NEXT: vmovq %rax, %xmm3
-; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; CHECK-NEXT: kmovw %k1, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpsllq $63, %xmm2, %xmm2
; CHECK-NEXT: vpsraq $63, %zmm2, %zmm2
; CHECK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
index 000390404b5..06ee237593e 100644
--- a/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -262,7 +262,6 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ss(<4 x float>, <4 x float>,<4
define <4 x float>@test_int_x86_avx512_mask_reduce_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducess $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -279,7 +278,6 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>,<4 x
define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -296,7 +294,6 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_reduce_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -313,7 +310,6 @@ declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>,<
define <2 x double>@test_int_x86_avx512_mask_range_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -367,14 +363,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasssd $2, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -389,14 +382,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll b/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
index b8531e25bfa..0e4922f37bb 100644
--- a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -121,7 +121,6 @@ declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x flo
define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -132,7 +131,6 @@ define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
@@ -144,7 +142,6 @@ define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x
define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -155,7 +152,6 @@ define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
@@ -169,7 +165,6 @@ declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -182,7 +177,6 @@ define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
; CHECK-NEXT: retq # encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/fast-isel-load-i1.ll b/llvm/test/CodeGen/X86/fast-isel-load-i1.ll
index 2f3c6c4b84b..f515d38cbb9 100644
--- a/llvm/test/CodeGen/X86/fast-isel-load-i1.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -4,9 +4,7 @@
define i1 @test_i1(i1* %b) {
; CHECK-LABEL: test_i1:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movzbl (%rdi), %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: testb $1, %al
+; CHECK-NEXT: testb $1, (%rdi)
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # BB#1: # %in
; CHECK-NEXT: xorl %eax, %eax
diff --git a/llvm/test/CodeGen/X86/fma-fneg-combine.ll b/llvm/test/CodeGen/X86/fma-fneg-combine.ll
index bb332f7282a..d1d69c68af7 100644
--- a/llvm/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/llvm/test/CodeGen/X86/fma-fneg-combine.ll
@@ -141,7 +141,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
; SKX-LABEL: test11:
; SKX: # BB#0: # %entry
; SKX-NEXT: vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -150,7 +149,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
; KNL: # BB#0: # %entry
; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm0
; KNL-NEXT: vxorps %xmm0, %xmm2, %xmm0
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
@@ -186,7 +184,6 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i
; SKX-LABEL: test13:
; SKX: # BB#0: # %entry
; SKX-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -194,10 +191,10 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i
; KNL-LABEL: test13:
; KNL: # BB#0: # %entry
; KNL-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
+
entry:
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
%0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index c5de8dd96cb..91087f650ad 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -300,8 +300,8 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
;
; KNL_32-LABEL: test6:
; KNL_32: # BB#0:
-; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm2
+; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: kxnorw %k0, %k0, %k2
; KNL_32-NEXT: vpgatherqd (,%zmm2), %ymm1 {%k2}
; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm2) {%k1}
@@ -1575,7 +1575,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; Check non-power-of-2 case. It should be scalarized.
declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>)
define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
-; ALL-LABEL: test30:
+; ALL-LABEL: test30
; ALL-NOT: gather
%sext_ind = sext <3 x i32> %ind to <3 x i64>
@@ -1691,12 +1691,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-LABEL: test_gather_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi4:
+; KNL_32-NEXT: .Lcfi0:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi5:
+; KNL_32-NEXT: .Lcfi1:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi6:
+; KNL_32-NEXT: .Lcfi2:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1814,12 +1814,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-LABEL: test_gather_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi7:
+; KNL_32-NEXT: .Lcfi3:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi8:
+; KNL_32-NEXT: .Lcfi4:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi9:
+; KNL_32-NEXT: .Lcfi5:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1936,12 +1936,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-LABEL: test_scatter_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi10:
+; KNL_32-NEXT: .Lcfi6:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi11:
+; KNL_32-NEXT: .Lcfi7:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi12:
+; KNL_32-NEXT: .Lcfi8:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2058,12 +2058,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-LABEL: test_scatter_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi13:
+; KNL_32-NEXT: .Lcfi9:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi14:
+; KNL_32-NEXT: .Lcfi10:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi15:
+; KNL_32-NEXT: .Lcfi11:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2139,12 +2139,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-LABEL: test_pr28312:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi16:
+; KNL_32-NEXT: .Lcfi12:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi17:
+; KNL_32-NEXT: .Lcfi13:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi18:
+; KNL_32-NEXT: .Lcfi14:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
diff --git a/llvm/test/CodeGen/X86/pr27591.ll b/llvm/test/CodeGen/X86/pr27591.ll
index 3ff6c096d09..b71cb8c4b3a 100644
--- a/llvm/test/CodeGen/X86/pr27591.ll
+++ b/llvm/test/CodeGen/X86/pr27591.ll
@@ -9,12 +9,6 @@ define void @test1(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
-; CHECK-NEXT: movb %al, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kmovd %k0, %edi
-; CHECK-NEXT: movb %dil, %al
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: movzbl %al, %edi
; CHECK-NEXT: callq callee1
@@ -32,17 +26,9 @@ define void @test2(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
-; CHECK-NEXT: movb %al, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kmovd %k0, %edi
+; CHECK-NEXT: movzbl %al, %edi
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: movb %dil, %al
-; CHECK-NEXT: xorl %edi, %edi
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: movl $-1, %ecx
-; CHECK-NEXT: cmovnel %ecx, %edi
+; CHECK-NEXT: negl %edi
; CHECK-NEXT: callq callee2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pr28173.ll b/llvm/test/CodeGen/X86/pr28173.ll
index d9622b99bd9..3279982e464 100644
--- a/llvm/test/CodeGen/X86/pr28173.ll
+++ b/llvm/test/CodeGen/X86/pr28173.ll
@@ -8,9 +8,8 @@ target triple = "x86_64-unknown-linux-gnu"
define i64 @foo64(i1 zeroext %i) #0 {
; CHECK-LABEL: foo64:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; CHECK-NEXT: orq $-2, %rdi
-; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
br label %bb
@@ -26,8 +25,9 @@ end:
define i16 @foo16(i1 zeroext %i) #0 {
; CHECK-LABEL: foo16:
; CHECK: # BB#0:
-; CHECK-NEXT: orl $65534, %edi # imm = 0xFFFE
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
br label %bb
@@ -43,9 +43,9 @@ end:
define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
; CHECK-LABEL: foo16_1:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: orl $2, %edi
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $2, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
br label %bb
@@ -61,8 +61,8 @@ end:
define i32 @foo32(i1 zeroext %i) #0 {
; CHECK-LABEL: foo32:
; CHECK: # BB#0:
-; CHECK-NEXT: orl $-2, %edi
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $-2, %eax
; CHECK-NEXT: retq
br label %bb
diff --git a/llvm/test/CodeGen/X86/pr32241.ll b/llvm/test/CodeGen/X86/pr32241.ll
index d8ce230057e..e1f726f0c62 100644
--- a/llvm/test/CodeGen/X86/pr32241.ll
+++ b/llvm/test/CodeGen/X86/pr32241.ll
@@ -4,49 +4,57 @@
define i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: subl $20, %esp
+; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi0:
-; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: subl $24, %esp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_offset %esi, -8
+; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: movw $10959, {{[0-9]+}}(%esp) # imm = 0x2ACF
; CHECK-NEXT: movw $-15498, {{[0-9]+}}(%esp) # imm = 0xC376
; CHECK-NEXT: movw $19417, {{[0-9]+}}(%esp) # imm = 0x4BD9
-; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movw {{[0-9]+}}(%esp), %cx
-; CHECK-NEXT: kxnorw %k0, %k0, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: testw %cx, %cx
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpw $0, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: jmp .LBB0_1
-; CHECK-NEXT: .LBB0_1: # %lor.rhs
+; CHECK-NEXT: # BB#1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_2: # %lor.end
-; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
-; CHECK-NEXT: kmovw %k1, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload
+; CHECK-NEXT: movb $1, %cl
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %edx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; CHECK-NEXT: subl %edx, %esi
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %edx
+; CHECK-NEXT: xorl $-1, %edx
+; CHECK-NEXT: cmpl $0, %edx
+; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_4
-; CHECK-NEXT: jmp .LBB0_3
-; CHECK-NEXT: .LBB0_3: # %lor.rhs4
+; CHECK-NEXT: # BB#3: # %lor.rhs4
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_4
; CHECK-NEXT: .LBB0_4: # %lor.end5
-; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: movw %ax, %cx
-; CHECK-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %ecx
+; CHECK-NEXT: movw %cx, %dx
+; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl $20, %esp
+; CHECK-NEXT: addl $24, %esp
+; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
entry:
%aa = alloca i16, align 2
diff --git a/llvm/test/CodeGen/X86/pr32256.ll b/llvm/test/CodeGen/X86/pr32256.ll
index cb26c13e53e..e29b56236e2 100644
--- a/llvm/test/CodeGen/X86/pr32256.ll
+++ b/llvm/test/CodeGen/X86/pr32256.ll
@@ -7,39 +7,27 @@
define void @_Z1av() {
; CHECK-LABEL: _Z1av:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: subl $6, %esp
+; CHECK-NEXT: subl $2, %esp
; CHECK-NEXT: .Lcfi0:
-; CHECK-NEXT: .cfi_def_cfa_offset 10
+; CHECK-NEXT: .cfi_def_cfa_offset 6
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: movb c, %cl
-; CHECK-NEXT: # implicit-def: %EAX
-; CHECK-NEXT: movb %cl, %al
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: kmovq %k1, %k2
-; CHECK-NEXT: kxnorw %k0, %k0, %k3
-; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kxorw %k3, %k1, %k1
-; CHECK-NEXT: kmovd %k1, %eax
; CHECK-NEXT: movb %al, %cl
-; CHECK-NEXT: testb $1, %cl
-; CHECK-NEXT: kmovw %k2, {{[0-9]+}}(%esp) # 2-byte Spill
-; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: movb c, %dl
+; CHECK-NEXT: xorb $-1, %dl
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_1
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_1: # %land.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_2: # %land.end
-; CHECK-NEXT: kmovw (%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: movb %al, %cl
-; CHECK-NEXT: andb $1, %cl
-; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp)
-; CHECK-NEXT: addl $6, %esp
+; CHECK-NEXT: movb (%esp), %al # 1-byte Reload
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp)
+; CHECK-NEXT: addl $2, %esp
; CHECK-NEXT: retl
entry:
%b = alloca i8, align 1
diff --git a/llvm/test/CodeGen/X86/pr32284.ll b/llvm/test/CodeGen/X86/pr32284.ll
index 143e3af82eb..571dd677490 100644
--- a/llvm/test/CodeGen/X86/pr32284.ll
+++ b/llvm/test/CodeGen/X86/pr32284.ll
@@ -39,12 +39,6 @@ define void @foo() {
; X86-O0-NEXT: movzbl %al, %edx
; X86-O0-NEXT: subl %ecx, %edx
; X86-O0-NEXT: setle %al
-; X86-O0-NEXT: # implicit-def: %ECX
-; X86-O0-NEXT: movb %al, %cl
-; X86-O0-NEXT: andl $1, %ecx
-; X86-O0-NEXT: kmovd %ecx, %k0
-; X86-O0-NEXT: kmovd %k0, %ecx
-; X86-O0-NEXT: movb %cl, %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %ecx
; X86-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -77,12 +71,6 @@ define void @foo() {
; X64-O0-NEXT: movzbl %al, %edx
; X64-O0-NEXT: subl %ecx, %edx
; X64-O0-NEXT: setle %al
-; X64-O0-NEXT: # implicit-def: %ECX
-; X64-O0-NEXT: movb %al, %cl
-; X64-O0-NEXT: andl $1, %ecx
-; X64-O0-NEXT: kmovd %ecx, %k0
-; X64-O0-NEXT: kmovd %k0, %ecx
-; X64-O0-NEXT: movb %cl, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %ecx
; X64-O0-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/pr32451.ll b/llvm/test/CodeGen/X86/pr32451.ll
index d980b7ff284..e4643a863f9 100644
--- a/llvm/test/CodeGen/X86/pr32451.ll
+++ b/llvm/test/CodeGen/X86/pr32451.ll
@@ -25,12 +25,6 @@ define i8** @japi1_convert_690(i8**, i8***, i32) {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; CHECK-NEXT: movl 4(%ecx), %edx
; CHECK-NEXT: movb (%edx), %bl
-; CHECK-NEXT: # implicit-def: %EDX
-; CHECK-NEXT: movb %bl, %dl
-; CHECK-NEXT: andl $1, %edx
-; CHECK-NEXT: kmovw %edx, %k0
-; CHECK-NEXT: kmovw %k0, %edx
-; CHECK-NEXT: movb %dl, %bl
; CHECK-NEXT: andb $1, %bl
; CHECK-NEXT: movzbl %bl, %edx
; CHECK-NEXT: movl %edx, (%esp)
diff --git a/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll b/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll
index f711dc61574..4b2af6fce8d 100644
--- a/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll
+++ b/llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -1119,9 +1119,9 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
;
; AVX512-LABEL: add_ss_mask:
; AVX512: # BB#0:
-; AVX512-NEXT: andl $1, %edi
+; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
; AVX512-NEXT: vmovaps %xmm2, %xmm0
; AVX512-NEXT: retq
%1 = extractelement <4 x float> %a, i64 0
@@ -1174,9 +1174,9 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
;
; AVX512-LABEL: add_sd_mask:
; AVX512: # BB#0:
-; AVX512-NEXT: andl $1, %edi
+; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
-; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
%1 = extractelement <2 x double> %a, i64 0
diff --git a/llvm/test/CodeGen/X86/xmulo.ll b/llvm/test/CodeGen/X86/xmulo.ll
index aed305058f0..03f284d87a6 100644
--- a/llvm/test/CodeGen/X86/xmulo.ll
+++ b/llvm/test/CodeGen/X86/xmulo.ll
@@ -712,17 +712,11 @@ define i1 @bug27873(i64 %c1, i1 %c2) {
;
; KNL-LABEL: bug27873:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %esi
; KNL-NEXT: movl $160, %ecx
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rcx
-; KNL-NEXT: kmovw %esi, %k0
; KNL-NEXT: seto %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: orb %sil, %al
; KNL-NEXT: retq
%mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
%mul.overflow = extractvalue { i64, i1 } %mul, 1
diff --git a/llvm/test/CodeGen/X86/xor-select-i1-combine.ll b/llvm/test/CodeGen/X86/xor-select-i1-combine.ll
index 6507ddcc769..c9383282a0c 100644
--- a/llvm/test/CodeGen/X86/xor-select-i1-combine.ll
+++ b/llvm/test/CodeGen/X86/xor-select-i1-combine.ll
@@ -7,10 +7,10 @@
define i32 @main(i8 %small) {
; CHECK-LABEL: main:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movl $n, %eax
-; CHECK-NEXT: movl $m, %ecx
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovneq %rax, %rcx
+; CHECK-NEXT: movl $m, %eax
+; CHECK-NEXT: movl $n, %ecx
+; CHECK-NEXT: cmoveq %rax, %rcx
; CHECK-NEXT: movl (%rcx), %eax
; CHECK-NEXT: retq
entry: