Diffstat (limited to 'llvm/lib/Target/X86/X86ISelLowering.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 974
1 files changed, 460 insertions, 514 deletions
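The diff below is almost entirely mechanical: each touched call site stops passing the debug location (dl/DL) into the SelectionDAG constant factories (DAG.getConstant, DAG.getTargetConstant, DAG.getIntPtrConstant, DAG.getConstantFP), and a few helpers such as TranslateX86CC and getV4X86ShuffleImm8ForMask lose their SDLoc parameter accordingly. A minimal before/after sketch of the pattern, copied from the first hunk below with the surrounding lowering code elided:

    // Removed form: the debug location is threaded into the constant factory.
    RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
                                           MVT::i16));
    // Added form: the SDLoc argument is dropped.
    RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                           MVT::i16));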
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index cf3165e7513..079880ef4a1 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1892,7 +1892,7 @@ X86TargetLowering::LowerReturn(SDValue Chain, SmallVector<SDValue, 6> RetOps; RetOps.push_back(Chain); // Operand #0 = Chain (updated below) // Operand #1 = Bytes To Pop - RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl, + RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), MVT::i16)); // Copy the result values into the output registers. @@ -2095,7 +2095,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, if (CopyVT != VA.getValVT()) Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, // This truncation won't change the value. - DAG.getIntPtrConstant(1, dl)); + DAG.getIntPtrConstant(1)); InFlag = Chain.getValue(2); InVals.push_back(Val); @@ -2155,7 +2155,7 @@ static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl) { - SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); + SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), /*isVolatile*/false, /*AlwaysInline=*/true, @@ -2503,7 +2503,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, unsigned Offset = FuncInfo->getVarArgsGPOffset(); for (SDValue Val : LiveGPRs) { SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, - DAG.getIntPtrConstant(Offset, dl)); + DAG.getIntPtrConstant(Offset)); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo::getFixedStack( @@ -2519,9 +2519,9 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, SaveXMMOps.push_back(Chain); SaveXMMOps.push_back(ALVal); SaveXMMOps.push_back(DAG.getIntPtrConstant( - FuncInfo->getRegSaveFrameIndex(), dl)); + FuncInfo->getRegSaveFrameIndex())); SaveXMMOps.push_back(DAG.getIntPtrConstant( - FuncInfo->getVarArgsFPOffset(), dl)); + FuncInfo->getVarArgsFPOffset())); SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(), LiveXMMRegs.end()); MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, @@ -2621,7 +2621,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, int UnwindHelpFI = MFI->CreateStackObject(8, 8, /*isSS=*/false); SDValue StackSlot = DAG.getFrameIndex(UnwindHelpFI, MVT::i64); MMI.getWinEHFuncInfo(MF.getFunction()).UnwindHelpFrameIdx = UnwindHelpFI; - SDValue Neg2 = DAG.getConstant(-2, dl, MVT::i64); + SDValue Neg2 = DAG.getConstant(-2, MVT::i64); Chain = DAG.getStore(Chain, dl, Neg2, StackSlot, MachinePointerInfo::getFixedStack(UnwindHelpFI), /*isVolatile=*/true, @@ -2638,7 +2638,7 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const { unsigned LocMemOffset = VA.getLocMemOffset(); - SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); + SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); if (Flags.isByVal()) return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); @@ -2784,7 +2784,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (!IsSibcall) Chain = DAG.getCALLSEQ_START( - Chain, DAG.getIntPtrConstant(NumBytesToPush, dl, true), dl); + Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl); SDValue RetAddrFrIdx; // Load return address for 
tail calls. @@ -2916,8 +2916,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, && "SSE registers cannot be used when SSE is disabled"); RegsToPass.push_back(std::make_pair(unsigned(X86::AL), - DAG.getConstant(NumXMMRegs, dl, - MVT::i8))); + DAG.getConstant(NumXMMRegs, MVT::i8))); } if (isVarArg && IsMustTail) { @@ -2961,7 +2960,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (Flags.isByVal()) { // Copy relative to framepointer. - SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl); + SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); if (!StackPtr.getNode()) StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), @@ -3087,8 +3086,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (!IsSibcall && isTailCall) { Chain = DAG.getCALLSEQ_END(Chain, - DAG.getIntPtrConstant(NumBytesToPop, dl, true), - DAG.getIntPtrConstant(0, dl, true), InFlag, dl); + DAG.getIntPtrConstant(NumBytesToPop, true), + DAG.getIntPtrConstant(0, true), InFlag, dl); InFlag = Chain.getValue(1); } @@ -3096,7 +3095,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Ops.push_back(Callee); if (isTailCall) - Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32)); + Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); // Add argument registers to the end of the list so that they are known live // into the call. @@ -3145,8 +3144,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Returns a flag for retval copy to use. if (!IsSibcall) { Chain = DAG.getCALLSEQ_END(Chain, - DAG.getIntPtrConstant(NumBytesToPop, dl, true), - DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl, + DAG.getIntPtrConstant(NumBytesToPop, true), + DAG.getIntPtrConstant(NumBytesForCalleeToPop, true), InFlag, dl); InFlag = Chain.getValue(1); @@ -3533,8 +3532,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, case X86ISD::PSHUFLW: case X86ISD::VPERMILPI: case X86ISD::VPERMI: - return DAG.getNode(Opc, dl, VT, V1, - DAG.getConstant(TargetMask, dl, MVT::i8)); + return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); } } @@ -3644,13 +3642,13 @@ static bool isX86CCUnsigned(unsigned X86CC) { /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 /// specific condition code, returning the condition code and the LHS/RHS of the /// comparison to make. -static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP, +static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { if (!isFP) { if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { // X > -1 -> X == 0, jump !sign. 
- RHS = DAG.getConstant(0, DL, RHS.getValueType()); + RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_NS; } if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { @@ -3659,7 +3657,7 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP, } if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { // X < 1 -> X <= 0 - RHS = DAG.getConstant(0, DL, RHS.getValueType()); + RHS = DAG.getConstant(0, RHS.getValueType()); return X86::COND_LE; } } @@ -3962,26 +3960,26 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, SDValue Vec; if (VT.is128BitVector()) { // SSE if (Subtarget->hasSSE2()) { // SSE2 - SDValue Cst = DAG.getConstant(0, dl, MVT::i32); + SDValue Cst = DAG.getConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); } else { // SSE1 - SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32); + SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); } } else if (VT.is256BitVector()) { // AVX if (Subtarget->hasInt256()) { // AVX2 - SDValue Cst = DAG.getConstant(0, dl, MVT::i32); + SDValue Cst = DAG.getConstant(0, MVT::i32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops); } else { // 256-bit logic and arithmetic instructions in AVX are all // floating-point, no support for integer ops. Emit fp zeroed vectors. - SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32); + SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops); } } else if (VT.is512BitVector()) { // AVX-512 - SDValue Cst = DAG.getConstant(0, dl, MVT::i32); + SDValue Cst = DAG.getConstant(0, MVT::i32); SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops); @@ -3991,7 +3989,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, && "Unexpected vector type"); assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8) && "Unexpected vector type"); - SDValue Cst = DAG.getConstant(0, dl, MVT::i1); + SDValue Cst = DAG.getConstant(0, MVT::i1); SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst); return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); } else @@ -4029,7 +4027,7 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal, makeArrayRef(Vec->op_begin() + NormalizedIdxVal, ElemsPerChunk)); - SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal, dl); + SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx); } @@ -4073,7 +4071,7 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec, unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth) * ElemsPerChunk); - SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal, dl); + SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx); } @@ -4095,7 +4093,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, if (IdxVal == 0 && Result.getValueType().is256BitVector() && Result.getOpcode() != ISD::UNDEF) { EVT ResultVT = Result.getValueType(); - SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl); + SDValue ZeroIndex = DAG.getIntPtrConstant(0); SDValue Undef = DAG.getUNDEF(ResultVT); SDValue Vec256 = 
DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Undef, Vec, ZeroIndex); @@ -4107,7 +4105,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, unsigned ScalarSize = ScalarType.getSizeInBits(); assert((ScalarSize == 64 || ScalarSize == 32) && "Unknown float type"); unsigned MaskVal = (ScalarSize == 64) ? 0x03 : 0x0f; - SDValue Mask = DAG.getConstant(MaskVal, dl, MVT::i8); + SDValue Mask = DAG.getConstant(MaskVal, MVT::i8); return DAG.getNode(X86ISD::BLENDI, dl, ResultVT, Result, Vec256, Mask); } @@ -4123,7 +4121,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal, // will be created by InsertSubVector(). MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32; - SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8); + SDValue Mask = DAG.getConstant(0x0f, MVT::i8); Vec256 = DAG.getNode(ISD::BITCAST, dl, CastVT, Vec256); Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask); return DAG.getNode(ISD::BITCAST, dl, ResultVT, Vec256); @@ -4164,7 +4162,7 @@ static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, SDLoc dl) { assert(VT.isVector() && "Expected a vector type"); - SDValue Cst = DAG.getConstant(~0U, dl, MVT::i32); + SDValue Cst = DAG.getConstant(~0U, MVT::i32); SDValue Vec; if (VT.is256BitVector()) { if (HasInt256) { // AVX2 @@ -4494,7 +4492,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, } V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v16i8, V, Op.getOperand(i), - DAG.getIntPtrConstant(i, dl)); + DAG.getIntPtrConstant(i)); } } @@ -4522,7 +4520,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, if (ThisIsNonZero) { ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, - ThisElt, DAG.getConstant(8, dl, MVT::i8)); + ThisElt, DAG.getConstant(8, MVT::i8)); if (LastIsNonZero) ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); } else @@ -4530,7 +4528,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, if (ThisElt.getNode()) V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, - DAG.getIntPtrConstant(i/2, dl)); + DAG.getIntPtrConstant(i/2)); } } @@ -4562,7 +4560,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, } V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Op.getOperand(i), - DAG.getIntPtrConstant(i, dl)); + DAG.getIntPtrConstant(i)); } } @@ -4669,10 +4667,9 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG, unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask; assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!"); - SDLoc DL(Op); - SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2, - DAG.getIntPtrConstant(InsertPSMask, DL)); - return DAG.getNode(ISD::BITCAST, DL, VT, Result); + SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2, + DAG.getIntPtrConstant(InsertPSMask)); + return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result); } /// Return a vector logical shift node. 
@@ -4685,7 +4682,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType()); assert(NumBits % 8 == 0 && "Only support byte sized shifts"); - SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, ScalarShiftTy); + SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy); return DAG.getNode(ISD::BITCAST, dl, VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal)); } @@ -4742,11 +4739,9 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) { if ((Offset % RequiredAlign) & 3) return SDValue(); int64_t StartOffset = Offset & ~(RequiredAlign-1); - if (StartOffset) { - SDLoc DL(Ptr); - Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, - DAG.getConstant(StartOffset, DL, Ptr.getValueType())); - } + if (StartOffset) + Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(), + Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); int EltNo = (Offset - StartOffset) >> 2; unsigned NumElems = VT.getVectorNumElements(); @@ -5139,7 +5134,7 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) { for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { unsigned Idx = InsertIndices[i]; NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), - DAG.getIntPtrConstant(Idx, DL)); + DAG.getIntPtrConstant(Idx)); } return NV; @@ -5155,13 +5150,13 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); if (ISD::isBuildVectorAllZeros(Op.getNode())) { - SDValue Cst = DAG.getTargetConstant(0, dl, MVT::i1); + SDValue Cst = DAG.getTargetConstant(0, MVT::i1); SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst); return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); } if (ISD::isBuildVectorAllOnes(Op.getNode())) { - SDValue Cst = DAG.getTargetConstant(1, dl, MVT::i1); + SDValue Cst = DAG.getTargetConstant(1, MVT::i1); SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst); return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); } @@ -5191,15 +5186,15 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { if (AllContants) { SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, - DAG.getConstant(Immediate, dl, MVT::i16)); + DAG.getConstant(Immediate, MVT::i16)); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } if (NumNonConsts == 1 && NonConstIdx != 0) { SDValue DstVec; if (NumConsts) { - SDValue VecAsImm = DAG.getConstant(Immediate, dl, + SDValue VecAsImm = DAG.getConstant(Immediate, MVT::getIntegerVT(VT.getSizeInBits())); DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm); } @@ -5207,7 +5202,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { DstVec = DAG.getUNDEF(VT); return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, Op.getOperand(NonConstIdx), - DAG.getIntPtrConstant(NonConstIdx, dl)); + DAG.getIntPtrConstant(NonConstIdx)); } if (!IsSplat && (NonConstIdx != 0)) llvm_unreachable("Unsupported BUILD_VECTOR operation"); @@ -5215,12 +5210,12 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { SDValue Select; if (IsSplat) Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0), - DAG.getConstant(-1, dl, SelectVT), - DAG.getConstant(0, dl, SelectVT)); + DAG.getConstant(-1, SelectVT), + DAG.getConstant(0, SelectVT)); else Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0), - 
DAG.getConstant((Immediate | 1), dl, SelectVT), - DAG.getConstant(Immediate, dl, SelectVT)); + DAG.getConstant((Immediate | 1), SelectVT), + DAG.getConstant(Immediate, SelectVT)); return DAG.getNode(ISD::BITCAST, dl, VT, Select); } @@ -5738,7 +5733,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (VT.is512BitVector()) { SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, - Item, DAG.getIntPtrConstant(0, dl)); + Item, DAG.getIntPtrConstant(0)); } assert((VT.is128BitVector() || VT.is256BitVector()) && "Expected an SSE value type!"); @@ -5932,7 +5927,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { for (unsigned i = 1; i < NumElems; ++i) { if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, - Op.getOperand(i), DAG.getIntPtrConstant(i, dl)); + Op.getOperand(i), DAG.getIntPtrConstant(i)); } return Result; } @@ -6030,10 +6025,10 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op, if (IsZeroV1 && IsZeroV2) return getZeroVector(ResVT, Subtarget, DAG, dl); - SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl); + SDValue ZeroIdx = DAG.getIntPtrConstant(0); SDValue Undef = DAG.getUNDEF(ResVT); unsigned NumElems = ResVT.getVectorNumElements(); - SDValue ShiftBits = DAG.getConstant(NumElems/2, dl, MVT::i8); + SDValue ShiftBits = DAG.getConstant(NumElems/2, MVT::i8); V2 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V2, ZeroIdx); V2 = DAG.getNode(X86ISD::VSHLI, dl, ResVT, V2, ShiftBits); @@ -6201,7 +6196,7 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask, /// example. /// /// NB: We rely heavily on "undef" masks preserving the input lane. -static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL, +static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SelectionDAG &DAG) { assert(Mask.size() == 4 && "Only 4-lane shuffle masks"); assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!"); @@ -6214,7 +6209,7 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL, Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2; Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4; Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6; - return DAG.getConstant(Imm, DL, MVT::i8); + return DAG.getConstant(Imm, MVT::i8); } /// \brief Try to emit a blend instruction for a shuffle using bit math. 
@@ -6228,9 +6223,8 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1, assert(VT.isInteger() && "Only supports integer vector types!"); MVT EltVT = VT.getScalarType(); int NumEltBits = EltVT.getSizeInBits(); - SDValue Zero = DAG.getConstant(0, DL, EltVT); - SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL, - EltVT); + SDValue Zero = DAG.getConstant(0, EltVT); + SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT); SmallVector<SDValue, 16> MaskOps; for (int i = 0, Size = Mask.size(); i < Size; ++i) { if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size) @@ -6276,7 +6270,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1, case MVT::v4f64: case MVT::v8f32: return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2, - DAG.getConstant(BlendMask, DL, MVT::i8)); + DAG.getConstant(BlendMask, MVT::i8)); case MVT::v4i64: case MVT::v8i32: @@ -6300,7 +6294,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1, V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2); return DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2, - DAG.getConstant(BlendMask, DL, MVT::i8))); + DAG.getConstant(BlendMask, MVT::i8))); } // FALLTHROUGH case MVT::v8i16: { @@ -6317,7 +6311,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1, V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2); return DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2, - DAG.getConstant(BlendMask, DL, MVT::i8))); + DAG.getConstant(BlendMask, MVT::i8))); } case MVT::v16i16: { @@ -6331,7 +6325,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1, if (RepeatedMask[i] >= 16) BlendMask |= 1u << i; return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2, - DAG.getConstant(BlendMask, DL, MVT::i8)); + DAG.getConstant(BlendMask, MVT::i8)); } } // FALLTHROUGH @@ -6363,8 +6357,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1, for (int j = 0; j < Scale; ++j) VSELECTMask.push_back( Mask[i] < 0 ? DAG.getUNDEF(MVT::i8) - : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL, - MVT::i8)); + : DAG.getConstant(Mask[i] < Size ? 
-1 : 0, MVT::i8)); V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1); V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2); @@ -6558,8 +6551,7 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1, return DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo, - DAG.getConstant(Rotation * Scale, DL, - MVT::i8))); + DAG.getConstant(Rotation * Scale, MVT::i8))); } assert(VT.getSizeInBits() == 128 && @@ -6576,9 +6568,9 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1, Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi); SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo, - DAG.getConstant(LoByteShift, DL, MVT::i8)); + DAG.getConstant(LoByteShift, MVT::i8)); SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi, - DAG.getConstant(HiByteShift, DL, MVT::i8)); + DAG.getConstant(HiByteShift, MVT::i8)); return DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift)); } @@ -6637,9 +6629,8 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1, MVT EltVT = VT.getScalarType(); int NumEltBits = EltVT.getSizeInBits(); MVT IntEltVT = MVT::getIntegerVT(NumEltBits); - SDValue Zero = DAG.getConstant(0, DL, IntEltVT); - SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL, - IntEltVT); + SDValue Zero = DAG.getConstant(0, IntEltVT); + SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT); if (EltVT.isFloatingPoint()) { Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero); AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes); @@ -6736,8 +6727,7 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1, "Illegal integer vector type"); V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V); - V = DAG.getNode(OpCode, DL, ShiftVT, V, - DAG.getConstant(ShiftAmt, DL, MVT::i8)); + V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8)); return DAG.getNode(ISD::BITCAST, DL, VT, V); }; @@ -6791,19 +6781,19 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend( ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV), - getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG))); + getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG))); } if (AnyExt && EltBits == 16 && Scale > 2) { int PSHUFDMask[4] = {0, -1, 0, -1}; InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV), - getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)); int PSHUFHWMask[4] = {1, -1, -1, -1}; return DAG.getNode( ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV), - getV4X86ShuffleImm8ForMask(PSHUFHWMask, DL, DAG))); + getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG))); } // If this would require more than 2 unpack instructions to expand, use @@ -6814,7 +6804,7 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend( SDValue PSHUFBMask[16]; for (int i = 0; i < 16; ++i) PSHUFBMask[i] = - DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, DL, MVT::i8); + DAG.getConstant((i % Scale == 0) ? 
i / Scale : 0x80, MVT::i8); InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV); return DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV, @@ -7072,7 +7062,7 @@ static SDValue lowerVectorShuffleAsElementInsertion( V2 = DAG.getNode( X86ISD::VSHLDQ, DL, MVT::v2i64, V2, DAG.getConstant( - V2Index * EltVT.getSizeInBits()/8, DL, + V2Index * EltVT.getSizeInBits()/8, DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64))); V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2); } @@ -7232,7 +7222,7 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2, // Insert the V2 element into the desired position. SDLoc DL(Op); return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2, - DAG.getConstant(InsertPSMask, DL, MVT::i8)); + DAG.getConstant(InsertPSMask, MVT::i8)); } /// \brief Try to lower a shuffle as a permute of the inputs followed by an @@ -7382,11 +7372,11 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, // If we have AVX, we can use VPERMILPS which will allow folding a load // into the shuffle. return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1, - DAG.getConstant(SHUFPDMask, DL, MVT::i8)); + DAG.getConstant(SHUFPDMask, MVT::i8)); } - return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V1, - DAG.getConstant(SHUFPDMask, DL, MVT::i8)); + return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1, + DAG.getConstant(SHUFPDMask, MVT::i8)); } assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!"); assert(Mask[1] >= 2 && "Non-canonicalized blend!"); @@ -7429,8 +7419,8 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2); unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1); - return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2, - DAG.getConstant(SHUFPDMask, DL, MVT::i8)); + return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2, + DAG.getConstant(SHUFPDMask, MVT::i8)); } /// \brief Handle lowering of 2-lane 64-bit integer shuffles. @@ -7465,8 +7455,8 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1}; return DAG.getNode( ISD::BITCAST, DL, MVT::v2i64, - DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1, - getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG))); + DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1, + getV4X86ShuffleImm8ForMask(WidenedMask, DAG))); } assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!"); assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!"); @@ -7600,7 +7590,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT, int V1Index = V2AdjIndex; int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0}; V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1, - getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(BlendMask, DAG)); // Now proceed to reconstruct the final blend as we have the necessary // high or low half formed. @@ -7639,7 +7629,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT, (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4, (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4}; V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2, - getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(BlendMask, DAG)); // Now we do a normal shuffle of V1 by giving V1 as both operands to // a blend. 
@@ -7651,7 +7641,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT, } } return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV, - getV4X86ShuffleImm8ForMask(NewMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(NewMask, DAG)); } /// \brief Lower 4-lane 32-bit floating point shuffles. @@ -7691,13 +7681,13 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, // If we have AVX, we can use VPERMILPS which will allow folding a load // into the shuffle. return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1, - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); } // Otherwise, use a straight shuffle of a single input vector. We pass the // input vector to both operands to simulate this with a SHUFPS. return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1, - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); } // There are special ways we can lower some single-element blends. However, we @@ -7783,7 +7773,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, Mask = UnpackHiMask; return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1, - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); } // Try to use shift instructions. @@ -8000,7 +7990,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle( std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]); V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL, MVT::v8i16, V, - getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG)); for (int &M : Mask) if (M != -1 && M == FixIdx) @@ -8027,8 +8017,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle( V = DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getNode(ISD::BITCAST, DL, PSHUFDVT, V), - getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, - DAG))); + getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG))); // Adjust the mask to match the new locations of A and B. for (int &M : Mask) @@ -8264,16 +8253,15 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle( // target half. if (!isNoopShuffleMask(PSHUFLMask)) V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, - getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG)); if (!isNoopShuffleMask(PSHUFHMask)) V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, - getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG)); if (!isNoopShuffleMask(PSHUFDMask)) V = DAG.getNode(ISD::BITCAST, DL, VT, DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getNode(ISD::BITCAST, DL, PSHUFDVT, V), - getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, - DAG))); + getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG))); // At this point, each half should contain all its inputs, and we can then // just shuffle them into their final position. @@ -8287,7 +8275,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle( // Do a half shuffle for the low mask. if (!isNoopShuffleMask(LoMask)) V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, - getV4X86ShuffleImm8ForMask(LoMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(LoMask, DAG)); // Do a half shuffle with the high mask after shifting its values down. 
for (int &M : HiMask) @@ -8295,7 +8283,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle( M -= 4; if (!isNoopShuffleMask(HiMask)) V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, - getV4X86ShuffleImm8ForMask(HiMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(HiMask, DAG)); return V; } @@ -8325,8 +8313,8 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1, : (Mask[i / Scale] - Size) * Scale + i % Scale; if (Zeroable[i / Scale]) V1Idx = V2Idx = ZeroMask; - V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8); - V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8); + V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8); + V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8); V1InUse |= (ZeroMask != V1Idx); V2InUse |= (ZeroMask != V2Idx); } @@ -8769,7 +8757,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2, MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 }; SDValue ByteClearMask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, - DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1])); + DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1])); V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask); if (!IsSingleInput) V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask); @@ -8815,7 +8803,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2, // Use a mask to drop the high bytes. VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V); VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf, - DAG.getConstant(0x00FF, DL, MVT::v8i16)); + DAG.getConstant(0x00FF, MVT::v8i16)); // This will be a single vector shuffle instead of a blend so nuke VHiHalf. VHiHalf = DAG.getUNDEF(MVT::v8i16); @@ -8961,9 +8949,9 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1, auto *BV = dyn_cast<BuildVectorSDNode>(V); if (!BV) { LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V, - DAG.getIntPtrConstant(OrigSplitNumElements, DL)); + DAG.getIntPtrConstant(OrigSplitNumElements)); } else { SmallVector<SDValue, 16> LoOps, HiOps; @@ -9152,7 +9140,7 @@ static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT, // allow folding it into a memory operand. unsigned PERMMask = 3 | 2 << 4; SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT), - V1, DAG.getConstant(PERMMask, DL, MVT::i8)); + V1, DAG.getConstant(PERMMask, MVT::i8)); return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask); } @@ -9189,10 +9177,9 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1, MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), VT.getVectorNumElements() / 2); SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, - OnlyUsesV1 ? V1 : V2, - DAG.getIntPtrConstant(0, DL)); + OnlyUsesV1 ? 
V1 : V2, DAG.getIntPtrConstant(0)); return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV); } } @@ -9241,7 +9228,7 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1, } return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2, - DAG.getConstant(PermMask, DL, MVT::i8)); + DAG.getConstant(PermMask, MVT::i8)); } /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then @@ -9377,13 +9364,13 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) | ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3); return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1, - DAG.getConstant(VPERMILPMask, DL, MVT::i8)); + DAG.getConstant(VPERMILPMask, MVT::i8)); } // With AVX2 we have direct support for this permutation. if (Subtarget->hasAVX2()) return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1, - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); // Otherwise, fall back. return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask, @@ -9413,7 +9400,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) | ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3); return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2, - DAG.getConstant(SHUFPDMask, DL, MVT::i8)); + DAG.getConstant(SHUFPDMask, MVT::i8)); } if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) && (Mask[1] == -1 || Mask[1] < 2) && @@ -9422,7 +9409,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) | ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3); return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1, - DAG.getConstant(SHUFPDMask, DL, MVT::i8)); + DAG.getConstant(SHUFPDMask, MVT::i8)); } // Try to simplify this by merging 128-bit lanes to enable a lane-based @@ -9489,7 +9476,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, ISD::BITCAST, DL, MVT::v4i64, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1), - getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG))); + getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG))); } } @@ -9497,7 +9484,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2, // lanes. if (isSingleInputShuffleMask(Mask)) return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1, - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); // Try to use shift instructions. if (SDValue Shift = @@ -9567,7 +9554,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, if (isSingleInputShuffleMask(Mask)) return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1, - getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(RepeatedMask, DAG)); // Use dedicated unpack instructions for masks that match their pattern. if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13})) @@ -9594,7 +9581,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, SDValue VPermMask[8]; for (int i = 0; i < 8; ++i) VPermMask[i] = Mask[i] < 0 ? 
DAG.getUNDEF(MVT::i32) - : DAG.getConstant(Mask[i], DL, MVT::i32); + : DAG.getConstant(Mask[i], MVT::i32); if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) return DAG.getNode( X86ISD::VPERMILPV, DL, MVT::v8f32, V1, @@ -9667,7 +9654,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!"); if (isSingleInputShuffleMask(Mask)) return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1, - getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); + getV4X86ShuffleImm8ForMask(RepeatedMask, DAG)); // Use dedicated unpack instructions for masks that match their pattern. if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13})) @@ -9695,7 +9682,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2, SDValue VPermMask[8]; for (int i = 0; i < 8; ++i) VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32) - : DAG.getConstant(Mask[i], DL, MVT::i32); + : DAG.getConstant(Mask[i], MVT::i32); return DAG.getNode( X86ISD::VPERMV, DL, MVT::v8i32, DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1); @@ -9792,8 +9779,8 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2, int M = i < 8 ? Mask[i] : Mask[i] - 8; assert(M >= 0 && M < 8 && "Invalid single-input mask!"); - PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8); - PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8); + PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8); + PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8); } return DAG.getNode( ISD::BITCAST, DL, MVT::v16i16, @@ -9884,8 +9871,7 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2, PSHUFBMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i8) - : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL, - MVT::i8); + : DAG.getConstant(Mask[i] < 16 ? 
Mask[i] : Mask[i] - 16, MVT::i8); return DAG.getNode( X86ISD::PSHUFB, DL, MVT::v32i8, V1, @@ -10483,11 +10469,11 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const rc = getRegClassFor(MVT::v16i1); unsigned MaxSift = rc->getSize()*8 - 1; Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec, - DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8)); + DAG.getConstant(MaxSift - IdxVal, MVT::i8)); Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec, - DAG.getConstant(MaxSift, dl, MVT::i8)); + DAG.getConstant(MaxSift, MVT::i8)); return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } SDValue @@ -10514,10 +10500,10 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT); SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT, getZeroVector(MaskVT, Subtarget, DAG, dl), - Idx, DAG.getConstant(0, dl, getPointerTy())); + Idx, DAG.getConstant(0, getPointerTy())); SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), - Perm, DAG.getConstant(0, dl, getPointerTy())); + Perm, DAG.getConstant(0, getPointerTy())); } return SDValue(); } @@ -10537,7 +10523,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // IdxVal -= NumElems/2; IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk; return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, - DAG.getConstant(IdxVal, dl, MVT::i32)); + DAG.getConstant(IdxVal, MVT::i32)); } assert(VecVT.is128BitVector() && "Unexpected vector length"); @@ -10579,7 +10565,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), DAG.getUNDEF(VVT), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } if (VT.getSizeInBits() == 64) { @@ -10598,7 +10584,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), DAG.getUNDEF(VVT), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } return SDValue(); @@ -10629,13 +10615,13 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const { SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt); if (Vec.getOpcode() == ISD::UNDEF) return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec, - DAG.getConstant(IdxVal, dl, MVT::i8)); + DAG.getConstant(IdxVal, MVT::i8)); const TargetRegisterClass* rc = getRegClassFor(VecVT); unsigned MaxSift = rc->getSize()*8 - 1; EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec, - DAG.getConstant(MaxSift, dl, MVT::i8)); + DAG.getConstant(MaxSift, MVT::i8)); EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec, - DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8)); + DAG.getConstant(MaxSift - IdxVal, MVT::i8)); return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec); } @@ -10668,7 +10654,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) || (Subtarget->hasAVX2() && EltVT == MVT::i32)) { SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1); - N2 = DAG.getIntPtrConstant(1, dl); + N2 = DAG.getIntPtrConstant(1); return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2); } } @@ -10681,7 +10667,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, unsigned IdxIn128 = IdxVal - (IdxVal / 
NumEltsIn128) * NumEltsIn128; V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, - DAG.getConstant(IdxIn128, dl, MVT::i32)); + DAG.getConstant(IdxIn128, MVT::i32)); // Insert the changed part back into the bigger vector return Insert128BitVector(N0, V, IdxVal, DAG, dl); @@ -10703,7 +10689,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, if (N1.getValueType() != MVT::i32) N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); if (N2.getValueType() != MVT::i32) - N2 = DAG.getIntPtrConstant(IdxVal, dl); + N2 = DAG.getIntPtrConstant(IdxVal); return DAG.getNode(Opc, dl, VT, N0, N1, N2); } @@ -10727,11 +10713,11 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, // But if optimizing for size and there's a load folding opportunity, // generate insertps because blendps does not have a 32-bit memory // operand form. - N2 = DAG.getIntPtrConstant(1, dl); + N2 = DAG.getIntPtrConstant(1); N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2); } - N2 = DAG.getIntPtrConstant(IdxVal << 4, dl); + N2 = DAG.getIntPtrConstant(IdxVal << 4); // Create this as a scalar to vector.. N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); @@ -10752,7 +10738,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, if (N1.getValueType() != MVT::i32) N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); if (N2.getValueType() != MVT::i32) - N2 = DAG.getIntPtrConstant(IdxVal, dl); + N2 = DAG.getIntPtrConstant(IdxVal); return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); } return SDValue(); @@ -10861,10 +10847,10 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, if (OpVT.getVectorElementType() == MVT::i1) { if (IdxVal == 0 && Vec.getOpcode() == ISD::UNDEF) // the operation is legal return Op; - SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl); + SDValue ZeroIdx = DAG.getIntPtrConstant(0); SDValue Undef = DAG.getUNDEF(OpVT); unsigned NumElems = OpVT.getVectorNumElements(); - SDValue ShiftBits = DAG.getConstant(NumElems/2, dl, MVT::i8); + SDValue ShiftBits = DAG.getConstant(NumElems/2, MVT::i8); if (IdxVal == OpVT.getVectorNumElements() / 2) { // Zero upper bits of the Vec @@ -11079,7 +11065,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, // addition for it. if (Offset != 0) Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, - DAG.getConstant(Offset, dl, getPointerTy())); + DAG.getConstant(Offset, getPointerTy())); return Result; } @@ -11194,7 +11180,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, is64Bit ? 257 : 256)); SDValue ThreadPointer = - DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl), + DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0), MachinePointerInfo(Ptr), false, false, false, 0); unsigned char OperandFlags = 0; @@ -11336,9 +11322,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { SDValue TlsArray = Subtarget->is64Bit() - ? DAG.getIntPtrConstant(0x58, dl) + ? DAG.getIntPtrConstant(0x58) : (Subtarget->isTargetWindowsGNU() - ? DAG.getIntPtrConstant(0x2C, dl) + ? 
DAG.getIntPtrConstant(0x2C) : DAG.getExternalSymbol("_tls_array", getPointerTy())); SDValue ThreadPointer = @@ -11355,7 +11341,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(), false, false, false, 0); - SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), dl, + SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), getPointerTy()); IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); @@ -11392,10 +11378,10 @@ static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) { // generic ISD nodes haven't. Insert an AND to be safe, it's optimized away // during isel. SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, - DAG.getConstant(VTBits - 1, dl, MVT::i8)); + DAG.getConstant(VTBits - 1, MVT::i8)); SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, - DAG.getConstant(VTBits - 1, dl, MVT::i8)) - : DAG.getConstant(0, dl, VT); + DAG.getConstant(VTBits - 1, MVT::i8)) + : DAG.getConstant(0, VT); SDValue Tmp2, Tmp3; if (Op.getOpcode() == ISD::SHL_PARTS) { @@ -11410,12 +11396,12 @@ static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) { // rely on the results of shld/shrd. Insert a test and select the appropriate // values for large shift amounts. SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, - DAG.getConstant(VTBits, dl, MVT::i8)); + DAG.getConstant(VTBits, MVT::i8)); SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, - AndNode, DAG.getConstant(0, dl, MVT::i8)); + AndNode, DAG.getConstant(0, MVT::i8)); SDValue Hi, Lo; - SDValue CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8); + SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; @@ -11594,7 +11580,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, } return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. @@ -11602,7 +11588,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); // FP constant to bias correct the final result. - SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, + SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), MVT::f64); // Load the 32-bit value into an XMM register. @@ -11614,7 +11600,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); // Or the load with the bias. SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, @@ -11626,7 +11612,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, MVT::v2f64, Bias))); Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); // Subtract the bias. 
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); @@ -11636,7 +11622,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, if (DestVT.bitsLT(MVT::f64)) return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); if (DestVT.bitsGT(MVT::f64)) return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); @@ -11681,20 +11667,20 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG, // -- v >> 16 // Create the splat vector for 0x4b000000. - SDValue CstLow = DAG.getConstant(0x4b000000, DL, MVT::i32); + SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32); SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow, CstLow, CstLow, CstLow, CstLow}; SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, makeArrayRef(&CstLowArray[0], NumElts)); // Create the splat vector for 0x53000000. - SDValue CstHigh = DAG.getConstant(0x53000000, DL, MVT::i32); + SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32); SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh, CstHigh, CstHigh, CstHigh, CstHigh}; SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, makeArrayRef(&CstHighArray[0], NumElts)); // Create the right shift. - SDValue CstShift = DAG.getConstant(16, DL, MVT::i32); + SDValue CstShift = DAG.getConstant(16, MVT::i32); SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift, CstShift, CstShift, CstShift, CstShift}; SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, @@ -11711,7 +11697,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG, // Low will be bitcasted right away, so do not bother bitcasting back to its // original type. Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast, - VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32)); + VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32)); // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16), // (uint4) 0x53000000, 0xaa); SDValue VecCstHighBitcast = @@ -11721,9 +11707,9 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG, // High will be bitcasted right away, so do not bother bitcasting back to // its original type. High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast, - VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32)); + VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32)); } else { - SDValue CstMask = DAG.getConstant(0xffff, DL, MVT::i32); + SDValue CstMask = DAG.getConstant(0xffff, MVT::i32); SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask, CstMask, CstMask, CstMask); // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000; @@ -11736,7 +11722,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG, // Create the vector constant for -(0x1.0p39f + 0x1.0p23f). SDValue CstFAdd = DAG.getConstantFP( - APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, MVT::f32); + APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32); SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd, CstFAdd, CstFAdd, CstFAdd, CstFAdd}; SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT, @@ -11801,13 +11787,13 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, // Make a 64-bit buffer, and use it to build an FILD. 
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); if (SrcVT == MVT::i32) { - SDValue WordOff = DAG.getConstant(4, dl, getPointerTy()); + SDValue WordOff = DAG.getConstant(4, getPointerTy()); SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackSlot, WordOff); SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), StackSlot, MachinePointerInfo(), false, false, 0); - SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32), + SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), OffsetSlot, MachinePointerInfo(), false, false, 0); SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); @@ -11839,8 +11825,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, // Check whether the sign bit is set. SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(*DAG.getContext(), MVT::i64), - Op.getOperand(0), - DAG.getConstant(0, dl, MVT::i64), ISD::SETLT); + Op.getOperand(0), DAG.getConstant(0, MVT::i64), + ISD::SETLT); // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. SDValue FudgePtr = DAG.getConstantPool( @@ -11848,8 +11834,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, getPointerTy()); // Get a pointer to FF if the sign bit was set, or to 0 otherwise. - SDValue Zero = DAG.getIntPtrConstant(0, dl); - SDValue Four = DAG.getIntPtrConstant(4, dl); + SDValue Zero = DAG.getIntPtrConstant(0); + SDValue Four = DAG.getIntPtrConstant(4); SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, Zero, Four); FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); @@ -11861,8 +11847,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, MVT::f32, false, false, false, 4); // Extend everything to 80 bits to force it to be done on x87. 
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); - return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, - DAG.getIntPtrConstant(0, dl)); + return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); } std::pair<SDValue,SDValue> @@ -12018,7 +12003,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op, const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // Now we have only mask extension assert(InVT.getVectorElementType() == MVT::i1); - SDValue Cst = DAG.getTargetConstant(1, DL, ExtVT.getScalarType()); + SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType()); const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue(); SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy()); unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); @@ -12111,7 +12096,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { InVT = ExtVT; } - SDValue Cst = DAG.getTargetConstant(1, DL, InVT.getVectorElementType()); + SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType()); const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue(); SDValue CP = DAG.getConstantPool(C, getPointerTy()); unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); @@ -12131,13 +12116,13 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32), ShufMask); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); } SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, - DAG.getIntPtrConstant(2, DL)); + DAG.getIntPtrConstant(2)); OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo); OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi); static const int ShufMask[] = {0, 2, 4, 6}; @@ -12151,16 +12136,16 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { SmallVector<SDValue,32> pshufbMask; for (unsigned i = 0; i < 2; ++i) { - pshufbMask.push_back(DAG.getConstant(0x0, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x1, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x4, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x5, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x8, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0x9, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0xc, DL, MVT::i8)); - pshufbMask.push_back(DAG.getConstant(0xd, DL, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); for (unsigned j = 0; j < 8; ++j) - pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8)); + pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); } SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask); In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV); @@ -12170,15 +12155,15 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64), &ShufMask[0]); In 
= DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); return DAG.getNode(ISD::BITCAST, DL, VT, In); } SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In, - DAG.getIntPtrConstant(4, DL)); + DAG.getIntPtrConstant(4)); OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo); OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi); @@ -12217,7 +12202,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { DAG.getNode(ISD::BITCAST, DL, NVT, In), DAG.getUNDEF(NVT), &MaskVec[0]); return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, - DAG.getIntPtrConstant(0, DL)); + DAG.getIntPtrConstant(0)); } SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, @@ -12354,7 +12339,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) { } // And if it is bigger, shrink it first. if (SrcVT.bitsGT(VT)) { - Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1, dl)); + Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); SrcVT = VT; } @@ -12413,8 +12398,8 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, - DAG.getConstant(1, dl, VT)); - return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, dl, VT)); + DAG.getConstant(1, VT)); + return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); } // Check whether an OR'd tree is PTEST-able. @@ -12535,7 +12520,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl, if (Op.getValueType() == MVT::i1) { SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op); return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp, - DAG.getConstant(0, dl, MVT::i8)); + DAG.getConstant(0, MVT::i8)); } // CF and OF aren't always set the way we want. Determine which // of these we need. @@ -12579,7 +12564,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl, // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op, // DAG.getConstant(0, MVT::i1)); return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, - DAG.getConstant(0, dl, Op.getValueType())); + DAG.getConstant(0, Op.getValueType())); } unsigned Opcode = 0; unsigned NumOperands = 0; @@ -12667,7 +12652,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl, if (!Mask.isSignedIntN(32)) // Avoid large immediates. break; SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0), - DAG.getConstant(Mask, dl, VT)); + DAG.getConstant(Mask, VT)); DAG.ReplaceAllUsesWith(Op, New); Op = New; } @@ -12753,7 +12738,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl, if (Opcode == 0) // Emit a CMP with 0, which is the TEST pattern. 
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, - DAG.getConstant(0, dl, Op.getValueType())); + DAG.getConstant(0, Op.getValueType())); SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands); @@ -12818,7 +12803,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, - DAG.getConstant(8, dl, MVT::i8)); + DAG.getConstant(8, MVT::i8)); SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); } @@ -12941,7 +12926,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, // Use BT if the immediate can't be encoded in a TEST instruction. if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { LHS = AndLHS; - RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType()); + RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); } } @@ -12963,7 +12948,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(Cond, dl, MVT::i8), BT); + DAG.getConstant(Cond, MVT::i8), BT); } return SDValue(); @@ -13055,16 +13040,16 @@ static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) { "Unexpected type for boolean compare operation"); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0, - DAG.getConstant(-1, dl, VT)); + DAG.getConstant(-1, VT)); SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1, - DAG.getConstant(-1, dl, VT)); + DAG.getConstant(-1, VT)); switch (SetCCOpcode) { default: llvm_unreachable("Unexpected SETCC condition"); case ISD::SETNE: // (x != y) -> ~(x ^ y) return DAG.getNode(ISD::XOR, dl, VT, DAG.getNode(ISD::XOR, dl, VT, Op0, Op1), - DAG.getConstant(-1, dl, VT)); + DAG.getConstant(-1, VT)); case ISD::SETEQ: // (x == y) -> (x ^ y) return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1); @@ -13124,7 +13109,7 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG, return DAG.getNode(Opc, dl, VT, Op0, Op1); Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM; return DAG.getNode(Opc, dl, VT, Op0, Op1, - DAG.getConstant(SSECC, dl, MVT::i8)); + DAG.getConstant(SSECC, MVT::i8)); } /// \brief Try to turn a VSETULT into a VSETULE by modifying its second @@ -13151,7 +13136,7 @@ static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG) if (Val == 0) return SDValue(); - ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT)); + ULTOp1.push_back(DAG.getConstant(Val - 1, EVT)); } return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1); @@ -13191,14 +13176,14 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, } SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1, - DAG.getConstant(CC0, dl, MVT::i8)); + DAG.getConstant(CC0, MVT::i8)); SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1, - DAG.getConstant(CC1, dl, MVT::i8)); + DAG.getConstant(CC1, MVT::i8)); return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); } // Handle all other FP comparisons here. 
return DAG.getNode(Opc, dl, VT, Op0, Op1, - DAG.getConstant(SSECC, dl, MVT::i8)); + DAG.getConstant(SSECC, MVT::i8)); } // Break 256-bit integer vector compare into smaller ones. @@ -13319,10 +13304,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, // compare is always unsigned. SDValue SB; if (FlipSigns) { - SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32); + SB = DAG.getConstant(0x80000000U, MVT::v4i32); } else { - SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32); - SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32); + SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32); + SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32); SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Sign, Zero, Sign, Zero); } @@ -13377,8 +13362,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, // bits of the inputs before performing those operations. if (FlipSigns) { EVT EltVT = VT.getVectorElementType(); - SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl, - VT); + SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT); Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); } @@ -13446,7 +13430,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { CCode = X86::GetOppositeBranchCondition(CCode); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(CCode, dl, MVT::i8), + DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); if (VT == MVT::i1) return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC); @@ -13458,18 +13442,18 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { (CC == ISD::SETEQ || CC == ISD::SETNE)) { ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true); - return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC); + return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC); } bool isFP = Op1.getSimpleValueType().isFloatingPoint(); - unsigned X86CC = TranslateX86CC(CC, dl, isFP, Op0, Op1, DAG); + unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); if (X86CC == X86::COND_INVALID) return SDValue(); SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG); EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS); + DAG.getConstant(X86CC, MVT::i8), EFLAGS); if (VT == MVT::i1) return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC); return SetCC; @@ -13534,12 +13518,12 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { if (SSECC != 8) { if (Subtarget->hasAVX512()) { SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1, - DAG.getConstant(SSECC, DL, MVT::i8)); + DAG.getConstant(SSECC, MVT::i8)); return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2); } SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1, - DAG.getConstant(SSECC, DL, MVT::i8)); + DAG.getConstant(SSECC, MVT::i8)); // If we have AVX, we can use a variable vector select (VBLENDV) instead // of 3 logic instructions for size savings and potentially speed. 
@@ -13571,7 +13555,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue VSel = DAG.getNode(ISD::VSELECT, DL, VecVT, VCmp, VOp1, VOp2); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, - VSel, DAG.getIntPtrConstant(0, DL)); + VSel, DAG.getIntPtrConstant(0)); } SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2); SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1); @@ -13609,22 +13593,21 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, - DAG.getConstant(0, DL, - CmpOp0.getValueType()), + DAG.getConstant(0, CmpOp0.getValueType()), CmpOp0); SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), - DAG.getConstant(X86::COND_B, DL, MVT::i8), + DAG.getConstant(X86::COND_B, MVT::i8), SDValue(Neg.getNode(), 1)); return Res; } Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, - CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType())); + CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); Cmp = ConvertCmpIfNecessary(Cmp, DAG); SDValue Res = // Res = 0 or -1. DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), - DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp); + DAG.getConstant(X86::COND_B, MVT::i8), Cmp); if (isAllOnes(Op1) != (CondCode == X86::COND_E)) Res = DAG.getNOT(DL, Res, Res.getValueType()); @@ -13696,7 +13679,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { else Cond = X86Op.getValue(1); - CC = DAG.getConstant(X86Cond, DL, MVT::i8); + CC = DAG.getConstant(X86Cond, MVT::i8); addTest = false; } @@ -13718,7 +13701,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { } if (addTest) { - CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8); + CC = DAG.getConstant(X86::COND_NE, MVT::i8); Cond = EmitTest(Cond, X86::COND_NE, DL, DAG); } @@ -13733,8 +13716,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), - DAG.getConstant(X86::COND_B, DL, MVT::i8), - Cond); + DAG.getConstant(X86::COND_B, MVT::i8), Cond); if (isAllOnes(Op1) != (CondCode == X86::COND_B)) return DAG.getNOT(DL, Res, Res.getValueType()); return Res; @@ -13994,7 +13976,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget, SmallVector<SDValue, 8> Chains; SDValue Ptr = Ld->getBasePtr(); SDValue Increment = - DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl, TLI.getPointerTy()); + DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy()); SDValue Res = DAG.getUNDEF(LoadUnitVecVT); for (unsigned i = 0; i < NumLoads; ++i) { @@ -14010,7 +13992,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget, Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); else Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, - ScalarLoad, DAG.getIntPtrConstant(i, dl)); + ScalarLoad, DAG.getIntPtrConstant(i)); Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); } @@ -14050,8 +14032,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget, unsigned Amt = RegVT.getVectorElementType().getSizeInBits() - MemVT.getVectorElementType().getSizeInBits(); Shuff = - DAG.getNode(ISD::SRA, dl, RegVT, 
Shuff, - DAG.getConstant(Amt, dl, RegVT)); + DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT)); DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF); return Shuff; @@ -14218,7 +14199,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { else Cond = X86Op.getValue(1); - CC = DAG.getConstant(X86Cond, dl, MVT::i8); + CC = DAG.getConstant(X86Cond, MVT::i8); addTest = false; } else { unsigned CondOpc; @@ -14249,7 +14230,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { X86::CondCode CCode = (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); CCode = X86::GetOppositeBranchCondition(CCode); - CC = DAG.getConstant(CCode, dl, MVT::i8); + CC = DAG.getConstant(CCode, MVT::i8); SDNode *User = *Op.getNode()->use_begin(); // Look for an unconditional branch following this conditional branch. // We need this because we need to reverse the successors in order @@ -14267,7 +14248,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { X86::CondCode CCode = (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); CCode = X86::GetOppositeBranchCondition(CCode); - CC = DAG.getConstant(CCode, dl, MVT::i8); + CC = DAG.getConstant(CCode, MVT::i8); Cond = Cmp; addTest = false; } @@ -14280,7 +14261,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { X86::CondCode CCode = (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); CCode = X86::GetOppositeBranchCondition(CCode); - CC = DAG.getConstant(CCode, dl, MVT::i8); + CC = DAG.getConstant(CCode, MVT::i8); Cond = Cond.getOperand(0).getOperand(1); addTest = false; } else if (Cond.getOpcode() == ISD::SETCC && @@ -14306,10 +14287,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond.getOperand(0), Cond.getOperand(1)); Cmp = ConvertCmpIfNecessary(Cmp, DAG); - CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8); + CC = DAG.getConstant(X86::COND_NE, MVT::i8); Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain, Dest, CC, Cmp); - CC = DAG.getConstant(X86::COND_P, dl, MVT::i8); + CC = DAG.getConstant(X86::COND_P, MVT::i8); Cond = Cmp; addTest = false; } @@ -14336,10 +14317,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond.getOperand(0), Cond.getOperand(1)); Cmp = ConvertCmpIfNecessary(Cmp, DAG); - CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8); + CC = DAG.getConstant(X86::COND_NE, MVT::i8); Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain, Dest, CC, Cmp); - CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8); + CC = DAG.getConstant(X86::COND_NP, MVT::i8); Cond = Cmp; addTest = false; Dest = FalseBB; @@ -14367,7 +14348,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { if (addTest) { X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE; - CC = DAG.getConstant(X86Cond, dl, MVT::i8); + CC = DAG.getConstant(X86Cond, MVT::i8); Cond = EmitTest(Cond, X86Cond, dl, DAG); } Cond = ConvertCmpIfNecessary(Cond, DAG); @@ -14404,7 +14385,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, // Chain the dynamic stack allocation so that it doesn't modify the stack // pointer when other instructions are using the stack. 
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), + Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true), SDLoc(Node)); SDValue Size = Tmp2.getOperand(1); @@ -14416,11 +14397,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value if (Align > StackAlign) Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, - DAG.getConstant(-(uint64_t)Align, dl, VT)); + DAG.getConstant(-(uint64_t)Align, VT)); Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain - Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), - DAG.getIntPtrConstant(0, dl, true), SDValue(), + Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), + DAG.getIntPtrConstant(0, true), SDValue(), SDLoc(Node)); SDValue Ops[2] = { Tmp1, Tmp2 }; @@ -14476,7 +14457,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, if (Align) { SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), - DAG.getConstant(-(uint64_t)Align, dl, VT)); + DAG.getConstant(-(uint64_t)Align, VT)); Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP); } @@ -14511,22 +14492,22 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { // Store gp_offset SDValue Store = DAG.getStore(Op.getOperand(0), DL, DAG.getConstant(FuncInfo->getVarArgsGPOffset(), - DL, MVT::i32), + MVT::i32), FIN, MachinePointerInfo(SV), false, false, 0); MemOps.push_back(Store); // Store fp_offset FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), - FIN, DAG.getIntPtrConstant(4, DL)); + FIN, DAG.getIntPtrConstant(4)); Store = DAG.getStore(Op.getOperand(0), DL, - DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, + DAG.getConstant(FuncInfo->getVarArgsFPOffset(), MVT::i32), FIN, MachinePointerInfo(SV, 4), false, false, 0); MemOps.push_back(Store); // Store ptr to overflow_arg_area FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), - FIN, DAG.getIntPtrConstant(4, DL)); + FIN, DAG.getIntPtrConstant(4)); SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), getPointerTy()); Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, @@ -14536,7 +14517,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { // Store ptr to reg_save_area. 
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), - FIN, DAG.getIntPtrConstant(8, DL)); + FIN, DAG.getIntPtrConstant(8)); SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), getPointerTy()); Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, @@ -14586,9 +14567,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { // Insert VAARG_64 node into the DAG // VAARG_64 returns two values: Variable Argument Address, Chain - SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32), - DAG.getConstant(ArgMode, dl, MVT::i8), - DAG.getConstant(Align, dl, MVT::i32)}; + SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32), + DAG.getConstant(ArgMode, MVT::i8), + DAG.getConstant(Align, MVT::i32)}; SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, @@ -14619,7 +14600,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, SDLoc DL(Op); return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, - DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false, + DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, false, false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); } @@ -14640,7 +14621,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, if (Opc == X86ISD::VSRAI) ShiftAmt = ElementType.getSizeInBits() - 1; else - return DAG.getConstant(0, dl, VT); + return DAG.getConstant(0, VT); } assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI) @@ -14665,7 +14646,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, } ND = cast<ConstantSDNode>(CurrentOp); const APInt &C = ND->getAPIntValue(); - Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType)); + Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType)); } break; case X86ISD::VSRLI: @@ -14677,7 +14658,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, } ND = cast<ConstantSDNode>(CurrentOp); const APInt &C = ND->getAPIntValue(); - Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType)); + Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType)); } break; case X86ISD::VSRAI: @@ -14689,7 +14670,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, } ND = cast<ConstantSDNode>(CurrentOp); const APInt &C = ND->getAPIntValue(); - Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType)); + Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType)); } break; } @@ -14697,8 +14678,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT, return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts); } - return DAG.getNode(Opc, dl, VT, SrcOp, - DAG.getConstant(ShiftAmt, dl, MVT::i8)); + return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8)); } // getTargetVShiftNode - Handle vector element shifts where the shift amount @@ -14736,7 +14716,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT, SmallVector<SDValue, 4> ShOps; ShOps.push_back(ShAmt); if (SVT == MVT::i32) { - ShOps.push_back(DAG.getConstant(0, dl, SVT)); + ShOps.push_back(DAG.getConstant(0, SVT)); ShOps.push_back(DAG.getUNDEF(SVT)); } ShOps.push_back(DAG.getUNDEF(SVT)); @@ -14777,7 +14757,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, // are extracted by EXTRACT_SUBVECTOR. 
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); switch (Op.getOpcode()) { default: break; @@ -14935,23 +14915,22 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget Op.getOperand(2)); } SDValue CmpMask = getVectorMaskingNode(Cmp, Mask, - DAG.getTargetConstant(0, dl, - MaskVT), + DAG.getTargetConstant(0, MaskVT), Subtarget, DAG); SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT, DAG.getUNDEF(BitcastVT), CmpMask, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); } case COMI: { // Comparison intrinsics ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1; SDValue LHS = Op.getOperand(1); SDValue RHS = Op.getOperand(2); - unsigned X86CC = TranslateX86CC(CC, dl, true, LHS, RHS, DAG); + unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86CC, dl, MVT::i8), Cond); + DAG.getConstant(X86CC, MVT::i8), Cond); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } case VSHIFT: @@ -14978,7 +14957,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget SDLoc dl(Op); SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress, PassThru); @@ -14993,7 +14972,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget SDLoc dl(Op); SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1), Op.getOperand(2)); } @@ -15083,7 +15062,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget SDValue RHS = Op.getOperand(2); unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); - SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8); + SDValue CC = DAG.getConstant(X86CC, MVT::i8); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } @@ -15092,7 +15071,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? 
X86::COND_E: X86::COND_B; SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1)); SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2)); - SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8); + SDValue CC = DAG.getConstant(X86CC, MVT::i8); SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); @@ -15157,7 +15136,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86CC, dl, MVT::i8), + DAG.getConstant(X86CC, MVT::i8), SDValue(PCMP.getNode(), 1)); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } @@ -15184,17 +15163,17 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDLoc dl(Op); ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp); assert(C && "Invalid scale type"); - SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); + SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8); EVT MaskVT = MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements()); SDValue MaskInReg; ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask); if (MaskC) - MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT); + MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT); else MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask); SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other); - SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); + SDValue Disp = DAG.getTargetConstant(0, MVT::i32); SDValue Segment = DAG.getRegister(0, MVT::i32); if (Src.getOpcode() == ISD::UNDEF) Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl); @@ -15210,15 +15189,15 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDLoc dl(Op); ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp); assert(C && "Invalid scale type"); - SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); - SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); + SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8); + SDValue Disp = DAG.getTargetConstant(0, MVT::i32); SDValue Segment = DAG.getRegister(0, MVT::i32); EVT MaskVT = MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements()); SDValue MaskInReg; ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask); if (MaskC) - MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT); + MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT); else MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask); SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other); @@ -15233,15 +15212,15 @@ static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDLoc dl(Op); ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp); assert(C && "Invalid scale type"); - SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); - SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); + SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8); + SDValue Disp = DAG.getTargetConstant(0, MVT::i32); SDValue Segment = DAG.getRegister(0, MVT::i32); EVT MaskVT = MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements()); SDValue MaskInReg; ConstantSDNode *MaskC = 
dyn_cast<ConstantSDNode>(Mask); if (MaskC) - MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT); + MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT); else MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask); //SDVTList VTs = DAG.getVTList(MVT::Other); @@ -15282,7 +15261,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL, // The EAX register is loaded with the low-order 32 bits. The EDX register // is loaded with the supported high-order bits of the counter. SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI, - DAG.getConstant(32, DL, MVT::i8)); + DAG.getConstant(32, MVT::i8)); Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp)); Results.push_back(Chain); return; @@ -15336,7 +15315,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode, // The EDX register is loaded with the high-order 32 bits of the MSR, and // the EAX register is loaded with the low-order 32 bits. SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI, - DAG.getConstant(32, DL, MVT::i8)); + DAG.getConstant(32, MVT::i8)); Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp)); Results.push_back(Chain); return; @@ -15381,8 +15360,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget, // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. // Otherwise return the value from Rand, which is always 0, casted to i32. SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), - DAG.getConstant(1, dl, Op->getValueType(1)), - DAG.getConstant(X86::COND_B, dl, MVT::i32), + DAG.getConstant(1, Op->getValueType(1)), + DAG.getConstant(X86::COND_B, MVT::i32), SDValue(Result.getNode(), 1) }; SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, DAG.getVTList(Op->getValueType(1), MVT::Glue), @@ -15442,7 +15421,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget, SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0)); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86::COND_NE, dl, MVT::i8), + DAG.getConstant(X86::COND_NE, MVT::i8), InTrans); SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), @@ -15454,14 +15433,14 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget, SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other); SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other); SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2), - DAG.getConstant(-1, dl, MVT::i8)); + DAG.getConstant(-1, MVT::i8)); SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3), Op.getOperand(4), GenCF.getValue(1)); SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0), Op.getOperand(5), MachinePointerInfo(), false, false, 0); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86::COND_B, dl, MVT::i8), + DAG.getConstant(X86::COND_B, MVT::i8), Res.getValue(1)); Results.push_back(SetCC); Results.push_back(Store); @@ -15485,7 +15464,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget, Mask.getValueType().getSizeInBits()); SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, 
VMask, DataToCompress, DAG.getUNDEF(VT)); @@ -15509,7 +15488,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget, Mask.getValueType().getSizeInBits()); SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask), - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false, false, 0); @@ -15537,7 +15516,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); - SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT); + SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT); return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), @@ -15605,7 +15584,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName, SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const { const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); - return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op)); + return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); } SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { @@ -15624,8 +15603,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX; SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, - DAG.getIntPtrConstant(RegInfo->getSlotSize(), - dl)); + DAG.getIntPtrConstant(RegInfo->getSlotSize())); StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); @@ -15680,12 +15658,12 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, // Load the pointer to the nested function into R11. unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 SDValue Addr = Trmp; - OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), + OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), Addr, MachinePointerInfo(TrmpAddr), false, false, 0); Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, - DAG.getConstant(2, dl, MVT::i64)); + DAG.getConstant(2, MVT::i64)); OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2), false, false, 2); @@ -15694,13 +15672,13 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, // R10 is specified in X86CallingConv.td OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, - DAG.getConstant(10, dl, MVT::i64)); - OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), + DAG.getConstant(10, MVT::i64)); + OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), Addr, MachinePointerInfo(TrmpAddr, 10), false, false, 0); Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, - DAG.getConstant(12, dl, MVT::i64)); + DAG.getConstant(12, MVT::i64)); OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12), false, false, 2); @@ -15708,16 +15686,16 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, // Jump to the nested function. OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, - DAG.getConstant(20, dl, MVT::i64)); - OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), + DAG.getConstant(20, MVT::i64)); + OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), Addr, MachinePointerInfo(TrmpAddr, 20), false, false, 0); unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, - DAG.getConstant(22, dl, MVT::i64)); - OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8), - Addr, MachinePointerInfo(TrmpAddr, 22), + DAG.getConstant(22, MVT::i64)); + OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, + MachinePointerInfo(TrmpAddr, 22), false, false, 0); return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); @@ -15770,32 +15748,32 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, SDValue Addr, Disp; Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, - DAG.getConstant(10, dl, MVT::i32)); + DAG.getConstant(10, MVT::i32)); Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); // This is storing the opcode for MOV32ri. const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; OutChains[0] = DAG.getStore(Root, dl, - DAG.getConstant(MOV32ri|N86Reg, dl, MVT::i8), + DAG.getConstant(MOV32ri|N86Reg, MVT::i8), Trmp, MachinePointerInfo(TrmpAddr), false, false, 0); Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, - DAG.getConstant(1, dl, MVT::i32)); + DAG.getConstant(1, MVT::i32)); OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1), false, false, 1); const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, - DAG.getConstant(5, dl, MVT::i32)); - OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), - Addr, MachinePointerInfo(TrmpAddr, 5), + DAG.getConstant(5, MVT::i32)); + OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, + MachinePointerInfo(TrmpAddr, 5), false, false, 1); Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, - DAG.getConstant(6, dl, MVT::i32)); + DAG.getConstant(6, MVT::i32)); OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6), false, false, 1); @@ -15852,20 +15830,20 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SDValue CWD1 = DAG.getNode(ISD::SRL, DL, MVT::i16, DAG.getNode(ISD::AND, DL, MVT::i16, - CWD, DAG.getConstant(0x800, DL, MVT::i16)), - DAG.getConstant(11, DL, MVT::i8)); + CWD, DAG.getConstant(0x800, MVT::i16)), + DAG.getConstant(11, MVT::i8)); SDValue CWD2 = DAG.getNode(ISD::SRL, DL, MVT::i16, DAG.getNode(ISD::AND, DL, MVT::i16, - CWD, DAG.getConstant(0x400, DL, MVT::i16)), - DAG.getConstant(9, DL, MVT::i8)); + CWD, DAG.getConstant(0x400, MVT::i16)), + DAG.getConstant(9, MVT::i8)); SDValue RetVal = DAG.getNode(ISD::AND, DL, MVT::i16, DAG.getNode(ISD::ADD, DL, MVT::i16, DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), - DAG.getConstant(1, DL, MVT::i16)), - DAG.getConstant(3, DL, MVT::i16)); + DAG.getConstant(1, MVT::i16)), + DAG.getConstant(3, MVT::i16)); return DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); @@ -15891,15 +15869,14 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { // If src is zero (i.e. bsr sets ZF), returns NumBits. 
SDValue Ops[] = { Op, - DAG.getConstant(NumBits + NumBits - 1, dl, OpVT), - DAG.getConstant(X86::COND_E, dl, MVT::i8), + DAG.getConstant(NumBits+NumBits-1, OpVT), + DAG.getConstant(X86::COND_E, MVT::i8), Op.getValue(1) }; Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops); // Finally xor with NumBits-1. - Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, - DAG.getConstant(NumBits - 1, dl, OpVT)); + Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); if (VT == MVT::i8) Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); @@ -15924,8 +15901,7 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); // And xor with NumBits-1. - Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, - DAG.getConstant(NumBits - 1, dl, OpVT)); + Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); if (VT == MVT::i8) Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); @@ -15945,8 +15921,8 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { // If src is zero (i.e. bsf sets ZF), returns NumBits. SDValue Ops[] = { Op, - DAG.getConstant(NumBits, dl, VT), - DAG.getConstant(X86::COND_E, dl, MVT::i8), + DAG.getConstant(NumBits, VT), + DAG.getConstant(X86::COND_E, MVT::i8), Op.getValue(1) }; return DAG.getNode(X86ISD::CMOV, dl, VT, Ops); @@ -16013,8 +15989,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, if (Subtarget->hasInt256()) { if (VT == MVT::v32i8) { MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2); - SDValue Lo = DAG.getIntPtrConstant(0, dl); - SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl); + SDValue Lo = DAG.getIntPtrConstant(0); + SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2); SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo); SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo); SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi); @@ -16048,8 +16024,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); ALo = DAG.getNode(ISD::BITCAST, dl, ExVT, ALo); BLo = DAG.getNode(ISD::BITCAST, dl, ExVT, BLo); - ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT)); - BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT)); + ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, ExVT)); + BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, ExVT)); } // Extract the hi parts and sign extend to i16 @@ -16068,15 +16044,15 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); AHi = DAG.getNode(ISD::BITCAST, dl, ExVT, AHi); BHi = DAG.getNode(ISD::BITCAST, dl, ExVT, BHi); - AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT)); - BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT)); + AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, ExVT)); + BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, ExVT)); } // Multiply, mask the lower 8bits of the lo/hi results and pack SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo); SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi); - RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT)); - RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT)); + RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, ExVT)); + RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, 
ExVT)); return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); } @@ -16252,8 +16228,7 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget, // unsigned multiply. if (IsSigned && !Subtarget->hasSSE41()) { SDValue ShAmt = - DAG.getConstant(31, dl, - DAG.getTargetLoweringInfo().getShiftAmountTy(VT)); + DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT)); SDValue T1 = DAG.getNode(ISD::AND, dl, VT, DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1); SDValue T2 = DAG.getNode(ISD::AND, dl, VT, @@ -16308,7 +16283,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); // Zero out the rightmost bits. SmallVector<SDValue, 32> V( - NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, MVT::i8)); + NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), MVT::i8)); return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V)); } @@ -16319,7 +16294,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); // Zero out the leftmost bits. SmallVector<SDValue, 32> V( - NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, MVT::i8)); + NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, MVT::i8)); return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V)); } @@ -16333,8 +16308,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, // R s>> a === ((R u>> a) ^ m) - m SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); SmallVector<SDValue, 32> V(NumElts, - DAG.getConstant(128 >> ShiftAmt, dl, - MVT::i8)); + DAG.getConstant(128 >> ShiftAmt, MVT::i8)); SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V); Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); @@ -16439,7 +16413,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, if (!BaseShAmt) // Avoid introducing an extract element from a shuffle. BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec, - DAG.getIntPtrConstant(SplatIdx, dl)); + DAG.getIntPtrConstant(SplatIdx)); } } @@ -16597,7 +16571,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget, Elts.push_back(DAG.getUNDEF(SVT)); continue; } - Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT)); + Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT)); } SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts); return DAG.getNode(ISD::MUL, dl, VT, R, BV); @@ -16605,10 +16579,9 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget, // Lower SHL with variable shift amount. if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { - Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT)); + Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT)); - Op = DAG.getNode(ISD::ADD, dl, VT, Op, - DAG.getConstant(0x3f800000U, dl, VT)); + Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT)); Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); return DAG.getNode(ISD::MUL, dl, VT, Op, R); @@ -16672,10 +16645,10 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget, // Replace this node with two shifts followed by a MOVSS/MOVSD. 
EVT CastVT = MVT::v4i32; SDValue Splat1 = - DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT); + DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT); SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1); SDValue Splat2 = - DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT); + DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT); SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2); if (TargetOpcode == X86ISD::MOVSD) CastVT = MVT::v2i64; @@ -16691,16 +16664,16 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget, assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); // a = a << 5; - Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT)); + Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT)); Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); // Turn 'a' into a mask suitable for VSELECT - SDValue VSelM = DAG.getConstant(0x80, dl, VT); + SDValue VSelM = DAG.getConstant(0x80, VT); SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); - SDValue CM1 = DAG.getConstant(0x0f, dl, VT); - SDValue CM2 = DAG.getConstant(0x3f, dl, VT); + SDValue CM1 = DAG.getConstant(0x0f, VT); + SDValue CM2 = DAG.getConstant(0x3f, VT); // r = VSELECT(r, psllw(r & (char16)15, 4), a); SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); @@ -16841,7 +16814,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, - DAG.getConstant(X86::COND_O, DL, MVT::i32), + DAG.getConstant(X86::COND_O, MVT::i32), SDValue(Sum.getNode(), 2)); return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); @@ -16854,7 +16827,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), - DAG.getConstant(Cond, DL, MVT::i32), + DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1)); return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); @@ -17006,13 +16979,13 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); SDValue Chain = Op.getOperand(0); - SDValue Zero = DAG.getConstant(0, dl, MVT::i32); + SDValue Zero = DAG.getConstant(0, MVT::i32); SDValue Ops[] = { - DAG.getRegister(X86::ESP, MVT::i32), // Base - DAG.getTargetConstant(1, dl, MVT::i8), // Scale - DAG.getRegister(0, MVT::i32), // Index - DAG.getTargetConstant(0, dl, MVT::i32), // Disp - DAG.getRegister(0, MVT::i32), // Segment. + DAG.getRegister(X86::ESP, MVT::i32), // Base + DAG.getTargetConstant(1, MVT::i8), // Scale + DAG.getRegister(0, MVT::i32), // Index + DAG.getTargetConstant(0, MVT::i32), // Disp + DAG.getRegister(0, MVT::i32), // Segment. 
Zero, Chain }; @@ -17045,7 +17018,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, SDValue Ops[] = { cpIn.getValue(0), Op.getOperand(1), Op.getOperand(3), - DAG.getTargetConstant(size, DL, MVT::i8), + DAG.getTargetConstant(size, MVT::i8), cpIn.getValue(1) }; SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); @@ -17057,8 +17030,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS, MVT::i32, cpOut.getValue(2)); SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1), - DAG.getConstant(X86::COND_E, DL, MVT::i8), - EFLAGS); + DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS); DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut); DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); @@ -17087,7 +17059,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget, SmallVector<SDValue, 16> Elts; for (unsigned i = 0, e = NumElts; i != e; ++i) Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec, - DAG.getIntPtrConstant(i, dl))); + DAG.getIntPtrConstant(i))); // Explicitly mark the extra elements as Undef. Elts.append(NumElts, DAG.getUNDEF(SVT)); @@ -17096,7 +17068,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget, SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts); SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64, - DAG.getIntPtrConstant(0, dl)); + DAG.getIntPtrConstant(0)); } assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && @@ -17151,15 +17123,12 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget, bool NeedsBitcast = EltVT == MVT::i32; MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64; - SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, - EltVT); - SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, - EltVT); - SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, - EltVT); + SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT); + SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT); + SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT); // v = v - ((v >> 1) & 0x55555555...) - SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, dl, EltVT)); + SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT)); SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones); SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV); if (NeedsBitcast) @@ -17178,7 +17147,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget, // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) SmallVector<SDValue, 8> Mask33(NumElts, Cst33); SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33); - SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, dl, EltVT)); + SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT)); SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos); Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV); @@ -17197,7 +17166,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget, SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS); // v = (v + (v >> 4)) & 0x0F0F0F0F... 
- SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, dl, EltVT)); + SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT)); SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours); Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV); Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl); @@ -17230,7 +17199,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget, Add = And; SmallVector<SDValue, 8> Csts; for (unsigned i = 8; i <= Len/2; i *= 2) { - Csts.assign(NumElts, DAG.getConstant(i, dl, EltVT)); + Csts.assign(NumElts, DAG.getConstant(i, EltVT)); SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts); Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV); Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl); @@ -17238,8 +17207,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget, } // The result is on the least significant 6-bits on i32 and 7-bits on i64. - SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), dl, - EltVT); + SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT); SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F); SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV); if (NeedsBitcast) { @@ -17258,7 +17226,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { SDLoc dl(Node); EVT T = Node->getValueType(0); SDValue negOp = DAG.getNode(ISD::SUB, dl, T, - DAG.getConstant(0, dl, T), Node->getOperand(2)); + DAG.getConstant(0, T), Node->getOperand(2)); return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, cast<AtomicSDNode>(Node)->getMemoryVT(), Node->getOperand(0), @@ -17364,9 +17332,9 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget, // Returned in bits 0:31 and 32:64 xmm0. SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, - CallResult.first, DAG.getIntPtrConstant(0, dl)); + CallResult.first, DAG.getIntPtrConstant(0)); SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, - CallResult.first, DAG.getIntPtrConstant(1, dl)); + CallResult.first, DAG.getIntPtrConstant(1)); SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal); } @@ -17533,7 +17501,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, return; SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, N->getOperand(0)); - SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, + SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), MVT::f64); SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, @@ -17576,9 +17544,9 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; SDValue cpInL, cpInH; cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), - DAG.getConstant(0, dl, HalfT)); + DAG.getConstant(0, HalfT)); cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), - DAG.getConstant(1, dl, HalfT)); + DAG.getConstant(1, HalfT)); cpInL = DAG.getCopyToReg(N->getOperand(0), dl, Regs64bit ? 
X86::RAX : X86::EAX, cpInL, SDValue()); @@ -17587,9 +17555,9 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, cpInH, cpInL.getValue(1)); SDValue swapInL, swapInH; swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), - DAG.getConstant(0, dl, HalfT)); + DAG.getConstant(0, HalfT)); swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), - DAG.getConstant(1, dl, HalfT)); + DAG.getConstant(1, HalfT)); swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RBX : X86::EBX, swapInL, cpInH.getValue(1)); @@ -17616,7 +17584,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, MVT::i32, cpOutH.getValue(2)); SDValue Success = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, - DAG.getConstant(X86::COND_E, dl, MVT::i8), EFLAGS); + DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS); Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1)); Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF)); @@ -17666,7 +17634,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, SmallVector<SDValue, 8> Elts; for (unsigned i = 0, e = NumElts; i != e; ++i) Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, - ToVecInt, DAG.getIntPtrConstant(i, dl))); + ToVecInt, DAG.getIntPtrConstant(i))); Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts)); } @@ -19877,7 +19845,7 @@ static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask, int M = Mask[i / Ratio] != SM_SentinelZero ? Ratio * Mask[i / Ratio] + i % Ratio : 255; - PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8)); + PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8)); } MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes); Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Input); @@ -20183,7 +20151,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask, for (int &M : Mask) M = VMask[M]; V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0), - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); // Rebuild the chain around this new shuffle. while (!Chain.empty()) { @@ -20270,7 +20238,7 @@ static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask, for (int &M : Mask) M = VMask[M]; V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0), - getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); + getV4X86ShuffleImm8ForMask(Mask, DAG)); // Check that the shuffles didn't cancel each other out. If not, we need to // combine to the new one. 
@@ -20329,7 +20297,7 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
       V = DAG.getNode(ISD::BITCAST, DL, DVT, V);
       DCI.AddToWorklist(V.getNode());
       V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
-                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
+                      getV4X86ShuffleImm8ForMask(DMask, DAG));
       DCI.AddToWorklist(V.getNode());
       return DAG.getNode(ISD::BITCAST, DL, VT, V);
     }
@@ -20759,11 +20727,11 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
     SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
     EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
     SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
-                                     DAG.getConstant(0, dl, VecIdxTy));
+                                     DAG.getConstant(0, VecIdxTy));
     SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
-                                  DAG.getConstant(1, dl, VecIdxTy));
+                                  DAG.getConstant(1, VecIdxTy));
-
-    SDValue ShAmt = DAG.getConstant(32, dl,
+    SDValue ShAmt = DAG.getConstant(32,
         DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
     Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
     Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
@@ -20783,7 +20751,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
     // Replace each use (extract) with a load of the appropriate element.
     for (unsigned i = 0; i < 4; ++i) {
       uint64_t Offset = EltSize * i;
-      SDValue OffsetVal = DAG.getConstant(Offset, dl, TLI.getPointerTy());
+      SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
 
       SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                                        StackPtr, OffsetVal);
@@ -21144,21 +21112,21 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
         TrueC->getAPIntValue().isPowerOf2()) {
       if (NeedsCondInvert) // Invert the condition if needed.
         Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
-                           DAG.getConstant(1, DL, Cond.getValueType()));
+                           DAG.getConstant(1, Cond.getValueType()));
 
       // Zero extend the condition if needed.
       Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
 
       unsigned ShAmt = TrueC->getAPIntValue().logBase2();
       return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
-                         DAG.getConstant(ShAmt, DL, MVT::i8));
+                         DAG.getConstant(ShAmt, MVT::i8));
     }
 
     // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst.
     if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
       if (NeedsCondInvert) // Invert the condition if needed.
         Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
-                           DAG.getConstant(1, DL, Cond.getValueType()));
+                           DAG.getConstant(1, Cond.getValueType()));
 
       // Zero extend the condition if needed.
       Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
@@ -21193,7 +21161,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
         APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
         if (NeedsCondInvert) // Invert the condition if needed.
           Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
-                             DAG.getConstant(1, DL, Cond.getValueType()));
+                             DAG.getConstant(1, Cond.getValueType()));
 
         // Zero extend the condition if needed.
         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
@@ -21201,8 +21169,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
         // Scale the condition by the difference.
         if (Diff != 1)
           Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
-                             DAG.getConstant(Diff, DL,
-                                             Cond.getValueType()));
+                             DAG.getConstant(Diff, Cond.getValueType()));
 
         // Add the base if non-zero.
         if (FalseC->getAPIntValue() != 0)
@@ -21290,7 +21257,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                 (-OpRHSConst->getAPIntValue() - 1))
           return DAG.getNode(
               X86ISD::SUBUS, DL, VT, OpLHS,
-              DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT));
+              DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
 
         // Another special case: If C was a sign bit, the sub has been
         // canonicalized into a xor.
@@ -21304,7 +21271,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
           // don't rely on particular values of undef lanes.
           return DAG.getNode(
               X86ISD::SUBUS, DL, VT, OpLHS,
-              DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT));
+              DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
       }
     }
   }
@@ -21698,7 +21665,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
       // Extra check as FCMOV only supports a subset of X86 cond.
       (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
     SDValue Ops[] = { FalseOp, TrueOp,
-                      DAG.getConstant(CC, DL, MVT::i8), Flags };
+                      DAG.getConstant(CC, MVT::i8), Flags };
     return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
   }
 
@@ -21720,14 +21687,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
     // shift amount.
     if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
       Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                         DAG.getConstant(CC, DL, MVT::i8), Cond);
+                         DAG.getConstant(CC, MVT::i8), Cond);
 
       // Zero extend the condition if needed.
       Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                          TrueC->getValueType(0), Cond);
 
       unsigned ShAmt = TrueC->getAPIntValue().logBase2();
       Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
-                         DAG.getConstant(ShAmt, DL, MVT::i8));
+                         DAG.getConstant(ShAmt, MVT::i8));
       if (N->getNumValues() == 2)  // Dead flag value?
         return DCI.CombineTo(N, Cond, SDValue());
       return Cond;
@@ -21737,7 +21704,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
     // for any integer data type, including i8/i16.
     if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
       Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                         DAG.getConstant(CC, DL, MVT::i8), Cond);
+                         DAG.getConstant(CC, MVT::i8), Cond);
 
       // Zero extend the condition if needed.
       Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
@@ -21775,14 +21742,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
       if (isFastMultiplier) {
         APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
         Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                           DAG.getConstant(CC, DL, MVT::i8), Cond);
+                           DAG.getConstant(CC, MVT::i8), Cond);
         // Zero extend the condition if needed.
         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                            FalseC->getValueType(0), Cond);
         // Scale the condition by the difference.
         if (Diff != 1)
           Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
-                             DAG.getConstant(Diff, DL, Cond.getValueType()));
+                             DAG.getConstant(Diff, Cond.getValueType()));
 
         // Add the base if non-zero.
         if (FalseC->getAPIntValue() != 0)
@@ -21828,7 +21795,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
     if (CC == X86::COND_E &&
         CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
       SDValue Ops[] = { FalseOp, Cond.getOperand(0),
-                        DAG.getConstant(CC, DL, MVT::i8), Cond };
+                        DAG.getConstant(CC, MVT::i8), Cond };
       return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
     }
   }
@@ -21862,10 +21829,10 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
       CC1 = X86::GetOppositeBranchCondition(CC1);
     }
 
-    SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
+    SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, MVT::i8),
                       Flags};
     SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), LOps);
-    SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
+    SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, MVT::i8), Flags};
     SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(CMOV.getNode(), 1));
     return CMOV;
@@ -21963,9 +21930,8 @@ static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
 
         // Replace this packed shift intrinsic with a target independent
         // shift dag node.
-        SDLoc DL(N);
-        SDValue Splat = DAG.getConstant(C, DL, VT);
-        return DAG.getNode(ISD::SRA, DL, VT, Op0, Splat);
+        SDValue Splat = DAG.getConstant(C, VT);
+        return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
       }
     }
   }
@@ -22015,17 +21981,17 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
     SDValue NewMul;
     if (isPowerOf2_64(MulAmt1))
       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
-                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
+                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
     else
       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
-                           DAG.getConstant(MulAmt1, DL, VT));
+                           DAG.getConstant(MulAmt1, VT));
 
     if (isPowerOf2_64(MulAmt2))
       NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
-                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
+                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
     else
       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
-                           DAG.getConstant(MulAmt2, DL, VT));
+                           DAG.getConstant(MulAmt2, VT));
 
     // Do not add new nodes to DAG combiner worklist.
     DCI.CombineTo(N, NewMul, false);
@@ -22052,11 +22018,9 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
       APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
       APInt ShAmt = N1C->getAPIntValue();
       Mask = Mask.shl(ShAmt);
-      if (Mask != 0) {
-        SDLoc DL(N);
-        return DAG.getNode(ISD::AND, DL, VT,
-                           N00, DAG.getConstant(Mask, DL, VT));
-      }
+      if (Mask != 0)
+        return DAG.getNode(ISD::AND, SDLoc(N), VT,
+                           N00, DAG.getConstant(Mask, VT));
     }
   }
 
@@ -22186,8 +22150,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
         unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
         if (Subtarget->hasAVX512()) {
           SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
-                                       CMP01,
-                                       DAG.getConstant(x86cc, DL, MVT::i8));
+                                       CMP01, DAG.getConstant(x86cc, MVT::i8));
           if (N->getValueType(0) != MVT::i1)
             return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
                                FSetCC);
@@ -22195,8 +22158,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
         }
         SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
                                             CMP00.getValueType(), CMP00, CMP01,
-                                            DAG.getConstant(x86cc, DL,
-                                                            MVT::i8));
+                                            DAG.getConstant(x86cc, MVT::i8));
         bool is64BitFP = (CMP00.getValueType() == MVT::f64);
         MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
@@ -22212,16 +22174,14 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
             SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
                                            Vector64);
             OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
-                                        Vector32, DAG.getIntPtrConstant(0, DL));
+                                        Vector32, DAG.getIntPtrConstant(0));
             IntVT = MVT::i32;
           }
 
-          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
-                                              OnesOrZeroesF);
+          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
           SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
-                                      DAG.getConstant(1, DL, IntVT));
-          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
-                                              ANDed);
+                                      DAG.getConstant(1, IntVT));
+          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
           return OneBitOfTruth;
         }
       }
@@ -22333,7 +22293,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
       APInt Mask = APInt::getAllOnesValue(InBits);
       Mask = Mask.zext(VT.getScalarType().getSizeInBits());
       return DAG.getNode(ISD::AND, DL, VT,
-                         Op, DAG.getConstant(Mask, DL, VT));
+                         Op, DAG.getConstant(Mask, VT));
     }
     case ISD::SIGN_EXTEND:
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
@@ -22429,8 +22389,8 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
     Mask.push_back(i / ZextRatio);
 
   SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
-    Shuffle->getOperand(0), DAG.getConstant(0, DL, SrcType), Mask);
-  return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
+    Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
+  return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
 }
 
 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
@@ -22465,8 +22425,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
         uint64_t MaskSize = countPopulation(Mask);
         if (Shift + MaskSize <= VT.getSizeInBits())
           return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
-                             DAG.getConstant(Shift | (MaskSize << 8), DL,
-                                             VT));
+                             DAG.getConstant(Shift | (MaskSize << 8), VT));
       }
     }
   } // BEXTR
@@ -22681,10 +22640,10 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
   if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
     // Generate SUB & CMOV.
     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
-                              DAG.getConstant(0, DL, VT), N0.getOperand(0));
+                              DAG.getConstant(0, VT), N0.getOperand(0));
     SDValue Ops[] = { N0.getOperand(0), Neg,
-                      DAG.getConstant(X86::COND_GE, DL, MVT::i8),
+                      DAG.getConstant(X86::COND_GE, MVT::i8),
                       SDValue(Neg.getNode(), 1) };
     return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
   }
@@ -22729,7 +22688,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
     return SDValue();
 
   SDValue Ptr = Ld->getBasePtr();
-  SDValue Increment = DAG.getConstant(16, dl, TLI.getPointerTy());
+  SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
 
   EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                 NumElems/2);
@@ -22808,7 +22767,7 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
     for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
       ShuffleVec[i] = NumElems*SizeRatio;
     NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
-                                   DAG.getConstant(0, dl, WideVecVT),
+                                   DAG.getConstant(0, WideVecVT),
                                    &ShuffleVec[0]);
   }
   else {
@@ -22820,7 +22779,7 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
     unsigned NumConcat = WidenNumElts / MaskNumElts;
     SmallVector<SDValue, 16> Ops(NumConcat);
-    SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
+    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
     Ops[0] = Mask;
     for (unsigned i = 1; i != NumConcat; ++i)
       Ops[i] = ZeroVal;
@@ -22892,7 +22851,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
     for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
       ShuffleVec[i] = NumElems*SizeRatio;
     NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
-                                   DAG.getConstant(0, dl, WideVecVT),
+                                   DAG.getConstant(0, WideVecVT),
                                    &ShuffleVec[0]);
   }
   else {
@@ -22904,7 +22863,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
     unsigned NumConcat = WidenNumElts / MaskNumElts;
     SmallVector<SDValue, 16> Ops(NumConcat);
-    SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
+    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
     Ops[0] = Mask;
     for (unsigned i = 1; i != NumConcat; ++i)
       Ops[i] = ZeroVal;
@@ -22938,7 +22897,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
     SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
 
-    SDValue Stride = DAG.getConstant(16, dl, TLI.getPointerTy());
+    SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
     SDValue Ptr0 = St->getBasePtr();
     SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
 
@@ -23011,7 +22970,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
     SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
     SmallVector<SDValue, 8> Chains;
-    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, dl,
+    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
                                         TLI.getPointerTy());
     SDValue Ptr = St->getBasePtr();
 
@@ -23019,7 +22978,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
       SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                                    StoreType, ShuffWide,
-                                   DAG.getIntPtrConstant(i, dl));
+                                   DAG.getIntPtrConstant(i));
       SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
                                 St->getPointerInfo(), St->isVolatile(),
                                 St->isNonTemporal(), St->getAlignment());
@@ -23103,7 +23062,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     // Otherwise, lower to two pairs of 32-bit loads / stores.
     SDValue LoAddr = Ld->getBasePtr();
     SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
-                                 DAG.getConstant(4, LdDL, MVT::i32));
+                                 DAG.getConstant(4, MVT::i32));
 
     SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                                Ld->getPointerInfo(),
@@ -23124,7 +23083,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
 
     LoAddr = St->getBasePtr();
     HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
-                         DAG.getConstant(4, StDL, MVT::i32));
+                         DAG.getConstant(4, MVT::i32));
 
     SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                 St->getPointerInfo(),
@@ -23548,7 +23507,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
       return DAG.getNode(ISD::AND, dl, VT,
                          DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                      N00.getOperand(0), N00.getOperand(1)),
-                         DAG.getConstant(1, dl, VT));
+                         DAG.getConstant(1, VT));
     }
   }
 
@@ -23560,7 +23519,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
-                        DAG.getConstant(1, dl, VT));
+                        DAG.getConstant(1, VT));
    }
  }
  if (VT.is256BitVector()) {
@@ -23599,18 +23558,18 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
   if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
       if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
-        SDValue addV = DAG.getNode(ISD::ADD, DL, LHS.getValueType(), RHS,
+        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), LHS.getValueType(), RHS,
                                    LHS.getOperand(1));
-        return DAG.getSetCC(DL, N->getValueType(0), addV,
-                            DAG.getConstant(0, DL, addV.getValueType()), CC);
+        return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV,
+                            DAG.getConstant(0, addV.getValueType()), CC);
       }
 
   if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
-        SDValue addV = DAG.getNode(ISD::ADD, DL, RHS.getValueType(), LHS,
+        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), RHS.getValueType(), LHS,
                                    RHS.getOperand(1));
-        return DAG.getSetCC(DL, N->getValueType(0), addV,
-                            DAG.getConstant(0, DL, addV.getValueType()), CC);
+        return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV,
+                            DAG.getConstant(0, addV.getValueType()), CC);
       }
 
   if (VT.getScalarType() == MVT::i1 &&
@@ -23634,9 +23593,9 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
     assert(VT == LHS.getOperand(0).getValueType() && "Uexpected operand type");
     if (CC == ISD::SETGT)
-      return DAG.getConstant(0, DL, VT);
+      return DAG.getConstant(0, VT);
     if (CC == ISD::SETLE)
-      return DAG.getConstant(1, DL, VT);
+      return DAG.getConstant(1, VT);
     if (CC == ISD::SETEQ || CC == ISD::SETGE)
       return DAG.getNOT(DL, LHS.getOperand(0), VT);
 
@@ -23657,8 +23616,7 @@ static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
   SDValue Addr = Load->getOperand(1);
   SDValue NewAddr = DAG.getNode(
       ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
-      DAG.getConstant(Index * EVT.getStoreSize(), dl,
-                      Addr.getSimpleValueType()));
+      DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
 
   SDValue NewLoad = DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
@@ -23714,7 +23672,7 @@ static SDValue PerformBLENDICombine(SDNode *N, SelectionDAG &DAG) {
   if (VT == MVT::v2f64)
     if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
       if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
-        SDValue NewMask = DAG.getConstant(1, DL, MVT::i8);
+        SDValue NewMask = DAG.getConstant(1, MVT::i8);
         return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
       }
 
@@ -23729,14 +23687,12 @@ static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
   if (VT == MVT::i8)
     return DAG.getNode(ISD::AND, DL, VT,
                        DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
-                                   DAG.getConstant(X86::COND_B, DL, MVT::i8),
-                                   EFLAGS),
-                       DAG.getConstant(1, DL, VT));
+                                   DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
+                       DAG.getConstant(1, VT));
   assert (VT == MVT::i1 && "Unexpected type for SECCC node");
   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
                      DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
-                                 DAG.getConstant(X86::COND_B, DL, MVT::i8),
-                                 EFLAGS));
+                                 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
 }
 
 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
@@ -23775,7 +23731,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
     Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
     if (Flags.getNode()) {
-      SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
+      SDValue Cond = DAG.getConstant(CC, MVT::i8);
       return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
     }
 
@@ -23797,7 +23753,7 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
     Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
     if (Flags.getNode()) {
-      SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
+      SDValue Cond = DAG.getConstant(CC, MVT::i8);
       return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                          Flags);
     }
@@ -23901,13 +23857,12 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
       SDValue(N, 1).use_empty()) {
     SDLoc DL(N);
     EVT VT = N->getValueType(0);
-    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
+    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
     SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                                DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
-                                           DAG.getConstant(X86::COND_B, DL,
-                                                           MVT::i8),
+                                           DAG.getConstant(X86::COND_B,MVT::i8),
                                            N->getOperand(2)),
-                               DAG.getConstant(1, DL, VT));
+                               DAG.getConstant(1, VT));
     return DCI.CombineTo(N, Res1, CarryOut);
   }
 
@@ -23942,17 +23897,16 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
   SDValue CmpOp0 = Cmp.getOperand(0);
   SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
-                               DAG.getConstant(1, DL, CmpOp0.getValueType()));
+                               DAG.getConstant(1, CmpOp0.getValueType()));
 
   SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
   if (CC == X86::COND_NE)
     return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                        DL, OtherVal.getValueType(), OtherVal,
-                       DAG.getConstant(-1ULL, DL, OtherVal.getValueType()),
-                       NewCmp);
+                       DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
   return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                      DL, OtherVal.getValueType(), OtherVal,
-                     DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
+                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
 }
 
 /// PerformADDCombine - Do target-specific dag combines on integer adds.
@@ -23988,9 +23942,9 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
       EVT VT = Op0.getValueType();
       SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                    Op1.getOperand(0),
-                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
+                                   DAG.getConstant(~XorC, VT));
       return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
-                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
+                         DAG.getConstant(C->getAPIntValue()+1, VT));
     }
   }
 
@@ -24060,7 +24014,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
       OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
                                 OrigVT.getVectorNumElements() / Ratio);
       OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
-                          DAG.getIntPtrConstant(0, DL));
+                          DAG.getIntPtrConstant(0));
     }
     Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
     return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
@@ -24541,8 +24495,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
   case 'I':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (C->getZExtValue() <= 31) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
         break;
       }
     }
@@ -24550,8 +24503,7 @@
   case 'J':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24559,8 +24511,7 @@
   case 'K':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (isInt<8>(C->getSExtValue())) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24569,8 +24520,7 @@
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
           (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
-        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24578,8 +24528,7 @@
   case 'M':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (C->getZExtValue() <= 3) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24587,8 +24536,7 @@
   case 'N':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (C->getZExtValue() <= 255) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24596,8 +24544,7 @@
   case 'O':
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (C->getZExtValue() <= 127) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24608,7 +24555,7 @@
       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                            C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
-       Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
+       Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
@@ -24621,8 +24568,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                            C->getZExtValue())) {
-        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
-                                       Op.getValueType());
+        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
@@ -24634,7 +24580,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
     // Literal immediates are always ok.
     if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
       // Widen to 64 bits here to get it sign extended.
-      Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
+      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
       break;
     }