Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp  139
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp  484
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrFormats.td  13
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.td  30
-rw-r--r--  llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp  304
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp  474
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrInfo.td  21
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrNEON.td  21
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrThumb.td  9
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrThumb2.td  8
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrVFP.td  4
-rw-r--r--  llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp  8
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp  98
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  77
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.td  8
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td  28
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp  10
-rw-r--r--  llvm/lib/Target/MSP430/MSP430ISelLowering.cpp  30
-rw-r--r--  llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp  9
-rw-r--r--  llvm/lib/Target/Mips/MipsISelDAGToDAG.h  2
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp  140
-rw-r--r--  llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp  46
-rw-r--r--  llvm/lib/Target/Mips/MipsSEISelLowering.cpp  66
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp  130
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h  4
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp  169
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td  8
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXVector.td  8
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp  218
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp  318
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstr64Bit.td  9
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrAltivec.td  12
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrHTM.td  2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.td  15
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp  200
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelLowering.cpp  208
-rw-r--r--  llvm/lib/Target/R600/AMDGPUInstructions.td  2
-rw-r--r--  llvm/lib/Target/R600/R600ISelLowering.cpp  202
-rw-r--r--  llvm/lib/Target/R600/R600ISelLowering.h  3
-rw-r--r--  llvm/lib/Target/R600/SIISelLowering.cpp  90
-rw-r--r--  llvm/lib/Target/R600/SIInstrInfo.td  24
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp  10
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp  133
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstr64Bit.td  11
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.td  5
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp  42
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp  163
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZOperands.td  33
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp  42
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp  96
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp  974
-rw-r--r--  llvm/lib/Target/X86/X86InstrCompiler.td  4
-rw-r--r--  llvm/lib/Target/X86/X86InstrFragmentsSIMD.td  12
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.td  2
-rw-r--r--  llvm/lib/Target/X86/X86InstrShiftRotate.td  4
-rw-r--r--  llvm/lib/Target/X86/X86SelectionDAGInfo.cpp  25
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp  13
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp  115
-rw-r--r--  llvm/lib/Target/XCore/XCoreInstrInfo.td  11
59 files changed, 2837 insertions, 2509 deletions
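
Note: every hunk below applies the same mechanical change. The SelectionDAG constant factories (getConstant, getTargetConstant, getIntPtrConstant) now take an explicit SDLoc debug-location argument, so each call site either constructs a location from the node at hand or reuses one already in scope. A minimal sketch of the pattern, mirroring the first AArch64 hunk; the names N, Immed, ShVal, Val and Shift are just the locals used at that particular call site, not anything new:

    // Hoist one SDLoc per node instead of rebuilding SDLoc(N) at each call.
    SDLoc dl(N);
    // Before (no location argument):
    //   Val   = CurDAG->getTargetConstant(Immed, MVT::i32);
    //   Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    // After (the SDLoc is threaded through explicitly):
    Val   = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
    Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);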
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 610e7cf63c9..78a2021f79a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -252,8 +252,9 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
return false;
unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
- Val = CurDAG->getTargetConstant(Immed, MVT::i32);
- Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
+ SDLoc dl(N);
+ Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
+ Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
return true;
}
@@ -286,7 +287,8 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
Immed &= 0xFFFFFFULL;
- return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
+ return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
+ Shift);
}
/// getShiftTypeForNode - Translate a shift node to the corresponding
@@ -334,7 +336,7 @@ bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
Reg = N.getOperand(0);
- Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
+ Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
return isWorthFolding(N);
}
@@ -435,6 +437,7 @@ static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
+ SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
@@ -451,7 +454,7 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
return nullptr;
}
- SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
@@ -474,10 +477,11 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
break;
}
- return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
+ return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
}
SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
+ SDLoc dl(N);
SDValue SMULLOp0;
SDValue SMULLOp1;
int LaneIdx;
@@ -486,7 +490,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
LaneIdx))
return nullptr;
- SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
+ SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
@@ -517,7 +521,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
} else
llvm_unreachable("Unrecognized intrinsic.");
- return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
+ return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
}
/// Instructions that accept extend modifiers like UXTW expect the register
@@ -528,9 +532,10 @@ static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
if (N.getValueType() == MVT::i32)
return N;
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDLoc dl(N);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- SDLoc(N), MVT::i32, N, SubReg);
+ dl, MVT::i32, N, SubReg);
return SDValue(Node, 0);
}
@@ -570,7 +575,8 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
// (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
Reg = narrowIfNeeded(CurDAG, Reg);
- Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
+ Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
+ MVT::i32);
return isWorthFolding(N);
}
@@ -600,11 +606,12 @@ static bool isWorthFoldingADDlow(SDValue N) {
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
SDValue &Base, SDValue &OffImm) {
+ SDLoc dl(N);
const TargetLowering *TLI = getTargetLowering();
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
return true;
}
@@ -637,7 +644,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
return true;
}
}
@@ -653,7 +660,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
// add x0, Xbase, #offset
// ldr x0, [x0]
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
return true;
}
@@ -680,7 +687,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
const TargetLowering *TLI = getTargetLowering();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
return true;
}
}
@@ -688,12 +695,12 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
}
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDLoc dl(N);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
SDValue ImpDef = SDValue(
- CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
- 0);
+ CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
MachineSDNode *Node = CurDAG->getMachineNode(
- TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
+ TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
return SDValue(Node, 0);
}
@@ -707,6 +714,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
return false;
+ SDLoc dl(N);
if (WantExtend) {
AArch64_AM::ShiftExtendType Ext =
getExtendTypeForNode(N.getOperand(0), true);
@@ -714,10 +722,11 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
return false;
Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
} else {
Offset = N.getOperand(0);
- SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
}
unsigned LegalShiftVal = Log2_32(Size);
@@ -740,6 +749,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
return false;
SDValue LHS = N.getOperand(0);
SDValue RHS = N.getOperand(1);
+ SDLoc dl(N);
// We don't want to match immediate adds here, because they are better lowered
// to the register-immediate addressing modes.
@@ -762,7 +772,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
Base = LHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
return true;
}
@@ -770,12 +780,12 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
Base = RHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
return true;
}
// There was no shift, whatever else we find.
- DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
// Try to match an unshifted extend on the LHS.
@@ -784,7 +794,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
AArch64_AM::InvalidShiftExtend) {
Base = RHS;
Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
if (isWorthFolding(LHS))
return true;
}
@@ -795,7 +806,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
AArch64_AM::InvalidShiftExtend) {
Base = LHS;
Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
- SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
+ MVT::i32);
if (isWorthFolding(RHS))
return true;
}
@@ -826,6 +838,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
return false;
SDValue LHS = N.getOperand(0);
SDValue RHS = N.getOperand(1);
+ SDLoc DL(N);
// Check if this particular node is reused in any non-memory related
// operation. If yes, do not try to fold this node into the address
@@ -857,7 +870,6 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
return false;
- SDLoc DL(N.getNode());
SDValue Ops[] = { RHS };
SDNode *MOVI =
CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
@@ -873,7 +885,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
Base = LHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
return true;
}
@@ -881,15 +893,15 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
Base = RHS;
- DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
return true;
}
// Match any non-shifted, non-extend, non-immediate add expression.
Base = LHS;
Offset = RHS;
- SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
- DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+ SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
// Reg1 + Reg2 is free: no check needed.
return true;
}
@@ -922,18 +934,18 @@ SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
assert(Regs.size() >= 2 && Regs.size() <= 4);
- SDLoc DL(Regs[0].getNode());
+ SDLoc DL(Regs[0]);
SmallVector<SDValue, 4> Ops;
// First operand of REG_SEQUENCE is the desired RegClass.
Ops.push_back(
- CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
+ CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
// Then we get pairs of source & subregister-position for the components.
for (unsigned i = 0; i < Regs.size(); ++i) {
Ops.push_back(Regs[i]);
- Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
}
SDNode *N =
@@ -1030,19 +1042,21 @@ SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
int OffsetVal = (int)OffsetOp->getZExtValue();
- SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
+ SDLoc dl(N);
+ SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
SDValue Ops[] = { Base, Offset, Chain };
- SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
+ SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
MVT::Other, Ops);
// Either way, we're replacing the node, so tell the caller that.
Done = true;
SDValue LoadedVal = SDValue(Res, 1);
if (InsertTo64) {
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
LoadedVal =
SDValue(CurDAG->getMachineNode(
- AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
+ AArch64::SUBREG_TO_REG, dl, MVT::i64,
+ CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
+ SubReg),
0);
}
@@ -1198,7 +1212,7 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
@@ -1240,7 +1254,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
SDValue Ops[] = {RegSeq,
- CurDAG->getTargetConstant(LaneNo, MVT::i64), // Lane Number
+ CurDAG->getTargetConstant(LaneNo, dl,
+ MVT::i64), // Lane Number
N->getOperand(NumVecs + 2), // Base register
N->getOperand(NumVecs + 3), // Incremental
N->getOperand(0)};
@@ -1291,7 +1306,7 @@ SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
- SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 3), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
@@ -1324,7 +1339,7 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
unsigned LaneNo =
cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
- SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
+ SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 2), // Base Register
N->getOperand(NumVecs + 3), // Incremental
N->getOperand(0)};
@@ -1590,23 +1605,24 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
return nullptr;
EVT VT = N->getValueType(0);
+ SDLoc dl(N);
// If the bit extract operation is 64bit but the original type is 32bit, we
// need to add one EXTRACT_SUBREG.
if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
- SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
- CurDAG->getTargetConstant(MSB, MVT::i64)};
+ SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, MVT::i64),
+ CurDAG->getTargetConstant(MSB, dl, MVT::i64)};
- SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
- SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
+ SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
+ SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
MachineSDNode *Node =
- CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
+ CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
SDValue(BFM, 0), SubReg);
return Node;
}
- SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
- CurDAG->getTargetConstant(MSB, VT)};
+ SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, VT),
+ CurDAG->getTargetConstant(MSB, dl, VT)};
return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
@@ -1810,6 +1826,7 @@ static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
return Op;
EVT VT = Op.getValueType();
+ SDLoc dl(Op);
unsigned BitWidth = VT.getSizeInBits();
unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
@@ -1817,16 +1834,16 @@ static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
if (ShlAmount > 0) {
// LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
ShiftNode = CurDAG->getMachineNode(
- UBFMOpc, SDLoc(Op), VT, Op,
- CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
- CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
+ UBFMOpc, dl, VT, Op,
+ CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
+ CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
} else {
// LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
assert(ShlAmount < 0 && "expected right shift");
int ShrAmount = -ShlAmount;
ShiftNode = CurDAG->getMachineNode(
- UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
- CurDAG->getTargetConstant(BitWidth - 1, VT));
+ UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
+ CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
}
return SDValue(ShiftNode, 0);
@@ -1991,10 +2008,11 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
return nullptr;
EVT VT = N->getValueType(0);
+ SDLoc dl(N);
SDValue Ops[] = { Opd0,
Opd1,
- CurDAG->getTargetConstant(LSB, VT),
- CurDAG->getTargetConstant(MSB, VT) };
+ CurDAG->getTargetConstant(LSB, dl, VT),
+ CurDAG->getTargetConstant(MSB, dl, VT) };
return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
@@ -2092,7 +2110,7 @@ AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
// finding FBits, but it must still be in range.
if (FBits == 0 || FBits > RegWidth) return false;
- FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
+ FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
return true;
}
@@ -2207,8 +2225,9 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
const TargetLowering *TLI = getTargetLowering();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
- CurDAG->getTargetConstant(Shifter, MVT::i32) };
+ SDLoc DL(Node);
+ SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
+ CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
}
case ISD::INTRINSIC_W_CHAIN: {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cc60cc4592f..576a14d6314 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1180,7 +1180,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
isLegalArithImmed(C - 1ULL))) {
CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, VT);
+ RHS = DAG.getConstant(C, dl, VT);
}
break;
case ISD::SETULT:
@@ -1190,7 +1190,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
(VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, VT);
+ RHS = DAG.getConstant(C, dl, VT);
}
break;
case ISD::SETLE:
@@ -1201,7 +1201,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
isLegalArithImmed(C + 1ULL))) {
CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, VT);
+ RHS = DAG.getConstant(C, dl, VT);
}
break;
case ISD::SETULE:
@@ -1212,7 +1212,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
isLegalArithImmed(C + 1ULL))) {
CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, VT);
+ RHS = DAG.getConstant(C, dl, VT);
}
break;
}
@@ -1246,10 +1246,11 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
DAG.getValueType(MVT::i16));
Cmp = emitComparison(SExt,
- DAG.getConstant(ValueofRHS, RHS.getValueType()),
+ DAG.getConstant(ValueofRHS, dl,
+ RHS.getValueType()),
CC, dl, DAG);
AArch64CC = changeIntCCToAArch64CC(CC);
- AArch64cc = DAG.getConstant(AArch64CC, MVT::i32);
+ AArch64cc = DAG.getConstant(AArch64CC, dl, MVT::i32);
return Cmp;
}
}
@@ -1257,7 +1258,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
}
Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
AArch64CC = changeIntCCToAArch64CC(CC);
- AArch64cc = DAG.getConstant(AArch64CC, MVT::i32);
+ AArch64cc = DAG.getConstant(AArch64CC, dl, MVT::i32);
return Cmp;
}
@@ -1304,7 +1305,7 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Mul,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, DL, MVT::i64));
// On AArch64 the upper 32 bits are always zero extended for a 32 bit
// operation. We need to clear out the upper 32 bits, because we used a
// widening multiply that wrote all 64 bits. In the end this should be a
@@ -1317,10 +1318,10 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
// check we have to arithmetic shift right the 32nd bit of the result by
// 31 bits. Then we compare the result to the upper 32 bits.
SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Add,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, DL, MVT::i64));
UpperBits = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, UpperBits);
SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i32, Value,
- DAG.getConstant(31, MVT::i64));
+ DAG.getConstant(31, DL, MVT::i64));
// It is important that LowerBits is last, otherwise the arithmetic
// shift will not be folded into the compare (SUBS).
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
@@ -1333,10 +1334,11 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
// pattern:
// (i64 AArch64ISD::SUBS i64 0, (i64 srl i64 %Mul, i64 32)
SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, DL, MVT::i64));
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
Overflow =
- DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64),
+ DAG.getNode(AArch64ISD::SUBS, DL, VTs,
+ DAG.getConstant(0, DL, MVT::i64),
UpperBits).getValue(1);
}
break;
@@ -1347,7 +1349,7 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
if (IsSigned) {
SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS);
SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value,
- DAG.getConstant(63, MVT::i64));
+ DAG.getConstant(63, DL, MVT::i64));
// It is important that LowerBits is last, otherwise the arithmetic
// shift will not be folded into the compare (SUBS).
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
@@ -1357,7 +1359,8 @@ getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS);
SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32);
Overflow =
- DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64),
+ DAG.getNode(AArch64ISD::SUBS, DL, VTs,
+ DAG.getConstant(0, DL, MVT::i64),
UpperBits).getValue(1);
}
break;
@@ -1431,7 +1434,7 @@ static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
FVal = Other;
TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other,
- DAG.getConstant(-1ULL, Other.getValueType()));
+ DAG.getConstant(-1ULL, dl, Other.getValueType()));
return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal,
CCVal, Cmp);
@@ -1481,24 +1484,25 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
return SDValue();
+ SDLoc dl(Op);
AArch64CC::CondCode CC;
// The actual operation that sets the overflow or carry flag.
SDValue Value, Overflow;
std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
// We use 0 and 1 as false and true values.
- SDValue TVal = DAG.getConstant(1, MVT::i32);
- SDValue FVal = DAG.getConstant(0, MVT::i32);
+ SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
+ SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
// We use an inverted condition, because the conditional select is inverted
// too. This will allow it to be selected to a single instruction:
// CSINC Wd, WZR, WZR, invert(cond).
- SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), MVT::i32);
- Overflow = DAG.getNode(AArch64ISD::CSEL, SDLoc(Op), MVT::i32, FVal, TVal,
+ SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
+ Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
CCVal, Overflow);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
+ return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}
// Prefetch operands are:
@@ -1529,7 +1533,7 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
(Locality << 1) | // Cache level bits
(unsigned)IsStream; // Stream bit
return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0),
- DAG.getConstant(PrfOp, MVT::i32), Op.getOperand(1));
+ DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1));
}
SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
@@ -1631,7 +1635,7 @@ static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
InVT.getVectorNumElements());
In = DAG.getNode(Op.getOpcode(), dl, CastVT, In);
- return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0));
+ return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
}
if (VT.getSizeInBits() > InVT.getSizeInBits()) {
@@ -1656,7 +1660,7 @@ SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
return DAG.getNode(
ISD::FP_ROUND, dl, MVT::f16,
DAG.getNode(Op.getOpcode(), dl, MVT::f32, Op.getOperand(0)),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
// i128 conversions are libcalls.
@@ -1719,7 +1723,7 @@ static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) {
Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op);
return SDValue(
DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::f16, Op,
- DAG.getTargetConstant(AArch64::hsub, MVT::i32)),
+ DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
0);
}
@@ -1793,6 +1797,7 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
EVT VT = N->getValueType(0);
+ SDLoc dl(N);
unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
@@ -1802,9 +1807,9 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
const APInt &CInt = C->getAPIntValue();
// Element types smaller than 32 bits are not legal, so use i32 elements.
// The values are implicitly truncated so sext vs. zext doesn't matter.
- Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
+ Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
+ return DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(TruncVT, NumElts), Ops);
}
@@ -2276,7 +2281,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
MachinePointerInfo::getStack(i * 8), false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
- DAG.getConstant(8, getPointerTy()));
+ DAG.getConstant(8, DL, getPointerTy()));
}
}
FuncInfo->setVarArgsGPRIndex(GPRIdx);
@@ -2305,7 +2310,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
MachinePointerInfo::getStack(i * 16), false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
- DAG.getConstant(16, getPointerTy()));
+ DAG.getConstant(16, DL, getPointerTy()));
}
}
FuncInfo->setVarArgsFPRIndex(FPRIdx);
@@ -2657,8 +2662,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
if (!IsSibCall)
- Chain =
- DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), DL);
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, DL,
+ true),
+ DL);
SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP, getPointerTy());
@@ -2728,7 +2734,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
}
unsigned LocMemOffset = VA.getLocMemOffset();
int32_t Offset = LocMemOffset + BEAlign;
- SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
if (IsTailCall) {
@@ -2743,7 +2749,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// clobbered.
Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
} else {
- SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
DstAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
DstInfo = MachinePointerInfo::getStack(LocMemOffset);
@@ -2751,7 +2757,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
if (Outs[i].Flags.isByVal()) {
SDValue SizeNode =
- DAG.getConstant(Outs[i].Flags.getByValSize(), MVT::i64);
+ DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
SDValue Cpy = DAG.getMemcpy(
Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
/*isVol = */ false, /*AlwaysInline = */ false,
@@ -2821,8 +2827,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// we've carefully laid out the parameters so that when sp is reset they'll be
// in the correct location.
if (IsTailCall && !IsSibCall) {
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, DL);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
+ DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
InFlag = Chain.getValue(1);
}
@@ -2834,7 +2840,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Each tail call may have to adjust the stack by a different amount, so
// this information must travel along with the operation for eventual
// consumption by emitEpilogue.
- Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
}
// Add argument registers to the end of the list so that they are known live
@@ -2877,8 +2883,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
? RoundUpToAlignment(NumBytes, 16)
: 0;
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(CalleePopBytes, true),
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
+ DAG.getIntPtrConstant(CalleePopBytes, DL, true),
InFlag, DL);
if (!Ins.empty())
InFlag = Chain.getValue(1);
@@ -2994,7 +3000,7 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
/*isInvariant=*/ true, 8);
if (GN->getOffset() != 0)
return DAG.getNode(ISD::ADD, DL, PtrVT, GlobalAddr,
- DAG.getConstant(GN->getOffset(), PtrVT));
+ DAG.getConstant(GN->getOffset(), DL, PtrVT));
return GlobalAddr;
}
@@ -3158,11 +3164,13 @@ AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
SDValue TPWithOff_lo =
SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
- HiVar, DAG.getTargetConstant(0, MVT::i32)),
+ HiVar,
+ DAG.getTargetConstant(0, DL, MVT::i32)),
0);
SDValue TPWithOff =
SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPWithOff_lo,
- LoVar, DAG.getTargetConstant(0, MVT::i32)),
+ LoVar,
+ DAG.getTargetConstant(0, DL, MVT::i32)),
0);
return TPWithOff;
} else if (Model == TLSModel::InitialExec) {
@@ -3198,10 +3206,10 @@ AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
- DAG.getTargetConstant(0, MVT::i32)),
+ DAG.getTargetConstant(0, DL, MVT::i32)),
0);
TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
- DAG.getTargetConstant(0, MVT::i32)),
+ DAG.getTargetConstant(0, DL, MVT::i32)),
0);
} else if (Model == TLSModel::GeneralDynamic) {
// The call needs a relocation too for linker relaxation. It doesn't make
@@ -3244,7 +3252,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, LHS.getValueType());
+ RHS = DAG.getConstant(0, dl, LHS.getValueType());
CC = ISD::SETNE;
}
}
@@ -3269,7 +3277,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
if (CC == ISD::SETNE)
OFCC = getInvertedCondCode(OFCC);
- SDValue CCVal = DAG.getConstant(OFCC, MVT::i32);
+ SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);
return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
Overflow);
@@ -3294,7 +3302,8 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Test = LHS.getOperand(0);
uint64_t Mask = LHS.getConstantOperandVal(1);
return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
- DAG.getConstant(Log2_64(Mask), MVT::i64), Dest);
+ DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
+ Dest);
}
return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
@@ -3309,7 +3318,8 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Test = LHS.getOperand(0);
uint64_t Mask = LHS.getConstantOperandVal(1);
return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
- DAG.getConstant(Log2_64(Mask), MVT::i64), Dest);
+ DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
+ Dest);
}
return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
@@ -3319,7 +3329,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
// becomes redundant. This would also increase register pressure.
uint64_t Mask = LHS.getValueType().getSizeInBits() - 1;
return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
- DAG.getConstant(Mask, MVT::i64), Dest);
+ DAG.getConstant(Mask, dl, MVT::i64), Dest);
}
}
if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
@@ -3329,7 +3339,7 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
// becomes redundant. This would also increase register pressure.
uint64_t Mask = LHS.getValueType().getSizeInBits() - 1;
return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
- DAG.getConstant(Mask, MVT::i64), Dest);
+ DAG.getConstant(Mask, dl, MVT::i64), Dest);
}
SDValue CCVal;
@@ -3345,11 +3355,11 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
AArch64CC::CondCode CC1, CC2;
changeFPCCToAArch64CC(CC, CC1, CC2);
- SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
SDValue BR1 =
DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
if (CC2 != AArch64CC::AL) {
- SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
Cmp);
}
@@ -3369,7 +3379,8 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
if (SrcVT == MVT::f32 && VT == MVT::f64)
In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
else if (SrcVT == MVT::f64 && VT == MVT::f32)
- In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0));
+ In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2,
+ DAG.getIntPtrConstant(0, DL));
else
// FIXME: Src type is different, bail out for now. Can VT really be a
// vector type?
@@ -3416,7 +3427,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
llvm_unreachable("Invalid type for copysign!");
}
- SDValue BuildVec = DAG.getConstant(EltMask, VecVT);
+ SDValue BuildVec = DAG.getConstant(EltMask, DL, VecVT);
// If we couldn't materialize the mask above, then the mask vector will be
// the zero vector, and we need to negate it here.
@@ -3464,7 +3475,7 @@ SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
SDValue UaddLV = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
- DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, MVT::i32), CtPop);
+ DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
if (VT == MVT::i64)
UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
@@ -3483,8 +3494,8 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// We chose ZeroOrOneBooleanContents, so use zero and one.
EVT VT = Op.getValueType();
- SDValue TVal = DAG.getConstant(1, VT);
- SDValue FVal = DAG.getConstant(0, VT);
+ SDValue TVal = DAG.getConstant(1, dl, VT);
+ SDValue FVal = DAG.getConstant(0, dl, VT);
// Handle f128 first, since one possible outcome is a normal integer
// comparison which gets picked up by the next if statement.
@@ -3521,7 +3532,7 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
changeFPCCToAArch64CC(CC, CC1, CC2);
if (CC2 == AArch64CC::AL) {
changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, false), CC1, CC2);
- SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
// Note that we inverted the condition above, so we reverse the order of
// the true and false operands here. This will allow the setcc to be
@@ -3534,11 +3545,11 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// of the first as the RHS. We're effectively OR'ing the two CC's together.
// FIXME: It would be nice if we could match the two CSELs to two CSINCs.
- SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
SDValue CS1 =
DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
- SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
}
}
@@ -3580,7 +3591,7 @@ SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
// If softenSetCCOperands returned a scalar, we need to compare the result
// against zero to select between true and false values.
if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, LHS.getValueType());
+ RHS = DAG.getConstant(0, dl, LHS.getValueType());
CC = ISD::SETNE;
}
}
@@ -3733,13 +3744,13 @@ SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
// clean. Some of them require two CSELs to implement.
AArch64CC::CondCode CC1, CC2;
changeFPCCToAArch64CC(CC, CC1, CC2);
- SDValue CC1Val = DAG.getConstant(CC1, MVT::i32);
+ SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);
// If we need a second CSEL, emit it, using the output of the first as the
// RHS. We're effectively OR'ing the two CC's together.
if (CC2 != AArch64CC::AL) {
- SDValue CC2Val = DAG.getConstant(CC2, MVT::i32);
+ SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
}
@@ -3778,7 +3789,7 @@ SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
AArch64CC::CondCode OFCC;
SDValue Value, Overflow;
std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
- SDValue CCVal = DAG.getConstant(OFCC, MVT::i32);
+ SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);
return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
CCVal, Overflow);
@@ -3793,7 +3804,7 @@ SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->get();
} else {
LHS = CCVal;
- RHS = DAG.getConstant(0, CCVal.getValueType());
+ RHS = DAG.getConstant(0, DL, CCVal.getValueType());
CC = ISD::SETNE;
}
return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
@@ -3929,11 +3940,11 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
SDValue GRTop, GRTopAddr;
GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(8, getPointerTy()));
+ DAG.getConstant(8, DL, getPointerTy()));
GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), getPointerTy());
GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
- DAG.getConstant(GPRSize, getPointerTy()));
+ DAG.getConstant(GPRSize, DL, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
MachinePointerInfo(SV, 8), false, false, 8));
@@ -3944,11 +3955,11 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
if (FPRSize > 0) {
SDValue VRTop, VRTopAddr;
VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(16, getPointerTy()));
+ DAG.getConstant(16, DL, getPointerTy()));
VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), getPointerTy());
VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
- DAG.getConstant(FPRSize, getPointerTy()));
+ DAG.getConstant(FPRSize, DL, getPointerTy()));
MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
MachinePointerInfo(SV, 16), false, false, 8));
@@ -3956,15 +3967,17 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
// int __gr_offs at offset 24
SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(24, getPointerTy()));
- MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
+ DAG.getConstant(24, DL, getPointerTy()));
+ MemOps.push_back(DAG.getStore(Chain, DL,
+ DAG.getConstant(-GPRSize, DL, MVT::i32),
GROffsAddr, MachinePointerInfo(SV, 24), false,
false, 4));
// int __vr_offs at offset 28
SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(28, getPointerTy()));
- MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
+ DAG.getConstant(28, DL, getPointerTy()));
+ MemOps.push_back(DAG.getStore(Chain, DL,
+ DAG.getConstant(-FPRSize, DL, MVT::i32),
VROffsAddr, MachinePointerInfo(SV, 28), false,
false, 4));
@@ -3981,12 +3994,14 @@ SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
SelectionDAG &DAG) const {
// AAPCS has three pointers and two ints (= 32 bytes), Darwin has single
// pointer.
+ SDLoc DL(Op);
unsigned VaListSize = Subtarget->isTargetDarwin() ? 8 : 32;
const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), Op.getOperand(1),
- Op.getOperand(2), DAG.getConstant(VaListSize, MVT::i32),
+ return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1),
+ Op.getOperand(2),
+ DAG.getConstant(VaListSize, DL, MVT::i32),
8, false, false, false, MachinePointerInfo(DestSV),
MachinePointerInfo(SrcSV));
}
@@ -4009,9 +4024,9 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (Align > 8) {
assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
VAList = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(Align - 1, getPointerTy()));
+ DAG.getConstant(Align - 1, DL, getPointerTy()));
VAList = DAG.getNode(ISD::AND, DL, getPointerTy(), VAList,
- DAG.getConstant(-(int64_t)Align, getPointerTy()));
+ DAG.getConstant(-(int64_t)Align, DL, getPointerTy()));
}
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
@@ -4031,7 +4046,7 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// Increment the pointer, VAList, to the next vaarg
SDValue VANext = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
- DAG.getConstant(ArgSize, getPointerTy()));
+ DAG.getConstant(ArgSize, DL, getPointerTy()));
// Store the incremented VAList to the legalized pointer
SDValue APStore = DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V),
false, false, 0);
@@ -4043,7 +4058,7 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(), false, false, false, 0);
// Round the value down to an f32.
SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
- DAG.getIntPtrConstant(1));
+ DAG.getIntPtrConstant(1, DL));
SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
// Merge the rounded value with the chain output of the load.
return DAG.getMergeValues(Ops, DL);
@@ -4092,7 +4107,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(8, getPointerTy());
+ SDValue Offset = DAG.getConstant(8, DL, getPointerTy());
return DAG.getLoad(VT, DL, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
MachinePointerInfo(), false, false, false, 0);
@@ -4120,15 +4135,15 @@ SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
- DAG.getConstant(VTBits, MVT::i64), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i64), ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
- DAG.getConstant(VTBits, MVT::i64));
+ DAG.getConstant(VTBits, dl, MVT::i64));
SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
- SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64),
+ SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64),
ISD::SETGE, dl, DAG);
- SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32);
+ SDValue CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32);
SDValue FalseValLo = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue TrueValLo = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
@@ -4140,8 +4155,9 @@ SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
SDValue FalseValHi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
SDValue TrueValHi = Opc == ISD::SRA
? DAG.getNode(Opc, dl, VT, ShOpHi,
- DAG.getConstant(VTBits - 1, MVT::i64))
- : DAG.getConstant(0, VT);
+ DAG.getConstant(VTBits - 1, dl,
+ MVT::i64))
+ : DAG.getConstant(0, dl, VT);
SDValue Hi =
DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValHi, FalseValHi, CCVal, Cmp);
@@ -4164,24 +4180,24 @@ SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op,
assert(Op.getOpcode() == ISD::SHL_PARTS);
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
- DAG.getConstant(VTBits, MVT::i64), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i64), ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
- DAG.getConstant(VTBits, MVT::i64));
+ DAG.getConstant(VTBits, dl, MVT::i64));
SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64),
+ SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64),
ISD::SETGE, dl, DAG);
- SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32);
+ SDValue CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32);
SDValue Hi =
DAG.getNode(AArch64ISD::CSEL, dl, VT, Tmp3, FalseVal, CCVal, Cmp);
// AArch64 shifts of larger than register sizes are wrapped rather than
// clamped, so we can't just emit "lo << a" if a is too big.
- SDValue TrueValLo = DAG.getConstant(0, VT);
+ SDValue TrueValLo = DAG.getConstant(0, dl, VT);
SDValue FalseValLo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
SDValue Lo =
DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValLo, FalseValLo, CCVal, Cmp);
@@ -4474,7 +4490,7 @@ void AArch64TargetLowering::LowerAsmOperandForConstraint(
}
// All assembler immediates are 64-bit integers.
- Result = DAG.getTargetConstant(CVal, MVT::i64);
+ Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
break;
}
@@ -4500,7 +4516,7 @@ static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
SDLoc DL(V64Reg);
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
- V64Reg, DAG.getConstant(0, MVT::i32));
+ V64Reg, DAG.getConstant(0, DL, MVT::i32));
}
/// getExtFactor - Determine the adjustment factor for the position when
@@ -4632,25 +4648,26 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
// The extraction can just take the second half
Src.ShuffleVec =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(NumSrcElts, MVT::i64));
+ DAG.getConstant(NumSrcElts, dl, MVT::i64));
Src.WindowBase = -NumSrcElts;
} else if (Src.MaxElt < NumSrcElts) {
// The extraction can just take the first half
Src.ShuffleVec =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, dl, MVT::i64));
} else {
// An actual VEXT is needed
SDValue VEXTSrc1 =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, dl, MVT::i64));
SDValue VEXTSrc2 =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
- DAG.getConstant(NumSrcElts, MVT::i64));
+ DAG.getConstant(NumSrcElts, dl, MVT::i64));
unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);
Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
- VEXTSrc2, DAG.getConstant(Imm, MVT::i32));
+ VEXTSrc2,
+ DAG.getConstant(Imm, dl, MVT::i32));
Src.WindowBase = -Src.MinElt;
}
}
@@ -4985,11 +5002,11 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
VT.getVectorNumElements() / 2);
if (SplitV0) {
V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, DL, MVT::i64));
}
if (V1.getValueType().getSizeInBits() == 128) {
V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, DL, MVT::i64));
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
}
@@ -5067,7 +5084,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
if (VT.getSizeInBits() == 64)
OpLHS = WidenVector(OpLHS, DAG);
- SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, MVT::i64);
+ SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
}
case OP_VEXT1:
@@ -5075,7 +5092,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
case OP_VEXT3: {
unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS);
return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS,
- DAG.getConstant(Imm, MVT::i32));
+ DAG.getConstant(Imm, dl, MVT::i32));
}
case OP_VUZPL:
return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS,
@@ -5112,7 +5129,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
for (int Val : ShuffleMask) {
for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
unsigned Offset = Byte + Val * BytesPerElt;
- TBLMask.push_back(DAG.getConstant(Offset, MVT::i32));
+ TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32));
}
}
@@ -5132,7 +5149,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
} else {
@@ -5140,7 +5157,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
} else {
@@ -5152,7 +5169,8 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
// &TBLMask[0], IndexLen));
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl2, MVT::i32), V1Cst, V2Cst,
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32),
+ V1Cst, V2Cst,
DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
makeArrayRef(TBLMask.data(), IndexLen)));
}
@@ -5221,7 +5239,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
} else if (VT.getSizeInBits() == 64)
V1 = WidenVector(V1, DAG);
- return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, MVT::i64));
+ return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, dl, MVT::i64));
}
if (isREVMask(ShuffleMask, VT, 64))
@@ -5238,12 +5256,12 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
std::swap(V1, V2);
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
- DAG.getConstant(Imm, MVT::i32));
+ DAG.getConstant(Imm, dl, MVT::i32));
} else if (V2->getOpcode() == ISD::UNDEF &&
isSingletonEXTMask(ShuffleMask, VT, Imm)) {
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
- DAG.getConstant(Imm, MVT::i32));
+ DAG.getConstant(Imm, dl, MVT::i32));
}
unsigned WhichResult;
@@ -5282,7 +5300,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
int NumInputElements = V1.getValueType().getVectorNumElements();
if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
SDValue DstVec = DstIsLeft ? V1 : V2;
- SDValue DstLaneV = DAG.getConstant(Anomaly, MVT::i64);
+ SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64);
SDValue SrcVec = V1;
int SrcLane = ShuffleMask[Anomaly];
@@ -5290,7 +5308,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SrcVec = V2;
SrcLane -= VT.getVectorNumElements();
}
- SDValue SrcLaneV = DAG.getConstant(SrcLane, MVT::i64);
+ SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64);
EVT ScalarVT = VT.getVectorElementType();
@@ -5380,8 +5398,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5389,8 +5407,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5398,8 +5416,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(16, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5407,8 +5425,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(24, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(24, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5416,8 +5434,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5425,8 +5443,8 @@ SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
}
@@ -5530,7 +5548,8 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli;
SDValue ResultSLI =
DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrin, MVT::i32), X, Y, Shift.getOperand(1));
+ DAG.getConstant(Intrin, DL, MVT::i32), X, Y,
+ Shift.getOperand(1));
DEBUG(dbgs() << "aarch64-lower: transformed: \n");
DEBUG(N->dump(&DAG));
@@ -5580,8 +5599,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5589,8 +5608,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5598,8 +5617,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(16, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5607,8 +5626,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(24, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(24, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5616,8 +5635,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5625,8 +5644,8 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
}
@@ -5661,7 +5680,7 @@ static SDValue NormalizeBuildVector(SDValue Op,
if (Lane.getOpcode() == ISD::Constant) {
APInt LowBits(EltTy.getSizeInBits(),
cast<ConstantSDNode>(Lane)->getZExtValue());
- Lane = DAG.getConstant(LowBits.getZExtValue(), MVT::i32);
+ Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
}
Ops.push_back(Lane);
}
@@ -5699,13 +5718,13 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType10(CnstVal);
if (VT.getSizeInBits() == 128) {
SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::v2i64,
- DAG.getConstant(CnstVal, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
// Support the V64 version via subregister insertion.
SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::f64,
- DAG.getConstant(CnstVal, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5713,8 +5732,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5722,8 +5741,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5731,8 +5750,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(16, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5740,8 +5759,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(24, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(24, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5749,8 +5768,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5758,8 +5777,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5767,8 +5786,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(264, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(264, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5776,8 +5795,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(272, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(272, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5785,7 +5804,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType9(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8;
SDValue Mov = DAG.getNode(AArch64ISD::MOVI, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5794,7 +5813,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType11(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4f32 : MVT::v2f32;
SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5802,7 +5821,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
VT.getSizeInBits() == 128) {
CnstVal = AArch64_AM::encodeAdvSIMDModImmType12(CnstVal);
SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MVT::v2f64,
- DAG.getConstant(CnstVal, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5812,8 +5831,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5821,8 +5840,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5830,8 +5849,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(16, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5839,8 +5858,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(24, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(24, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5848,8 +5867,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5857,8 +5876,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(8, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5866,8 +5885,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(264, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(264, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
@@ -5875,8 +5894,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal);
MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy,
- DAG.getConstant(CnstVal, MVT::i32),
- DAG.getConstant(272, MVT::i32));
+ DAG.getConstant(CnstVal, dl, MVT::i32),
+ DAG.getConstant(272, dl, MVT::i32));
return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov);
}
}
@@ -5982,7 +6001,7 @@ FailedModImm:
// Now insert the non-constant lanes.
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
+ SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) {
// Note that type legalization likely mucked about with the VT of the
// source operand, so we may have to convert it here before inserting.
@@ -6024,7 +6043,7 @@ FailedModImm:
unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
MachineSDNode *N =
DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, VT, Vec, Op0,
- DAG.getTargetConstant(SubIdx, MVT::i32));
+ DAG.getTargetConstant(SubIdx, dl, MVT::i32));
Vec = SDValue(N, 0);
++i;
}
@@ -6032,7 +6051,7 @@ FailedModImm:
SDValue V = Op.getOperand(i);
if (V.getOpcode() == ISD::UNDEF)
continue;
- SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
+ SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
}
return Vec;
@@ -6255,10 +6274,11 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
case ISD::SHL:
if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
- return DAG.getNode(AArch64ISD::VSHL, SDLoc(Op), VT, Op.getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
+ return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),
+ DAG.getConstant(Cnt, DL, MVT::i32));
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::aarch64_neon_ushl, MVT::i32),
+ DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL,
+ MVT::i32),
Op.getOperand(0), Op.getOperand(1));
case ISD::SRA:
case ISD::SRL:
@@ -6267,8 +6287,8 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
Cnt < EltSize) {
unsigned Opc =
(Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR;
- return DAG.getNode(Opc, SDLoc(Op), VT, Op.getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
+ return DAG.getNode(Opc, DL, VT, Op.getOperand(0),
+ DAG.getConstant(Cnt, DL, MVT::i32));
}
// Right shift register. Note, there is not a shift right register
@@ -6280,7 +6300,8 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
SDValue NegShift = DAG.getNode(AArch64ISD::NEG, DL, VT, Op.getOperand(1));
SDValue NegShiftLeft =
DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Opc, MVT::i32), Op.getOperand(0), NegShift);
+ DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0),
+ NegShift);
return NegShiftLeft;
}
@@ -6896,14 +6917,14 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0))
if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) {
- SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
+ SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
N0.getOperand(0));
// Generate SUBS & CSEL.
SDValue Cmp =
DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
- N0.getOperand(0), DAG.getConstant(0, VT));
+ N0.getOperand(0), DAG.getConstant(0, DL, VT));
return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0.getOperand(0), Neg,
- DAG.getConstant(AArch64CC::PL, MVT::i32),
+ DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
SDValue(Cmp.getNode(), 1));
}
return SDValue();
@@ -6932,8 +6953,8 @@ AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
unsigned Lg2 = Divisor.countTrailingZeros();
- SDValue Zero = DAG.getConstant(0, VT);
- SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, VT);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
// Add (N0 < 0) ? Pow2 - 1 : 0;
SDValue CCVal;
@@ -6949,7 +6970,7 @@ AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
// Divide by pow2.
SDValue SRA =
- DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, MVT::i64));
+ DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
@@ -6958,7 +6979,7 @@ AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
if (Created)
Created->push_back(SRA.getNode());
- return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), SRA);
+ return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
@@ -6975,23 +6996,24 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
APInt Value = C->getAPIntValue();
EVT VT = N->getValueType(0);
+ SDLoc DL(N);
if (Value.isNonNegative()) {
// (mul x, 2^N + 1) => (add (shl x, N), x)
APInt VM1 = Value - 1;
if (VM1.isPowerOf2()) {
SDValue ShiftedVal =
- DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(VM1.logBase2(), MVT::i64));
- return DAG.getNode(ISD::ADD, SDLoc(N), VT, ShiftedVal,
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(VM1.logBase2(), DL, MVT::i64));
+ return DAG.getNode(ISD::ADD, DL, VT, ShiftedVal,
N->getOperand(0));
}
// (mul x, 2^N - 1) => (sub (shl x, N), x)
APInt VP1 = Value + 1;
if (VP1.isPowerOf2()) {
SDValue ShiftedVal =
- DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(VP1.logBase2(), MVT::i64));
- return DAG.getNode(ISD::SUB, SDLoc(N), VT, ShiftedVal,
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(VP1.logBase2(), DL, MVT::i64));
+ return DAG.getNode(ISD::SUB, DL, VT, ShiftedVal,
N->getOperand(0));
}
} else {
@@ -6999,20 +7021,20 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
APInt VNP1 = -Value + 1;
if (VNP1.isPowerOf2()) {
SDValue ShiftedVal =
- DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(VNP1.logBase2(), MVT::i64));
- return DAG.getNode(ISD::SUB, SDLoc(N), VT, N->getOperand(0),
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(VNP1.logBase2(), DL, MVT::i64));
+ return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0),
ShiftedVal);
}
// (mul x, -(2^N + 1)) => - (add (shl x, N), x)
APInt VNM1 = -Value - 1;
if (VNM1.isPowerOf2()) {
SDValue ShiftedVal =
- DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(VNM1.logBase2(), MVT::i64));
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(VNM1.logBase2(), DL, MVT::i64));
SDValue Add =
- DAG.getNode(ISD::ADD, SDLoc(N), VT, ShiftedVal, N->getOperand(0));
- return DAG.getNode(ISD::SUB, SDLoc(N), VT, DAG.getConstant(0, VT), Add);
+ DAG.getNode(ISD::ADD, DL, VT, ShiftedVal, N->getOperand(0));
+ return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Add);
}
}
}
@@ -7167,7 +7189,7 @@ static SDValue tryCombineToEXTR(SDNode *N,
}
return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS,
- DAG.getConstant(ShiftRHS, MVT::i64));
+ DAG.getConstant(ShiftRHS, DL, MVT::i64));
}
static SDValue tryCombineToBSL(SDNode *N,
@@ -7295,10 +7317,10 @@ static SDValue performBitcastCombine(SDNode *N,
SDLoc dl(N);
unsigned NumElements = VT.getVectorNumElements();
if (idx) {
- SDValue HalfIdx = DAG.getConstant(NumElements, MVT::i64);
+ SDValue HalfIdx = DAG.getConstant(NumElements, dl, MVT::i64);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Source, HalfIdx);
} else {
- SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, MVT::i32);
+ SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, dl, MVT::i32);
return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, VT,
Source, SubReg),
0);
@@ -7356,7 +7378,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
if (N0 == N1 && VT.getVectorNumElements() == 2) {
assert(VT.getVectorElementType().getSizeInBits() == 64);
return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, dl, MVT::i64));
}
// Canonicalise concat_vectors so that the right-hand vector has as few
@@ -7472,15 +7494,16 @@ static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
unsigned NumElems = NarrowTy.getVectorNumElements();
MVT NewDUPVT = MVT::getVectorVT(ElementTy, NumElems * 2);
+ SDLoc dl(N);
SDValue NewDUP;
if (IsDUPLANE)
- NewDUP = DAG.getNode(N.getOpcode(), SDLoc(N), NewDUPVT, N.getOperand(0),
+ NewDUP = DAG.getNode(N.getOpcode(), dl, NewDUPVT, N.getOperand(0),
N.getOperand(1));
else
- NewDUP = DAG.getNode(AArch64ISD::DUP, SDLoc(N), NewDUPVT, N.getOperand(0));
+ NewDUP = DAG.getNode(AArch64ISD::DUP, dl, NewDUPVT, N.getOperand(0));
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N.getNode()), NarrowTy,
- NewDUP, DAG.getConstant(NumElems, MVT::i64));
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy, NewDUP,
+ DAG.getConstant(NumElems, dl, MVT::i64));
}
static bool isEssentiallyExtractSubvector(SDValue N) {
@@ -7605,7 +7628,8 @@ static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
SDLoc dl(Op);
if (InfoAndKind.IsAArch64) {
CCVal = DAG.getConstant(
- AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), MVT::i32);
+ AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
+ MVT::i32);
Cmp = *InfoAndKind.Info.AArch64.Cmp;
} else
Cmp = getAArch64Cmp(*InfoAndKind.Info.Generic.Opnd0,
@@ -7614,7 +7638,7 @@ static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
CCVal, DAG, dl);
EVT VT = Op->getValueType(0);
- LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, VT));
+ LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
}
@@ -7754,12 +7778,15 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
break;
}
- if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits)
- return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1),
- DAG.getConstant(-ShiftAmount, MVT::i32));
- else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits)
- return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1),
- DAG.getConstant(ShiftAmount, MVT::i32));
+ if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) {
+ SDLoc dl(N);
+ return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
+ DAG.getConstant(-ShiftAmount, dl, MVT::i32));
+ } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
+ SDLoc dl(N);
+ return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1),
+ DAG.getConstant(ShiftAmount, dl, MVT::i32));
+ }
return SDValue();
}
@@ -7782,11 +7809,12 @@ static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
SelectionDAG &DAG) {
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), N->getValueType(0),
- DAG.getNode(Opc, SDLoc(N),
+ SDLoc dl(N);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0),
+ DAG.getNode(Opc, dl,
N->getOperand(1).getSimpleValueType(),
N->getOperand(1)),
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, dl, MVT::i64));
}
static SDValue performIntrinsicCombine(SDNode *N,
@@ -7927,9 +7955,9 @@ static SDValue performExtendCombine(SDNode *N,
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, DL, MVT::i64));
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
- DAG.getConstant(InNVT.getVectorNumElements(), MVT::i64));
+ DAG.getConstant(InNVT.getVectorNumElements(), DL, MVT::i64));
Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo);
Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi);
@@ -7989,7 +8017,7 @@ static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) {
unsigned Offset = EltOffset;
while (--NumVecElts) {
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
- DAG.getConstant(Offset, MVT::i64));
+ DAG.getConstant(Offset, DL, MVT::i64));
NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
St->getPointerInfo(), St->isVolatile(),
St->isNonTemporal(), Alignment);
@@ -8050,15 +8078,15 @@ static SDValue performSTORECombine(SDNode *N,
EVT HalfVT =
EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts);
SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getConstant(0, MVT::i64));
+ DAG.getConstant(0, DL, MVT::i64));
SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
- DAG.getConstant(NumElts, MVT::i64));
+ DAG.getConstant(NumElts, DL, MVT::i64));
SDValue BasePtr = S->getBasePtr();
SDValue NewST1 =
DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
S->isVolatile(), S->isNonTemporal(), S->getAlignment());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
- DAG.getConstant(8, MVT::i64));
+ DAG.getConstant(8, DL, MVT::i64));
return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
S->getPointerInfo(), S->isVolatile(), S->isNonTemporal(),
S->getAlignment());
@@ -8906,7 +8934,7 @@ static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
Op = SDValue(
DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32,
DAG.getUNDEF(MVT::i32), Op,
- DAG.getTargetConstant(AArch64::hsub, MVT::i32)),
+ DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)),
0);
Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op));
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 0c0efaf7c1a..3b8b6681a08 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -441,11 +441,11 @@ def vecshiftL64 : Operand<i32>, ImmLeaf<i32, [{
// instructions for splatting repeating bit patterns across the immediate.
def logical_imm32_XFORM : SDNodeXForm<imm, [{
uint64_t enc = AArch64_AM::encodeLogicalImmediate(N->getZExtValue(), 32);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>;
def logical_imm64_XFORM : SDNodeXForm<imm, [{
uint64_t enc = AArch64_AM::encodeLogicalImmediate(N->getZExtValue(), 64);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>;
let DiagnosticType = "LogicalSecondSource" in {
@@ -682,7 +682,7 @@ def fpimm32 : Operand<f32>,
}], SDNodeXForm<fpimm, [{
APFloat InVal = N->getValueAPF();
uint32_t enc = AArch64_AM::getFP32Imm(InVal);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>> {
let ParserMatchClass = FPImmOperand;
let PrintMethod = "printFPImmOperand";
@@ -693,7 +693,7 @@ def fpimm64 : Operand<f64>,
}], SDNodeXForm<fpimm, [{
APFloat InVal = N->getValueAPF();
uint32_t enc = AArch64_AM::getFP64Imm(InVal);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>> {
let ParserMatchClass = FPImmOperand;
let PrintMethod = "printFPImmOperand";
@@ -768,7 +768,7 @@ def simdimmtype10 : Operand<i32>,
uint32_t enc = AArch64_AM::encodeAdvSIMDModImmType10(N->getValueAPF()
.bitcastToAPInt()
.getZExtValue());
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>> {
let ParserMatchClass = SIMDImmType10Operand;
let PrintMethod = "printSIMDType10Operand";
@@ -2192,7 +2192,8 @@ class BaseCondSelectOp<bit op, bits<2> op2, RegisterClass regtype, string asm,
def inv_cond_XFORM : SDNodeXForm<imm, [{
AArch64CC::CondCode CC = static_cast<AArch64CC::CondCode>(N->getZExtValue());
- return CurDAG->getTargetConstant(AArch64CC::getInvertedCondCode(CC), MVT::i32);
+ return CurDAG->getTargetConstant(AArch64CC::getInvertedCondCode(CC), SDLoc(N),
+ MVT::i32);
}]>;
multiclass CondSelectOp<bit op, bits<2> op2, string asm, PatFrag frag> {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 92d446080b7..e76e74cc82f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -498,7 +498,7 @@ def i64imm_32bit : ImmLeaf<i64, [{
}]>;
def trunc_imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
def : Pat<(i64 i64imm_32bit:$src),
@@ -507,12 +507,12 @@ def : Pat<(i64 i64imm_32bit:$src),
// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
- N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i32);
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;
def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
- N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i64);
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;
@@ -857,57 +857,57 @@ defm UBFM : BitfieldImm<0b10, "ubfm">;
def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 31 - N->getZExtValue();
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 31 - N->getZExtValue();
enc = enc > 7 ? 7 : enc;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 31 - N->getZExtValue();
enc = enc > 15 ? 15 : enc;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 63 - N->getZExtValue();
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 63 - N->getZExtValue();
enc = enc > 7 ? 7 : enc;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 63 - N->getZExtValue();
enc = enc > 15 ? 15 : enc;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
uint64_t enc = 63 - N->getZExtValue();
enc = enc > 31 ? 31 : enc;
- return CurDAG->getTargetConstant(enc, MVT::i64);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
@@ -3563,13 +3563,13 @@ def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(2 * N->getZExtValue(), MVT::i64);
+ return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(4 * N->getZExtValue(), MVT::i64);
+ return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(8 * N->getZExtValue(), MVT::i64);
+ return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 44cd1ef8052..4405625e47c 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -83,8 +83,8 @@ public:
/// getI32Imm - Return a target constant of type i32 with the specified
/// value.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
SDNode *Select(SDNode *N) override;
@@ -134,7 +134,7 @@ public:
bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
const ConstantSDNode *CN = cast<ConstantSDNode>(N);
- Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
+ Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
return true;
}
@@ -272,7 +272,8 @@ private:
SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
// Get the alignment operand for a NEON VLD or VST instruction.
- SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
+ SDValue GetVLDSTAlign(SDValue Align, SDLoc dl, unsigned NumVecs,
+ bool is64BitVector);
};
}
@@ -394,11 +395,13 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
// Now make the transformation.
Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
Srl.getOperand(0),
- CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
+ CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
+ MVT::i32));
N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
- Srl, CurDAG->getConstant(And_imm, MVT::i32));
+ Srl,
+ CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
- N1, CurDAG->getConstant(TZ, MVT::i32));
+ N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
CurDAG->UpdateNodeOperands(N, N0, N1);
}
}
@@ -483,7 +486,7 @@ bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
if (!RHS) return false;
ShImmVal = RHS->getZExtValue() & 31;
Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -510,7 +513,7 @@ bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
return false;
Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -527,7 +530,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
// Match frame index.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -536,7 +539,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
Base = N.getOperand(0);
} else
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -551,14 +554,14 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
return true;
}
}
// Base only.
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -583,7 +586,7 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
Base = Offset = N.getOperand(0);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
ARM_AM::lsl),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
}
@@ -654,7 +657,7 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -682,7 +685,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Base = Offset = N.getOperand(0);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
ARM_AM::lsl),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return AM2_SHOP;
}
}
@@ -703,7 +706,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
ARM_AM::no_shift),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return AM2_BASE;
}
@@ -726,7 +729,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
ARM_AM::no_shift),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return AM2_BASE;
}
}
@@ -737,7 +740,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
ARM_AM::no_shift),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return AM2_BASE;
}
@@ -792,7 +795,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return AM2_SHOP;
}
@@ -828,7 +831,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -844,7 +847,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
if (AddSub == ARM_AM::sub) Val *= -1;
Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(Val, MVT::i32);
+ Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
return true;
}
@@ -865,7 +868,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
ARM_AM::no_shift),
- MVT::i32);
+ SDLoc(Op), MVT::i32);
return true;
}
@@ -884,7 +887,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
// X - C is canonicalize to X + -C, no need to handle it here.
Base = N.getOperand(0);
Offset = N.getOperand(1);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
+ MVT::i32);
return true;
}
@@ -895,7 +899,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
+ MVT::i32);
return true;
}
@@ -915,13 +920,15 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
AddSub = ARM_AM::sub;
RHSC = -RHSC;
}
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
+ MVT::i32);
return true;
}
Base = N.getOperand(0);
Offset = N.getOperand(1);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
+ MVT::i32);
return true;
}
@@ -936,12 +943,14 @@ bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
int Val;
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 12 bits.
Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
+ MVT::i32);
return true;
}
Offset = N;
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
+ Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
+ MVT::i32);
return true;
}
@@ -957,7 +966,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
Base = N.getOperand(0);
}
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -977,13 +986,13 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
RHSC = -RHSC;
}
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
Base = N;
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -1012,7 +1021,7 @@ bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
Alignment = MemN->getAlignment();
}
- Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
+ Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
return true;
}
@@ -1036,7 +1045,7 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- MVT::i32);
+ SDLoc(N), MVT::i32);
return true;
}
@@ -1141,7 +1150,7 @@ ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
Base = N;
}
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1158,7 +1167,7 @@ ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
if (LHSC != 0 || RHSC != 0) return false;
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1166,12 +1175,12 @@ ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
int RHSC;
if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
return true;
}
Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1203,7 +1212,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
if (MFI->getObjectAlignment(FI) < 4)
MFI->setObjectAlignment(FI, 4);
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1226,7 +1235,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
MFI->setObjectAlignment(FI, 4);
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
return true;
}
}
@@ -1255,7 +1264,7 @@ bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
unsigned ShImmVal = 0;
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
ShImmVal = RHS->getZExtValue() & 31;
- Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
+ Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal), SDLoc(N));
return true;
}
@@ -1273,7 +1282,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
// Match frame index.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1284,7 +1293,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
return false; // We want to select t2LDRpci instead.
} else
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1303,14 +1312,14 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
return true;
}
}
// Base only.
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
return true;
}
@@ -1332,7 +1341,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
return true;
}
}
@@ -1349,8 +1358,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
int RHSC;
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
- ? CurDAG->getTargetConstant(RHSC, MVT::i32)
- : CurDAG->getTargetConstant(-RHSC, MVT::i32);
+ ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
+ : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
return true;
}
@@ -1399,7 +1408,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
}
}
- ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
+ ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);
return true;
}
@@ -1409,7 +1418,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
// This *must* succeed since it's used for the irreplaceable ldrex and strex
// instructions.
Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
return true;
@@ -1428,15 +1437,15 @@ bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
}
- OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
+ OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
return true;
}
//===--------------------------------------------------------------------===//
/// getAL - Returns a ARMCC::AL immediate node.
-static inline SDValue getAL(SelectionDAG *CurDAG) {
- return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
+static inline SDValue getAL(SelectionDAG *CurDAG, SDLoc dl) {
+ return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
}
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
@@ -1495,14 +1504,14 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
- SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
+ SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
MVT::i32, MVT::Other, Ops);
} else {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
- SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
+ SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
MVT::i32, MVT::Other, Ops);
@@ -1551,7 +1560,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
if (Match) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
- SDValue Ops[]= { Base, Offset, getAL(CurDAG),
+ SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
MVT::Other, Ops);
@@ -1564,9 +1573,9 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
SDLoc dl(V0.getNode());
SDValue RegClass =
- CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
+ CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}
@@ -1575,9 +1584,9 @@ SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
SDLoc dl(V0.getNode());
SDValue RegClass =
- CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
+ CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}
@@ -1585,9 +1594,10 @@ SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
SDLoc dl(V0.getNode());
- SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
+ SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
+ MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}
@@ -1595,9 +1605,10 @@ SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
SDLoc dl(V0.getNode());
- SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
+ SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
+ MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}
@@ -1607,11 +1618,11 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
SDLoc dl(V0.getNode());
SDValue RegClass =
- CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
- SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
- SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
+ CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
+ SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
+ SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
V2, SubReg2, V3, SubReg3 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
@@ -1621,11 +1632,12 @@ SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
SDLoc dl(V0.getNode());
- SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
- SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
- SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
+ SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
+ MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
+ SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
+ SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
V2, SubReg2, V3, SubReg3 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
@@ -1635,11 +1647,12 @@ SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
SDValue V2, SDValue V3) {
SDLoc dl(V0.getNode());
- SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
- SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
- SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
+ SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
+ MVT::i32);
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
+ SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
+ SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
V2, SubReg2, V3, SubReg3 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
@@ -1648,8 +1661,8 @@ SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
-SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
- bool is64BitVector) {
+SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, SDLoc dl,
+ unsigned NumVecs, bool is64BitVector) {
unsigned NumRegs = NumVecs;
if (!is64BitVector && NumVecs < 3)
NumRegs *= 2;
@@ -1664,7 +1677,7 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
else
Alignment = 0;
- return CurDAG->getTargetConstant(Alignment, MVT::i32);
+ return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
}
static bool isVLDfixed(unsigned Opc)
@@ -1784,7 +1797,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
SDValue Chain = N->getOperand(0);
EVT VT = N->getValueType(0);
bool is64BitVector = VT.is64BitVector();
- Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
+ Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -1821,7 +1834,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
ResTys.push_back(MVT::i32);
ResTys.push_back(MVT::Other);
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDNode *VLd;
SmallVector<SDValue, 7> Ops;
@@ -1921,7 +1934,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
SDValue Chain = N->getOperand(0);
EVT VT = N->getOperand(Vec0Idx).getValueType();
bool is64BitVector = VT.is64BitVector();
- Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
+ Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -1948,7 +1961,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
ResTys.push_back(MVT::i32);
ResTys.push_back(MVT::Other);
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SmallVector<SDValue, 7> Ops;
@@ -2084,7 +2097,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
if (Alignment == 1)
Alignment = 0;
}
- Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
+ Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -2112,7 +2125,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
ResTys.push_back(MVT::i32);
ResTys.push_back(MVT::Other);
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SmallVector<SDValue, 8> Ops;
@@ -2142,7 +2155,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
}
Ops.push_back(SuperReg);
- Ops.push_back(getI32Imm(Lane));
+ Ops.push_back(getI32Imm(Lane, dl));
Ops.push_back(Pred);
Ops.push_back(Reg0);
Ops.push_back(Chain);
@@ -2197,7 +2210,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
if (Alignment == 1)
Alignment = 0;
}
- Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
+ Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
unsigned OpcodeIndex;
switch (VT.getSimpleVT().SimpleTy) {
@@ -2208,7 +2221,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
case MVT::v2i32: OpcodeIndex = 2; break;
}
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue SuperReg;
unsigned Opc = Opcodes[OpcodeIndex];
@@ -2279,7 +2292,7 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
Ops.push_back(N->getOperand(1));
Ops.push_back(RegSeq);
Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
- Ops.push_back(getAL(CurDAG)); // predicate
+ Ops.push_back(getAL(CurDAG, dl)); // predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
@@ -2292,6 +2305,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
unsigned Opc = isSigned
? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
: (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
+ SDLoc dl(N);
// For unsigned extracts, check for a shift right and mask
unsigned And_imm = 0;
@@ -2318,25 +2332,25 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
if (Subtarget->isThumb()) {
Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- CurDAG->getTargetConstant(LSB, MVT::i32),
- getAL(CurDAG), Reg0, Reg0 };
+ CurDAG->getTargetConstant(LSB, dl, MVT::i32),
+ getAL(CurDAG, dl), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
// ARM models shift instructions as MOVsi with shifter operand.
ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
SDValue ShOpc =
- CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
+ CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
- getAL(CurDAG), Reg0, Reg0 };
+ getAL(CurDAG, dl), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
}
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- CurDAG->getTargetConstant(LSB, MVT::i32),
- CurDAG->getTargetConstant(Width, MVT::i32),
- getAL(CurDAG), Reg0 };
+ CurDAG->getTargetConstant(LSB, dl, MVT::i32),
+ CurDAG->getTargetConstant(Width, dl, MVT::i32),
+ getAL(CurDAG, dl), Reg0 };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
}
@@ -2357,9 +2371,9 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
return nullptr;
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- CurDAG->getTargetConstant(LSB, MVT::i32),
- CurDAG->getTargetConstant(Width, MVT::i32),
- getAL(CurDAG), Reg0 };
+ CurDAG->getTargetConstant(LSB, dl, MVT::i32),
+ CurDAG->getTargetConstant(Width, dl, MVT::i32),
+ getAL(CurDAG, dl), Reg0 };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
}
@@ -2376,9 +2390,9 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- CurDAG->getTargetConstant(LSB, MVT::i32),
- CurDAG->getTargetConstant(Width - 1, MVT::i32),
- getAL(CurDAG), Reg0 };
+ CurDAG->getTargetConstant(LSB, dl, MVT::i32),
+ CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
+ getAL(CurDAG, dl), Reg0 };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
@@ -2484,7 +2498,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDNode *ResNode;
if (Subtarget->isThumb()) {
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
@@ -2492,8 +2506,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
} else {
SDValue Ops[] = {
CPIdx,
- CurDAG->getTargetConstant(0, MVT::i32),
- getAL(CurDAG),
+ CurDAG->getTargetConstant(0, dl, MVT::i32),
+ getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getEntryNode()
};
@@ -2518,12 +2532,12 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (MFI->getObjectAlignment(FI) < 4)
MFI->setObjectAlignment(FI, 4);
return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
- CurDAG->getTargetConstant(0, MVT::i32));
+ CurDAG->getTargetConstant(0, dl, MVT::i32));
} else {
unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
ARM::t2ADDri : ARM::ADDri);
- SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
+ SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
}
@@ -2549,13 +2563,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
SDValue V = N->getOperand(0);
ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
- SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
+ SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
- SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
+ SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
} else {
- SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
+ SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
+ Reg0 };
return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
}
}
@@ -2565,13 +2580,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
SDValue V = N->getOperand(0);
ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
- SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
+ SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
- SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
+ SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
} else {
- SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
+ SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
+ Reg0 };
return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
}
}
@@ -2610,9 +2626,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
(N1CVal & 0xffffU) == 0xffffU &&
(N2CVal & 0xffffU) == 0x0U) {
SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
- MVT::i32);
+ dl, MVT::i32);
SDValue Ops[] = { N0.getOperand(0), Imm16,
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
}
@@ -2620,18 +2636,18 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VMOVRRD:
return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
- N->getOperand(0), getAL(CurDAG),
+ N->getOperand(0), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32));
case ISD::UMUL_LOHI: {
if (Subtarget->isThumb1Only())
break;
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
ARM::UMULL : ARM::UMULLv5,
@@ -2643,11 +2659,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
+ getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
ARM::SMULL : ARM::SMULLv5,
@@ -2657,12 +2673,12 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ARMISD::UMLAL:{
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), getAL(CurDAG),
+ N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32)};
return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
}else{
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), getAL(CurDAG),
+ N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
@@ -2673,12 +2689,12 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ARMISD::SMLAL:{
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), getAL(CurDAG),
+ N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32)};
return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
}else{
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
- N->getOperand(3), getAL(CurDAG),
+ N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
@@ -2722,7 +2738,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
assert(N3.getOpcode() == ISD::Register);
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
- cast<ConstantSDNode>(N2)->getZExtValue()),
+ cast<ConstantSDNode>(N2)->getZExtValue()), dl,
MVT::i32);
SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
@@ -2751,7 +2767,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VZIPq32; break;
}
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
@@ -2771,7 +2787,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VUZPq32; break;
}
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
@@ -2790,7 +2806,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VTRNq32; break;
}
- SDValue Pred = getAL(CurDAG);
+ SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
@@ -3038,7 +3054,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
// Place arguments in the right order.
SmallVector<SDValue, 7> Ops;
Ops.push_back(MemAddr);
- Ops.push_back(getAL(CurDAG));
+ Ops.push_back(getAL(CurDAG, dl));
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
Ops.push_back(Chain);
SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
@@ -3054,7 +3070,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (isThumb)
Result = SDValue(Ld, 0);
else {
- SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
+ SDValue SubRegIdx =
+ CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
Result = SDValue(ResNode,0);
@@ -3066,7 +3083,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (isThumb)
Result = SDValue(Ld, 1);
else {
- SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
+ SDValue SubRegIdx =
+ CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
Result = SDValue(ResNode,0);
@@ -3098,7 +3116,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
// arm_strexd uses GPRPair.
Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
Ops.push_back(MemAddr);
- Ops.push_back(getAL(CurDAG));
+ Ops.push_back(getAL(CurDAG, dl));
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
Ops.push_back(Chain);
@@ -3290,7 +3308,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
Ops.push_back(N->getOperand(0));
Ops.push_back(N->getOperand(1));
- Ops.push_back(getAL(CurDAG)); // Predicate
+ Ops.push_back(getAL(CurDAG, dl)); // Predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
}
@@ -3306,7 +3324,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SmallVector<SDValue, 6> Ops;
Ops.push_back(RegSeq);
Ops.push_back(N->getOperand(2));
- Ops.push_back(getAL(CurDAG)); // Predicate
+ Ops.push_back(getAL(CurDAG, dl)); // Predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
}
@@ -3451,7 +3469,7 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
// Replace the current flag.
AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
- Flag, MVT::i32);
+ Flag, dl, MVT::i32);
// Add the new register node and skip the original two GPRs.
AsmNodeOperands.push_back(PairedReg);
// Skip the next two GPRs.
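The hunks above all make the same mechanical change: SelectionDAG constant creation (getConstant, getTargetConstant, getIntPtrConstant) now takes the SDLoc of the node being lowered as an extra argument. A minimal sketch of the new call shape, assuming the post-patch SelectionDAG API; the helper makeImm below is hypothetical and not part of the patch:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Hypothetical helper, assuming the post-patch API: a constant is created
// carrying the debug location (SDLoc) of the node it is lowered from.
static SDValue makeImm(SelectionDAG &DAG, SDNode *N, unsigned Imm) {
  SDLoc dl(N);  // debug location taken from the node being lowered
  // Pre-patch form:  DAG.getTargetConstant(Imm, MVT::i32);
  // Post-patch form: the SDLoc is passed as the second argument.
  return DAG.getTargetConstant(Imm, dl, MVT::i32);
}
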
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2d3ea84287f..5f593d6f986 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1372,7 +1372,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
if (VA.getLocVT() == MVT::v2f64) {
SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
VA = RVLocs[++i]; // skip ahead to next loc
Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
@@ -1386,7 +1386,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
std::swap (Lo, Hi);
Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
}
} else {
Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
@@ -1417,7 +1417,7 @@ ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
return DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo::getStack(LocMemOffset),
@@ -1457,7 +1457,7 @@ SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
+ SDLoc &dl = CLI.DL;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
@@ -1511,8 +1511,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
if (!isSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
- dl);
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getIntPtrConstant(NumBytes, dl, true), dl);
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
@@ -1551,9 +1551,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (VA.needsCustom()) {
if (VA.getLocVT() == MVT::v2f64) {
SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
@@ -1598,7 +1598,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned int i, j;
for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
- SDValue Const = DAG.getConstant(4*i, MVT::i32);
+ SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
MachinePointerInfo(),
@@ -1617,14 +1617,15 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Flags.getByValSize() > 4*offset) {
unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
StkPtrOff);
- SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
+ SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
MVT::i32);
- SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
+ SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
+ MVT::i32);
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
@@ -1774,7 +1775,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
false, false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
} else {
@@ -1849,8 +1850,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
if (!Ins.empty())
InFlag = Chain.getValue(1);
@@ -2165,7 +2166,8 @@ static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
report_fatal_error("Unsupported interrupt attribute. If present, value "
"must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
- RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false));
+ RetOps.insert(RetOps.begin() + 1,
+ DAG.getConstant(LROffset, DL, MVT::i32, false));
return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}
@@ -2218,7 +2220,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
if (VA.getLocVT() == MVT::v2f64) {
// Extract the first half and return it in two registers.
SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), Half);
@@ -2237,7 +2239,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
// Extract the 2nd half and fall through to handle it as an f64 value.
Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
}
// Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
// available.
@@ -2418,7 +2420,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
false, false, false, 0);
if (RelocM == Reloc::Static)
return Result;
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}
@@ -2442,7 +2444,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
false, false, false, 0);
SDValue Chain = Argument.getValue(1);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
// call __tls_get_addr.
@@ -2494,7 +2496,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
false, false, false, 0);
Chain = Offset.getValue(1);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
@@ -2648,14 +2650,14 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
false, false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
- SDValue Val = DAG.getConstant(0, MVT::i32);
+ SDValue Val = DAG.getConstant(0, dl, MVT::i32);
return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
Op.getOperand(1), Val);
@@ -2665,7 +2667,7 @@ SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
- Op.getOperand(1), DAG.getConstant(0, MVT::i32));
+ Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}
SDValue
@@ -2704,7 +2706,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
false, false, false, 0);
if (RelocM == Reloc::PIC_) {
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
+ SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
return Result;
@@ -2730,7 +2732,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
"Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
}
ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
@@ -2747,8 +2749,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
}
return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
- DAG.getConstant(Domain, MVT::i32));
+ DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
+ DAG.getConstant(Domain, dl, MVT::i32));
}
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
@@ -2774,8 +2776,8 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
}
return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
- Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
- DAG.getConstant(isData, MVT::i32));
+ Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
+ DAG.getConstant(isData, dl, MVT::i32));
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
@@ -2884,7 +2886,7 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
MachinePointerInfo(OrigArg, 4 * i), false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
- DAG.getConstant(4, getPointerTy()));
+ DAG.getConstant(4, dl, getPointerTy()));
}
if (!MemOps.empty())
@@ -3010,9 +3012,11 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
}
ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
- ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
+ ArgValue, ArgValue1,
+ DAG.getIntPtrConstant(0, dl));
ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
- ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
+ ArgValue, ArgValue2,
+ DAG.getIntPtrConstant(1, dl));
} else
ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
@@ -3158,28 +3162,28 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
case ISD::SETGE:
if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
- RHS = DAG.getConstant(C-1, MVT::i32);
+ RHS = DAG.getConstant(C - 1, dl, MVT::i32);
}
break;
case ISD::SETULT:
case ISD::SETUGE:
if (C != 0 && isLegalICmpImmediate(C-1)) {
CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
- RHS = DAG.getConstant(C-1, MVT::i32);
+ RHS = DAG.getConstant(C - 1, dl, MVT::i32);
}
break;
case ISD::SETLE:
case ISD::SETGT:
if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
- RHS = DAG.getConstant(C+1, MVT::i32);
+ RHS = DAG.getConstant(C + 1, dl, MVT::i32);
}
break;
case ISD::SETULE:
case ISD::SETUGT:
if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
- RHS = DAG.getConstant(C+1, MVT::i32);
+ RHS = DAG.getConstant(C + 1, dl, MVT::i32);
}
break;
}
@@ -3198,7 +3202,7 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
CompareType = ARMISD::CMPZ;
break;
}
- ARMcc = DAG.getConstant(CondCode, MVT::i32);
+ ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}
@@ -3244,7 +3248,7 @@ ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
SDValue Value, OverflowCmp;
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
-
+ SDLoc dl(Op);
// FIXME: We are currently always generating CMPs because we don't support
// generating CMN through the backend. This is not as good as the natural
@@ -3255,24 +3259,24 @@ ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
default:
llvm_unreachable("Unknown overflow instruction!");
case ISD::SADDO:
- ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
- Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
- OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
+ ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
+ Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
break;
case ISD::UADDO:
- ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
- Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
- OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
+ ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
+ Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
break;
case ISD::SSUBO:
- ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
- Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
- OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
+ ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
+ Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
break;
case ISD::USUBO:
- ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
- Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
- OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
+ ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
+ Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
+ OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
break;
} // switch (...)
@@ -3290,16 +3294,17 @@ ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
SDValue ARMcc;
std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ SDLoc dl(Op);
// We use 0 and 1 as false and true values.
- SDValue TVal = DAG.getConstant(1, MVT::i32);
- SDValue FVal = DAG.getConstant(0, MVT::i32);
+ SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
+ SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
EVT VT = Op.getValueType();
- SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
+ SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
ARMcc, CCR, OverflowCmp);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
+ return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}
@@ -3322,7 +3327,7 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
EVT VT = Op.getValueType();
- return getCMOV(SDLoc(Op), VT, SelectTrue, SelectFalse, ARMcc, CCR,
+ return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
OverflowCmp, DAG);
}
@@ -3365,10 +3370,10 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
// undefined bits before doing a full-word comparison with zero.
Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
- DAG.getConstant(1, Cond.getValueType()));
+ DAG.getConstant(1, dl, Cond.getValueType()));
return DAG.getSelectCC(dl, Cond,
- DAG.getConstant(0, Cond.getValueType()),
+ DAG.getConstant(0, dl, Cond.getValueType()),
SelectTrue, SelectFalse, ISD::SETNE);
}
@@ -3469,7 +3474,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
// If softenSetCCOperands only returned one value, we should compare it to
// zero.
if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, LHS.getValueType());
+ RHS = DAG.getConstant(0, dl, LHS.getValueType());
CC = ISD::SETNE;
}
}
@@ -3549,12 +3554,12 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
}
}
- SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
+ SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
if (CondCode2 != ARMCC::AL) {
- SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
+ SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
// FIXME: Needs another CMP because flag can have but one use.
SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
@@ -3587,7 +3592,7 @@ static bool canChangeToInt(SDValue Op, bool &SeenZero,
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
if (isFloatingPointZero(Op))
- return DAG.getConstant(0, MVT::i32);
+ return DAG.getConstant(0, SDLoc(Op), MVT::i32);
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
return DAG.getLoad(MVT::i32, SDLoc(Op),
@@ -3600,15 +3605,17 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
SDValue &RetVal1, SDValue &RetVal2) {
+ SDLoc dl(Op);
+
if (isFloatingPointZero(Op)) {
- RetVal1 = DAG.getConstant(0, MVT::i32);
- RetVal2 = DAG.getConstant(0, MVT::i32);
+ RetVal1 = DAG.getConstant(0, dl, MVT::i32);
+ RetVal2 = DAG.getConstant(0, dl, MVT::i32);
return;
}
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
SDValue Ptr = Ld->getBasePtr();
- RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
+ RetVal1 = DAG.getLoad(MVT::i32, dl,
Ld->getChain(), Ptr,
Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
@@ -3616,9 +3623,9 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
EVT PtrType = Ptr.getValueType();
unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
- SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
- PtrType, Ptr, DAG.getConstant(4, PtrType));
- RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
+ SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
+ PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
+ RetVal2 = DAG.getLoad(MVT::i32, dl,
Ld->getChain(), NewPtr,
Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
@@ -3653,7 +3660,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
else if (CC == ISD::SETUNE)
CC = ISD::SETNE;
- SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
+ SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
SDValue ARMcc;
if (LHS.getValueType() == MVT::f32) {
LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
@@ -3673,7 +3680,7 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
- ARMcc = DAG.getConstant(CondCode, MVT::i32);
+ ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
@@ -3697,7 +3704,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
// If softenSetCCOperands only returned one value, we should compare it to
// zero.
if (!RHS.getNode()) {
- RHS = DAG.getConstant(0, LHS.getValueType());
+ RHS = DAG.getConstant(0, dl, LHS.getValueType());
CC = ISD::SETNE;
}
}
@@ -3723,14 +3730,14 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
ARMCC::CondCodes CondCode, CondCode2;
FPCCToARMCC(CC, CondCode, CondCode2);
- SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
+ SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
if (CondCode2 != ARMCC::AL) {
- ARMcc = DAG.getConstant(CondCode2, MVT::i32);
+ ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
}
@@ -3746,10 +3753,10 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
EVT PTy = getPointerTy();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
- SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
+ SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), dl, PTy);
SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
- Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
+ Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
if (Subtarget->isThumb2()) {
// Thumb2 uses a two-level jump. That is, it jumps into the jump table
@@ -3880,12 +3887,12 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// Use VBSL to copy the sign bit.
unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
- DAG.getTargetConstant(EncodedVal, MVT::i32));
+ DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
if (VT == MVT::f64)
Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
else /*if (VT == MVT::f32)*/
Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
if (SrcVT == MVT::f32) {
@@ -3893,16 +3900,16 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
if (VT == MVT::f64)
Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
} else if (VT == MVT::f32)
Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
- MVT::i32);
+ dl, MVT::i32);
AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
@@ -3913,7 +3920,7 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
if (VT == MVT::f32) {
Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
} else {
Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
}
@@ -3928,8 +3935,8 @@ SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
// Or in the signbit with integer operations.
- SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32);
- SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32);
+ SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
+ SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
if (VT == MVT::f32) {
Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
@@ -3960,7 +3967,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(4, MVT::i32);
+ SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
MachinePointerInfo(), false, false, false, 0);
@@ -4022,9 +4029,9 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
// Turn i64->f64 into VMOVDRR.
if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, DstVT,
DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
}
@@ -4056,7 +4063,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
assert(VT.isVector() && "Expected a vector type");
// The canonical modified immediate encoding of a zero vector is....0!
- SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
+ SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
@@ -4079,17 +4086,17 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
+ DAG.getConstant(VTBits, dl, MVT::i32));
SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMcc, DAG, dl);
+ SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
+ ISD::SETGE, ARMcc, DAG, dl);
SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
CCR, Cmp);
@@ -4113,17 +4120,17 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
assert(Op.getOpcode() == ISD::SHL_PARTS);
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
+ DAG.getConstant(VTBits, dl, MVT::i32));
SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMcc, DAG, dl);
+ SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
+ ISD::SETGE, ARMcc, DAG, dl);
SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
CCR, Cmp);
@@ -4140,14 +4147,14 @@ SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// so that the shift + and get folded into a bitfield extract.
SDLoc dl(Op);
SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
- DAG.getConstant(Intrinsic::arm_get_fpscr,
+ DAG.getConstant(Intrinsic::arm_get_fpscr, dl,
MVT::i32));
SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
- DAG.getConstant(1U << 22, MVT::i32));
+ DAG.getConstant(1U << 22, dl, MVT::i32));
SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
- DAG.getConstant(22, MVT::i32));
+ DAG.getConstant(22, dl, MVT::i32));
return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
- DAG.getConstant(3, MVT::i32));
+ DAG.getConstant(3, dl, MVT::i32));
}
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
@@ -4205,10 +4212,10 @@ static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
if (VT.is64BitVector()) {
SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
} else {
SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
- BitCounts, DAG.getIntPtrConstant(0));
+ BitCounts, DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
}
}
@@ -4247,10 +4254,10 @@ static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
if (VT.is64BitVector()) {
SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
} else {
SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
}
}
@@ -4284,7 +4291,8 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
// Left shifts translate directly to the vshiftu intrinsic.
if (N->getOpcode() == ISD::SHL)
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
+ DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
+ MVT::i32),
N->getOperand(0), N->getOperand(1));
assert((N->getOpcode() == ISD::SRA ||
@@ -4301,7 +4309,7 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
Intrinsic::arm_neon_vshifts :
Intrinsic::arm_neon_vshiftu);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(vshiftInt, MVT::i32),
+ DAG.getConstant(vshiftInt, dl, MVT::i32),
N->getOperand(0), NegatedCount);
}
@@ -4327,9 +4335,9 @@ static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
// Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, dl, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
// First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
// captures the result into a carry flag.
@@ -4482,7 +4490,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
/// operand (e.g., VMOV). If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
unsigned SplatBitSize, SelectionDAG &DAG,
- EVT &VT, bool is128Bits, NEONModImmType type) {
+ SDLoc dl, EVT &VT, bool is128Bits,
+ NEONModImmType type) {
unsigned OpCmode, Imm;
// SplatBitSize is set to the smallest size that splats the vector, so a
@@ -4612,7 +4621,7 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
}
unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
- return DAG.getTargetConstant(EncodedVal, MVT::i32);
+ return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
}
SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
@@ -4642,11 +4651,11 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
// It's a float and we are trying to use NEON operations where
// possible. Lower it to a splat followed by an extract.
SDLoc DL(Op);
- SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
+ SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
NewVal);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
}
// The rest of our options are NEON only, make sure that's allowed before
@@ -4664,8 +4673,8 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
return SDValue();
// Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
- SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
- false, VMOVModImm);
+ SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
+ VMovVT, false, VMOVModImm);
if (NewVal != SDValue()) {
SDLoc DL(Op);
SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
@@ -4677,11 +4686,11 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
VecConstant);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
}
// Finally, try a VMVN.i32
- NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
+ NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
false, VMVNModImm);
if (NewVal != SDValue()) {
SDLoc DL(Op);
@@ -4694,7 +4703,7 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
VecConstant);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
}
return SDValue();
@@ -4957,10 +4966,10 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
if (ST->isThumb1Only()) {
if (Val <= 255 || ~Val <= 255)
- return DAG.getConstant(Val, MVT::i32);
+ return DAG.getConstant(Val, dl, MVT::i32);
} else {
if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
- return DAG.getConstant(Val, MVT::i32);
+ return DAG.getConstant(Val, dl, MVT::i32);
}
return SDValue();
}
@@ -4982,7 +4991,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
EVT VmovVT;
SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VmovVT, VT.is128BitVector(),
+ DAG, dl, VmovVT, VT.is128BitVector(),
VMOVModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
@@ -4993,7 +5002,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
uint64_t NegatedImm = (~SplatBits).getZExtValue();
Val = isNEONModifiedImm(NegatedImm,
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VmovVT, VT.is128BitVector(),
+ DAG, dl, VmovVT, VT.is128BitVector(),
VMVNModImm);
if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
@@ -5004,7 +5013,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
int ImmVal = ARM_AM::getFP32Imm(SplatBits);
if (ImmVal != -1) {
- SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
+ SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
}
}
@@ -5086,8 +5095,8 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
VT.getVectorNumElements();
N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
- Value, DAG.getConstant(index, MVT::i32)),
- DAG.getConstant(index, MVT::i32));
+ Value, DAG.getConstant(index, dl, MVT::i32)),
+ DAG.getConstant(index, dl, MVT::i32));
} else
N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
Value->getOperand(0), Value->getOperand(1));
@@ -5103,7 +5112,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SmallVector<SDValue, 3> Ops;
Ops.push_back(N);
Ops.push_back(Op.getOperand(I));
- Ops.push_back(DAG.getConstant(I, MVT::i32));
+ Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
}
}
@@ -5167,7 +5176,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue V = Op.getOperand(i);
if (V.getOpcode() == ISD::UNDEF)
continue;
- SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
+ SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
}
return Vec;
@@ -5270,24 +5279,25 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
VEXTOffsets[i] = NumElts;
ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
SourceVecs[i],
- DAG.getIntPtrConstant(NumElts));
+ DAG.getIntPtrConstant(NumElts, dl));
} else if (MaxElts[i] < NumElts) {
// The extraction can just take the first half
VEXTOffsets[i] = 0;
ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
SourceVecs[i],
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
} else {
// An actual VEXT is needed
VEXTOffsets[i] = MinElts[i];
SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
SourceVecs[i],
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
SourceVecs[i],
- DAG.getIntPtrConstant(NumElts));
+ DAG.getIntPtrConstant(NumElts, dl));
ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
- DAG.getConstant(VEXTOffsets[i], MVT::i32));
+ DAG.getConstant(VEXTOffsets[i], dl,
+ MVT::i32));
}
}
@@ -5421,13 +5431,13 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
case OP_VDUP2:
case OP_VDUP3:
return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
- OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
+ OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
case OP_VEXT1:
case OP_VEXT2:
case OP_VEXT3:
return DAG.getNode(ARMISD::VEXT, dl, VT,
OpLHS, OpRHS,
- DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
+ DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
case OP_VUZPL:
case OP_VUZPR:
return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
@@ -5454,7 +5464,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
SmallVector<SDValue, 8> VTBLMask;
for (ArrayRef<int>::iterator
I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
- VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));
+ VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
if (V2.getNode()->getOpcode() == ISD::UNDEF)
return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
@@ -5478,7 +5488,7 @@ static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
// into the bottom double word. The v8i16 case is similar.
unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
- DAG.getConstant(ExtractNum, MVT::i32));
+ DAG.getConstant(ExtractNum, DL, MVT::i32));
}
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
@@ -5522,7 +5532,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
}
return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
- DAG.getConstant(Lane, MVT::i32));
+ DAG.getConstant(Lane, dl, MVT::i32));
}
bool ReverseVEXT;
@@ -5531,7 +5541,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (ReverseVEXT)
std::swap(V1, V2);
return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
- DAG.getConstant(Imm, MVT::i32));
+ DAG.getConstant(Imm, dl, MVT::i32));
}
if (isVREVMask(ShuffleMask, VT, 64))
@@ -5544,7 +5554,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (V2->getOpcode() == ISD::UNDEF &&
isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
- DAG.getConstant(Imm, MVT::i32));
+ DAG.getConstant(Imm, dl, MVT::i32));
}
// Check for Neon shuffles that modify both input vectors in place.
@@ -5612,7 +5622,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
ShuffleMask[i] < (int)NumElts ? V1 : V2,
DAG.getConstant(ShuffleMask[i] & (NumElts-1),
- MVT::i32)));
+ dl, MVT::i32)));
}
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@@ -5667,11 +5677,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
if (Op0.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
if (Op1.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
- DAG.getIntPtrConstant(1));
+ DAG.getIntPtrConstant(1, dl));
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}
@@ -5843,14 +5853,15 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
SmallVector<SDValue, 8> Ops;
+ SDLoc dl(N);
for (unsigned i = 0; i != NumElts; ++i) {
ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
const APInt &CInt = C->getAPIntValue();
// Element types smaller than 32 bits are not legal, so use i32 elements.
// The values are implicitly truncated so sext vs. zext doesn't matter.
- Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
+ Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
+ return DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::getVectorVT(TruncVT, NumElts), Ops);
}
@@ -5963,14 +5974,15 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
// Get reciprocal estimate.
// float4 recip = vrecpeq_f32(yf);
Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y);
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
+ Y);
// Because char has a smaller range than uchar, we can actually get away
// without any newton steps. This requires that we use a weird bias
// of 0xb000, however (again, this has been exhaustively tested).
// float4 result = as_float4(as_int4(xf*recip) + 0xb000);
X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
- Y = DAG.getConstant(0xb000, MVT::i32);
+ Y = DAG.getConstant(0xb000, dl, MVT::i32);
Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
@@ -5995,9 +6007,10 @@ LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
// float4 recip = vrecpeq_f32(yf);
// recip *= vrecpsq_f32(yf, recip);
N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
+ N1);
N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
N1, N2);
N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
// Because short has a smaller range than ushort, we can actually get away
@@ -6006,7 +6019,7 @@ LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
// float4 result = as_float4(as_int4(xf*recip) + 0x89);
N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
- N1 = DAG.getConstant(0x89, MVT::i32);
+ N1 = DAG.getConstant(0x89, dl, MVT::i32);
N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
@@ -6032,13 +6045,13 @@ static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
@@ -6067,13 +6080,13 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
@@ -6082,7 +6095,8 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
N0 = LowerCONCAT_VECTORS(N0, DAG);
N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
- DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
+ DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
+ MVT::i32),
N0);
return N0;
}
@@ -6100,13 +6114,14 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
// recip *= vrecpsq_f32(yf, recip);
// recip *= vrecpsq_f32(yf, recip);
N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
+ DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
+ BN1);
N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
BN1, N2);
N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
- DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
+ DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
BN1, N2);
N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
// Simply multiplying by the reciprocal estimate can leave us a few ulps
@@ -6115,7 +6130,7 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
// float4 result = as_float4(as_int4(xf*recip) + 2);
N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
- N1 = DAG.getConstant(2, MVT::i32);
+ N1 = DAG.getConstant(2, dl, MVT::i32);
N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
@@ -6202,7 +6217,7 @@ SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
// Address of cos field.
SDValue Add = DAG.getNode(ISD::ADD, dl, getPointerTy(), SRet,
- DAG.getIntPtrConstant(ArgVT.getStoreSize()));
+ DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add,
MachinePointerInfo(), false, false, false, 0);
@@ -6232,12 +6247,12 @@ static void ReplaceREADCYCLECOUNTER(SDNode *N,
// Under Power Management extensions, the cycle-count is:
// mrc p15, #0, <Rt>, c9, c13, #0
SDValue Ops[] = { N->getOperand(0), // Chain
- DAG.getConstant(Intrinsic::arm_mrc, MVT::i32),
- DAG.getConstant(15, MVT::i32),
- DAG.getConstant(0, MVT::i32),
- DAG.getConstant(9, MVT::i32),
- DAG.getConstant(13, MVT::i32),
- DAG.getConstant(0, MVT::i32)
+ DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
+ DAG.getConstant(15, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(9, DL, MVT::i32),
+ DAG.getConstant(13, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32)
};
Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
@@ -6247,13 +6262,13 @@ static void ReplaceREADCYCLECOUNTER(SDNode *N,
// Intrinsic is defined to return 0 on unsupported platforms. Technically
// there are older ARM CPUs that have implementation-specific ways of
// obtaining this information (FIXME!).
- Cycles32 = DAG.getConstant(0, MVT::i32);
+ Cycles32 = DAG.getConstant(0, DL, MVT::i32);
OutChain = DAG.getEntryNode();
}
SDValue Cycles64 = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
- Cycles32, DAG.getConstant(0, MVT::i32));
+ Cycles32, DAG.getConstant(0, DL, MVT::i32));
Results.push_back(Cycles64);
Results.push_back(OutChain);
}
@@ -7660,6 +7675,7 @@ static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
return false;
// Fall through.
case ISD::SIGN_EXTEND: {
+ SDLoc dl(N);
EVT VT = N->getValueType(0);
CC = N->getOperand(0);
if (CC.getValueType() != MVT::i1)
@@ -7668,12 +7684,13 @@ static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
if (AllOnes)
// When looking for an AllOnes constant, N is an sext, and the 'other'
// value is 0.
- OtherOp = DAG.getConstant(0, VT);
+ OtherOp = DAG.getConstant(0, dl, VT);
else if (N->getOpcode() == ISD::ZERO_EXTEND)
// When looking for a 0 constant, N can be zext or sext.
- OtherOp = DAG.getConstant(1, VT);
+ OtherOp = DAG.getConstant(1, dl, VT);
else
- OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
+ OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
+ VT);
return true;
}
}
@@ -7812,9 +7829,11 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
SelectionDAG &DAG = DCI.DAG;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDLoc dl(N);
+
// Build operand list.
SmallVector<SDValue, 8> Ops;
- Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
+ Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
TLI.getPointerTy()));
// Input is the vector.
@@ -7833,9 +7852,9 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
llvm_unreachable("Invalid vector element type for padd optimization.");
}
- SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), widenType, Ops);
+ SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
- return DAG.getNode(ExtOp, SDLoc(N), VT, tmp);
+ return DAG.getNode(ExtOp, dl, VT, tmp);
}
static SDValue findMUL_LOHI(SDValue V) {
@@ -8129,14 +8148,14 @@ static SDValue PerformMULCombine(SDNode *N,
V,
DAG.getNode(ISD::SHL, DL, VT,
V,
- DAG.getConstant(Log2_32(MulAmt - 1),
+ DAG.getConstant(Log2_32(MulAmt - 1), DL,
MVT::i32)));
} else if (isPowerOf2_32(MulAmt + 1)) {
// (mul x, 2^N - 1) => (sub (shl x, N), x)
Res = DAG.getNode(ISD::SUB, DL, VT,
DAG.getNode(ISD::SHL, DL, VT,
V,
- DAG.getConstant(Log2_32(MulAmt + 1),
+ DAG.getConstant(Log2_32(MulAmt + 1), DL,
MVT::i32)),
V);
} else
@@ -8149,7 +8168,7 @@ static SDValue PerformMULCombine(SDNode *N,
V,
DAG.getNode(ISD::SHL, DL, VT,
V,
- DAG.getConstant(Log2_32(MulAmtAbs + 1),
+ DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
MVT::i32)));
} else if (isPowerOf2_32(MulAmtAbs - 1)) {
// (mul x, -(2^N + 1)) => - (add (shl x, N), x)
@@ -8157,10 +8176,10 @@ static SDValue PerformMULCombine(SDNode *N,
V,
DAG.getNode(ISD::SHL, DL, VT,
V,
- DAG.getConstant(Log2_32(MulAmtAbs-1),
+ DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
MVT::i32)));
Res = DAG.getNode(ISD::SUB, DL, VT,
- DAG.getConstant(0, MVT::i32),Res);
+ DAG.getConstant(0, DL, MVT::i32), Res);
} else
return SDValue();
@@ -8168,7 +8187,7 @@ static SDValue PerformMULCombine(SDNode *N,
if (ShiftAmt != 0)
Res = DAG.getNode(ISD::SHL, DL, VT,
- Res, DAG.getConstant(ShiftAmt, MVT::i32));
+ Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
@@ -8197,7 +8216,7 @@ static SDValue PerformANDCombine(SDNode *N,
EVT VbicVT;
SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VbicVT, VT.is128BitVector(),
+ DAG, dl, VbicVT, VT.is128BitVector(),
OtherModImm);
if (Val.getNode()) {
SDValue Input =
@@ -8240,7 +8259,7 @@ static SDValue PerformORCombine(SDNode *N,
EVT VorrVT;
SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize,
- DAG, VorrVT, VT.is128BitVector(),
+ DAG, dl, VorrVT, VT.is128BitVector(),
OtherModImm);
if (Val.getNode()) {
SDValue Input =
@@ -8344,8 +8363,8 @@ static SDValue PerformORCombine(SDNode *N,
Val >>= countTrailingZeros(~Mask);
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
- DAG.getConstant(Val, MVT::i32),
- DAG.getConstant(Mask, MVT::i32));
+ DAG.getConstant(Val, DL, MVT::i32),
+ DAG.getConstant(Mask, DL, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
@@ -8370,9 +8389,9 @@ static SDValue PerformORCombine(SDNode *N,
// 2a
unsigned amt = countTrailingZeros(Mask2);
Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
- DAG.getConstant(amt, MVT::i32));
+ DAG.getConstant(amt, DL, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
- DAG.getConstant(Mask, MVT::i32));
+ DAG.getConstant(Mask, DL, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
return SDValue();
@@ -8386,9 +8405,9 @@ static SDValue PerformORCombine(SDNode *N,
// 2b
unsigned lsb = countTrailingZeros(Mask);
Res = DAG.getNode(ISD::SRL, DL, VT, N00,
- DAG.getConstant(lsb, MVT::i32));
+ DAG.getConstant(lsb, DL, MVT::i32));
Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
- DAG.getConstant(Mask2, MVT::i32));
+ DAG.getConstant(Mask2, DL, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
return SDValue();
@@ -8407,7 +8426,7 @@ static SDValue PerformORCombine(SDNode *N,
return SDValue();
Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
- DAG.getConstant(~Mask, MVT::i32));
+ DAG.getConstant(~Mask, DL, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
@@ -8488,7 +8507,7 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
LD->getAlignment());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
- DAG.getConstant(4, MVT::i32));
+ DAG.getConstant(4, DL, MVT::i32));
SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
LD->getPointerInfo(), LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(),
@@ -8654,7 +8673,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
// Make the DAGCombiner fold the bitcasts.
DCI.AddToWorklist(V.getNode());
}
- SDValue LaneIdx = DAG.getConstant(Idx, MVT::i32);
+ SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
}
Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
@@ -8756,6 +8775,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
SDValue Addr = N->getOperand(AddrOpIdx);
MemSDNode *MemN = cast<MemSDNode>(N);
+ SDLoc dl(N);
// Search for a use of the address operand that is an increment.
for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
@@ -8917,16 +8937,16 @@ static SDValue CombineBaseUpdate(SDNode *N,
}
// For all node types, the alignment operand is always the last one.
- Ops.push_back(DAG.getConstant(Alignment, MVT::i32));
+ Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
// If this is a non-standard-aligned STORE, the penultimate operand is the
// stored value. Bitcast it to the aligned type.
if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
SDValue &StVal = Ops[Ops.size()-2];
- StVal = DAG.getNode(ISD::BITCAST, SDLoc(N), AlignedVecTy, StVal);
+ StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
}
- SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
+ SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys,
Ops, AlignedVecTy,
MemN->getMemOperand());
@@ -8939,7 +8959,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
// value. Bitcast it to the expected result type.
if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
SDValue &LdVal = NewResults[0];
- LdVal = DAG.getNode(ISD::BITCAST, SDLoc(N), VecTy, LdVal);
+ LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
}
NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
@@ -9152,7 +9172,7 @@ static SDValue PerformSTORECombine(SDNode *N,
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
- SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
+ SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, DL,
TLI.getPointerTy());
SDValue BasePtr = St->getBasePtr();
@@ -9161,7 +9181,7 @@ static SDValue PerformSTORECombine(SDNode *N,
for (unsigned I = 0; I < E; I++) {
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
StoreType, ShuffWide,
- DAG.getIntPtrConstant(I));
+ DAG.getIntPtrConstant(I, DL));
SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
St->getPointerInfo(), St->isVolatile(),
St->isNonTemporal(), St->getAlignment());
@@ -9189,7 +9209,7 @@ static SDValue PerformSTORECombine(SDNode *N,
St->isNonTemporal(), St->getAlignment());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
- DAG.getConstant(4, MVT::i32));
+ DAG.getConstant(4, DL, MVT::i32));
return DAG.getStore(NewST1.getValue(0), DL,
StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
OffsetPtr, St->getPointerInfo(), St->isVolatile(),
@@ -9296,15 +9316,17 @@ static SDValue PerformVCVTCombine(SDNode *N,
return SDValue();
}
+ SDLoc dl(N);
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
Intrinsic::arm_neon_vcvtfp2fxu;
- SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
+ SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
- DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
- DAG.getConstant(Log2_64(C), MVT::i32));
+ DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
+ N0,
+ DAG.getConstant(Log2_64(C), dl, MVT::i32));
if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
- FixConv = DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), FixConv);
+ FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
return FixConv;
}
@@ -9346,19 +9368,20 @@ static SDValue PerformVDIVCombine(SDNode *N,
return SDValue();
}
+ SDLoc dl(N);
SDValue ConvInput = Op.getOperand(0);
unsigned NumLanes = Op.getValueType().getVectorNumElements();
if (IntTy.getSizeInBits() < FloatTy.getSizeInBits())
ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
- SDLoc(N), NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
+ dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
ConvInput);
unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
Intrinsic::arm_neon_vcvtfxu2fp;
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
Op.getValueType(),
- DAG.getConstant(IntrinsicOpcode, MVT::i32),
- ConvInput, DAG.getConstant(Log2_64(C), MVT::i32));
+ DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
+ ConvInput, DAG.getConstant(Log2_64(C), dl, MVT::i32));
}
/// getVShiftImm - Check if this is a valid build_vector for the immediate
@@ -9519,8 +9542,9 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
VShiftOpc = ARMISD::VQRSHRNsu; break;
}
- return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
- N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
+ SDLoc dl(N);
+ return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
+ N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
}
case Intrinsic::arm_neon_vshiftins: {
@@ -9536,9 +9560,10 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
}
- return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
+ SDLoc dl(N);
+ return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
N->getOperand(1), N->getOperand(2),
- DAG.getConstant(Cnt, MVT::i32));
+ DAG.getConstant(Cnt, dl, MVT::i32));
}
case Intrinsic::arm_neon_vqrshifts:
@@ -9583,9 +9608,11 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
default: llvm_unreachable("unexpected shift opcode");
case ISD::SHL:
- if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
- return DAG.getNode(ARMISD::VSHL, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
+ if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
+ SDLoc dl(N);
+ return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
+ DAG.getConstant(Cnt, dl, MVT::i32));
+ }
break;
case ISD::SRA:
@@ -9593,8 +9620,9 @@ static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
ARMISD::VSHRs : ARMISD::VSHRu);
- return DAG.getNode(VShiftOpc, SDLoc(N), VT, N->getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
+ SDLoc dl(N);
+ return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
+ DAG.getConstant(Cnt, dl, MVT::i32));
}
}
return SDValue();
@@ -10225,7 +10253,7 @@ static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
if (RHSC < 0 && RHSC > -256) {
assert(Ptr->getOpcode() == ISD::ADD);
isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
+ Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
return true;
}
}
@@ -10239,7 +10267,7 @@ static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
if (RHSC < 0 && RHSC > -0x1000) {
assert(Ptr->getOpcode() == ISD::ADD);
isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
+ Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
Base = Ptr->getOperand(0);
return true;
}
@@ -10282,11 +10310,11 @@ static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
assert(Ptr->getOpcode() == ISD::ADD);
isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
+ Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
return true;
} else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
isInc = Ptr->getOpcode() == ISD::ADD;
- Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
+ Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
return true;
}
}
@@ -10734,7 +10762,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
}
return;
}
- Result = DAG.getTargetConstant(CVal, Op.getValueType());
+ Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
break;
}
@@ -10802,7 +10830,7 @@ ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue Size = Op.getOperand(1);
SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
SDValue Flag;
Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
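
The change repeated throughout these hunks is mechanical: every SelectionDAG constant-creation call now receives the debug location (SDLoc) of the node being lowered, so the emitted constants carry source-location information. A minimal sketch of the convention, assuming the post-change SelectionDAG API in which getConstant and getTargetConstant take the SDLoc as their second parameter; the helper name LowerExampleShift is hypothetical and used only for illustration:

  // Sketch only: thread the node's SDLoc into constant creation.
  // Assumes the post-change signatures SelectionDAG::getConstant(Val, DL, VT)
  // and SelectionDAG::getTargetConstant(Val, DL, VT).
  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  static SDValue LowerExampleShift(SDNode *N, SelectionDAG &DAG, unsigned Cnt) {
    SDLoc dl(N);                      // location of the node being lowered
    EVT VT = N->getValueType(0);
    // Before this patch: DAG.getConstant(Cnt, MVT::i32)
    // After: the SDLoc is passed explicitly so the constant keeps debug info.
    SDValue Amt = DAG.getConstant(Cnt, dl, MVT::i32);
    return DAG.getNode(ISD::SHL, dl, VT, N->getOperand(0), Amt);
  }
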
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index 52f35554995..bda6c944ce3 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -323,12 +323,12 @@ class RegConstraint<string C> {
// imm_neg_XFORM - Return the negation of an i32 immediate value.
def imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(-(int)N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
// imm_not_XFORM - Return the complement of a i32 immediate value.
def imm_not_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(~(int)N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
/// imm16_31 predicate - True if the 32-bit immediate is in the range [16,31].
@@ -343,7 +343,8 @@ def sext_16_node : PatLeaf<(i32 GPR:$a), [{
/// Split a 32-bit immediate into two 16 bit parts.
def hi16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
+ return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, SDLoc(N),
+ MVT::i32);
}]>;
def lo16AllZero : PatLeaf<(i32 imm), [{
@@ -485,10 +486,10 @@ def neon_vcvt_imm32 : Operand<i32> {
def rot_imm_XFORM: SDNodeXForm<imm, [{
switch (N->getZExtValue()){
default: llvm_unreachable(nullptr);
- case 0: return CurDAG->getTargetConstant(0, MVT::i32);
- case 8: return CurDAG->getTargetConstant(1, MVT::i32);
- case 16: return CurDAG->getTargetConstant(2, MVT::i32);
- case 24: return CurDAG->getTargetConstant(3, MVT::i32);
+ case 0: return CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
+ case 8: return CurDAG->getTargetConstant(1, SDLoc(N), MVT::i32);
+ case 16: return CurDAG->getTargetConstant(2, SDLoc(N), MVT::i32);
+ case 24: return CurDAG->getTargetConstant(3, SDLoc(N), MVT::i32);
}
}]>;
def RotImmAsmOperand : AsmOperandClass {
@@ -767,7 +768,8 @@ def bf_inv_mask_imm : Operand<i32>,
}
def imm1_32_XFORM: SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
+ return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, SDLoc(N),
+ MVT::i32);
}]>;
def Imm1_32AsmOperand: AsmOperandClass { let Name = "Imm1_32"; }
def imm1_32 : Operand<i32>, PatLeaf<(imm), [{
@@ -780,7 +782,8 @@ def imm1_32 : Operand<i32>, PatLeaf<(imm), [{
}
def imm1_16_XFORM: SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, MVT::i32);
+ return CurDAG->getTargetConstant((int)N->getZExtValue() - 1, SDLoc(N),
+ MVT::i32);
}]>;
def Imm1_16AsmOperand: AsmOperandClass { let Name = "Imm1_16"; }
def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
index a6a07a8f02e..f035d6150ec 100644
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -2393,36 +2393,41 @@ def : Pat<(byte_alignedstore (v2f64 QPR:$value), addrmode6:$addr),
// Extract D sub-registers of Q registers.
def DSubReg_i8_reg : SDNodeXForm<imm, [{
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/8, MVT::i32);
+ return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/8, SDLoc(N),
+ MVT::i32);
}]>;
def DSubReg_i16_reg : SDNodeXForm<imm, [{
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/4, MVT::i32);
+ return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/4, SDLoc(N),
+ MVT::i32);
}]>;
def DSubReg_i32_reg : SDNodeXForm<imm, [{
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/2, MVT::i32);
+ return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/2, SDLoc(N),
+ MVT::i32);
}]>;
def DSubReg_f64_reg : SDNodeXForm<imm, [{
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), SDLoc(N),
+ MVT::i32);
}]>;
// Extract S sub-registers of Q/D registers.
def SSubReg_f32_reg : SDNodeXForm<imm, [{
assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue(), SDLoc(N),
+ MVT::i32);
}]>;
// Translate lane numbers from Q registers to D subregs.
def SubReg_i8_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() & 7, SDLoc(N), MVT::i32);
}]>;
def SubReg_i16_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() & 3, SDLoc(N), MVT::i32);
}]>;
def SubReg_i32_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() & 1, SDLoc(N), MVT::i32);
}]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td
index d0ade771b5e..1c94fe4ba21 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -21,7 +21,7 @@ def ARMtcall : SDNode<"ARMISD::tCALL", SDT_ARMcall,
def imm_sr_XFORM: SDNodeXForm<imm, [{
unsigned Imm = N->getZExtValue();
- return CurDAG->getTargetConstant((Imm == 32 ? 0 : Imm), MVT::i32);
+ return CurDAG->getTargetConstant((Imm == 32 ? 0 : Imm), SDLoc(N), MVT::i32);
}]>;
def ThumbSRImmAsmOperand: AsmOperandClass { let Name = "ImmThumbSR"; }
def imm_sr : Operand<i32>, PatLeaf<(imm), [{
@@ -33,7 +33,8 @@ def imm_sr : Operand<i32>, PatLeaf<(imm), [{
}
def imm_comp_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
+ return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), SDLoc(N),
+ MVT::i32);
}]>;
def imm0_7_neg : PatLeaf<(i32 imm), [{
@@ -61,12 +62,12 @@ def thumb_immshifted : PatLeaf<(imm), [{
def thumb_immshifted_val : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getThumbImmNonShiftedVal((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
+ return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i32);
}]>;
def thumb_immshifted_shamt : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getThumbImmValShift((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
+ return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i32);
}]>;
// Scaled 4 immediate.
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 103ee002cac..d31eb3d04e3 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -54,12 +54,14 @@ def t2_so_reg : Operand<i32>, // reg imm
// t2_so_imm_not_XFORM - Return the complement of a t2_so_imm value
def t2_so_imm_not_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
+ return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), SDLoc(N),
+ MVT::i32);
}]>;
// t2_so_imm_neg_XFORM - Return the negation of a t2_so_imm value
def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
+ return CurDAG->getTargetConstant(-((int)N->getZExtValue()), SDLoc(N),
+ MVT::i32);
}]>;
// so_imm_notSext_XFORM - Return a so_imm value packed into the format
@@ -68,7 +70,7 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
def t2_so_imm_notSext16_XFORM : SDNodeXForm<imm, [{
APInt apIntN = N->getAPIntValue();
unsigned N16bitSignExt = apIntN.trunc(16).sext(32).getZExtValue();
- return CurDAG->getTargetConstant(~N16bitSignExt, MVT::i32);
+ return CurDAG->getTargetConstant(~N16bitSignExt, SDLoc(N), MVT::i32);
}]>;
// t2_so_imm - Match a 32-bit immediate operand, which is an
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index afff01692a2..e83f8c85063 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -37,7 +37,7 @@ def vfp_f32imm : Operand<f32>,
}], SDNodeXForm<fpimm, [{
APFloat InVal = N->getValueAPF();
uint32_t enc = ARM_AM::getFP32Imm(InVal);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>> {
let PrintMethod = "printFPImmOperand";
let ParserMatchClass = FPImmOperand;
@@ -49,7 +49,7 @@ def vfp_f64imm : Operand<f64>,
}], SDNodeXForm<fpimm, [{
APFloat InVal = N->getValueAPF();
uint32_t enc = ARM_AM::getFP64Imm(InVal);
- return CurDAG->getTargetConstant(enc, MVT::i32);
+ return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
}]>> {
let PrintMethod = "printFPImmOperand";
let ParserMatchClass = FPImmOperand;
diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 636205fc9e3..06bde40e037 100644
--- a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -67,7 +67,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
- DAG.getConstant(SrcOff, MVT::i32)),
+ DAG.getConstant(SrcOff, dl, MVT::i32)),
SrcPtrInfo.getWithOffset(SrcOff), isVolatile,
false, false, 0);
TFOps[i] = Loads[i].getValue(1);
@@ -80,7 +80,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
- DAG.getConstant(DstOff, MVT::i32)),
+ DAG.getConstant(DstOff, dl, MVT::i32)),
DstPtrInfo.getWithOffset(DstOff),
isVolatile, false, 0);
DstOff += VTSize;
@@ -108,7 +108,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
- DAG.getConstant(SrcOff, MVT::i32)),
+ DAG.getConstant(SrcOff, dl, MVT::i32)),
SrcPtrInfo.getWithOffset(SrcOff),
false, false, false, 0);
TFOps[i] = Loads[i].getValue(1);
@@ -132,7 +132,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
- DAG.getConstant(DstOff, MVT::i32)),
+ DAG.getConstant(DstOff, dl, MVT::i32)),
DstPtrInfo.getWithOffset(DstOff), false, false, 0);
++i;
DstOff += VTSize;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index d02c6eeb3b9..7a213aad072 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -108,24 +108,24 @@ public:
// XformMskToBitPosU5Imm - Returns the bit position which
// the single bit 32 bit mask represents.
// Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU5Imm(uint32_t Imm) {
+ SDValue XformMskToBitPosU5Imm(uint32_t Imm, SDLoc DL) {
int32_t bitPos;
bitPos = Log2_32(Imm);
assert(bitPos >= 0 && bitPos < 32 &&
"Constant out of range for 32 BitPos Memops");
- return CurDAG->getTargetConstant(bitPos, MVT::i32);
+ return CurDAG->getTargetConstant(bitPos, DL, MVT::i32);
}
// XformMskToBitPosU4Imm - Returns the bit position which the single-bit
// 16 bit mask represents. Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU4Imm(uint16_t Imm) {
- return XformMskToBitPosU5Imm(Imm);
+ SDValue XformMskToBitPosU4Imm(uint16_t Imm, SDLoc DL) {
+ return XformMskToBitPosU5Imm(Imm, DL);
}
// XformMskToBitPosU3Imm - Returns the bit position which the single-bit
// 8 bit mask represents. Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU3Imm(uint8_t Imm) {
- return XformMskToBitPosU5Imm(Imm);
+ SDValue XformMskToBitPosU3Imm(uint8_t Imm, SDLoc DL) {
+ return XformMskToBitPosU5Imm(Imm, DL);
}
// Return true if there is exactly one bit set in V, i.e., if V is one of the
@@ -137,37 +137,37 @@ public:
// XformM5ToU5Imm - Return a target constant with the specified value, of
// type i32 where the negative literal is transformed into a positive literal
// for use in -= memops.
- inline SDValue XformM5ToU5Imm(signed Imm) {
+ inline SDValue XformM5ToU5Imm(signed Imm, SDLoc DL) {
assert( (Imm >= -31 && Imm <= -1) && "Constant out of range for Memops");
- return CurDAG->getTargetConstant( - Imm, MVT::i32);
+ return CurDAG->getTargetConstant( - Imm, DL, MVT::i32);
}
// XformU7ToU7M1Imm - Return a target constant decremented by 1, in range
// [1..128], used in cmpb.gtu instructions.
- inline SDValue XformU7ToU7M1Imm(signed Imm) {
+ inline SDValue XformU7ToU7M1Imm(signed Imm, SDLoc DL) {
assert((Imm >= 1 && Imm <= 128) && "Constant out of range for cmpb op");
- return CurDAG->getTargetConstant(Imm - 1, MVT::i8);
+ return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i8);
}
// XformS8ToS8M1Imm - Return a target constant decremented by 1.
- inline SDValue XformSToSM1Imm(signed Imm) {
- return CurDAG->getTargetConstant(Imm - 1, MVT::i32);
+ inline SDValue XformSToSM1Imm(signed Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformU8ToU8M1Imm - Return a target constant decremented by 1.
- inline SDValue XformUToUM1Imm(unsigned Imm) {
+ inline SDValue XformUToUM1Imm(unsigned Imm, SDLoc DL) {
assert((Imm >= 1) && "Cannot decrement unsigned int less than 1");
- return CurDAG->getTargetConstant(Imm - 1, MVT::i32);
+ return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformSToSM2Imm - Return a target constant decremented by 2.
- inline SDValue XformSToSM2Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm - 2, MVT::i32);
+ inline SDValue XformSToSM2Imm(unsigned Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm - 2, DL, MVT::i32);
}
// XformSToSM3Imm - Return a target constant decremented by 3.
- inline SDValue XformSToSM3Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm - 3, MVT::i32);
+ inline SDValue XformSToSM3Imm(unsigned Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm - 3, DL, MVT::i32);
}
// Include the pieces autogenerated from the target description.
@@ -259,7 +259,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
const HexagonInstrInfo &TII = *HST->getInstrInfo();
if (TII.isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
MVT::Other, Base, TargetConst,
Chain);
@@ -278,8 +278,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
return Result_2;
}
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
Base, TargetConst0, Chain);
SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
@@ -313,8 +313,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
const HexagonInstrInfo &TII = *HST->getInstrInfo();
if (TII.isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
MVT::i32, MVT::Other, Base,
TargetConstVal, Chain);
@@ -336,8 +336,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
}
// Generate an indirect load.
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
MVT::Other, Base, TargetConst0,
Chain);
@@ -411,7 +411,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
return SelectIndexedLoadSignExtend64(LD, Opcode, dl);
if (TII.isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
LD->getValueType(0),
MVT::i32, MVT::Other, Base,
@@ -430,8 +430,8 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
ReplaceUses(Froms, Tos, 3);
return Result;
} else {
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl,
LD->getValueType(0),
MVT::Other, Base, TargetConst0,
@@ -502,7 +502,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
Value = CurDAG->getTargetExtractSubreg(Hexagon::subreg_loreg,
dl, MVT::i32, Value);
}
- SDValue Ops[] = {Base, CurDAG->getTargetConstant(Val, MVT::i32), Value,
+ SDValue Ops[] = {Base, CurDAG->getTargetConstant(Val, dl, MVT::i32), Value,
Chain};
// Build post increment store.
SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
@@ -520,7 +520,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
// def S2_storerd_io
// : STInst<(outs), (ins IntRegs:$base, imm:$offset, DoubleRegs:$src1), ...
// and it differs for POST_ST* for instance.
- SDValue Ops[] = { Base, CurDAG->getTargetConstant(0, MVT::i32), Value,
+ SDValue Ops[] = { Base, CurDAG->getTargetConstant(0, dl, MVT::i32), Value,
Chain};
unsigned Opcode = 0;
@@ -532,7 +532,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
else llvm_unreachable("unknown memory type");
// Build regular store.
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
// Build splitted incriment instruction.
SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
@@ -599,7 +599,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
}
SDValue Chain = LD->getChain();
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
OP0 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(), TargetConst0,
@@ -625,7 +625,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
}
SDValue Chain = LD->getChain();
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
OP1 = SDValue(CurDAG->getMachineNode(Hexagon::L2_loadri_io, dl, MVT::i32,
MVT::Other,
LD->getBasePtr(), TargetConst0,
@@ -661,7 +661,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
int32_t MulConst =
cast<ConstantSDNode>(Mul_1.getNode())->getSExtValue();
int32_t ValConst = MulConst << ShlConst;
- SDValue Val = CurDAG->getTargetConstant(ValConst,
+ SDValue Val = CurDAG->getTargetConstant(ValConst, dl,
MVT::i32);
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val.getNode()))
if (isInt<9>(CN->getSExtValue())) {
@@ -689,7 +689,8 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
int32_t Shl2Const =
cast<ConstantSDNode>(Shl2_1.getNode())->getSExtValue();
int32_t ValConst = 1 << (ShlConst+Shl2Const);
- SDValue Val = CurDAG->getTargetConstant(-ValConst, MVT::i32);
+ SDValue Val = CurDAG->getTargetConstant(-ValConst, dl,
+ MVT::i32);
if (ConstantSDNode *CN =
dyn_cast<ConstantSDNode>(Val.getNode()))
if (isInt<9>(CN->getSExtValue())) {
@@ -738,13 +739,14 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
MV |= Bit;
Bit <<= ES;
}
- SDValue Ones = CurDAG->getTargetConstant(MV, MVT::i64);
+ SDValue Ones = CurDAG->getTargetConstant(MV, dl, MVT::i64);
SDNode *OnesReg = CurDAG->getMachineNode(Hexagon::CONST64_Int_Real, dl,
MVT::i64, Ones);
if (ExVT.getSizeInBits() == 32) {
SDNode *And = CurDAG->getMachineNode(Hexagon::A2_andp, dl, MVT::i64,
SDValue(Mask,0), SDValue(OnesReg,0));
- SDValue SubR = CurDAG->getTargetConstant(Hexagon::subreg_loreg, MVT::i32);
+ SDValue SubR = CurDAG->getTargetConstant(Hexagon::subreg_loreg, dl,
+ MVT::i32);
return CurDAG->getMachineNode(Hexagon::EXTRACT_SUBREG, dl, ExVT,
SDValue(And,0), SubR);
}
@@ -760,7 +762,7 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
// Now we need to differentiate target data types.
if (N->getValueType(0) == MVT::i64) {
// Convert the zero_extend to Rs = Pd followed by A2_combinew(0,Rs).
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
SDNode *Result_1 = CurDAG->getMachineNode(Hexagon::C2_tfrpr, dl,
MVT::i32,
SDValue(IsIntrinsic, 0));
@@ -867,7 +869,7 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
Ops.push_back(Load);
Ops.push_back(ModifierExpr);
int32_t Val = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
- Ops.push_back(CurDAG->getTargetConstant(Val, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(Val, dl, MVT::i32));
Ops.push_back(Chain);
SDNode* Result = CurDAG->getMachineNode(opc, dl, ResTys, Ops);
@@ -1022,11 +1024,11 @@ SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
APFloat APF = CN->getValueAPF();
if (N->getValueType(0) == MVT::f32) {
return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32,
- CurDAG->getTargetConstantFP(APF.convertToFloat(), MVT::f32));
+ CurDAG->getTargetConstantFP(APF.convertToFloat(), dl, MVT::f32));
}
else if (N->getValueType(0) == MVT::f64) {
return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64,
- CurDAG->getTargetConstantFP(APF.convertToDouble(), MVT::f64));
+ CurDAG->getTargetConstantFP(APF.convertToDouble(), dl, MVT::f64));
}
return SelectCode(N);
@@ -1166,7 +1168,7 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
SDNode *Result;
// Get the right SDVal for the opcode.
- SDValue SDVal = CurDAG->getTargetConstant(bitpos, MVT::i32);
+ SDValue SDVal = CurDAG->getTargetConstant(bitpos, dl, MVT::i32);
if (ValueVT == MVT::i32 || ValueVT == MVT::f32) {
Result = CurDAG->getMachineNode(BitOpc, dl, ValueVT,
@@ -1181,11 +1183,11 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
SDNode *Reg = N->getOperand(0).getNode();
SDValue RegClass = CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID,
- MVT::i64);
+ dl, MVT::i64);
- SDValue SubregHiIdx = CurDAG->getTargetConstant(Hexagon::subreg_hireg,
+ SDValue SubregHiIdx = CurDAG->getTargetConstant(Hexagon::subreg_hireg, dl,
MVT::i32);
- SDValue SubregLoIdx = CurDAG->getTargetConstant(Hexagon::subreg_loreg,
+ SDValue SubregLoIdx = CurDAG->getTargetConstant(Hexagon::subreg_loreg, dl,
MVT::i32);
SDValue SubregHI = CurDAG->getTargetExtractSubreg(Hexagon::subreg_hireg, dl,
@@ -1204,7 +1206,7 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
dl, ValueVT, Ops);
} else {
if (Opc != ISD::FABS && Opc != ISD::FNEG)
- SDVal = CurDAG->getTargetConstant(bitpos-32, MVT::i32);
+ SDVal = CurDAG->getTargetConstant(bitpos - 32, dl, MVT::i32);
SDNode *Result0 = CurDAG->getMachineNode(BitOpc, dl, SubValueVT,
SubregHI, SDVal);
const SDValue Ops[] = { RegClass, SDValue(Result0, 0), SubregHiIdx,
@@ -1226,8 +1228,8 @@ SDNode *HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
unsigned StkA = HFI->getStackAlignment();
unsigned MaxA = MFI->getMaxAlignment();
SDValue FI = CurDAG->getTargetFrameIndex(FX, MVT::i32);
- SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
SDLoc DL(N);
+ SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
SDNode *R = 0;
// Use TFR_FI when:
@@ -1321,7 +1323,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
break;
}
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 86fb06a34ef..22a6ed7a6fd 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -344,7 +344,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
SDLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile=*/false, /*AlwaysInline=*/false,
/*isTailCall=*/false,
@@ -542,7 +542,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (VA.isMemLoc()) {
unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue MemAddr = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
+ SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
+ StackPtr.getValueType());
MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
if (Flags.isByVal()) {
// The argument is a struct passed by value. According to LLVM, "Arg"
@@ -570,7 +571,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (!isTailCall) {
- SDValue C = DAG.getConstant(NumBytes, getPointerTy(), true);
+ SDValue C = DAG.getConstant(NumBytes, dl, getPointerTy(), true);
Chain = DAG.getCALLSEQ_START(Chain, C, dl);
}
@@ -644,8 +645,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -808,7 +809,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
SDValue JumpTableBase = DAG.getNode(HexagonISD::JT, dl,
getPointerTy(), TargetJT);
SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, dl, MVT::i32));
SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
ShiftIndex);
SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
@@ -841,7 +842,7 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
dbgs() << "\n";
});
- SDValue AC = DAG.getConstant(A, MVT::i32);
+ SDValue AC = DAG.getConstant(A, dl, MVT::i32);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
return DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
}
@@ -994,7 +995,7 @@ SDValue HexagonTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
SDValue InpVal = Op.getOperand(0);
if (isa<ConstantSDNode>(InpVal)) {
uint64_t V = cast<ConstantSDNode>(InpVal)->getZExtValue();
- return DAG.getTargetConstant(countPopulation(V), MVT::i64);
+ return DAG.getTargetConstant(countPopulation(V), dl, MVT::i64);
}
SDValue PopOut = DAG.getNode(HexagonISD::POPCOUNT, dl, MVT::i32, InpVal);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, PopOut);
@@ -1095,7 +1096,7 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadNode->isInvariant(),
Alignment);
// Base+2 load.
- SDValue Increment = DAG.getConstant(2, MVT::i32);
+ SDValue Increment = DAG.getConstant(2, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
@@ -1104,11 +1105,11 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadNode->isInvariant(),
Alignment);
// SHL 16, then OR base and base+2.
- SDValue ShiftAmount = DAG.getConstant(16, MVT::i32);
+ SDValue ShiftAmount = DAG.getConstant(16, DL, MVT::i32);
SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
SDValue Tmp2 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[0]);
// Base + 4.
- Increment = DAG.getConstant(4, MVT::i32);
+ Increment = DAG.getConstant(4, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
@@ -1117,7 +1118,7 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LoadNode->isInvariant(),
Alignment);
// Base + 6.
- Increment = DAG.getConstant(6, MVT::i32);
+ Increment = DAG.getConstant(6, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
@@ -1183,7 +1184,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(4, MVT::i32);
+ SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
MachinePointerInfo(), false, false, false, 0);
@@ -1822,7 +1823,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (IsScalarToVector)
return createSplat(DAG, dl, VT, V1.getOperand(0));
}
- return createSplat(DAG, dl, VT, DAG.getConstant(Lane, MVT::i32));
+ return createSplat(DAG, dl, VT, DAG.getConstant(Lane, dl, MVT::i32));
}
// FIXME: We need to support more general vector shuffles. See
@@ -1930,7 +1931,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
unsigned SplatBits = APSplatBits.getZExtValue();
int32_t SextVal = ((int32_t) (SplatBits << (32 - SplatBitSize)) >>
(32 - SplatBitSize));
- return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, MVT::i32));
+ return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, dl, MVT::i32));
}
// Try to generate COMBINE to build v2i32 vectors.
@@ -1939,9 +1940,9 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue V1 = BVN->getOperand(1);
if (V0.getOpcode() == ISD::UNDEF)
- V0 = DAG.getConstant(0, MVT::i32);
+ V0 = DAG.getConstant(0, dl, MVT::i32);
if (V1.getOpcode() == ISD::UNDEF)
- V1 = DAG.getConstant(0, MVT::i32);
+ V1 = DAG.getConstant(0, dl, MVT::i32);
ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(V1);
@@ -2002,17 +2003,17 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
}
if (Size == 64)
- ConstVal = DAG.getConstant(Res, MVT::i64);
+ ConstVal = DAG.getConstant(Res, dl, MVT::i64);
else
- ConstVal = DAG.getConstant(Res, MVT::i32);
+ ConstVal = DAG.getConstant(Res, dl, MVT::i32);
// When there are non constant operands, add them with INSERT_VECTOR_ELT to
// ConstVal, the constant part of the vector.
if (HasNonConstantElements) {
EVT EltVT = VT.getVectorElementType();
- SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), MVT::i64);
+ SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), dl, MVT::i64);
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
for (unsigned i = 0, e = NElts; i != e; ++i) {
// LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon
@@ -2025,11 +2026,11 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (VT.getSizeInBits() == 64 &&
Operand.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
}
- SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
+ SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {ConstVal, Operand, Combined};
@@ -2052,10 +2053,10 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
unsigned NElts = Op.getNumOperands();
SDValue Vec = Op.getOperand(0);
EVT VecVT = Vec.getValueType();
- SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), MVT::i64);
+ SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), dl, MVT::i64);
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
- SDValue ConstVal = DAG.getConstant(0, MVT::i64);
+ DAG.getConstant(32, dl, MVT::i64));
+ SDValue ConstVal = DAG.getConstant(0, dl, MVT::i64);
ConstantSDNode *W = dyn_cast<ConstantSDNode>(Width);
ConstantSDNode *S = dyn_cast<ConstantSDNode>(Shifted);
@@ -2084,11 +2085,11 @@ HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
if (VT.getSizeInBits() == 64 &&
Operand.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
}
- SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
+ SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {ConstVal, Operand, Combined};
@@ -2114,12 +2115,12 @@ HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
EVT EltVT = VecVT.getVectorElementType();
int EltSize = EltVT.getSizeInBits();
SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT ?
- EltSize : VTN * EltSize, MVT::i64);
+ EltSize : VTN * EltSize, dl, MVT::i64);
// Constant element number.
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Idx)) {
uint64_t X = CI->getZExtValue();
- SDValue Offset = DAG.getConstant(X * EltSize, MVT::i32);
+ SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32);
const SDValue Ops[] = {Vec, Width, Offset};
ConstantSDNode *CW = dyn_cast<ConstantSDNode>(Width);
@@ -2158,9 +2159,9 @@ HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
// Variable element number.
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
- DAG.getConstant(EltSize, MVT::i32));
+ DAG.getConstant(EltSize, dl, MVT::i32));
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {Vec, Combined};
@@ -2189,10 +2190,10 @@ HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
EVT EltVT = VecVT.getVectorElementType();
int EltSize = EltVT.getSizeInBits();
SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::INSERT_VECTOR_ELT ?
- EltSize : VTN * EltSize, MVT::i64);
+ EltSize : VTN * EltSize, dl, MVT::i64);
if (ConstantSDNode *C = cast<ConstantSDNode>(Idx)) {
- SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, MVT::i32);
+ SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32);
const SDValue Ops[] = {Vec, Val, Width, Offset};
SDValue N;
@@ -2206,14 +2207,14 @@ HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
// Variable element number.
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
- DAG.getConstant(EltSize, MVT::i32));
+ DAG.getConstant(EltSize, dl, MVT::i32));
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
if (VT.getSizeInBits() == 64 &&
Val.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
}
@@ -2257,7 +2258,7 @@ HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
DAG.getRegister(Hexagon::R30, getPointerTy()),
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
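
The Hexagon hunks above all apply one mechanical change: SelectionDAG constant builders now take the debug location as an explicit argument. As a minimal illustrative sketch of the new call shape (the helper and variable names below are invented for this note and do not appear in the patch), a lowering-side helper that forms Base + 4 against the updated API could read:

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // Sketch only: form Base + 4 using the explicit-SDLoc constant builder.
  static SDValue buildBasePlusFour(SDValue Base, const SDLoc &DL,
                                   SelectionDAG &DAG) {
    // The constant now carries the location of the node being lowered.
    SDValue Four = DAG.getConstant(4, DL, Base.getValueType());
    return DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Four);
  }

The same shape recurs with getTargetConstant and getIntPtrConstant in the remaining files of this diff.
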
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
index 6df811e49f2..3b32c10ed5b 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -36,28 +36,28 @@ def HiReg: OutPatFrag<(ops node:$Rs),
def DEC_CONST_SIGNED : SDNodeXForm<imm, [{
// Return the byte immediate const-1 as an SDNode.
int32_t imm = N->getSExtValue();
- return XformSToSM1Imm(imm);
+ return XformSToSM1Imm(imm, SDLoc(N));
}]>;
// SDNode for converting immediate C to C-2.
def DEC2_CONST_SIGNED : SDNodeXForm<imm, [{
// Return the byte immediate const-2 as an SDNode.
int32_t imm = N->getSExtValue();
- return XformSToSM2Imm(imm);
+ return XformSToSM2Imm(imm, SDLoc(N));
}]>;
// SDNode for converting immediate C to C-3.
def DEC3_CONST_SIGNED : SDNodeXForm<imm, [{
// Return the byte immediate const-3 as an SDNode.
int32_t imm = N->getSExtValue();
- return XformSToSM3Imm(imm);
+ return XformSToSM3Imm(imm, SDLoc(N));
}]>;
// SDNode for converting immediate C to C-1.
def DEC_CONST_UNSIGNED : SDNodeXForm<imm, [{
// Return the byte immediate const-1 as an SDNode.
uint32_t imm = N->getZExtValue();
- return XformUToUM1Imm(imm);
+ return XformUToUM1Imm(imm, SDLoc(N));
}]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td b/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
index ecfde172c4d..8b667c64515 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -57,7 +57,7 @@ def BITPOS32 : SDNodeXForm<imm, [{
// Return the bit position we will set [0-31].
// As an SDNode.
int32_t imm = N->getSExtValue();
- return XformMskToBitPosU5Imm(imm);
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
@@ -1153,14 +1153,14 @@ def IMM_BYTE : SDNodeXForm<imm, [{
// -1 etc is represented as 255 etc
// assigning to a byte restores our desired signed value.
int8_t imm = N->getSExtValue();
- return CurDAG->getTargetConstant(imm, MVT::i32);
+ return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32);
}]>;
def IMM_HALF : SDNodeXForm<imm, [{
// -1 etc is represented as 65535 etc
// assigning to a short restores our desired signed value.
int16_t imm = N->getSExtValue();
- return CurDAG->getTargetConstant(imm, MVT::i32);
+ return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32);
}]>;
def IMM_WORD : SDNodeXForm<imm, [{
@@ -1169,7 +1169,7 @@ def IMM_WORD : SDNodeXForm<imm, [{
// might convert -1 to a large +ve number.
// assigning to a word restores our desired signed value.
int32_t imm = N->getSExtValue();
- return CurDAG->getTargetConstant(imm, MVT::i32);
+ return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32);
}]>;
def ToImmByte : OutPatFrag<(ops node:$R), (IMM_BYTE $R)>;
@@ -2805,7 +2805,7 @@ def MEMOPIMM : SDNodeXForm<imm, [{
// Call the transformation function XformM5ToU5Imm to get the negative
// immediate's positive counterpart.
int32_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm);
+ return XformM5ToU5Imm(imm, SDLoc(N));
}]>;
def MEMOPIMM_HALF : SDNodeXForm<imm, [{
@@ -2814,7 +2814,7 @@ def MEMOPIMM_HALF : SDNodeXForm<imm, [{
// Call the transformation function XformM5ToU5Imm to get the negative
// immediate's positive counterpart.
int16_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm);
+ return XformM5ToU5Imm(imm, SDLoc(N));
}]>;
def MEMOPIMM_BYTE : SDNodeXForm<imm, [{
@@ -2823,14 +2823,14 @@ def MEMOPIMM_BYTE : SDNodeXForm<imm, [{
// Call the transformation function XformM5ToU5Imm to get the negative
// immediate's positive counterpart.
int8_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm);
+ return XformM5ToU5Imm(imm, SDLoc(N));
}]>;
def SETMEMIMM : SDNodeXForm<imm, [{
// Return the bit position we will set [0-31].
// As an SDNode.
int32_t imm = N->getSExtValue();
- return XformMskToBitPosU5Imm(imm);
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
def CLRMEMIMM : SDNodeXForm<imm, [{
@@ -2838,14 +2838,14 @@ def CLRMEMIMM : SDNodeXForm<imm, [{
// As an SDNode.
// we bit negate the value first
int32_t imm = ~(N->getSExtValue());
- return XformMskToBitPosU5Imm(imm);
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
def SETMEMIMM_SHORT : SDNodeXForm<imm, [{
// Return the bit position we will set [0-15].
// As an SDNode.
int16_t imm = N->getSExtValue();
- return XformMskToBitPosU4Imm(imm);
+ return XformMskToBitPosU4Imm(imm, SDLoc(N));
}]>;
def CLRMEMIMM_SHORT : SDNodeXForm<imm, [{
@@ -2853,14 +2853,14 @@ def CLRMEMIMM_SHORT : SDNodeXForm<imm, [{
// As an SDNode.
// we bit negate the value first
int16_t imm = ~(N->getSExtValue());
- return XformMskToBitPosU4Imm(imm);
+ return XformMskToBitPosU4Imm(imm, SDLoc(N));
}]>;
def SETMEMIMM_BYTE : SDNodeXForm<imm, [{
// Return the bit position we will set [0-7].
// As an SDNode.
int8_t imm = N->getSExtValue();
- return XformMskToBitPosU3Imm(imm);
+ return XformMskToBitPosU3Imm(imm, SDLoc(N));
}]>;
def CLRMEMIMM_BYTE : SDNodeXForm<imm, [{
@@ -2868,7 +2868,7 @@ def CLRMEMIMM_BYTE : SDNodeXForm<imm, [{
// As an SDNode.
// we bit negate the value first
int8_t imm = ~(N->getSExtValue());
- return XformMskToBitPosU3Imm(imm);
+ return XformMskToBitPosU3Imm(imm, SDLoc(N));
}]>;
//===----------------------------------------------------------------------===//
@@ -3202,7 +3202,7 @@ def: Pat<(i1 (setne (i32 IntRegs:$src1), s32ImmPred:$src2)),
def DEC_CONST_BYTE : SDNodeXForm<imm, [{
// Return the byte immediate const-1 as an SDNode.
int32_t imm = N->getSExtValue();
- return XformU7ToU7M1Imm(imm);
+ return XformU7ToU7M1Imm(imm, SDLoc(N));
}]>;
// For the sequence
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 591ceb55d57..5ce5013d898 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -274,7 +274,7 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N,
Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, 0,
0/*AM.SymbolFlags*/);
else
- Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i16);
+ Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(N), MVT::i16);
return true;
}
@@ -401,10 +401,10 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i16);
if (Node->hasOneUse())
- return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16,
- TFI, CurDAG->getTargetConstant(0, MVT::i16));
- return CurDAG->getMachineNode(MSP430::ADD16ri, dl, MVT::i16,
- TFI, CurDAG->getTargetConstant(0, MVT::i16));
+ return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16, TFI,
+ CurDAG->getTargetConstant(0, dl, MVT::i16));
+ return CurDAG->getMachineNode(MSP430::ADD16ri, dl, MVT::i16, TFI,
+ CurDAG->getTargetConstant(0, dl, MVT::i16));
}
case ISD::LOAD:
if (SDNode *ResNode = SelectIndexedLoad(Node))
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 08f41a80971..f0194dcda15 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -593,7 +593,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
- Chain = DAG.getCALLSEQ_START(Chain ,DAG.getConstant(NumBytes,
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, dl,
getPointerTy(), true),
dl);
@@ -634,13 +634,14 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
StackPtr,
- DAG.getIntPtrConstant(VA.getLocMemOffset()));
+ DAG.getIntPtrConstant(VA.getLocMemOffset(),
+ dl));
SDValue MemOp;
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (Flags.isByVal()) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i16);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i16);
MemOp = DAG.getMemcpy(Chain, dl, PtrOff, Arg, SizeNode,
Flags.getByValAlign(),
/*isVolatile*/false,
@@ -700,8 +701,9 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy(), true),
- DAG.getConstant(0, getPointerTy(), true),
+ DAG.getConstant(NumBytes, dl, getPointerTy(),
+ true),
+ DAG.getConstant(0, dl, getPointerTy(), true),
InFlag, dl);
InFlag = Chain.getValue(1);
@@ -843,7 +845,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
// fold constant into instruction.
if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
LHS = RHS;
- RHS = DAG.getConstant(C->getSExtValue() + 1, C->getValueType(0));
+ RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
TCC = MSP430CC::COND_LO;
break;
}
@@ -856,7 +858,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
// fold constant into instruction.
if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
LHS = RHS;
- RHS = DAG.getConstant(C->getSExtValue() + 1, C->getValueType(0));
+ RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
TCC = MSP430CC::COND_HS;
break;
}
@@ -869,7 +871,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
// fold constant into instruction.
if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
LHS = RHS;
- RHS = DAG.getConstant(C->getSExtValue() + 1, C->getValueType(0));
+ RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
TCC = MSP430CC::COND_L;
break;
}
@@ -882,7 +884,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
// fold constant into instruction.
if (const ConstantSDNode * C = dyn_cast<ConstantSDNode>(LHS)) {
LHS = RHS;
- RHS = DAG.getConstant(C->getSExtValue() + 1, C->getValueType(0));
+ RHS = DAG.getConstant(C->getSExtValue() + 1, dl, C->getValueType(0));
TCC = MSP430CC::COND_GE;
break;
}
@@ -890,7 +892,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
break;
}
- TargetCC = DAG.getConstant(TCC, MVT::i8);
+ TargetCC = DAG.getConstant(TCC, dl, MVT::i8);
return DAG.getNode(MSP430ISD::CMP, dl, MVT::Glue, LHS, RHS);
}
@@ -967,7 +969,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
break;
}
EVT VT = Op.getValueType();
- SDValue One = DAG.getConstant(1, VT);
+ SDValue One = DAG.getConstant(1, dl, VT);
if (Convert) {
SDValue SR = DAG.getCopyFromReg(DAG.getEntryNode(), dl, MSP430::SR,
MVT::i16, Flag);
@@ -979,7 +981,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SR = DAG.getNode(ISD::XOR, dl, MVT::i16, SR, One);
return SR;
} else {
- SDValue Zero = DAG.getConstant(0, VT);
+ SDValue Zero = DAG.getConstant(0, dl, VT);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
SDValue Ops[] = {One, Zero, TargetCC, Flag};
return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, Ops);
@@ -1048,7 +1050,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(getDataLayout()->getPointerSize(), MVT::i16);
+ DAG.getConstant(getDataLayout()->getPointerSize(), dl, MVT::i16);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
@@ -1129,7 +1131,7 @@ bool MSP430TargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
return false;
Base = Op->getOperand(0);
- Offset = DAG.getConstant(RHSC, VT);
+ Offset = DAG.getConstant(RHSC, SDLoc(N), VT);
AM = ISD::POST_INC;
return true;
}
diff --git a/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index 3221ccb7749..7b6a2a15447 100644
--- a/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -163,14 +163,15 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
bool Mips16DAGToDAGISel::selectAddr16(
SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset,
SDValue &Alias) {
+ SDLoc DL(Addr);
EVT ValTy = Addr.getValueType();
- Alias = CurDAG->getTargetConstant(0, ValTy);
+ Alias = CurDAG->getTargetConstant(0, DL, ValTy);
// if Address is FI, get the TargetFrameIndex.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- Offset = CurDAG->getTargetConstant(0, ValTy);
+ Offset = CurDAG->getTargetConstant(0, DL, ValTy);
getMips16SPRefReg(Parent, Alias);
return true;
}
@@ -199,7 +200,7 @@ bool Mips16DAGToDAGISel::selectAddr16(
else
Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), DL, ValTy);
return true;
}
}
@@ -235,7 +236,7 @@ bool Mips16DAGToDAGISel::selectAddr16(
}
}
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, ValTy);
+ Offset = CurDAG->getTargetConstant(0, DL, ValTy);
return true;
}
diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
index aec731e0dff..7096a52f9ef 100644
--- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -119,7 +119,7 @@ private:
// getImm - Return a target constant with the specified value.
inline SDValue getImm(const SDNode *Node, uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
+ return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
}
virtual void processFunctionAfterISel(MachineFunction &MF) = 0;
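
The instruction-selection headers follow the same convention: target constants are created with the location of the node they are materialized for. A hedged sketch of such a helper, mirroring the getImm change just above (the free-function form and its name are invented for illustration, not taken from the patch):

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // Sketch only: build a target constant that reuses the node's debug location.
  static SDValue getImmFor(SelectionDAG &DAG, const SDNode *Node, uint64_t Imm) {
    return DAG.getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }
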
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index c78b79fcf1a..92abe0ef0c9 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -544,7 +544,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
- DAG.getConstant(condCodeToFCC(CC), MVT::i32));
+ DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}
// Creates and returns a CMovFPT/F node.
@@ -699,9 +699,11 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
return SDValue();
- return DAG.getNode(MipsISD::Ext, SDLoc(N), ValTy,
- ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
- DAG.getConstant(SMSize, MVT::i32));
+ SDLoc DL(N);
+ return DAG.getNode(MipsISD::Ext, DL, ValTy,
+ ShiftRight.getOperand(0),
+ DAG.getConstant(Pos, DL, MVT::i32),
+ DAG.getConstant(SMSize, DL, MVT::i32));
}
static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
@@ -753,9 +755,11 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
return SDValue();
- return DAG.getNode(MipsISD::Ins, SDLoc(N), ValTy, Shl.getOperand(0),
- DAG.getConstant(SMPos0, MVT::i32),
- DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
+ SDLoc DL(N);
+ return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
+ DAG.getConstant(SMPos0, DL, MVT::i32),
+ DAG.getConstant(SMSize0, DL, MVT::i32),
+ And0.getOperand(0));
}
static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
@@ -1556,7 +1560,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
Index = DAG.getNode(ISD::MUL, DL, PTy, Index,
- DAG.getConstant(EntrySize, PTy));
+ DAG.getConstant(EntrySize, DL, PTy));
SDValue Addr = DAG.getNode(ISD::ADD, DL, PTy, Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
@@ -1594,7 +1598,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Mips::CondCode CC =
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
- SDValue BrCode = DAG.getConstant(Opc, MVT::i32);
+ SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
FCC0, Dest, CondRes);
@@ -1635,10 +1639,11 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
assert(Cond.getOpcode() == MipsISD::FPCmp &&
"Floating point operand expected.");
- SDValue True = DAG.getConstant(1, MVT::i32);
- SDValue False = DAG.getConstant(0, MVT::i32);
+ SDLoc DL(Op);
+ SDValue True = DAG.getConstant(1, DL, MVT::i32);
+ SDValue False = DAG.getConstant(0, DL, MVT::i32);
- return createCMovFP(DAG, Cond, True, False, SDLoc(Op));
+ return createCMovFP(DAG, Cond, True, False, DL);
}
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
@@ -1837,19 +1842,19 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
VAList = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(Align - 1,
- VAList.getValueType()));
+ DAG.getConstant(Align - 1, DL, VAList.getValueType()));
VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
- DAG.getConstant(-(int64_t)Align,
+ DAG.getConstant(-(int64_t)Align, DL,
VAList.getValueType()));
}
// Increment the pointer, VAList, to the next vaarg.
unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes, ArgSlotSizeInBytes),
- VAList.getValueType()));
+ DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
+ ArgSlotSizeInBytes),
+ DL, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
MachinePointerInfo(SV), false, false, 0);
@@ -1862,7 +1867,7 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
- DAG.getIntPtrConstant(Adjustment));
+ DAG.getIntPtrConstant(Adjustment, DL));
}
// Load the actual argument out of the pointer VAList
return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo(), false, false,
@@ -1873,9 +1878,9 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
bool HasExtractInsert) {
EVT TyX = Op.getOperand(0).getValueType();
EVT TyY = Op.getOperand(1).getValueType();
- SDValue Const1 = DAG.getConstant(1, MVT::i32);
- SDValue Const31 = DAG.getConstant(31, MVT::i32);
SDLoc DL(Op);
+ SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
+ SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
SDValue Res;
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
@@ -1911,7 +1916,8 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Op.getOperand(0), DAG.getConstant(0, MVT::i32));
+ Op.getOperand(0),
+ DAG.getConstant(0, DL, MVT::i32));
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
@@ -1920,8 +1926,8 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
- SDValue Const1 = DAG.getConstant(1, MVT::i32);
SDLoc DL(Op);
+ SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
// Bitcast to integer nodes.
SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
@@ -1931,7 +1937,7 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
// ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
// ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
- DAG.getConstant(WidthY - 1, MVT::i32), Const1);
+ DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
if (WidthX > WidthY)
E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
@@ -1939,7 +1945,8 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
- DAG.getConstant(WidthX - 1, MVT::i32), Const1, X);
+ DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
+ X);
return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
}
@@ -1951,7 +1958,7 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
- DAG.getConstant(WidthY - 1, MVT::i32));
+ DAG.getConstant(WidthY - 1, DL, MVT::i32));
if (WidthX > WidthY)
SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
@@ -1959,7 +1966,7 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
- DAG.getConstant(WidthX - 1, MVT::i32));
+ DAG.getConstant(WidthX - 1, DL, MVT::i32));
SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
}
@@ -2042,7 +2049,7 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
unsigned SType = 0;
SDLoc DL(Op);
return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
- DAG.getConstant(SType, MVT::i32));
+ DAG.getConstant(SType, DL, MVT::i32));
}
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
@@ -2059,17 +2066,17 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
// lo = 0
// hi = (shl lo, shamt[4:0])
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
- DAG.getConstant(-1, MVT::i32));
+ DAG.getConstant(-1, DL, MVT::i32));
SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, DL, VT));
SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
- DAG.getConstant(0x20, MVT::i32));
+ DAG.getConstant(0x20, DL, MVT::i32));
Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
- DAG.getConstant(0, VT), ShiftLeftLo);
+ DAG.getConstant(0, DL, VT), ShiftLeftLo);
Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
SDValue Ops[2] = {Lo, Hi};
@@ -2097,20 +2104,21 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
// lo = (srl hi, shamt[4:0])
// hi = 0
SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
- DAG.getConstant(-1, MVT::i32));
+ DAG.getConstant(-1, DL, MVT::i32));
SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, DL, VT));
SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
DL, VT, Hi, Shamt);
SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
- DAG.getConstant(0x20, MVT::i32));
- SDValue Shift31 = DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(31, VT));
+ DAG.getConstant(0x20, DL, MVT::i32));
+ SDValue Shift31 = DAG.getNode(ISD::SRA, DL, VT, Hi,
+ DAG.getConstant(31, DL, VT));
Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
- IsSRA ? Shift31 : DAG.getConstant(0, VT), ShiftRightHi);
+ IsSRA ? Shift31 : DAG.getConstant(0, DL, VT), ShiftRightHi);
SDValue Ops[2] = {Lo, Hi};
return DAG.getMergeValues(Ops, DL);
@@ -2126,7 +2134,7 @@ static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
if (Offset)
Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
- DAG.getConstant(Offset, BasePtrVT));
+ DAG.getConstant(Offset, DL, BasePtrVT));
SDValue Ops[] = { Chain, Ptr, Src };
return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
@@ -2191,7 +2199,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// (set tmp2, (shl tmp1, 32))
// (set dst, (srl tmp2, 32))
SDLoc DL(LD);
- SDValue Const32 = DAG.getConstant(32, MVT::i32);
+ SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
SDValue Ops[] = { SRL, LWR.getValue(1) };
@@ -2207,7 +2215,7 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
if (Offset)
Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
- DAG.getConstant(Offset, BasePtrVT));
+ DAG.getConstant(Offset, DL, BasePtrVT));
SDValue Ops[] = { Chain, Value, Ptr };
return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
@@ -2289,8 +2297,9 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op->getValueType(0);
int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy);
- return DAG.getNode(ISD::ADD, SDLoc(Op), ValTy, InArgsAddr,
- DAG.getConstant(0, ValTy));
+ SDLoc DL(Op);
+ return DAG.getNode(ISD::ADD, DL, ValTy, InArgsAddr,
+ DAG.getConstant(0, DL, ValTy));
}
SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
@@ -2447,7 +2456,7 @@ MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
bool IsTailCall, SelectionDAG &DAG) const {
if (!IsTailCall) {
SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, DL));
return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo(), false,
false, 0);
}
@@ -2573,7 +2582,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// byval arguments to the stack.
unsigned StackAlignment = TFL->getStackAlignment();
NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
- SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
+ SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
if (!IsTailCall)
Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
@@ -2625,9 +2634,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Arg, DAG.getConstant(0, MVT::i32));
+ Arg, DAG.getConstant(0, DL, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Arg, DAG.getConstant(1, MVT::i32));
+ Arg, DAG.getConstant(1, DL, MVT::i32));
if (!Subtarget.isLittle())
std::swap(Lo, Hi);
unsigned LocRegLo = VA.getLocReg();
@@ -2666,7 +2675,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
Arg = DAG.getNode(
ISD::SHL, DL, VA.getLocVT(), Arg,
- DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
}
// Arguments that can be passed on register must be kept at
@@ -2755,7 +2764,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
- DAG.getIntPtrConstant(0, true), InFlag, DL);
+ DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -2794,7 +2803,7 @@ SDValue MipsTargetLowering::LowerCallResult(
VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
Val = DAG.getNode(
Shift, DL, VA.getLocVT(), Val,
- DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
}
switch (VA.getLocInfo()) {
@@ -2847,7 +2856,7 @@ static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
Val = DAG.getNode(
Opcode, DL, VA.getLocVT(), Val,
- DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
break;
}
}
@@ -3121,7 +3130,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
Val = DAG.getNode(
ISD::SHL, DL, VA.getLocVT(), Val,
- DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT()));
+ DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
}
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
@@ -3423,6 +3432,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
+ SDLoc DL(Op);
SDValue Result;
// Only support length 1 constraints for now.
@@ -3437,7 +3447,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getSExtValue();
if (isInt<16>(Val)) {
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3447,7 +3457,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getZExtValue();
if (Val == 0) {
- Result = DAG.getTargetConstant(0, Type);
+ Result = DAG.getTargetConstant(0, DL, Type);
break;
}
}
@@ -3457,7 +3467,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
uint64_t Val = (uint64_t)C->getZExtValue();
if (isUInt<16>(Val)) {
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3467,7 +3477,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getSExtValue();
if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3477,7 +3487,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getSExtValue();
if ((Val >= -65535) && (Val <= -1)) {
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3487,7 +3497,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getSExtValue();
if ((isInt<15>(Val))) {
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3497,7 +3507,7 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
EVT Type = Op.getValueType();
int64_t Val = C->getSExtValue();
if ((Val <= 65535) && (Val >= 1)) {
- Result = DAG.getTargetConstant(Val, Type);
+ Result = DAG.getTargetConstant(Val, DL, Type);
break;
}
}
@@ -3603,7 +3613,7 @@ void MipsTargetLowering::copyByValRegs(
unsigned VReg = addLiveIn(MF, ArgReg, RC);
unsigned Offset = I * GPRSizeInBytes;
SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
- DAG.getConstant(Offset, PtrTy));
+ DAG.getConstant(Offset, DL, PtrTy));
SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
StorePtr, MachinePointerInfo(FuncArg, Offset),
false, false, 0);
@@ -3634,7 +3644,7 @@ void MipsTargetLowering::passByValArg(
// Copy words to registers.
for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(OffsetInBytes, PtrTy));
+ DAG.getConstant(OffsetInBytes, DL, PtrTy));
SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
MachinePointerInfo(), false, false, false,
Alignment);
@@ -3660,7 +3670,8 @@ void MipsTargetLowering::passByValArg(
// Load subword.
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(OffsetInBytes, PtrTy));
+ DAG.getConstant(OffsetInBytes, DL,
+ PtrTy));
SDValue LoadVal = DAG.getExtLoad(
ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, false,
@@ -3676,7 +3687,7 @@ void MipsTargetLowering::passByValArg(
Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
- DAG.getConstant(Shamt, MVT::i32));
+ DAG.getConstant(Shamt, DL, MVT::i32));
if (Val.getNode())
Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
@@ -3697,10 +3708,11 @@ void MipsTargetLowering::passByValArg(
// Copy remainder of byval arg to it with memcpy.
unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
- DAG.getConstant(OffsetInBytes, PtrTy));
+ DAG.getConstant(OffsetInBytes, DL, PtrTy));
SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
- DAG.getIntPtrConstant(VA.getLocMemOffset()));
- Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
+ DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+ Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
+ DAG.getConstant(MemCpySize, DL, PtrTy),
Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
/*isTailCall=*/false,
MachinePointerInfo(), MachinePointerInfo());
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 6daa632b7a3..fe9c13fd456 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -253,9 +253,10 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
// that SLTu64 produces an i32. We need to fix this in the long run but for
// now, just make the DAG type-correct by asserting the upper bits are zero.
Carry = CurDAG->getMachineNode(Mips::SUBREG_TO_REG, DL, VT,
- CurDAG->getTargetConstant(0, VT),
+ CurDAG->getTargetConstant(0, DL, VT),
SDValue(Carry, 0),
- CurDAG->getTargetConstant(Mips::sub_32, VT));
+ CurDAG->getTargetConstant(Mips::sub_32, DL,
+ VT));
}
// Generate a second addition only if we know that RHS is not a
@@ -276,7 +277,7 @@ bool MipsSEDAGToDAGISel::selectAddrFrameIndex(SDValue Addr, SDValue &Base,
EVT ValTy = Addr.getValueType();
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- Offset = CurDAG->getTargetConstant(0, ValTy);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), ValTy);
return true;
}
return false;
@@ -298,7 +299,8 @@ bool MipsSEDAGToDAGISel::selectAddrFrameIndexOffset(SDValue Addr, SDValue &Base,
else
Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr),
+ ValTy);
return true;
}
}
@@ -372,7 +374,7 @@ bool MipsSEDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
bool MipsSEDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, Addr.getValueType());
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Addr.getValueType());
return true;
}
@@ -523,7 +525,7 @@ selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
if (( Signed && ImmValue.isSignedIntN(ImmBitSize)) ||
(!Signed && ImmValue.isIntN(ImmBitSize))) {
- Imm = CurDAG->getTargetConstant(ImmValue, EltTy);
+ Imm = CurDAG->getTargetConstant(ImmValue, SDLoc(N), EltTy);
return true;
}
}
@@ -599,7 +601,7 @@ bool MipsSEDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
int32_t Log2 = ImmValue.exactLogBase2();
if (Log2 != -1) {
- Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ Imm = CurDAG->getTargetConstant(Log2, SDLoc(N), EltTy);
return true;
}
}
@@ -632,7 +634,8 @@ bool MipsSEDAGToDAGISel::selectVSplatMaskL(SDValue N, SDValue &Imm) const {
// as the original value.
if (ImmValue == ~(~ImmValue & ~(~ImmValue + 1))) {
- Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), SDLoc(N),
+ EltTy);
return true;
}
}
@@ -663,7 +666,8 @@ bool MipsSEDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
// Extract the run of set bits starting with bit zero, and test that the
// result is the same as the original value
if (ImmValue == (ImmValue & ~(ImmValue + 1))) {
- Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), SDLoc(N),
+ EltTy);
return true;
}
}
@@ -684,7 +688,7 @@ bool MipsSEDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N,
int32_t Log2 = (~ImmValue).exactLogBase2();
if (Log2 != -1) {
- Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ Imm = CurDAG->getTargetConstant(Log2, SDLoc(N), EltTy);
return true;
}
}
@@ -762,7 +766,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDLoc DL(CN);
SDNode *RegOpnd;
SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
- MVT::i64);
+ DL, MVT::i64);
// The first instruction can be a LUi which is different from other
// instructions (ADDiu, ORI and SLL) in that it does not have a register
@@ -777,7 +781,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
// The remaining instructions in the sequence are handled here.
for (++Inst; Inst != Seq.end(); ++Inst) {
- ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
+ ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd), DL,
MVT::i64);
RegOpnd = CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64,
SDValue(RegOpnd, 0), ImmOpnd);
@@ -848,7 +852,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
}
SDNode *Rdhwr =
- CurDAG->getMachineNode(RdhwrOpc, SDLoc(Node),
+ CurDAG->getMachineNode(RdhwrOpc, DL,
Node->getValueType(0),
CurDAG->getRegister(Mips::HWR29, MVT::i32));
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg,
@@ -911,10 +915,10 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
if (!SplatValue.isSignedIntN(10))
return std::make_pair(false, nullptr);
- SDValue Imm = CurDAG->getTargetConstant(SplatValue,
+ SDValue Imm = CurDAG->getTargetConstant(SplatValue, DL,
ViaVecTy.getVectorElementType());
- SDNode *Res = CurDAG->getMachineNode(LdiOp, SDLoc(Node), ViaVecTy, Imm);
+ SDNode *Res = CurDAG->getMachineNode(LdiOp, DL, ViaVecTy, Imm);
if (ResVecTy != ViaVecTy) {
// If LdiOp is writing to a different register class to ResVecTy, then
@@ -924,9 +928,9 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
const TargetLowering *TLI = getTargetLowering();
MVT ResVecTySimple = ResVecTy.getSimpleVT();
const TargetRegisterClass *RC = TLI->getRegClassFor(ResVecTySimple);
- Res = CurDAG->getMachineNode(Mips::COPY_TO_REGCLASS, SDLoc(Node),
+ Res = CurDAG->getMachineNode(Mips::COPY_TO_REGCLASS, DL,
ResVecTy, SDValue(Res, 0),
- CurDAG->getTargetConstant(RC->getID(),
+ CurDAG->getTargetConstant(RC->getID(), DL,
MVT::i32));
}
@@ -949,7 +953,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
// All memory constraints can at least accept raw pointers.
case InlineAsm::Constraint_i:
OutOps.push_back(Op);
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
case InlineAsm::Constraint_m:
if (selectAddrRegImm16(Op, Base, Offset)) {
@@ -958,7 +962,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
return false;
}
OutOps.push_back(Op);
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
case InlineAsm::Constraint_R:
// The 'R' constraint is supposed to be much more complicated than this.
@@ -972,7 +976,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
return false;
}
OutOps.push_back(Op);
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
case InlineAsm::Constraint_ZC:
// ZC matches whatever the pref, ll, and sc instructions can handle for the
@@ -999,7 +1003,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
}
// In all cases, 0-bit offsets are acceptable.
OutOps.push_back(Op);
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
}
return true;
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 09ff4f93c5d..c273cfbe214 100644
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -800,7 +800,7 @@ static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
// Return 0.
if (C == 0)
- return DAG.getConstant(0, VT);
+ return DAG.getConstant(0, DL, VT);
// Return x.
if (C == 1)
@@ -809,7 +809,7 @@ static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
// If c is power of 2, return (shl x, log2(c)).
if (isPowerOf2_64(C))
return DAG.getNode(ISD::SHL, DL, VT, X,
- DAG.getConstant(Log2_64(C), ShiftTy));
+ DAG.getConstant(Log2_64(C), DL, ShiftTy));
unsigned Log2Ceil = Log2_64_Ceil(C);
uint64_t Floor = 1LL << Log2_64(C);
@@ -864,8 +864,9 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
(SplatValue.getZExtValue() >= EltSize))
return SDValue();
- return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
- DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
+ SDLoc DL(N);
+ return DAG.getNode(Opc, DL, Ty, N->getOperand(0),
+ DAG.getConstant(SplatValue.getZExtValue(), DL, MVT::i32));
}
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
@@ -1212,7 +1213,7 @@ SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Nd.getAlignment());
// i32 load from higher address.
- Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
MachinePointerInfo(), Nd.isVolatile(),
Nd.isNonTemporal(), Nd.isInvariant(),
@@ -1237,9 +1238,9 @@ SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
EVT PtrVT = Ptr.getValueType();
SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Val, DAG.getConstant(0, MVT::i32));
+ Val, DAG.getConstant(0, DL, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Val, DAG.getConstant(1, MVT::i32));
+ Val, DAG.getConstant(1, DL, MVT::i32));
if (!Subtarget.isLittle())
std::swap(Lo, Hi);
@@ -1250,7 +1251,7 @@ SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Nd.getAAInfo());
// i32 store to higher address.
- Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
Nd.isVolatile(), Nd.isNonTemporal(),
std::min(Nd.getAlignment(), 4U), Nd.getAAInfo());
@@ -1283,9 +1284,9 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, DL, MVT::i32));
return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
}
@@ -1381,7 +1382,7 @@ static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
SDValue LaneB = Op->getOperand(2);
if (ResVecTy == MVT::v2i64) {
- LaneA = DAG.getConstant(0, MVT::i32);
+ LaneA = DAG.getConstant(0, DL, MVT::i32);
ViaVecTy = MVT::v4i32;
} else
LaneA = LaneB;
@@ -1399,7 +1400,8 @@ static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
}
static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
- return DAG.getConstant(Op->getConstantOperandVal(ImmOp), Op->getValueType(0));
+ return DAG.getConstant(Op->getConstantOperandVal(ImmOp), SDLoc(Op),
+ Op->getValueType(0));
}
static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
@@ -1415,7 +1417,7 @@ static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
}
@@ -1451,8 +1453,9 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
- SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), MVT::i32);
- SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), MVT::i32);
+ SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), DL,
+ MVT::i32);
+ SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), DL, MVT::i32);
if (BigEndian)
std::swap(BitImmLoOp, BitImmHiOp);
@@ -1474,8 +1477,8 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG);
- Exp2Imm =
- DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, VecTy), Exp2Imm);
+ Exp2Imm = DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, DL, VecTy),
+ Exp2Imm);
}
return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
@@ -1484,7 +1487,7 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
EVT ResTy = Op->getValueType(0);
SDLoc DL(Op);
- SDValue One = DAG.getConstant(1, ResTy);
+ SDValue One = DAG.getConstant(1, DL, ResTy);
SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1),
@@ -1496,7 +1499,7 @@ static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
EVT ResTy = Op->getValueType(0);
APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
<< cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
- SDValue BitMask = DAG.getConstant(~BitImm, ResTy);
+ SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy);
return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask);
}
@@ -1578,8 +1581,8 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
- Op->getOperand(1));
+ DAG.getConstant(Mask, DL, VecTy, true),
+ Op->getOperand(2), Op->getOperand(1));
}
case Intrinsic::mips_binsri_b:
case Intrinsic::mips_binsri_h:
@@ -1591,8 +1594,8 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
- Op->getOperand(1));
+ DAG.getConstant(Mask, DL, VecTy, true),
+ Op->getOperand(2), Op->getOperand(1));
}
case Intrinsic::mips_bmnz_v:
return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
@@ -1613,7 +1616,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_bneg_w:
case Intrinsic::mips_bneg_d: {
EVT VecTy = Op->getValueType(0);
- SDValue One = DAG.getConstant(1, VecTy);
+ SDValue One = DAG.getConstant(1, DL, VecTy);
return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
@@ -1649,7 +1652,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_bset_w:
case Intrinsic::mips_bset_d: {
EVT VecTy = Op->getValueType(0);
- SDValue One = DAG.getConstant(1, VecTy);
+ SDValue One = DAG.getConstant(1, DL, VecTy);
return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
@@ -1923,7 +1926,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_insve_d:
return DAG.getNode(MipsISD::INSVE, DL, Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2), Op->getOperand(3),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
case Intrinsic::mips_ldi_b:
case Intrinsic::mips_ldi_h:
case Intrinsic::mips_ldi_w:
@@ -2363,7 +2366,7 @@ SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
}
// SelectionDAG::getConstant will promote SplatValue appropriately.
- SDValue Result = DAG.getConstant(SplatValue, ViaVecTy);
+ SDValue Result = DAG.getConstant(SplatValue, DL, ViaVecTy);
// Bitcast to the type we originally wanted
if (ViaVecTy != ResTy)
@@ -2385,7 +2388,7 @@ SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
for (unsigned i = 0; i < NumElts; ++i) {
Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
Node->getOperand(i),
- DAG.getConstant(i, MVT::i32));
+ DAG.getConstant(i, DL, MVT::i32));
}
return Vector;
}
@@ -2455,8 +2458,9 @@ static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
Imm |= Idx & 0x3;
}
- return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
- DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
+ SDLoc DL(Op);
+ return DAG.getNode(MipsISD::SHF, DL, ResTy,
+ DAG.getConstant(Imm, DL, MVT::i32), Op->getOperand(0));
}
// Lower VECTOR_SHUFFLE into ILVEV (if possible).
@@ -2665,7 +2669,7 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
++I)
- Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
+ Ops.push_back(DAG.getTargetConstant(*I, DL, MaskEltTy));
SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, Ops);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 52c5e1b8788..fa38a686fcb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -703,9 +703,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
default:
return nullptr;
}
- SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(fromType),
- getI32Imm(fromTypeWidth), Addr, Chain };
+ SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
+ getI32Imm(vecType, dl), getI32Imm(fromType, dl),
+ getI32Imm(fromTypeWidth, dl), Addr, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
} else if (TM.is64Bit() ? SelectADDRsi64(N1.getNode(), N1, Base, Offset)
: SelectADDRsi(N1.getNode(), N1, Base, Offset)) {
@@ -731,9 +731,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
default:
return nullptr;
}
- SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(fromType),
- getI32Imm(fromTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
+ getI32Imm(vecType, dl), getI32Imm(fromType, dl),
+ getI32Imm(fromTypeWidth, dl), Base, Offset, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
} else if (TM.is64Bit() ? SelectADDRri64(N1.getNode(), N1, Base, Offset)
: SelectADDRri(N1.getNode(), N1, Base, Offset)) {
@@ -784,9 +784,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
return nullptr;
}
}
- SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(fromType),
- getI32Imm(fromTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
+ getI32Imm(vecType, dl), getI32Imm(fromType, dl),
+ getI32Imm(fromTypeWidth, dl), Base, Offset, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
} else {
if (TM.is64Bit()) {
@@ -836,9 +836,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
return nullptr;
}
}
- SDValue Ops[] = { getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(fromType),
- getI32Imm(fromTypeWidth), N1, Chain };
+ SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
+ getI32Imm(vecType, dl), getI32Imm(fromType, dl),
+ getI32Imm(fromTypeWidth, dl), N1, Chain };
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
}
@@ -962,9 +962,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
break;
}
- SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace),
- getI32Imm(VecType), getI32Imm(FromType),
- getI32Imm(FromTypeWidth), Addr, Chain };
+ SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL), Addr, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else if (TM.is64Bit() ? SelectADDRsi64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
@@ -1015,9 +1015,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
break;
}
- SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace),
- getI32Imm(VecType), getI32Imm(FromType),
- getI32Imm(FromTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL), Base, Offset, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
@@ -1117,9 +1117,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
}
}
- SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace),
- getI32Imm(VecType), getI32Imm(FromType),
- getI32Imm(FromTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL), Base, Offset, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
} else {
@@ -1219,9 +1219,9 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
}
}
- SDValue Ops[] = { getI32Imm(IsVolatile), getI32Imm(CodeAddrSpace),
- getI32Imm(VecType), getI32Imm(FromType),
- getI32Imm(FromTypeWidth), Op1, Chain };
+ SDValue Ops[] = { getI32Imm(IsVolatile, DL), getI32Imm(CodeAddrSpace, DL),
+ getI32Imm(VecType, DL), getI32Imm(FromType, DL),
+ getI32Imm(FromTypeWidth, DL), Op1, Chain };
LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
}
@@ -2068,9 +2068,10 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
default:
return nullptr;
}
- SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(toType),
- getI32Imm(toTypeWidth), Addr, Chain };
+ SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
+ getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
+ getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Addr,
+ Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
} else if (TM.is64Bit() ? SelectADDRsi64(N2.getNode(), N2, Base, Offset)
: SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
@@ -2096,9 +2097,10 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
default:
return nullptr;
}
- SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(toType),
- getI32Imm(toTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
+ getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
+ getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Base,
+ Offset, Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
} else if (TM.is64Bit() ? SelectADDRri64(N2.getNode(), N2, Base, Offset)
: SelectADDRri(N2.getNode(), N2, Base, Offset)) {
@@ -2149,9 +2151,10 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
return nullptr;
}
}
- SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(toType),
- getI32Imm(toTypeWidth), Base, Offset, Chain };
+ SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
+ getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
+ getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), Base,
+ Offset, Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
} else {
if (TM.is64Bit()) {
@@ -2201,9 +2204,10 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
return nullptr;
}
}
- SDValue Ops[] = { N1, getI32Imm(isVolatile), getI32Imm(codeAddrSpace),
- getI32Imm(vecType), getI32Imm(toType),
- getI32Imm(toTypeWidth), N2, Chain };
+ SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
+ getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
+ getI32Imm(toType, dl), getI32Imm(toTypeWidth, dl), N2,
+ Chain };
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
}
@@ -2277,11 +2281,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
return nullptr;
}
- StOps.push_back(getI32Imm(IsVolatile));
- StOps.push_back(getI32Imm(CodeAddrSpace));
- StOps.push_back(getI32Imm(VecType));
- StOps.push_back(getI32Imm(ToType));
- StOps.push_back(getI32Imm(ToTypeWidth));
+ StOps.push_back(getI32Imm(IsVolatile, DL));
+ StOps.push_back(getI32Imm(CodeAddrSpace, DL));
+ StOps.push_back(getI32Imm(VecType, DL));
+ StOps.push_back(getI32Imm(ToType, DL));
+ StOps.push_back(getI32Imm(ToTypeWidth, DL));
if (SelectDirectAddr(N2, Addr)) {
switch (N->getOpcode()) {
@@ -2710,13 +2714,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
SmallVector<SDValue, 2> Ops;
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
Ops.push_back(Chain);
Ops.push_back(Flag);
- SDNode *Ret =
- CurDAG->getMachineNode(Opc, DL, VTs, Ops);
- return Ret;
+ return CurDAG->getMachineNode(Opc, DL, VTs, Ops);
}
SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
@@ -2746,7 +2748,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
SmallVector<SDValue, 6> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(N->getOperand(i + 2));
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
Ops.push_back(Chain);
// Determine target opcode
@@ -2874,8 +2876,8 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(N->getOperand(i + 3));
- Ops.push_back(CurDAG->getTargetConstant(ParamVal, MVT::i32));
- Ops.push_back(CurDAG->getTargetConstant(OffsetVal, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(ParamVal, DL, MVT::i32));
+ Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
Ops.push_back(Chain);
Ops.push_back(Flag);
@@ -2970,7 +2972,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
// the selected StoreParam node.
case NVPTXISD::StoreParamU32: {
Opcode = NVPTX::StoreParamI32;
- SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE,
+ SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL,
MVT::i32);
SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_u32_u16, DL,
MVT::i32, Ops[0], CvtNone);
@@ -2979,7 +2981,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
}
case NVPTXISD::StoreParamS32: {
Opcode = NVPTX::StoreParamI32;
- SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE,
+ SDValue CvtNone = CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE, DL,
MVT::i32);
SDNode *Cvt = CurDAG->getMachineNode(NVPTX::CVT_s32_s16, DL,
MVT::i32, Ops[0], CvtNone);
@@ -4727,6 +4729,7 @@ SDNode *NVPTXDAGToDAGISel::SelectSurfaceIntrinsic(SDNode *N) {
/// SelectBFE - Look for instruction sequences that can be made more efficient
/// by using the 'bfe' (bit-field extract) PTX instruction
SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
+ SDLoc DL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Len;
@@ -4758,7 +4761,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
// How many bits are in our mask?
uint64_t NumBits = countTrailingOnes(MaskVal);
- Len = CurDAG->getTargetConstant(NumBits, MVT::i32);
+ Len = CurDAG->getTargetConstant(NumBits, DL, MVT::i32);
if (LHS.getOpcode() == ISD::SRL || LHS.getOpcode() == ISD::SRA) {
// We have a 'srl/and' pair, extract the effective start bit and length
@@ -4776,7 +4779,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
// emitting the srl/and pair.
return NULL;
}
- Start = CurDAG->getTargetConstant(StartVal, MVT::i32);
+ Start = CurDAG->getTargetConstant(StartVal, DL, MVT::i32);
} else {
// Do not handle the case where the shift amount (can be zero if no srl
// was found) is not constant. We could handle this case, but it would
@@ -4841,8 +4844,8 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
}
Val = AndLHS;
- Start = CurDAG->getTargetConstant(ShiftAmt, MVT::i32);
- Len = CurDAG->getTargetConstant(NumBits, MVT::i32);
+ Start = CurDAG->getTargetConstant(ShiftAmt, DL, MVT::i32);
+ Len = CurDAG->getTargetConstant(NumBits, DL, MVT::i32);
} else if (LHS->getOpcode() == ISD::SHL) {
// Here, we have a pattern like:
//
@@ -4882,10 +4885,10 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
}
Start =
- CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, MVT::i32);
+ CurDAG->getTargetConstant(OuterShiftAmt - InnerShiftAmt, DL, MVT::i32);
Len =
CurDAG->getTargetConstant(Val.getValueType().getSizeInBits() -
- OuterShiftAmt, MVT::i32);
+ OuterShiftAmt, DL, MVT::i32);
if (N->getOpcode() == ISD::SRA) {
// If we have a arithmetic right shift, we need to use the signed bfe
@@ -4926,10 +4929,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
Val, Start, Len
};
- SDNode *Ret =
- CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
-
- return Ret;
+ return CurDAG->getMachineNode(Opc, DL, N->getVTList(), Ops);
}
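SelectBFE, patched above, matches shift-and-mask sequences into PTX's bfe (bit-field extract) instruction; the identity it relies on is worth spelling out. A scalar sketch with a hypothetical helper name, assuming Start < 32 and Start + Len <= 32:

#include <cstdint>

// (X >> Start) & ((1 << Len) - 1) extracts Len bits starting at bit Start,
// which is what a single bfe.u32 computes.
static uint32_t bfeEquivalent(uint32_t X, unsigned Start, unsigned Len) {
  uint32_t Mask = (Len >= 32) ? 0xFFFFFFFFu : ((1u << Len) - 1u);
  return (X >> Start) & Mask;
}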
// SelectDirectAddr - Match a direct address for DAG.
@@ -4961,7 +4961,8 @@ bool NVPTXDAGToDAGISel::SelectADDRsi_imp(
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
SDValue base = Addr.getOperand(0);
if (SelectDirectAddr(base, Base)) {
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), mvt);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(OpNode),
+ mvt);
return true;
}
}
@@ -4986,7 +4987,7 @@ bool NVPTXDAGToDAGISel::SelectADDRri_imp(
SDNode *OpNode, SDValue Addr, SDValue &Base, SDValue &Offset, MVT mvt) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt);
- Offset = CurDAG->getTargetConstant(0, mvt);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(OpNode), mvt);
return true;
}
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
@@ -5004,7 +5005,8 @@ bool NVPTXDAGToDAGISel::SelectADDRri_imp(
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), mvt);
else
Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), mvt);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(OpNode),
+ mvt);
return true;
}
}
@@ -5049,7 +5051,7 @@ bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(
case InlineAsm::Constraint_m: // memory
if (SelectDirectAddr(Op, Op0)) {
OutOps.push_back(Op0);
- OutOps.push_back(CurDAG->getTargetConstant(0, MVT::i32));
+ OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
return false;
}
if (SelectADDRri(Op.getNode(), Op, Op0, Op1)) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index 6d845c9d5d1..fe20580c83a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -71,8 +71,8 @@ private:
SDNode *SelectSurfaceIntrinsic(SDNode *N);
SDNode *SelectBFE(SDNode *N);
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}
// Match direct address complex pattern.
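This header change is what drives the churn through SelectLoad/SelectStore above: the helper no longer has a location-free overload, so every call site creates an SDLoc (normally from the node being selected) and threads it through. A free-standing sketch of the same convention, with an invented helper name, not part of the patch:

static SDValue makeI32Imm(SelectionDAG &DAG, unsigned Imm, const SDLoc &DL) {
  return DAG.getTargetConstant(Imm, DL, MVT::i32);
}

// Typical use at a select-time call site (operand names are placeholders):
//   SDLoc dl(N);
//   SDValue Ops[] = { makeI32Imm(*CurDAG, isVolatile, dl),
//                     makeI32Imm(*CurDAG, codeAddrSpace, dl), Addr, Chain };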
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 8b0665708b9..d498f3c5f27 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1053,9 +1053,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
const Function *F = MF.getFunction();
SDValue tempChain = Chain;
- Chain =
- DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
- dl);
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+ dl);
SDValue InFlag = Chain.getValue(1);
unsigned paramCount = 0;
@@ -1086,9 +1086,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// declare .param .align <align> .b8 .param<n>[<size>];
unsigned sz = TD->getTypeAllocSize(Ty);
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(sz, MVT::i32), InFlag };
+ SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, dl,
+ MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32),
+ InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
DeclareParamOps);
InFlag = Chain.getValue(1);
@@ -1103,8 +1105,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(Offsets[j], MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(Offsets[j], dl, MVT::i32),
StVal, InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
CopyParamVTs, CopyParamOps,
@@ -1124,9 +1126,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// declare .param .align <align> .b8 .param<n>[<size>];
unsigned sz = TD->getTypeAllocSize(Ty);
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(sz, MVT::i32), InFlag };
+ SDValue DeclareParamOps[] = { Chain,
+ DAG.getConstant(align, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32),
+ InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
DeclareParamOps);
InFlag = Chain.getValue(1);
@@ -1147,8 +1151,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), Elt,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), Elt,
InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
CopyParamVTs, CopyParamOps,
@@ -1164,9 +1168,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), Elt0, Elt1,
- InFlag };
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), Elt0,
+ Elt1, InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
CopyParamVTs, CopyParamOps,
MemVT, MachinePointerInfo());
@@ -1196,8 +1200,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue StoreVal;
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
- Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
- Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
+ Ops.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
+ Ops.push_back(DAG.getConstant(curOffset, dl, MVT::i32));
unsigned Opc = NVPTXISD::StoreParamV2;
@@ -1264,9 +1268,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareParamOps[] = { Chain,
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
DeclareParamOps);
InFlag = Chain.getValue(1);
@@ -1279,8 +1283,10 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
}
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), OutV, InFlag };
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), OutV,
+ InFlag };
unsigned opcode = NVPTXISD::StoreParam;
if (Outs[OIdx].Flags.isZExt())
@@ -1309,9 +1315,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// so we don't need to worry about natural alignment or not.
// See TargetLowering::LowerCallTo().
SDValue DeclareParamOps[] = {
- Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
- DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
- InFlag
+ Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32), InFlag
};
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
DeclareParamOps);
@@ -1322,7 +1328,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
SDValue srcAddr =
DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
- DAG.getConstant(curOffset, getPointerTy()));
+ DAG.getConstant(curOffset, dl, getPointerTy()));
SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
MachinePointerInfo(), false, false, false,
PartAlign);
@@ -1330,9 +1336,10 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
}
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(curOffset, MVT::i32), theVal,
- InFlag };
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(curOffset, dl, MVT::i32),
+ theVal, InFlag };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
CopyParamOps, elemtype,
MachinePointerInfo());
@@ -1364,9 +1371,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (resultsz < 32)
resultsz = 32;
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(resultsz, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(resultsz, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
DeclareRetOps);
InFlag = Chain.getValue(1);
@@ -1374,9 +1381,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareRetOps[] = { Chain,
- DAG.getConstant(retAlignment, MVT::i32),
- DAG.getConstant(resultsz / 8, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
+ DAG.getConstant(retAlignment, dl, MVT::i32),
+ DAG.getConstant(resultsz / 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
DeclareRetOps);
InFlag = Chain.getValue(1);
@@ -1404,7 +1411,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Op to just print "call"
SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue PrintCallOps[] = {
- Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
+ Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
};
Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
dl, PrintCallVTs, PrintCallOps);
@@ -1430,20 +1437,22 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else
opcode = NVPTXISD::CallArg;
SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(i, MVT::i32), InFlag };
+ SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(i, dl, MVT::i32), InFlag };
Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
InFlag = Chain.getValue(1);
}
SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
+ SDValue CallArgEndOps[] = { Chain,
+ DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
InFlag };
Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
InFlag = Chain.getValue(1);
if (!Func) {
SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
+ SDValue PrototypeOps[] = { Chain,
+ DAG.getConstant(uniqueCallSite, dl, MVT::i32),
InFlag };
Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
InFlag = Chain.getValue(1);
@@ -1474,8 +1483,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
LoadRetVTs.push_back(EltVT);
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag};
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
@@ -1501,8 +1510,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag};
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParamV2, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
@@ -1544,8 +1553,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(Ofst, MVT::i32), InFlag};
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Ofst, dl, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
Opc, dl, DAG.getVTList(LoadRetVTs),
LoadRetOps, EltVT, MachinePointerInfo());
@@ -1599,8 +1608,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(Offsets[i], MVT::i32), InFlag};
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Offsets[i], dl, MVT::i32),
+ InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps,
@@ -1615,8 +1625,10 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
- DAG.getIntPtrConstant(uniqueCallSite + 1, true),
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+ DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
+ true),
InFlag, dl);
uniqueCallSite++;
@@ -1642,7 +1654,7 @@ NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
unsigned NumSubElem = VVT.getVectorNumElements();
for (unsigned j = 0; j < NumSubElem; ++j) {
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
- DAG.getIntPtrConstant(j)));
+ DAG.getIntPtrConstant(j, dl)));
}
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
@@ -1691,16 +1703,18 @@ SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
// dHi = aHi >> Amt
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
+ DAG.getConstant(VTBits, dl, MVT::i32));
SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
@@ -1751,16 +1765,18 @@ SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
// dHi = (aHi << Amt) | (aLo >> (size-Amt))
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
+ DAG.getConstant(VTBits, dl, MVT::i32));
SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
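The two shift-lowering hunks above keep the existing expansion and only thread dl into the VTBits constants, but the expansion itself carries the interesting reasoning: a 2*N-bit shift is built from N-bit halves plus a SELECT on whether the amount reaches N, exactly as the SETGE/SELECT pair above encodes. A scalar sketch of the shift-left case with 32-bit halves (illustration only; it assumes Amt < 64 and guards the shift-by-32 cases that are undefined in C but handled by the selected target shifts):

#include <cstdint>

static void shiftLeftParts(uint32_t Hi, uint32_t Lo, unsigned Amt,
                           uint32_t &HiOut, uint32_t &LoOut) {
  const unsigned N = 32;
  if (Amt < N) {
    // dHi = (aHi << Amt) | (aLo >> (N - Amt)); dLo = aLo << Amt
    HiOut = (Hi << Amt) | (Amt ? (Lo >> (N - Amt)) : 0);
    LoOut = Lo << Amt;
  } else {
    // Amount reaches the high half: dHi = aLo << (Amt - N); dLo = 0
    HiOut = Lo << (Amt - N);
    LoOut = 0;
  }
}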
@@ -1933,7 +1949,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
// Then the split values
for (unsigned i = 0; i < NumElts; ++i) {
SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
- DAG.getIntPtrConstant(i));
+ DAG.getIntPtrConstant(i, DL));
if (NeedExt)
ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
Ops.push_back(ExtVal);
@@ -2077,7 +2093,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
(theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
: nullptr))) {
assert(isKernel && "Only kernels can have image/sampler params");
- InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
+ InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
continue;
}
@@ -2139,7 +2155,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
llvm::ADDRESS_SPACE_PARAM));
SDValue srcAddr =
DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
- DAG.getConstant(offsets[parti], getPointerTy()));
+ DAG.getConstant(offsets[parti], dl, getPointerTy()));
unsigned partAlign =
aggregateIsPacked ? 1
: TD->getABITypeAlignment(
@@ -2204,9 +2220,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
P.getNode()->setIROrder(idx + 1);
SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
- DAG.getIntPtrConstant(1));
+ DAG.getIntPtrConstant(1, dl));
if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
@@ -2239,7 +2255,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
llvm::ADDRESS_SPACE_PARAM));
SDValue SrcAddr =
DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
- DAG.getConstant(Ofst, getPointerTy()));
+ DAG.getConstant(Ofst, dl, getPointerTy()));
SDValue P = DAG.getLoad(
VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
false, true,
@@ -2251,7 +2267,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
if (i + j >= NumElts)
break;
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
- DAG.getIntPtrConstant(j));
+ DAG.getIntPtrConstant(j, dl));
if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
InVals.push_back(Elt);
@@ -2309,7 +2325,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
else {
SDValue p2 = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
- DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
+ DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, dl, MVT::i32), p);
InVals.push_back(p2);
}
}
@@ -2363,7 +2379,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// We only have one element, so just directly store it
if (NeedExtend)
StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
- SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
+ SDValue Ops[] = { Chain, DAG.getConstant(0, dl, MVT::i32), StoreVal };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
DAG.getVTList(MVT::Other), Ops,
EltVT, MachinePointerInfo());
@@ -2378,7 +2394,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
}
- SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
+ SDValue Ops[] = { Chain, DAG.getConstant(0, dl, MVT::i32), StoreVal0,
StoreVal1 };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
DAG.getVTList(MVT::Other), Ops,
@@ -2410,7 +2426,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue StoreVal;
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
- Ops.push_back(DAG.getConstant(Offset, MVT::i32));
+ Ops.push_back(DAG.getConstant(Offset, dl, MVT::i32));
unsigned Opc = NVPTXISD::StoreRetvalV2;
EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
@@ -2475,7 +2491,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (TheValType.isVector())
TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
TheValType.getVectorElementType(), TmpVal,
- DAG.getIntPtrConstant(j));
+ DAG.getIntPtrConstant(j, dl));
EVT TheStoreType = ValVTs[i];
if (RetTy->isIntegerTy() &&
TD->getTypeAllocSizeInBits(RetTy) < 32) {
@@ -2489,7 +2505,7 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue Ops[] = {
Chain,
- DAG.getConstant(Offsets[i], MVT::i32),
+ DAG.getConstant(Offsets[i], dl, MVT::i32),
TmpVal };
Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
DAG.getVTList(MVT::Other), Ops,
@@ -4111,6 +4127,7 @@ static SDValue TryMULWIDECombine(SDNode *N,
return SDValue();
}
+ SDLoc DL(N);
unsigned OptSize = MulType.getSizeInBits() >> 1;
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -4133,7 +4150,7 @@ static SDValue TryMULWIDECombine(SDNode *N,
unsigned BitWidth = MulType.getSizeInBits();
if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
- RHS = DCI.DAG.getConstant(MulVal, MulType);
+ RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
} else {
return SDValue();
}
@@ -4155,9 +4172,9 @@ static SDValue TryMULWIDECombine(SDNode *N,
// Truncate the operands to the correct size. Note that these are just for
// type consistency and will (likely) be eliminated in later phases.
SDValue TruncLHS =
- DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, LHS);
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
SDValue TruncRHS =
- DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, RHS);
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
unsigned Opc;
if (Signed) {
@@ -4166,7 +4183,7 @@ static SDValue TryMULWIDECombine(SDNode *N,
Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
}
- return DCI.DAG.getNode(Opc, SDLoc(N), MulType, TruncLHS, TruncRHS);
+ return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
}
/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
@@ -4294,7 +4311,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
// The select routine does not have access to the LoadSDNode instance, so
// pass along the extension information
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
LD->getMemoryVT(),
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 68f0d9fa41e..6fdd60f3ed2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -452,13 +452,13 @@ def Int4Const : PatLeaf<(imm), [{
def SHL2MUL32 : SDNodeXForm<imm, [{
const APInt &v = N->getAPIntValue();
APInt temp(32, 1);
- return CurDAG->getTargetConstant(temp.shl(v), MVT::i32);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
}]>;
def SHL2MUL16 : SDNodeXForm<imm, [{
const APInt &v = N->getAPIntValue();
APInt temp(16, 1);
- return CurDAG->getTargetConstant(temp.shl(v), MVT::i16);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
}]>;
def MULWIDES64
@@ -1138,7 +1138,7 @@ def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst),
[]>;
def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32-N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(32-N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
@@ -1189,7 +1189,7 @@ def ROT64imm_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
[]>;
def SUB_FRM_64 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(64-N->getZExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
diff --git a/llvm/lib/Target/NVPTX/NVPTXVector.td b/llvm/lib/Target/NVPTX/NVPTXVector.td
index 85aa34e9aea..a237247e483 100644
--- a/llvm/lib/Target/NVPTX/NVPTXVector.td
+++ b/llvm/lib/Target/NVPTX/NVPTXVector.td
@@ -735,19 +735,19 @@ def VecShuffle_v2i64 : NVPTXVecInst<(outs V2I64Regs:$dst),
def ShuffleMask0 : SDNodeXForm<vector_shuffle, [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return CurDAG->getTargetConstant(SVOp->getMaskElt(0), MVT::i32);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(0), SDLoc(N), MVT::i32);
}]>;
def ShuffleMask1 : SDNodeXForm<vector_shuffle, [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return CurDAG->getTargetConstant(SVOp->getMaskElt(1), MVT::i32);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(1), SDLoc(N), MVT::i32);
}]>;
def ShuffleMask2 : SDNodeXForm<vector_shuffle, [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return CurDAG->getTargetConstant(SVOp->getMaskElt(2), MVT::i32);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(2), SDLoc(N), MVT::i32);
}]>;
def ShuffleMask3 : SDNodeXForm<vector_shuffle, [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return CurDAG->getTargetConstant(SVOp->getMaskElt(3), MVT::i32);
+ return CurDAG->getTargetConstant(SVOp->getMaskElt(3), SDLoc(N), MVT::i32);
}]>;
// The spurious call is here to silence a compiler warning about N being
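The SDNodeXForm bodies in these two .td files are C++ fragments spliced into the generated matcher, so they follow the same rule as hand-written selection code: the constant they synthesize is stamped with the location of the node being transformed. A rough stand-alone equivalent of the SUB_FRM_32 transform (the name is invented; N is the matched immediate node):

static SDValue subFrom32(SelectionDAG *CurDAG, ConstantSDNode *N) {
  // Compute 32 minus the matched amount, as the rotate patterns above use it,
  // tagging the new constant with the original node's location.
  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
}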
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 4f8d01b059c..512eddcb0da 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -90,19 +90,19 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
- inline SDValue getI64Imm(uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i64);
+ inline SDValue getI64Imm(uint64_t Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
}
/// getSmallIPtrImm - Return a target constant of pointer type.
- inline SDValue getSmallIPtrImm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, PPCLowering->getPointerTy());
+ inline SDValue getSmallIPtrImm(unsigned Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, PPCLowering->getPointerTy());
}
/// isRotateAndMask - Returns true if Mask and Shift can be folded into a
@@ -197,10 +197,11 @@ namespace {
// (because we might end up lowering this as 0(%op)).
const TargetRegisterInfo *TRI = PPCSubTarget->getRegisterInfo();
const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
- SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
+ SDLoc dl(Op);
+ SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
SDValue NewOp =
SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
- SDLoc(Op), Op.getValueType(),
+ dl, Op.getValueType(),
Op, RC), 0);
OutOps.push_back(NewOp);
@@ -406,9 +407,9 @@ SDNode *PPCDAGToDAGISel::getFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
if (SN->hasOneUse())
return CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
- getSmallIPtrImm(Offset));
+ getSmallIPtrImm(Offset, dl));
return CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
- getSmallIPtrImm(Offset));
+ getSmallIPtrImm(Offset, dl));
}
bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
@@ -523,8 +524,8 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
}
SH &= 31;
- SDValue Ops[] = { Op0, Op1, getI32Imm(SH), getI32Imm(MB),
- getI32Imm(ME) };
+ SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops);
}
}
@@ -652,8 +653,8 @@ static SDNode *SelectInt64Direct(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
unsigned Lo = Imm & 0xFFFF;
unsigned Hi = (Imm >> 16) & 0xFFFF;
- auto getI32Imm = [CurDAG](unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ auto getI32Imm = [CurDAG, dl](unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
};
// Simple value.
@@ -743,8 +744,8 @@ static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
if (!RMin)
return SelectInt64Direct(CurDAG, dl, Imm);
- auto getI32Imm = [CurDAG](unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ auto getI32Imm = [CurDAG, dl](unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
};
SDValue Val = SDValue(SelectInt64Direct(CurDAG, dl, MatImm), 0);
@@ -1194,8 +1195,8 @@ class BitPermutationSelector {
}
}
- SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
uint64_t getZerosMask() {
@@ -1267,7 +1268,8 @@ class BitPermutationSelector {
SDValue VRot;
if (VRI.RLAmt) {
SDValue Ops[] =
- { VRI.V, getI32Imm(VRI.RLAmt), getI32Imm(0), getI32Imm(31) };
+ { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
+ getI32Imm(31, dl) };
VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32,
Ops), 0);
} else {
@@ -1277,10 +1279,10 @@ class BitPermutationSelector {
SDValue ANDIVal, ANDISVal;
if (ANDIMask != 0)
ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
- VRot, getI32Imm(ANDIMask)), 0);
+ VRot, getI32Imm(ANDIMask, dl)), 0);
if (ANDISMask != 0)
ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
- VRot, getI32Imm(ANDISMask)), 0);
+ VRot, getI32Imm(ANDISMask, dl)), 0);
SDValue TotalVal;
if (!ANDIVal)
@@ -1326,8 +1328,10 @@ class BitPermutationSelector {
if (VRI.RLAmt) {
if (InstCnt) *InstCnt += 1;
SDValue Ops[] =
- { VRI.V, getI32Imm(VRI.RLAmt), getI32Imm(0), getI32Imm(31) };
- Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
+ { VRI.V, getI32Imm(VRI.RLAmt, dl), getI32Imm(0, dl),
+ getI32Imm(31, dl) };
+ Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops),
+ 0);
} else {
Res = VRI.V;
}
@@ -1347,13 +1351,15 @@ class BitPermutationSelector {
for (auto &BG : BitGroups) {
if (!Res) {
SDValue Ops[] =
- { BG.V, getI32Imm(BG.RLAmt), getI32Imm(Bits.size() - BG.EndIdx - 1),
- getI32Imm(Bits.size() - BG.StartIdx - 1) };
+ { BG.V, getI32Imm(BG.RLAmt, dl),
+ getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
+ getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
} else {
SDValue Ops[] =
- { Res, BG.V, getI32Imm(BG.RLAmt), getI32Imm(Bits.size() - BG.EndIdx - 1),
- getI32Imm(Bits.size() - BG.StartIdx - 1) };
+ { Res, BG.V, getI32Imm(BG.RLAmt, dl),
+ getI32Imm(Bits.size() - BG.EndIdx - 1, dl),
+ getI32Imm(Bits.size() - BG.StartIdx - 1, dl) };
Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0);
}
}
@@ -1372,10 +1378,10 @@ class BitPermutationSelector {
SDValue ANDIVal, ANDISVal;
if (ANDIMask != 0)
ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo, dl, MVT::i32,
- Res, getI32Imm(ANDIMask)), 0);
+ Res, getI32Imm(ANDIMask, dl)), 0);
if (ANDISMask != 0)
ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo, dl, MVT::i32,
- Res, getI32Imm(ANDISMask)), 0);
+ Res, getI32Imm(ANDISMask, dl)), 0);
if (!ANDIVal)
Res = ANDISVal;
@@ -1426,27 +1432,27 @@ class BitPermutationSelector {
assert(InstMaskStart >= 32 && "Mask cannot start out of range");
assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
SDValue Ops[] =
- { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart - 32),
- getI32Imm(InstMaskEnd - 32) };
+ { V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskStart - 32, dl),
+ getI32Imm(InstMaskEnd - 32, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64,
Ops), 0);
}
if (InstMaskEnd == 63) {
SDValue Ops[] =
- { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ { V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskStart, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0);
}
if (InstMaskStart == 0) {
SDValue Ops[] =
- { V, getI32Imm(RLAmt), getI32Imm(InstMaskEnd) };
+ { V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskEnd, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0);
}
if (InstMaskEnd == 63 - RLAmt) {
SDValue Ops[] =
- { V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ { V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskStart, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0);
}
@@ -1487,15 +1493,15 @@ class BitPermutationSelector {
assert(InstMaskStart >= 32 && "Mask cannot start out of range");
assert(InstMaskEnd >= 32 && "Mask cannot end out of range");
SDValue Ops[] =
- { Base, V, getI32Imm(RLAmt), getI32Imm(InstMaskStart - 32),
- getI32Imm(InstMaskEnd - 32) };
+ { Base, V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskStart - 32, dl),
+ getI32Imm(InstMaskEnd - 32, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64,
Ops), 0);
}
if (InstMaskEnd == 63 - RLAmt) {
SDValue Ops[] =
- { Base, V, getI32Imm(RLAmt), getI32Imm(InstMaskStart) };
+ { Base, V, getI32Imm(RLAmt, dl), getI32Imm(InstMaskStart, dl) };
return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0);
}
@@ -1642,10 +1648,10 @@ class BitPermutationSelector {
SDValue ANDIVal, ANDISVal;
if (ANDIMask != 0)
ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
- VRot, getI32Imm(ANDIMask)), 0);
+ VRot, getI32Imm(ANDIMask, dl)), 0);
if (ANDISMask != 0)
ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
- VRot, getI32Imm(ANDISMask)), 0);
+ VRot, getI32Imm(ANDISMask, dl)), 0);
if (!ANDIVal)
TotalVal = ANDISVal;
@@ -1792,10 +1798,10 @@ class BitPermutationSelector {
SDValue ANDIVal, ANDISVal;
if (ANDIMask != 0)
ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDIo8, dl, MVT::i64,
- Res, getI32Imm(ANDIMask)), 0);
+ Res, getI32Imm(ANDIMask, dl)), 0);
if (ANDISMask != 0)
ANDISVal = SDValue(CurDAG->getMachineNode(PPC::ANDISo8, dl, MVT::i64,
- Res, getI32Imm(ANDISMask)), 0);
+ Res, getI32Imm(ANDISMask, dl)), 0);
if (!ANDIVal)
Res = ANDISVal;
@@ -1940,11 +1946,13 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
if (isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)),
+ 0);
// If this is a 16-bit signed immediate, fold it.
if (isInt<16>((int)Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)),
+ 0);
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
@@ -1956,21 +1964,22 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// cmplwi cr0,r0,0x5678
// beq cr0,L6
SDValue Xor(CurDAG->getMachineNode(PPC::XORIS, dl, MVT::i32, LHS,
- getI32Imm(Imm >> 16)), 0);
+ getI32Imm(Imm >> 16, dl)), 0);
return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, Xor,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)), 0);
}
Opc = PPC::CMPLW;
} else if (ISD::isUnsignedIntSetCC(CC)) {
if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)), 0);
Opc = PPC::CMPLW;
} else {
short SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
- getI32Imm((int)SImm & 0xFFFF)),
+ getI32Imm((int)SImm & 0xFFFF,
+ dl)),
0);
Opc = PPC::CMPW;
}
@@ -1981,11 +1990,13 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
if (isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)),
+ 0);
// If this is a 16-bit signed immediate, fold it.
if (isInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
- getI32Imm(Imm & 0xFFFF)), 0);
+ getI32Imm(Imm & 0xFFFF, dl)),
+ 0);
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
@@ -1998,22 +2009,23 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// beq cr0,L6
if (isUInt<32>(Imm)) {
SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS,
- getI64Imm(Imm >> 16)), 0);
+ getI64Imm(Imm >> 16, dl)), 0);
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor,
- getI64Imm(Imm & 0xFFFF)), 0);
+ getI64Imm(Imm & 0xFFFF, dl)),
+ 0);
}
}
Opc = PPC::CMPLD;
} else if (ISD::isUnsignedIntSetCC(CC)) {
if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
- getI64Imm(Imm & 0xFFFF)), 0);
+ getI64Imm(Imm & 0xFFFF, dl)), 0);
Opc = PPC::CMPLD;
} else {
short SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
- getI64Imm(SImm & 0xFFFF)),
+ getI64Imm(SImm & 0xFFFF, dl)),
0);
Opc = PPC::CMPD;
}
@@ -2215,26 +2227,29 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
default: break;
case ISD::SETEQ: {
Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
- SDValue Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) };
+ SDValue Ops[] = { Op, getI32Imm(27, dl), getI32Imm(5, dl),
+ getI32Imm(31, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETNE: {
if (isPPC64) break;
SDValue AD =
SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
- Op, getI32Imm(~0U)), 0);
+ Op, getI32Imm(~0U, dl)), 0);
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
AD.getValue(1));
}
case ISD::SETLT: {
- SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
+ SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
+ getI32Imm(31, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETGT: {
SDValue T =
SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Op), 0);
T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
- SDValue Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
+ SDValue Ops[] = { T, getI32Imm(1, dl), getI32Imm(31, dl),
+ getI32Imm(31, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
}
@@ -2245,34 +2260,35 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
case ISD::SETEQ:
if (isPPC64) break;
Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
- Op, getI32Imm(1)), 0);
+ Op, getI32Imm(1, dl)), 0);
return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
SDValue(CurDAG->getMachineNode(PPC::LI, dl,
MVT::i32,
- getI32Imm(0)), 0),
- Op.getValue(1));
+ getI32Imm(0, dl)),
+ 0), Op.getValue(1));
case ISD::SETNE: {
if (isPPC64) break;
Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
- Op, getI32Imm(~0U));
+ Op, getI32Imm(~0U, dl));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0),
Op, SDValue(AD, 1));
}
case ISD::SETLT: {
SDValue AD = SDValue(CurDAG->getMachineNode(PPC::ADDI, dl, MVT::i32, Op,
- getI32Imm(1)), 0);
+ getI32Imm(1, dl)), 0);
SDValue AN = SDValue(CurDAG->getMachineNode(PPC::AND, dl, MVT::i32, AD,
Op), 0);
- SDValue Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
+ SDValue Ops[] = { AN, getI32Imm(1, dl), getI32Imm(31, dl),
+ getI32Imm(31, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
case ISD::SETGT: {
- SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops),
- 0);
+ SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
+ getI32Imm(31, dl) };
+ Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
- getI32Imm(1));
+ getI32Imm(1, dl));
}
}
}
@@ -2322,15 +2338,15 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
- SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
- getI32Imm(31), getI32Imm(31) };
+ SDValue Ops[] = { IntCR, getI32Imm((32 - (3 - Idx)) & 31, dl),
+ getI32Imm(31, dl), getI32Imm(31, dl) };
if (!Inv)
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
// Get the specified bit.
SDValue Tmp =
SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
+ return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl));
}
SDNode *PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
@@ -2398,7 +2414,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue ShiftAmt =
CurDAG->getTargetConstant(*cast<ConstantSDNode>(N->getOperand(1))->
- getConstantIntValue(), N->getValueType(0));
+ getConstantIntValue(), dl,
+ N->getValueType(0));
if (N->getValueType(0) == MVT::i64) {
SDNode *Op =
CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue,
@@ -2513,7 +2530,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (isInt32Immediate(N->getOperand(1), Imm) &&
isRotateAndMask(N->getOperand(0).getNode(), Imm, false, SH, MB, ME)) {
SDValue Val = N->getOperand(0).getOperand(0);
- SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
+ SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// If this is just a masked value where the input is not handled above, and
@@ -2522,7 +2540,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
isRunOfOnes(Imm, MB, ME) &&
N->getOperand(0).getOpcode() != ISD::ROTL) {
SDValue Val = N->getOperand(0);
- SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
+ SDValue Ops[] = { Val, getI32Imm(0, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
// If this is a 64-bit zero-extension mask, emit rldicl.
@@ -2544,7 +2563,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SH = 64 - Imm;
}
- SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB) };
+ SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops);
}
// AND X, 0 -> 0, not "rlwinm 32".
@@ -2562,7 +2581,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (isRunOfOnes(Imm, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
N->getOperand(0).getOperand(1),
- getI32Imm(0), getI32Imm(MB),getI32Imm(ME) };
+ getI32Imm(0, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops);
}
}
@@ -2603,7 +2623,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
+ getI32Imm(SH, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
@@ -2615,7 +2636,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
- getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
+ getI32Imm(SH, dl), getI32Imm(MB, dl),
+ getI32Imm(ME, dl) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
}
@@ -2635,11 +2657,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDIo8 : PPC::ANDIo;
SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue,
N->getOperand(0),
- CurDAG->getTargetConstant(1, InVT)), 0);
+ CurDAG->getTargetConstant(1, dl, InVT)),
+ 0);
SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32);
SDValue SRIdxVal =
CurDAG->getTargetConstant(N->getOpcode() == PPCISD::ANDIo_1_EQ_BIT ?
- PPC::sub_eq : PPC::sub_gt, MVT::i32);
+ PPC::sub_eq : PPC::sub_gt, dl, MVT::i32);
return CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1,
CR0Reg, SRIdxVal,
@@ -2666,7 +2689,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
N->getValueType(0) == MVT::i32) {
SDNode *Tmp =
CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
- N->getOperand(0), getI32Imm(~0U));
+ N->getOperand(0), getI32Imm(~0U, dl));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32,
SDValue(Tmp, 0), N->getOperand(0),
SDValue(Tmp, 1));
@@ -2730,7 +2753,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SelectCCOp = PPC::SELECT_CC_VRRC;
SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3),
- getI32Imm(BROpc) };
+ getI32Imm(BROpc, dl) };
return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops);
}
case ISD::VSELECT:
@@ -2764,7 +2787,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
DM[1] = 1 - tmp;
}
- SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), MVT::i32);
+ SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), dl,
+ MVT::i32);
if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 &&
Op1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
@@ -2803,7 +2827,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Op #4 is the Flag.
// Prevent PPC::PRED_* from being selected into LI.
SDValue Pred =
- getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
+ getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(), dl);
SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
N->getOperand(0), N->getOperand(4) };
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
@@ -2833,7 +2857,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
- SDValue Ops[] = { getI32Imm(PCC), CondCode,
+ SDValue Ops[] = { getI32Imm(PCC, dl), CondCode,
N->getOperand(4), N->getOperand(0) };
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
}
@@ -2936,7 +2960,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Into: tmp = VSPLTIS[BHW] elt
// VADDU[BHW]M tmp, tmp
// Where: [BHW] = B for size = 1, H for size = 2, W for size = 4
- SDValue EltVal = getI32Imm(Elt >> 1);
+ SDValue EltVal = getI32Imm(Elt >> 1, dl);
SDNode *Tmp = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
SDValue TmpVal = SDValue(Tmp, 0);
return CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal);
@@ -2948,9 +2972,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Into: tmp1 = VSPLTIS[BHW] elt-16
// tmp2 = VSPLTIS[BHW] -16
// VSUBU[BHW]M tmp1, tmp2
- SDValue EltVal = getI32Imm(Elt - 16);
+ SDValue EltVal = getI32Imm(Elt - 16, dl);
SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
- EltVal = getI32Imm(-16);
+ EltVal = getI32Imm(-16, dl);
SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
return CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0),
SDValue(Tmp2, 0));
@@ -2962,9 +2986,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Into: tmp1 = VSPLTIS[BHW] elt+16
// tmp2 = VSPLTIS[BHW] -16
// VADDU[BHW]M tmp1, tmp2
- SDValue EltVal = getI32Imm(Elt + 16);
+ SDValue EltVal = getI32Imm(Elt + 16, dl);
SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
- EltVal = getI32Imm(-16);
+ EltVal = getI32Imm(-16, dl);
SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
return CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0),
SDValue(Tmp2, 0));
@@ -3173,7 +3197,8 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
bool NonTrivialMask = ((int64_t) Mask) != INT64_C(-1);
if (NonTrivialMask && !Alt) {
// Res = Mask & CMPB
- Res = CurDAG->getNode(ISD::AND, dl, VT, Res, CurDAG->getConstant(Mask, VT));
+ Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
+ CurDAG->getConstant(Mask, dl, VT));
} else if (Alt) {
// Res = (CMPB & Mask) | (~CMPB & Alt)
// Which, as suggested here:
@@ -3182,8 +3207,9 @@ SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) {
// Res = Alt ^ ((Alt ^ Mask) & CMPB)
// useful because the (Alt ^ Mask) can be pre-computed.
Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
- CurDAG->getConstant(Mask ^ Alt, VT));
- Res = CurDAG->getNode(ISD::XOR, dl, VT, Res, CurDAG->getConstant(Alt, VT));
+ CurDAG->getConstant(Mask ^ Alt, dl, VT));
+ Res = CurDAG->getNode(ISD::XOR, dl, VT, Res,
+ CurDAG->getConstant(Alt, dl, VT));
}
return Res;
@@ -3215,20 +3241,20 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
EVT VT = N->getValueType(0);
SDValue Cond = N->getOperand(0);
SDValue ConstTrue =
- CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, VT);
- SDValue ConstFalse = CurDAG->getConstant(0, VT);
+ CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, dl, VT);
+ SDValue ConstFalse = CurDAG->getConstant(0, dl, VT);
do {
SDNode *User = *N->use_begin();
if (User->getNumOperands() != 2)
break;
- auto TryFold = [this, N, User](SDValue Val) {
+ auto TryFold = [this, N, User, dl](SDValue Val) {
SDValue UserO0 = User->getOperand(0), UserO1 = User->getOperand(1);
SDValue O0 = UserO0.getNode() == N ? Val : UserO0;
SDValue O1 = UserO1.getNode() == N ? Val : UserO1;
- return CurDAG->FoldConstantArithmetic(User->getOpcode(),
+ return CurDAG->FoldConstantArithmetic(User->getOpcode(), dl,
User->getValueType(0),
O0.getNode(), O1.getNode());
};
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4c0b6a6e871..10b29d15a36 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1349,17 +1349,17 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// Finally, check the least significant entry.
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
- return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef
+ return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
- if (Val < 16)
- return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
+ if (Val < 16) // 0,0,0,4 -> vspltisw(4)
+ return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
if (LeadingOnes) {
if (!UniquedVals[Multiple-1].getNode())
- return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
+ return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
- return DAG.getTargetConstant(Val, MVT::i32);
+ return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
return SDValue();
@@ -1403,7 +1403,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// Finally, if this value fits in a 5 bit sext field, return it
if (SignExtend32<5>(MaskVal) == MaskVal)
- return DAG.getTargetConstant(MaskVal, MVT::i32);
+ return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
return SDValue();
}
@@ -1562,7 +1562,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
short imm = 0;
if (isIntS16Immediate(N.getOperand(1), imm) &&
(!Aligned || (imm & 3) == 0)) {
- Disp = DAG.getTargetConstant(imm, N.getValueType());
+ Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
@@ -1602,7 +1602,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
} else {
Base = N.getOperand(0);
}
- Disp = DAG.getTargetConstant(imm, N.getValueType());
+ Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
return true;
}
}
@@ -1613,7 +1613,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// this as "d, 0"
short Imm;
if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
- Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
+ Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
CN->getValueType(0));
return true;
@@ -1626,16 +1626,17 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
int Addr = (int)CN->getZExtValue();
// Otherwise, break this down into an LIS + disp.
- Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
+ Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
- Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
+ Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
+ MVT::i32);
unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
return true;
}
}
- Disp = DAG.getTargetConstant(0, getPointerTy());
+ Disp = DAG.getTargetConstant(0, dl, getPointerTy());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
@@ -1794,9 +1795,9 @@ static bool GetLabelAccessInfo(const TargetMachine &TM,
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
SelectionDAG &DAG) {
- EVT PtrVT = HiPart.getValueType();
- SDValue Zero = DAG.getConstant(0, PtrVT);
SDLoc DL(HiPart);
+ EVT PtrVT = HiPart.getValueType();
+ SDValue Zero = DAG.getConstant(0, DL, PtrVT);
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
@@ -2080,7 +2081,7 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
unsigned Log2b = Log2_32(VT.getSizeInBits());
SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
- DAG.getConstant(Log2b, MVT::i32));
+ DAG.getConstant(Log2b, dl, MVT::i32));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
}
// Leave comparisons against 0 and -1 alone for now, since they're usually
@@ -2100,7 +2101,7 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
Op.getOperand(1));
- return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
+ return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
}
return SDValue();
}
@@ -2126,11 +2127,11 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
if (VT == MVT::i64) {
// Check if GprIndex is even
SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
- DAG.getConstant(0, MVT::i32), ISD::SETNE);
+ DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
// Align GprIndex to be even if it isn't
GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
GprIndex);
@@ -2138,7 +2139,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// fpr index is 1 byte after gpr
SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
// fpr
SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
@@ -2147,10 +2148,10 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
InChain = FprIndex.getValue(1);
SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
- DAG.getConstant(8, MVT::i32));
+ DAG.getConstant(8, dl, MVT::i32));
SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
- DAG.getConstant(4, MVT::i32));
+ DAG.getConstant(4, dl, MVT::i32));
// areas
SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
@@ -2165,12 +2166,12 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// select overflow_area if index > 8
SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
- DAG.getConstant(8, MVT::i32), ISD::SETLT);
+ DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
// adjustment constant gpr_index * 4/8
SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
VT.isInteger() ? GprIndex : FprIndex,
- DAG.getConstant(VT.isInteger() ? 4 : 8,
+ DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
MVT::i32));
// OurReg = RegSaveArea + RegConstant
@@ -2180,12 +2181,12 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// Floating types are 32 bytes into RegSaveArea
if (VT.isFloatingPoint())
OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
// increase {f,g}pr_index by 1 (or 2 if VT is i64)
SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
VT.isInteger() ? GprIndex : FprIndex,
- DAG.getConstant(VT == MVT::i64 ? 2 : 1,
+ DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
MVT::i32));
InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
@@ -2199,7 +2200,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// increase overflow_area by 4/8 if gpr/fpr > 8
SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
DAG.getConstant(VT.isInteger() ? 4 : 8,
- MVT::i32));
+ dl, MVT::i32));
OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
OverflowAreaPlusN);
@@ -2221,8 +2222,8 @@ SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
// 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte
return DAG.getMemcpy(Op.getOperand(0), Op,
Op.getOperand(1), Op.getOperand(2),
- DAG.getConstant(12, MVT::i32), 8, false, true, false,
- MachinePointerInfo(), MachinePointerInfo());
+ DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
+ false, MachinePointerInfo(), MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
@@ -2251,7 +2252,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
Entry.Node = Trmp; Args.push_back(Entry);
// TrampSize == (isPPC64 ? 48 : 40);
- Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
+ Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
isPPC64 ? MVT::i64 : MVT::i32);
Args.push_back(Entry);
@@ -2312,8 +2313,8 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
// } va_list[1];
- SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
- SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
+ SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
+ SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -2324,13 +2325,13 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
PtrVT);
uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
- SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
+ SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
- SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
+ SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
uint64_t FPROffset = 1;
- SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
+ SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
@@ -2791,7 +2792,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
+ SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
@@ -2810,7 +2811,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
- SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
+ SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
@@ -2986,7 +2987,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
// address of the enclosing doubleword on big-endian systems.
SDValue Arg = FIN;
if (!isLittleEndian) {
- SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, PtrVT);
+ SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
}
InVals.push_back(Arg);
@@ -3032,7 +3033,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Addr = FIN;
if (j) {
- SDValue Off = DAG.getConstant(j, PtrVT);
+ SDValue Off = DAG.getConstant(j, dl, PtrVT);
Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
}
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
@@ -3102,7 +3103,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
if (ObjectVT == MVT::f32) {
if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
}
@@ -3230,7 +3231,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT);
+ SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
@@ -3596,7 +3597,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
MachinePointerInfo(), false, false, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
+ SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
}
}
@@ -3674,7 +3675,7 @@ static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
SignExtend32<26>(Addr) != Addr)
return nullptr; // Top 6 bits have to be sext of immediate.
- return DAG.getConstant((int)C->getZExtValue() >> 2,
+ return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op),
DAG.getTargetLoweringInfo().getPointerTy()).getNode();
}
@@ -3806,7 +3807,7 @@ static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
SDLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
false, false, false, MachinePointerInfo(),
MachinePointerInfo());
@@ -3830,7 +3831,7 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
else
StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, PtrVT));
+ DAG.getConstant(ArgOffset, dl, PtrVT));
}
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(), false, false, 0));
@@ -3861,8 +3862,8 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
isPPC64, isDarwinABI, dl);
// Emit callseq_end just before tailcall node.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -4008,13 +4009,13 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
false, false, LoadsInv, 8);
// Load environment pointer into r11.
- SDValue PtrOff = DAG.getIntPtrConstant(16);
+ SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
MPI.getWithOffset(16), false, false,
LoadsInv, 8);
- SDValue TOCOff = DAG.getIntPtrConstant(8);
+ SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
MPI.getWithOffset(8), false, false,
@@ -4062,7 +4063,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
}
// If this is a tail call add stack pointer delta.
if (isTailCall)
- Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
+ Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
// Add argument registers to the end of the list so that they are known live
// into the call.
@@ -4213,7 +4214,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
- SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset);
+ SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff);
// The address needs to go after the chain input but before the flag (or
@@ -4229,8 +4230,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(BytesCalleePops, true),
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
+ DAG.getIntPtrConstant(BytesCalleePops, dl, true),
InFlag, dl);
if (!Ins.empty())
InFlag = Chain.getValue(1);
@@ -4374,7 +4375,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
dl);
SDValue CallSeqStart = Chain;
@@ -4414,7 +4415,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Memory reserved in the local variable space of the caller's stack frame.
unsigned LocMemOffset = ByValVA.getLocMemOffset();
- SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
// Create a copy of the argument in the local area of the current
@@ -4451,7 +4452,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
unsigned LocMemOffset = VA.getLocMemOffset();
if (!isTailCall) {
- SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
@@ -4664,7 +4665,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
dl);
SDValue CallSeqStart = Chain;
@@ -4708,7 +4709,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
- PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+ PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
};
@@ -4765,7 +4766,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (GPR_idx == NumGPRs && Size < 8) {
SDValue AddPtr = PtrOff;
if (!isLittleEndian) {
- SDValue Const = DAG.getConstant(PtrByteSize - Size,
+ SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
PtrOff.getValueType());
AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
}
@@ -4805,7 +4806,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// parameter save area instead of a new local variable.
SDValue AddPtr = PtrOff;
if (!isLittleEndian) {
- SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType());
+ SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
}
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
@@ -4827,7 +4828,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// For aggregates larger than PtrByteSize, copy the pieces of the
// object that fit into registers from the parameter save area.
for (unsigned j=0; j<Size; j+=PtrByteSize) {
- SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
+ SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
@@ -4922,7 +4923,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
if (!isLittleEndian)
ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, dl, MVT::i32));
// Non-final even elements are skipped; they will be handled
// together with the subsequent argument on the next go-around.
@@ -4939,7 +4940,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// second (rightmost) word of the stack doubleword.
if (Arg.getValueType() == MVT::f32 &&
!isLittleEndian && !Flags.isInConsecutiveRegs()) {
- SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
+ SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
}
@@ -4999,7 +5000,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (GPR_idx == NumGPRs)
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
- DAG.getConstant(i, PtrVT));
+ DAG.getConstant(i, dl, PtrVT));
SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
@@ -5057,7 +5058,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (GPR_idx == NumGPRs)
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
- DAG.getConstant(i, PtrVT));
+ DAG.getConstant(i, dl, PtrVT));
SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
@@ -5104,7 +5105,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
// TOC save area offset.
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
- SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
MachinePointerInfo::getStack(TOCSaveOffset),
@@ -5223,7 +5224,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
dl);
SDValue CallSeqStart = Chain;
@@ -5279,7 +5280,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// register cannot be found for it.
SDValue PtrOff;
- PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
+ PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
@@ -5308,7 +5309,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
ArgOffset += PtrByteSize;
} else {
- SDValue Const = DAG.getConstant(PtrByteSize - Size,
+ SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
PtrOff.getValueType());
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
@@ -5329,7 +5330,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// copy the pieces of the object that fit into registers from the
// parameter save area.
for (unsigned j=0; j<Size; j+=PtrByteSize) {
- SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
+ SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
@@ -5382,7 +5383,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
- SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
+ SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
MachinePointerInfo(),
@@ -5427,7 +5428,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// We could elide this store in the case where the object fits
// entirely in R registers. Maybe later.
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, PtrVT));
+ DAG.getConstant(ArgOffset, dl, PtrVT));
SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Store);
@@ -5443,7 +5444,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
if (GPR_idx == NumGPRs)
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
- DAG.getConstant(i, PtrVT));
+ DAG.getConstant(i, dl, PtrVT));
SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
@@ -5675,7 +5676,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Negate the size.
SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
- DAG.getConstant(0, PtrVT), Size);
+ DAG.getConstant(0, dl, PtrVT), Size);
// Construct a node for the frame pointer save index.
SDValue FPSIdx = getFramePointerFrameIndex(DAG);
// Build a DYNALLOC node.
@@ -5905,7 +5906,7 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
// add in a bias.
if (Op.getValueType() == MVT::i32 && !i32Stack) {
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
- DAG.getConstant(4, FIPtr.getValueType()));
+ DAG.getConstant(4, dl, FIPtr.getValueType()));
MPI = MPI.getWithOffset(4);
}
@@ -6077,7 +6078,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
- SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
FPHalfs, FPHalfs, FPHalfs, FPHalfs);
@@ -6085,7 +6086,8 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
if (Op.getValueType() != MVT::v4f64)
Value = DAG.getNode(ISD::FP_ROUND, dl,
- Op.getValueType(), Value, DAG.getIntPtrConstant(1));
+ Op.getValueType(), Value,
+ DAG.getIntPtrConstant(1, dl));
return Value;
}
@@ -6095,8 +6097,8 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
if (Op.getOperand(0).getValueType() == MVT::i1)
return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
- DAG.getConstantFP(1.0, Op.getValueType()),
- DAG.getConstantFP(0.0, Op.getValueType()));
+ DAG.getConstantFP(1.0, dl, Op.getValueType()),
+ DAG.getConstantFP(0.0, dl, Op.getValueType()));
// If we have direct moves, we can do all the conversion, skip the store/load
// however, without FPCVT we can't do most conversions.
@@ -6140,12 +6142,12 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// bit 12 (value 2048) is set instead, so that the final rounding
// to single-precision gets the correct result.
SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
- SINT, DAG.getConstant(2047, MVT::i64));
+ SINT, DAG.getConstant(2047, dl, MVT::i64));
Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
- Round, DAG.getConstant(2047, MVT::i64));
+ Round, DAG.getConstant(2047, dl, MVT::i64));
Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
Round = DAG.getNode(ISD::AND, dl, MVT::i64,
- Round, DAG.getConstant(-2048, MVT::i64));
+ Round, DAG.getConstant(-2048, dl, MVT::i64));
// However, we cannot use that value unconditionally: if the magnitude
// of the input value is small, the bit-twiddling we did above might
@@ -6156,11 +6158,11 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// bits are all sign-bit copies, and use the rounded value computed
// above otherwise.
SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
- SINT, DAG.getConstant(53, MVT::i32));
+ SINT, DAG.getConstant(53, dl, MVT::i32));
Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
- Cond, DAG.getConstant(1, MVT::i64));
+ Cond, DAG.getConstant(1, dl, MVT::i64));
Cond = DAG.getSetCC(dl, MVT::i32,
- Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT);
+ Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
}
@@ -6233,7 +6235,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
FP = DAG.getNode(ISD::FP_ROUND, dl,
- MVT::f32, FP, DAG.getIntPtrConstant(0));
+ MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
return FP;
}
@@ -6303,7 +6305,8 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// FCFID it and return it.
SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
- FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
+ FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
+ DAG.getIntPtrConstant(0, dl));
return FP;
}
@@ -6347,7 +6350,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
StackSlot, MachinePointerInfo(), false, false,0);
// Load FP Control Word from low 32 bits of stack slot.
- SDValue Four = DAG.getConstant(4, PtrVT);
+ SDValue Four = DAG.getConstant(4, dl, PtrVT);
SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
false, false, false, 0);
@@ -6355,14 +6358,14 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// Transform as necessary
SDValue CWD1 =
DAG.getNode(ISD::AND, dl, MVT::i32,
- CWD, DAG.getConstant(3, MVT::i32));
+ CWD, DAG.getConstant(3, dl, MVT::i32));
SDValue CWD2 =
DAG.getNode(ISD::SRL, dl, MVT::i32,
DAG.getNode(ISD::AND, dl, MVT::i32,
DAG.getNode(ISD::XOR, dl, MVT::i32,
- CWD, DAG.getConstant(3, MVT::i32)),
- DAG.getConstant(3, MVT::i32)),
- DAG.getConstant(1, MVT::i32));
+ CWD, DAG.getConstant(3, dl, MVT::i32)),
+ DAG.getConstant(3, dl, MVT::i32)),
+ DAG.getConstant(1, dl, MVT::i32));
SDValue RetVal =
DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
@@ -6387,12 +6390,12 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
+ DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
+ DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
@@ -6416,12 +6419,12 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
+ DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
+ DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
@@ -6444,15 +6447,15 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT AmtVT = Amt.getValueType();
SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
- DAG.getConstant(BitWidth, AmtVT), Amt);
+ DAG.getConstant(BitWidth, dl, AmtVT), Amt);
SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
- DAG.getConstant(-BitWidth, AmtVT));
+ DAG.getConstant(-BitWidth, dl, AmtVT));
SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
- SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
+ SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
Tmp4, Tmp6, ISD::SETLE);
SDValue OutOps[] = { OutLo, OutHi };
return DAG.getMergeValues(OutOps, dl);
@@ -6481,7 +6484,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
EVT CanonicalVT = VTys[SplatSize-1];
// Build a canonical splat for this value.
- SDValue Elt = DAG.getConstant(Val, MVT::i32);
+ SDValue Elt = DAG.getConstant(Val, dl, MVT::i32);
SmallVector<SDValue, 8> Ops;
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops);
@@ -6495,7 +6498,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
- DAG.getConstant(IID, MVT::i32), Op);
+ DAG.getConstant(IID, dl, MVT::i32), Op);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
@@ -6505,7 +6508,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
- DAG.getConstant(IID, MVT::i32), LHS, RHS);
+ DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}
/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
@@ -6515,7 +6518,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
SDLoc dl, EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
- DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
+ DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}
@@ -6607,7 +6610,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue;
unsigned Offset = 4*i;
- SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
@@ -6641,7 +6644,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SmallVector<SDValue, 2> Ops;
Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, MVT::i32));
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32));
Ops.push_back(FIdx);
SmallVector<EVT, 2> ValueVTs;
@@ -6652,10 +6655,10 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
dl, VTs, Ops, MVT::v4i32, PtrInfo);
LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
- DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, MVT::i32),
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
LoadedVect);
- SDValue FPZeros = DAG.getConstantFP(0.0, MVT::f64);
+ SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64);
FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
FPZeros, FPZeros, FPZeros, FPZeros);
@@ -6685,7 +6688,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (SplatBits == 0) {
// Canonicalize all zero vectors to be v4i32.
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
- SDValue Z = DAG.getConstant(0, MVT::i32);
+ SDValue Z = DAG.getConstant(0, dl, MVT::i32);
Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
}
@@ -6712,10 +6715,10 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// To avoid having these optimizations undone by constant folding,
// we convert to a pseudo that will be expanded later into one of
// the above forms.
- SDValue Elt = DAG.getConstant(SextVal, MVT::i32);
+ SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
EVT VT = (SplatSize == 1 ? MVT::v16i8 :
(SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
- SDValue EltSize = DAG.getConstant(SplatSize, MVT::i32);
+ SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
if (VT == Op.getValueType())
return RetVal;
@@ -6918,7 +6921,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
if (AlignIdx != -1) {
return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
- DAG.getConstant(AlignIdx, MVT::i32));
+ DAG.getConstant(AlignIdx, dl, MVT::i32));
} else if (SVOp->isSplat()) {
int SplatIdx = SVOp->getSplatIndex();
if (SplatIdx >= 4) {
@@ -6930,7 +6933,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// nothing to do.
return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
- DAG.getConstant(SplatIdx, MVT::i32));
+ DAG.getConstant(SplatIdx, dl, MVT::i32));
}
// Lower this into a qvgpci/qvfperm pair.
@@ -6944,7 +6947,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
}
SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
- DAG.getConstant(idx, MVT::i32));
+ DAG.getConstant(idx, dl, MVT::i32));
return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
}
@@ -7059,10 +7062,10 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
for (unsigned j = 0; j != BytesPerElement; ++j)
if (isLittleEndian)
- ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement+j),
- MVT::i32));
+ ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
+ dl, MVT::i32));
else
- ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
+ ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
MVT::i32));
}
@@ -7190,7 +7193,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (!isDot) {
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2),
- DAG.getConstant(CompareOpc, MVT::i32));
+ DAG.getConstant(CompareOpc, dl, MVT::i32));
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
}
@@ -7198,7 +7201,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue Ops[] = {
Op.getOperand(2), // LHS
Op.getOperand(3), // RHS
- DAG.getConstant(CompareOpc, MVT::i32)
+ DAG.getConstant(CompareOpc, dl, MVT::i32)
};
EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
@@ -7230,15 +7233,15 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// Shift the bit into the low position.
Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
- DAG.getConstant(8-(3-BitNo), MVT::i32));
+ DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
// Isolate the bit.
Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
// If we are supposed to, toggle the bit.
if (InvertBit)
Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
return Flags;
}
@@ -7304,7 +7307,7 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
// understand how to form the extending load.
- SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
FPHalfs, FPHalfs, FPHalfs, FPHalfs);
@@ -7312,7 +7315,7 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// Now convert to an integer and store.
Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
- DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, MVT::i32),
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
Value);
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
@@ -7324,7 +7327,7 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue StoreChain = DAG.getEntryNode();
SmallVector<SDValue, 2> Ops;
Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, MVT::i32));
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
Ops.push_back(Value);
Ops.push_back(FIdx);
@@ -7337,7 +7340,7 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// Extract the value requested.
unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
@@ -7401,12 +7404,13 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
LoadChains.push_back(Load.getValue(1));
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
- DAG.getConstant(Stride, BasePtr.getValueType()));
+ DAG.getConstant(Stride, dl,
+ BasePtr.getValueType()));
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
- Op.getValueType(), Vals);
+ Op.getValueType(), Vals);
if (LN->isIndexed()) {
SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
@@ -7425,7 +7429,7 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
SmallVector<SDValue, 4> VectElmts, VectElmtChains;
for (unsigned i = 0; i < 4; ++i) {
- SDValue Idx = DAG.getConstant(i, BasePtr.getValueType());
+ SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD,
@@ -7471,7 +7475,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
for (unsigned Idx = 0; Idx < 4; ++Idx) {
SDValue Ex =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
- DAG.getConstant(Idx, getVectorIdxTy()));
+ DAG.getConstant(Idx, dl, getVectorIdxTy()));
SDValue Store;
if (ScalarVT != ScalarMemVT)
Store =
@@ -7494,7 +7498,8 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
}
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
- DAG.getConstant(Stride, BasePtr.getValueType()));
+ DAG.getConstant(Stride, dl,
+ BasePtr.getValueType()));
Stores.push_back(Store);
}
@@ -7518,7 +7523,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
// FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
// understand how to form the extending load.
- SDValue FPHalfs = DAG.getConstantFP(0.5, MVT::f64);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
FPHalfs, FPHalfs, FPHalfs, FPHalfs);
@@ -7526,7 +7531,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
// Now convert to an integer and store.
Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
- DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, MVT::i32),
+ DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
Value);
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
@@ -7537,7 +7542,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
SmallVector<SDValue, 2> Ops;
Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, MVT::i32));
+ Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
Ops.push_back(Value);
Ops.push_back(FIdx);
@@ -7552,7 +7557,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
SmallVector<SDValue, 4> Loads, LoadChains;
for (unsigned i = 0; i < 4; ++i) {
unsigned Offset = 4*i;
- SDValue Idx = DAG.getConstant(Offset, FIdx.getValueType());
+ SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
@@ -7565,7 +7570,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
SmallVector<SDValue, 4> Stores;
for (unsigned i = 0; i < 4; ++i) {
- SDValue Idx = DAG.getConstant(i, BasePtr.getValueType());
+ SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
Stores.push_back(DAG.getTruncStore(StoreChain, dl, Loads[i], Idx,
@@ -7765,10 +7770,10 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
assert(N->getOperand(0).getValueType() == MVT::ppcf128);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
MVT::f64, N->getOperand(0),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
MVT::f64, N->getOperand(0),
- DAG.getIntPtrConstant(1));
+ DAG.getIntPtrConstant(1, dl));
// Add the two halves of the long double in round-to-zero mode.
SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
@@ -9666,13 +9671,13 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
DAG.getConstant(APInt::getLowBitsSet(
N->getValueSizeInBits(0), PromBits),
- N->getValueType(0)));
+ dl, N->getValueType(0)));
assert(N->getOpcode() == ISD::SIGN_EXTEND &&
"Invalid extension type");
EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0));
SDValue ShiftCst =
- DAG.getConstant(N->getValueSizeInBits(0)-PromBits, ShiftAmountTy);
+ DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
return DAG.getNode(ISD::SRA, dl, N->getValueType(0),
DAG.getNode(ISD::SHL, dl, N->getValueType(0),
N->getOperand(0), ShiftCst), ShiftCst);
@@ -9738,7 +9743,7 @@ SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
FP = DAG.getNode(ISD::FP_ROUND, dl,
- MVT::f32, FP, DAG.getIntPtrConstant(0));
+ MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
DCI.AddToWorklist(FP.getNode());
}
@@ -10033,7 +10038,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
2*MemVT.getStoreSize()-1);
// Create the new base load.
- SDValue LDXIntID = DAG.getTargetConstant(IntrLD, getPointerTy());
+ SDValue LDXIntID = DAG.getTargetConstant(IntrLD, dl, getPointerTy());
SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
SDValue BaseLoad =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
@@ -10057,7 +10062,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
if (!findConsecutiveLoad(LD, DAG))
--IncValue;
- SDValue Increment = DAG.getConstant(IncValue, getPointerTy());
+ SDValue Increment = DAG.getConstant(IncValue, dl, getPointerTy());
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
MachineMemOperand *ExtraMMO =
@@ -10089,7 +10094,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
Perm = Subtarget.hasAltivec() ?
DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
- DAG.getTargetConstant(1, MVT::i64));
+ DAG.getTargetConstant(1, dl, MVT::i64));
// second argument is 1 because this rounding
// is always exact.
@@ -10358,7 +10363,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Ops[] = {
LHS.getOperand(2), // LHS of compare
LHS.getOperand(3), // RHS of compare
- DAG.getConstant(CompareOpc, MVT::i32)
+ DAG.getConstant(CompareOpc, dl, MVT::i32)
};
EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
@@ -10382,7 +10387,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
- DAG.getConstant(CompOpc, MVT::i32),
+ DAG.getConstant(CompOpc, dl, MVT::i32),
DAG.getRegister(PPC::CR6, MVT::i32),
N->getOperand(4), CompNode.getValue(1));
}
@@ -10410,14 +10415,14 @@ PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
bool IsNegPow2 = (-Divisor).isPowerOf2();
unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
- SDValue ShiftAmt = DAG.getConstant(Lg2, VT);
+ SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
if (Created)
Created->push_back(Op.getNode());
if (IsNegPow2) {
- Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), Op);
+ Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
if (Created)
Created->push_back(Op.getNode());
}
@@ -10680,6 +10685,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'P': {
ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
if (!CST) return; // Must be an immediate to match.
+ SDLoc dl(Op);
int64_t Value = CST->getSExtValue();
EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
// numbers are printed as such.
@@ -10687,35 +10693,35 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
default: llvm_unreachable("Unknown constraint letter!");
case 'I': // "I" is a signed 16-bit constant.
if (isInt<16>(Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
if (isShiftedUInt<16, 16>(Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
if (isShiftedInt<16, 16>(Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
if (isUInt<16>(Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'M': // "M" is a constant that is greater than 31.
if (Value > 31)
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'N': // "N" is a positive constant that is an exact power of two.
if (Value > 0 && isPowerOf2_64(Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'O': // "O" is the constant zero.
if (Value == 0)
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
if (isInt<16>(-Value))
- Result = DAG.getTargetConstant(Value, TCVT);
+ Result = DAG.getTargetConstant(Value, dl, TCVT);
break;
}
break;
@@ -10790,7 +10796,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(),
+ DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
isPPC64 ? MVT::i64 : MVT::i32);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index d1d67cbba44..d62833037db 100644
--- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -56,22 +56,23 @@ def tlscall : Operand<i64> {
def SHL64 : SDNodeXForm<imm, [{
// Transformation function: 63 - imm
- return getI32Imm(63 - N->getZExtValue());
+ return getI32Imm(63 - N->getZExtValue(), SDLoc(N));
}]>;
def SRL64 : SDNodeXForm<imm, [{
// Transformation function: 64 - imm
- return N->getZExtValue() ? getI32Imm(64 - N->getZExtValue()) : getI32Imm(0);
+ return N->getZExtValue() ? getI32Imm(64 - N->getZExtValue(), SDLoc(N))
+ : getI32Imm(0, SDLoc(N));
}]>;
def HI32_48 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
- return getI32Imm((unsigned short)(N->getZExtValue() >> 32));
+ return getI32Imm((unsigned short)(N->getZExtValue() >> 32, SDLoc(N)));
}]>;
def HI48_64 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
- return getI32Imm((unsigned short)(N->getZExtValue() >> 48));
+ return getI32Imm((unsigned short)(N->getZExtValue() >> 48, SDLoc(N)));
}]>;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 5c84a541b61..5441859f148 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -144,7 +144,7 @@ def vmrghw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, 0, *CurDAG));
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 0, *CurDAG), SDLoc(N));
}]>;
def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
@@ -155,7 +155,7 @@ def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, 1, *CurDAG));
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 1, *CurDAG), SDLoc(N));
}]>;
def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
@@ -166,7 +166,7 @@ def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
/// VSLDOI_swapped* - These fragments are provided for little-endian, where
/// the inputs must be swapped for correct semantics.
def VSLDOI_swapped_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, 2, *CurDAG));
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, 2, *CurDAG), SDLoc(N));
}]>;
def vsldoi_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
@@ -176,21 +176,21 @@ def vsldoi_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 1, *CurDAG));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 1, *CurDAG), SDLoc(N));
}]>;
def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 2, *CurDAG));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 2, *CurDAG), SDLoc(N));
}]>;
def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
- return getI32Imm(PPC::getVSPLTImmediate(N, 4, *CurDAG));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 4, *CurDAG), SDLoc(N));
}]>;
def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
diff --git a/llvm/lib/Target/PowerPC/PPCInstrHTM.td b/llvm/lib/Target/PowerPC/PPCInstrHTM.td
index 20e6a628632..6c4e2129087 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrHTM.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrHTM.td
@@ -17,7 +17,7 @@
def HasHTM : Predicate<"PPCSubTarget->hasHTM()">;
def HTM_get_imm : SDNodeXForm<imm, [{
- return getI32Imm (N->getZExtValue());
+ return getI32Imm (N->getZExtValue(), SDLoc(N));
}]>;
let hasSideEffects = 1, usesCustomInserter = 1 in {
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 566e4e7b46b..15459f2780f 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -228,41 +228,42 @@ def PPCdynalloc : SDNode<"PPCISD::DYNALLOC", SDTDynOp, [SDNPHasChain]>;
def SHL32 : SDNodeXForm<imm, [{
// Transformation function: 31 - imm
- return getI32Imm(31 - N->getZExtValue());
+ return getI32Imm(31 - N->getZExtValue(), SDLoc(N));
}]>;
def SRL32 : SDNodeXForm<imm, [{
// Transformation function: 32 - imm
- return N->getZExtValue() ? getI32Imm(32 - N->getZExtValue()) : getI32Imm(0);
+ return N->getZExtValue() ? getI32Imm(32 - N->getZExtValue(), SDLoc(N))
+ : getI32Imm(0, SDLoc(N));
}]>;
def LO16 : SDNodeXForm<imm, [{
// Transformation function: get the low 16 bits.
- return getI32Imm((unsigned short)N->getZExtValue());
+ return getI32Imm((unsigned short)N->getZExtValue(), SDLoc(N));
}]>;
def HI16 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
- return getI32Imm((unsigned)N->getZExtValue() >> 16);
+ return getI32Imm((unsigned)N->getZExtValue() >> 16, SDLoc(N));
}]>;
def HA16 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
signed int Val = N->getZExtValue();
- return getI32Imm((Val - (signed short)Val) >> 16);
+ return getI32Imm((Val - (signed short)Val) >> 16, SDLoc(N));
}]>;
def MB : SDNodeXForm<imm, [{
// Transformation function: get the start bit of a mask
unsigned mb = 0, me;
(void)isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
- return getI32Imm(mb);
+ return getI32Imm(mb, SDLoc(N));
}]>;
def ME : SDNodeXForm<imm, [{
// Transformation function: get the end bit of a mask
unsigned mb, me = 0;
(void)isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
- return getI32Imm(me);
+ return getI32Imm(me, SDLoc(N));
}]>;
def maskimm32 : PatLeaf<(imm), [{
// maskImm predicate - True if immediate is a run of ones.
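(Sketch under assumptions, not taken from these hunks: the .td transforms above pass the node's location to a getI32Imm helper in the PowerPC DAG selector, which now requires it explicitly. The exact member lives in PPCISelDAGToDAG.cpp, outside this excerpt; the shape below is assumed.)

// Assumed helper shape in the PPC instruction selector: forwards the SDLoc
// supplied by each SDNodeXForm to getTargetConstant.
inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
  return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}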
diff --git a/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index def252a47b2..8898cf28a77 100644
--- a/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -50,7 +50,6 @@ public:
private:
bool isInlineImmediate(SDNode *N) const;
- inline SDValue getSmallIPtrImm(unsigned Imm);
bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
const R600InstrInfo *TII);
bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
@@ -189,27 +188,23 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
}
}
-SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
-}
-
bool AMDGPUDAGToDAGISel::SelectADDRParam(
SDValue Addr, SDValue& R1, SDValue& R2) {
if (Addr.getOpcode() == ISD::FrameIndex) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
} else {
R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
}
} else if (Addr.getOpcode() == ISD::ADD) {
R1 = Addr.getOperand(0);
R2 = Addr.getOperand(1);
} else {
R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
}
return true;
}
@@ -232,17 +227,17 @@ bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
if (Addr.getOpcode() == ISD::FrameIndex) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
} else {
R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
}
} else if (Addr.getOpcode() == ISD::ADD) {
R1 = Addr.getOperand(0);
R2 = Addr.getOperand(1);
} else {
R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
}
return true;
}
@@ -326,7 +321,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
}
}
- SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+ SDLoc DL(N);
+ SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
if (NumVectorElts == 1) {
return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
@@ -340,7 +336,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
// 1 = Vector Register Class
SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
- RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+ RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
bool IsRegSeq = true;
unsigned NOps = N->getNumOperands();
for (unsigned i = 0; i < NOps; i++) {
@@ -351,7 +347,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
}
RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
RegSeqArgs[1 + (2 * i) + 1] =
- CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+ CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
+ MVT::i32);
}
if (NOps != NumVectorElts) {
@@ -359,11 +356,11 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- SDLoc(N), EltVT);
+ DL, EltVT);
for (unsigned i = NOps; i < NumVectorElts; ++i) {
RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
RegSeqArgs[1 + (2 * i) + 1] =
- CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+ CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL, MVT::i32);
}
}
@@ -377,21 +374,22 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
break;
}
+ SDLoc DL(N);
if (N->getValueType(0) == MVT::i128) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
+ RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
} else if (N->getValueType(0) == MVT::i64) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+ RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
} else {
llvm_unreachable("Unhandled value type for BUILD_PAIR");
}
const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
N->getOperand(1), SubReg1 };
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- SDLoc(N), N->getValueType(0), Ops);
+ DL, N->getValueType(0), Ops);
}
case ISD::Constant:
@@ -408,17 +406,19 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
Imm = C->getZExtValue();
}
- SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
- CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
- SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
- CurDAG->getConstant(Imm >> 32, MVT::i32));
+ SDLoc DL(N);
+ SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
+ CurDAG->getConstant(Imm & 0xFFFFFFFF, DL,
+ MVT::i32));
+ SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
+ CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
const SDValue Ops[] = {
- CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
- SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
- SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
+ SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
+ SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
};
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
N->getValueType(0), Ops);
}
@@ -474,15 +474,17 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
break;
SDValue Addr, Offset;
+ SDLoc DL(N);
SelectADDRIndirect(N->getOperand(1), Addr, Offset);
const SDValue Ops[] = {
Addr,
Offset,
- CurDAG->getTargetConstant(0, MVT::i32),
+ CurDAG->getTargetConstant(0, DL, MVT::i32),
N->getOperand(0),
};
- return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
- CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, DL,
+ CurDAG->getVTList(MVT::i32, MVT::i64,
+ MVT::Other),
Ops);
}
case AMDGPUISD::REGISTER_STORE: {
@@ -490,14 +492,15 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
break;
SDValue Addr, Offset;
SelectADDRIndirect(N->getOperand(2), Addr, Offset);
+ SDLoc DL(N);
const SDValue Ops[] = {
N->getOperand(1),
Addr,
Offset,
- CurDAG->getTargetConstant(0, MVT::i32),
+ CurDAG->getTargetConstant(0, DL, MVT::i32),
N->getOperand(0),
};
- return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, DL,
CurDAG->getVTList(MVT::Other),
Ops);
}
@@ -682,7 +685,8 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
SDValue& IntPtr) {
if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
- IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
+ IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
+ true);
return true;
}
return false;
@@ -692,7 +696,7 @@ bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
SDValue& BaseReg, SDValue &Offset) {
if (!isa<ConstantSDNode>(Addr)) {
BaseReg = Addr;
- Offset = CurDAG->getIntPtrConstant(0, true);
+ Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
return true;
}
return false;
@@ -707,7 +711,8 @@ bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
&& isInt<16>(IMMOffset->getZExtValue())) {
Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
+ MVT::i32);
return true;
// If the pointer address is constant, we can move it to the offset field.
} else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
@@ -715,30 +720,32 @@ bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
SDLoc(CurDAG->getEntryNode()),
AMDGPU::ZERO, MVT::i32);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
+ MVT::i32);
return true;
}
// Default case, no offset
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
SDValue &Offset) {
ConstantSDNode *C;
+ SDLoc DL(Addr);
if ((C = dyn_cast<ConstantSDNode>(Addr))) {
Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
} else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
(C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
} else {
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
}
return true;
@@ -751,8 +758,8 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
bool IsAdd = (N->getOpcode() == ISD::ADD);
- SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
- SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+ SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
+ SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
DL, MVT::i32, LHS, Sub0);
@@ -778,7 +785,7 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);
SDValue Args[5] = {
- CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
SDValue(AddLo,0),
Sub0,
SDValue(AddHi,0),
@@ -835,15 +842,17 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
}
}
+ SDLoc DL(Addr);
+
// If we have a constant address, prefer to put the constant into the
// offset. This can save moves to load the constant address since multiple
// operations can share the zero base address register, and enables merging
// into read2 / write2 instructions.
if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
if (isUInt<16>(CAddr->getZExtValue())) {
- SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
- SDLoc(Addr), MVT::i32, Zero);
+ DL, MVT::i32, Zero);
Base = SDValue(MovZero, 0);
Offset = Addr;
return true;
@@ -852,13 +861,15 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
// default case
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i16);
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
return true;
}
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
SDValue &Offset0,
SDValue &Offset1) const {
+ SDLoc DL(Addr);
+
if (CurDAG->isBaseWithConstantOffset(Addr)) {
SDValue N0 = Addr.getOperand(0);
SDValue N1 = Addr.getOperand(1);
@@ -868,8 +879,8 @@ bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
// (add n0, c0)
if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
Base = N0;
- Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
- Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
+ Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
+ Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
return true;
}
}
@@ -880,21 +891,21 @@ bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
assert(4 * DWordOffset0 == CAddr->getZExtValue());
if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
- SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
MachineSDNode *MovZero
= CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
- SDLoc(Addr), MVT::i32, Zero);
+ DL, MVT::i32, Zero);
Base = SDValue(MovZero, 0);
- Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
- Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
+ Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
+ Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
return true;
}
}
// default case
Base = Addr;
- Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
- Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
+ Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
+ Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
return true;
}
@@ -910,14 +921,14 @@ void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
SDValue &TFE) const {
SDLoc DL(Addr);
- GLC = CurDAG->getTargetConstant(0, MVT::i1);
- SLC = CurDAG->getTargetConstant(0, MVT::i1);
- TFE = CurDAG->getTargetConstant(0, MVT::i1);
+ GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
- Idxen = CurDAG->getTargetConstant(0, MVT::i1);
- Offen = CurDAG->getTargetConstant(0, MVT::i1);
- Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
- SOffset = CurDAG->getTargetConstant(0, MVT::i32);
+ Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
if (CurDAG->isBaseWithConstantOffset(Addr)) {
SDValue N0 = Addr.getOperand(0);
@@ -928,24 +939,25 @@ void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
// (add (add N2, N3), C1) -> addr64
SDValue N2 = N0.getOperand(0);
SDValue N3 = N0.getOperand(1);
- Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
+ Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
Ptr = N2;
VAddr = N3;
} else {
// (add N0, C1) -> offset
- VAddr = CurDAG->getTargetConstant(0, MVT::i32);
+ VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
Ptr = N0;
}
if (isLegalMUBUFImmOffset(C1)) {
- Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
return;
} else if (isUInt<32>(C1->getZExtValue())) {
// Illegal offset, store it in soffset.
- Offset = CurDAG->getTargetConstant(0, MVT::i16);
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
- CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i32)), 0);
+ CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
+ 0);
return;
}
}
@@ -954,17 +966,17 @@ void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
// (add N0, N1) -> addr64
SDValue N0 = Addr.getOperand(0);
SDValue N1 = Addr.getOperand(1);
- Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
+ Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
Ptr = N0;
VAddr = N1;
- Offset = CurDAG->getTargetConstant(0, MVT::i16);
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
return;
}
// default case -> offset
- VAddr = CurDAG->getTargetConstant(0, MVT::i32);
+ VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
Ptr = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i16);
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
}
@@ -995,7 +1007,7 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
SDValue &VAddr, SDValue &SOffset,
SDValue &Offset,
SDValue &SLC) const {
- SLC = CurDAG->getTargetConstant(0, MVT::i1);
+ SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
SDValue GLC, TFE;
return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
@@ -1026,11 +1038,11 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);
const SDValue RsrcOps[] = {
- CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+ CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
ScratchRsrcDword0,
- CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+ CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
ScratchRsrcDword1,
- CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
+ CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
};
SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
MVT::v2i32, RsrcOps), 0);
@@ -1045,14 +1057,14 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
if (isLegalMUBUFImmOffset(C1)) {
VAddr = Addr.getOperand(0);
- ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+ ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
return true;
}
}
// (node)
VAddr = Addr;
- ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
+ ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
return true;
}
@@ -1125,7 +1137,7 @@ SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
DL,
DestVT,
Src,
- CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32));
+ CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32));
}
@@ -1134,19 +1146,20 @@ SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
// FIXME: This is probably wrong, we should never be defining
// a register class with both VGPRs and SGPRs
- SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, MVT::i32);
+ SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, DL,
+ MVT::i32);
const SDValue Ops[] = {
RC,
Src,
- CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
- SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
- CurDAG->getConstant(0, MVT::i32)), 0),
- CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
+ CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
+ SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
+ CurDAG->getConstant(0, DL, MVT::i32)), 0),
+ CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
};
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- SDLoc(N), N->getValueType(0), Ops);
+ DL, N->getValueType(0), Ops);
}
assert(SrcSize == 64 && DestSize == 64);
@@ -1159,7 +1172,7 @@ SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
// the format expected by the S_BFE_I32 / S_BFE_U32. In the second
// source, bits [5:0] contain the offset and bits [22:16] the width.
uint32_t PackedVal = Offset | (Width << 16);
- SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, MVT::i32);
+ SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}
@@ -1259,7 +1272,7 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
Src = Src.getOperand(0);
}
- SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);
+ SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
return true;
}
@@ -1267,9 +1280,10 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
SDValue &SrcMods, SDValue &Clamp,
SDValue &Omod) const {
+ SDLoc DL(In);
// FIXME: Handle Clamp and Omod
- Clamp = CurDAG->getTargetConstant(0, MVT::i32);
- Omod = CurDAG->getTargetConstant(0, MVT::i32);
+ Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);
+ Omod = CurDAG->getTargetConstant(0, DL, MVT::i32);
return SelectVOP3Mods(In, Src, SrcMods);
}
@@ -1278,7 +1292,7 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
SDValue &SrcMods,
SDValue &Omod) const {
// FIXME: Handle Omod
- Omod = CurDAG->getTargetConstant(0, MVT::i32);
+ Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
return SelectVOP3Mods(In, Src, SrcMods);
}
@@ -1287,7 +1301,7 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
SDValue &SrcMods,
SDValue &Clamp,
SDValue &Omod) const {
- Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32);
+ Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
return SelectVOP3Mods(In, Src, SrcMods);
}
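(A minimal sketch, assuming the surrounding selector style: the complex-pattern selectors above fill in default operands with the location of the value being matched. SelectExampleMods below is hypothetical and only mirrors the CurDAG->getTargetConstant(Imm, DL, VT) form used throughout these hunks.)

// Hypothetical selector: default modifier operands now carry the SDLoc of the
// input value rather than being created without a location.
bool AMDGPUDAGToDAGISel::SelectExampleMods(SDValue In, SDValue &Src,
                                           SDValue &Clamp) const {
  SDLoc DL(In);                                        // location of the input
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);  // default: no clamp
  Src = In;
  return true;
}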
diff --git a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
index c99f222d783..abbcee523a8 100644
--- a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -680,7 +680,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
EVT VT = EVT::getEVT(InitTy);
PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
- return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
+ return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
TD->getPrefTypeAlignment(InitTy));
}
@@ -688,7 +688,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
EVT VT = EVT::getEVT(CFP->getType());
PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
- return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
+ return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
TD->getPrefTypeAlignment(CFP->getType()));
}
@@ -700,7 +700,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
SmallVector<SDValue, 8> Chains;
for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
- SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
+ SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
Constant *Elt = Init->getAggregateElement(I);
@@ -724,7 +724,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
SmallVector<SDValue, 8> Chains;
for (unsigned i = 0; i < NumElements; ++i) {
- SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
+ SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
Constant *Elt = Init->getAggregateElement(i);
@@ -786,7 +786,8 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
Offset = MFI->LocalMemoryObjects[GV];
}
- return DAG.getConstant(Offset, getPointerTy(AMDGPUAS::LOCAL_ADDRESS));
+ return DAG.getConstant(Offset, SDLoc(Op),
+ getPointerTy(AMDGPUAS::LOCAL_ADDRESS));
}
case AMDGPUAS::CONSTANT_ADDRESS: {
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
@@ -868,7 +869,7 @@ SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
unsigned FrameIndex = FIN->getIndex();
unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
- return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
+ return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
Op.getValueType());
}
@@ -943,9 +944,9 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
- DAG.getConstantFP(Max, VT));
+ DAG.getConstantFP(Max, DL, VT));
return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
- DAG.getConstantFP(Min, VT));
+ DAG.getConstantFP(Min, DL, VT));
} else {
return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
}
@@ -1040,8 +1041,8 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
- SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
- Op.getOperand(1));
+ SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
+ Op.getOperand(1));
return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}
@@ -1053,7 +1054,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
- DAG.getConstantFP(1.0f, MVT::f32),
+ DAG.getConstantFP(1.0f, DL, MVT::f32),
Op.getOperand(1));
SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
Op.getOperand(3));
@@ -1201,7 +1202,7 @@ SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
for (unsigned i = 0; i < NumElts; ++i) {
SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
- DAG.getConstant(i * MemEltSize, PtrVT));
+ DAG.getConstant(i * MemEltSize, SL, PtrVT));
SDValue NewLoad
= DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
@@ -1252,7 +1253,8 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
Load->isInvariant(), Load->getAlignment());
SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
- DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));
+ DAG.getConstant(LoMemVT.getStoreSize(), SL,
+ PtrVT));
SDValue HiLoad
= DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
@@ -1292,18 +1294,18 @@ SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
unsigned MemEltBits = MemEltVT.getSizeInBits();
unsigned MemNumElements = MemVT.getVectorNumElements();
unsigned PackedSize = MemVT.getStoreSizeInBits();
- SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);
+ SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);
assert(Value.getValueType().getScalarSizeInBits() >= 32);
SDValue PackedValue;
for (unsigned i = 0; i < MemNumElements; ++i) {
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
- DAG.getConstant(i, MVT::i32));
+ DAG.getConstant(i, DL, MVT::i32));
Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg
- SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
+ SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);
if (i == 0) {
@@ -1345,9 +1347,9 @@ SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
for (unsigned i = 0, e = NumElts; i != e; ++i) {
SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
Store->getValue(),
- DAG.getConstant(i, MVT::i32));
+ DAG.getConstant(i, SL, MVT::i32));
- SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), PtrVT);
+ SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
SDValue NewStore =
DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
@@ -1386,7 +1388,8 @@ SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
EVT PtrVT = BasePtr.getValueType();
SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
- DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));
+ DAG.getConstant(LoMemVT.getStoreSize(), SL,
+ PtrVT));
MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
SDValue LoStore
@@ -1444,16 +1447,16 @@ SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
Load->getChain(), Ptr,
- DAG.getTargetConstant(0, MVT::i32),
+ DAG.getTargetConstant(0, DL, MVT::i32),
Op.getOperand(2));
SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
Load->getBasePtr(),
- DAG.getConstant(0x3, MVT::i32));
+ DAG.getConstant(0x3, DL, MVT::i32));
SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
- DAG.getConstant(3, MVT::i32));
+ DAG.getConstant(3, DL, MVT::i32));
Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
@@ -1503,15 +1506,16 @@ SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
}
SDValue BasePtr = Store->getBasePtr();
SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
- Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));
+ Chain, Ptr,
+ DAG.getTargetConstant(0, DL, MVT::i32));
SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
- DAG.getConstant(0x3, MVT::i32));
+ DAG.getConstant(0x3, DL, MVT::i32));
SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
- DAG.getConstant(3, MVT::i32));
+ DAG.getConstant(3, DL, MVT::i32));
SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
Store->getValue());
@@ -1521,15 +1525,17 @@ SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
MaskedValue, ShiftAmt);
- SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
+ SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
+ DAG.getConstant(Mask, DL, MVT::i32),
ShiftAmt);
DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
- DAG.getConstant(0xffffffff, MVT::i32));
+ DAG.getConstant(0xffffffff, DL, MVT::i32));
Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
- Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
+ Chain, Value, Ptr,
+ DAG.getTargetConstant(0, DL, MVT::i32));
}
return SDValue();
}
@@ -1556,17 +1562,18 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
unsigned BitSize = VT.getScalarType().getSizeInBits();
- SDValue jq = DAG.getConstant(1, IntVT);
+ SDValue jq = DAG.getConstant(1, DL, IntVT);
if (sign) {
// char|short jq = ia ^ ib;
jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
// jq = jq >> (bitsize - 2)
- jq = DAG.getNode(ISD::SRA, DL, VT, jq, DAG.getConstant(BitSize - 2, VT));
+ jq = DAG.getNode(ISD::SRA, DL, VT, jq,
+ DAG.getConstant(BitSize - 2, DL, VT));
// jq = jq | 0x1
- jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, VT));
+ jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
// jq = (int)jq
jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
@@ -1615,7 +1622,7 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
// jq = (cv ? jq : 0);
- jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, VT));
+ jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
// dst = trunc/extend to legal type
iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);
@@ -1643,8 +1650,8 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
EVT VT = Op.getValueType();
EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
- SDValue one = DAG.getConstant(1, HalfVT);
- SDValue zero = DAG.getConstant(0, HalfVT);
+ SDValue one = DAG.getConstant(1, DL, HalfVT);
+ SDValue zero = DAG.getConstant(0, DL, HalfVT);
//HiLo split
SDValue LHS = Op.getOperand(0);
@@ -1683,18 +1690,18 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
for (unsigned i = 0; i < halfBitWidth; ++i) {
const unsigned bitPos = halfBitWidth - i - 1;
- SDValue POS = DAG.getConstant(bitPos, HalfVT);
+ SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
// Get value of high bit
SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
// Shift
- REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, VT));
+ REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
// Add LHS high bit
REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
- SDValue BIT = DAG.getConstant(1 << bitPos, HalfVT);
+ SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
@@ -1744,11 +1751,11 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
// NEG_RCP_LO = -RCP_LO
- SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
+ SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
RCP_LO);
// ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
- SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
+ SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
NEG_RCP_LO, RCP_LO,
ISD::SETEQ);
// Calculate the rounding error from the URECIP instruction
@@ -1762,7 +1769,7 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
// Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
- SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
+ SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
RCP_A_E, RCP_S_E,
ISD::SETEQ);
// Quotient = mulhu(Tmp0, Num)
@@ -1776,14 +1783,14 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
// Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
- DAG.getConstant(-1, VT),
- DAG.getConstant(0, VT),
+ DAG.getConstant(-1, DL, VT),
+ DAG.getConstant(0, DL, VT),
ISD::SETUGE);
// Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
Num_S_Remainder,
- DAG.getConstant(-1, VT),
- DAG.getConstant(0, VT),
+ DAG.getConstant(-1, DL, VT),
+ DAG.getConstant(0, DL, VT),
ISD::SETUGE);
// Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
@@ -1793,18 +1800,18 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
// Quotient_A_One = Quotient + 1
SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, DL, VT));
// Quotient_S_One = Quotient - 1
SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, DL, VT));
// Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
- SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
+ SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
Quotient, Quotient_A_One, ISD::SETEQ);
// Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
- Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
+ Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
Quotient_S_One, Div, ISD::SETEQ);
// Calculate Rem result:
@@ -1816,11 +1823,11 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
// Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
- SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
+ SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
Remainder, Remainder_S_Den, ISD::SETEQ);
// Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
- Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
+ Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
Remainder_A_Den, Rem, ISD::SETEQ);
SDValue Ops[2] = {
Div,
@@ -1837,8 +1844,8 @@ SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- SDValue Zero = DAG.getConstant(0, VT);
- SDValue NegOne = DAG.getConstant(-1, VT);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue NegOne = DAG.getConstant(-1, DL, VT);
if (VT == MVT::i32 &&
DAG.ComputeNumSignBits(LHS) > 8 &&
@@ -1913,8 +1920,8 @@ SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
- const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
- const SDValue One = DAG.getConstantFP(1.0, MVT::f64);
+ const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
+ const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
@@ -1932,10 +1939,10 @@ static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
Hi,
- DAG.getConstant(FractBits - 32, MVT::i32),
- DAG.getConstant(ExpBits, MVT::i32));
+ DAG.getConstant(FractBits - 32, SL, MVT::i32),
+ DAG.getConstant(ExpBits, SL, MVT::i32));
SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
- DAG.getConstant(1023, MVT::i32));
+ DAG.getConstant(1023, SL, MVT::i32));
return Exp;
}
@@ -1946,8 +1953,8 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::f64);
- const SDValue Zero = DAG.getConstant(0, MVT::i32);
- const SDValue One = DAG.getConstant(1, MVT::i32);
+ const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ const SDValue One = DAG.getConstant(1, SL, MVT::i32);
SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
@@ -1960,7 +1967,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
const unsigned FractBits = 52;
// Extract the sign bit.
- const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
+ const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
// Extend back to 64-bits.
@@ -1970,7 +1977,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
const SDValue FractMask
- = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);
+ = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
@@ -1978,7 +1985,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
- const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);
+ const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
@@ -1996,7 +2003,7 @@ SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::f64);
APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
- SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
+ SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
@@ -2005,7 +2012,7 @@ SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
- SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);
+ SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
@@ -2031,9 +2038,9 @@ SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const
SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);
- const SDValue Zero = DAG.getConstantFP(0.0, MVT::f32);
- const SDValue One = DAG.getConstantFP(1.0, MVT::f32);
- const SDValue Half = DAG.getConstantFP(0.5, MVT::f32);
+ const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
+ const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
+ const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);
SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);
@@ -2052,10 +2059,10 @@ SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const
SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
- const SDValue Zero = DAG.getConstant(0, MVT::i32);
- const SDValue One = DAG.getConstant(1, MVT::i32);
- const SDValue NegOne = DAG.getConstant(-1, MVT::i32);
- const SDValue FiftyOne = DAG.getConstant(51, MVT::i32);
+ const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ const SDValue One = DAG.getConstant(1, SL, MVT::i32);
+ const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
+ const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
@@ -2065,20 +2072,22 @@ SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const
SDValue Exp = extractF64Exponent(Hi, SL, DAG);
- const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), MVT::i64);
+ const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
+ MVT::i64);
SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
- DAG.getConstant(INT64_C(0x0008000000000000), MVT::i64),
+ DAG.getConstant(INT64_C(0x0008000000000000), SL,
+ MVT::i64),
Exp);
SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
- DAG.getConstant(0, MVT::i64), Tmp0,
+ DAG.getConstant(0, SL, MVT::i64), Tmp0,
ISD::SETNE);
SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
- D, DAG.getConstant(0, MVT::i64));
+ D, DAG.getConstant(0, SL, MVT::i64));
SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
@@ -2090,8 +2099,8 @@ SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const
SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
ExpEqNegOne,
- DAG.getConstantFP(1.0, MVT::f64),
- DAG.getConstantFP(0.0, MVT::f64));
+ DAG.getConstantFP(1.0, SL, MVT::f64),
+ DAG.getConstantFP(0.0, SL, MVT::f64));
SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
@@ -2123,8 +2132,8 @@ SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
- const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
- const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);
+ const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
+ const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
@@ -2144,9 +2153,9 @@ SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, SL, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, SL, MVT::i32));
SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
SL, MVT::f64, Hi);
@@ -2154,7 +2163,7 @@ SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, SL, MVT::i32));
return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
}
@@ -2175,13 +2184,13 @@ SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
// f32 uint_to_fp i64
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, DL, MVT::i32));
SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
- DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
+ DAG.getConstantFP(4294967296.0f, DL, MVT::f32)); // 2^32
return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
@@ -2202,10 +2211,10 @@ SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
- SDValue K0
- = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), MVT::f64);
- SDValue K1
- = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), MVT::f64);
+ SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
+ MVT::f64);
+ SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
+ MVT::f64);
SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
@@ -2304,14 +2313,14 @@ static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
- uint32_t Offset, uint32_t Width) {
+ uint32_t Offset, uint32_t Width, SDLoc DL) {
if (Width + Offset < 32) {
uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
- return DAG.getConstant(Result, MVT::i32);
+ return DAG.getConstant(Result, DL, MVT::i32);
}
- return DAG.getConstant(Src0 >> Offset, MVT::i32);
+ return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}
static bool usesAllNormalStores(SDNode *LoadVal) {
@@ -2416,7 +2425,6 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SELECT: {
SDValue Cond = N->getOperand(0);
if (Cond.getOpcode() == ISD::SETCC && Cond.hasOneUse()) {
- SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue LHS = Cond.getOperand(0);
SDValue RHS = Cond.getOperand(1);
@@ -2447,7 +2455,7 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
uint32_t WidthVal = Width->getZExtValue() & 0x1f;
if (WidthVal == 0)
- return DAG.getConstant(0, MVT::i32);
+ return DAG.getConstant(0, DL, MVT::i32);
ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!Offset)
@@ -2486,17 +2494,19 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
return constantFoldBFE<int32_t>(DAG,
CVal->getSExtValue(),
OffsetVal,
- WidthVal);
+ WidthVal,
+ DL);
}
return constantFoldBFE<uint32_t>(DAG,
CVal->getZExtValue(),
OffsetVal,
- WidthVal);
+ WidthVal,
+ DL);
}
if ((OffsetVal + WidthVal) >= 32) {
- SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
+ SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
BitsFrom, ShiftVal);
}
diff --git a/llvm/lib/Target/R600/AMDGPUInstructions.td b/llvm/lib/Target/R600/AMDGPUInstructions.td
index eeb7f3fcde5..caec48171f5 100644
--- a/llvm/lib/Target/R600/AMDGPUInstructions.td
+++ b/llvm/lib/Target/R600/AMDGPUInstructions.td
@@ -583,7 +583,7 @@ def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
}]>;
def IMMPopCount : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()),
+ return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
MVT::i32);
}]>;
diff --git a/llvm/lib/Target/R600/R600ISelLowering.cpp b/llvm/lib/Target/R600/R600ISelLowering.cpp
index b6b7067f7e1..e2604b2ed22 100644
--- a/llvm/lib/Target/R600/R600ISelLowering.cpp
+++ b/llvm/lib/Target/R600/R600ISelLowering.cpp
@@ -611,17 +611,18 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
}
case AMDGPUIntrinsic::R600_store_swizzle: {
+ SDLoc DL(Op);
const SDValue Args[8] = {
Chain,
Op.getOperand(2), // Export Value
Op.getOperand(3), // ArrayBase
Op.getOperand(4), // Type
- DAG.getConstant(0, MVT::i32), // SWZ_X
- DAG.getConstant(1, MVT::i32), // SWZ_Y
- DAG.getConstant(2, MVT::i32), // SWZ_Z
- DAG.getConstant(3, MVT::i32) // SWZ_W
+ DAG.getConstant(0, DL, MVT::i32), // SWZ_X
+ DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
+ DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
+ DAG.getConstant(3, DL, MVT::i32) // SWZ_W
};
- return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(), Args);
+ return DAG.getNode(AMDGPUISD::EXPORT, DL, Op.getValueType(), Args);
}
// default for switch(IntrinsicID)
@@ -655,7 +656,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
const R600InstrInfo *TII =
static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
- MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32));
+ MVT::v4f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32));
return DAG.getTargetExtractSubreg(
TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
DL, MVT::f32, SDValue(interp, 0));
@@ -673,11 +674,11 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
if (slot % 4 < 2)
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
RegisterJNode, RegisterINode);
else
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
RegisterJNode, RegisterINode);
return SDValue(interp, slot % 2);
}
@@ -690,11 +691,11 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
RegisterJNode, RegisterINode);
else
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
RegisterJNode, RegisterINode);
return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
SDValue(interp, 0), SDValue(interp, 1));
@@ -750,19 +751,19 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
}
SDValue TexArgs[19] = {
- DAG.getConstant(TextureOp, MVT::i32),
+ DAG.getConstant(TextureOp, DL, MVT::i32),
Op.getOperand(1),
- DAG.getConstant(0, MVT::i32),
- DAG.getConstant(1, MVT::i32),
- DAG.getConstant(2, MVT::i32),
- DAG.getConstant(3, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(1, DL, MVT::i32),
+ DAG.getConstant(2, DL, MVT::i32),
+ DAG.getConstant(3, DL, MVT::i32),
Op.getOperand(2),
Op.getOperand(3),
Op.getOperand(4),
- DAG.getConstant(0, MVT::i32),
- DAG.getConstant(1, MVT::i32),
- DAG.getConstant(2, MVT::i32),
- DAG.getConstant(3, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(1, DL, MVT::i32),
+ DAG.getConstant(2, DL, MVT::i32),
+ DAG.getConstant(3, DL, MVT::i32),
Op.getOperand(5),
Op.getOperand(6),
Op.getOperand(7),
@@ -775,21 +776,21 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
case AMDGPUIntrinsic::AMDGPU_dp4: {
SDValue Args[8] = {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
- DAG.getConstant(0, MVT::i32)),
+ DAG.getConstant(0, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
- DAG.getConstant(0, MVT::i32)),
+ DAG.getConstant(0, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
- DAG.getConstant(1, MVT::i32)),
+ DAG.getConstant(1, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
- DAG.getConstant(1, MVT::i32)),
+ DAG.getConstant(1, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
- DAG.getConstant(2, MVT::i32)),
+ DAG.getConstant(2, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
- DAG.getConstant(2, MVT::i32)),
+ DAG.getConstant(2, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
- DAG.getConstant(3, MVT::i32)),
+ DAG.getConstant(3, DL, MVT::i32)),
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
- DAG.getConstant(3, MVT::i32))
+ DAG.getConstant(3, DL, MVT::i32))
};
return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
}
@@ -931,8 +932,8 @@ SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
for (unsigned i = 0, e = VecVT.getVectorNumElements();
i != e; ++i) {
- Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
- Vector, DAG.getConstant(i, getVectorIdxTy())));
+ Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
+ DAG.getConstant(i, DL, getVectorIdxTy())));
}
return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
@@ -976,11 +977,12 @@ SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
// Thus we lower them to TRIG(FRACT(x / 2Pi + 0.5) - 0.5)
EVT VT = Op.getValueType();
SDValue Arg = Op.getOperand(0);
- SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
- DAG.getNode(ISD::FADD, SDLoc(Op), VT,
- DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
- DAG.getConstantFP(0.15915494309, MVT::f32)),
- DAG.getConstantFP(0.5, MVT::f32)));
+ SDLoc DL(Op);
+ SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
+ DAG.getNode(ISD::FADD, DL, VT,
+ DAG.getNode(ISD::FMUL, DL, VT, Arg,
+ DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
+ DAG.getConstantFP(0.5, DL, MVT::f32)));
unsigned TrigNode;
switch (Op.getOpcode()) {
case ISD::FCOS:
@@ -992,14 +994,14 @@ SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
default:
llvm_unreachable("Wrong trig opcode");
}
- SDValue TrigVal = DAG.getNode(TrigNode, SDLoc(Op), VT,
- DAG.getNode(ISD::FADD, SDLoc(Op), VT, FractPart,
- DAG.getConstantFP(-0.5, MVT::f32)));
+ SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
+ DAG.getNode(ISD::FADD, DL, VT, FractPart,
+ DAG.getConstantFP(-0.5, DL, MVT::f32)));
if (Gen >= AMDGPUSubtarget::R700)
return TrigVal;
// On R600 hw, COS/SIN input must be between -Pi and Pi.
- return DAG.getNode(ISD::FMUL, SDLoc(Op), VT, TrigVal,
- DAG.getConstantFP(3.14159265359, MVT::f32));
+ return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
+ DAG.getConstantFP(3.14159265359, DL, MVT::f32));
}
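A scalar sketch of the trig lowering above; fract() and hwTrig() are illustrative stand-ins for the FRACT and COS/SIN nodes, not models of the actual hardware units:

    #include <cmath>

    static float fract(float v) { return v - std::floor(v); }   // stand-in for AMDGPUISD::FRACT
    static float hwTrig(float v) { return std::sin(v); }        // stand-in for the COS/SIN node

    float lowerTrigSketch(float x, bool isR600) {
      float fractPart = fract(x * 0.15915494309f + 0.5f); // x / (2*Pi) + 0.5, wrapped to [0, 1)
      float trigVal = hwTrig(fractPart - 0.5f);           // argument now in [-0.5, 0.5)
      // R700 and newer return trigVal directly; on R600 the extra FMUL by Pi is
      // emitted, per the in-tree comment about the [-Pi, Pi] input restriction.
      return isR600 ? trigVal * 3.14159265359f : trigVal;
    }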
SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
@@ -1009,11 +1011,11 @@ SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
SDValue Shift = Op.getOperand(2);
- SDValue Zero = DAG.getConstant(0, VT);
- SDValue One = DAG.getConstant(1, VT);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
- SDValue Width = DAG.getConstant(VT.getSizeInBits(), VT);
- SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, VT);
+ SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
+ SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
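The remainder of the SHL_PARTS expansion falls outside this hunk; as a reminder of what it computes, here is a scalar paraphrase (an assumption-level sketch, not the exact select-based DAG):

    #include <cstdint>

    // Shift a 64-bit value expressed as two 32-bit parts left by 'shift' (0..63).
    void shlParts(uint32_t lo, uint32_t hi, uint32_t shift,
                  uint32_t &outLo, uint32_t &outHi) {
      if (shift == 0) {                    // Zero/One/Width feed the selects
        outLo = lo;
        outHi = hi;
      } else if (shift < 32) {             // CompShift = Width1 - Shift path
        outLo = lo << shift;
        outHi = (hi << shift) | (lo >> (32 - shift));
      } else {                             // BigShift = Shift - Width path
        outLo = 0;
        outHi = lo << (shift - 32);
      }
    }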
@@ -1045,13 +1047,13 @@ SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
SDValue Lo = Op.getOperand(0);
SDValue Hi = Op.getOperand(1);
SDValue Shift = Op.getOperand(2);
- SDValue Zero = DAG.getConstant(0, VT);
- SDValue One = DAG.getConstant(1, VT);
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
const bool SRA = Op.getOpcode() == ISD::SRA_PARTS;
- SDValue Width = DAG.getConstant(VT.getSizeInBits(), VT);
- SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, VT);
+ SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
+ SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
@@ -1077,11 +1079,12 @@ SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
}
SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
return DAG.getNode(
ISD::SETCC,
- SDLoc(Op),
+ DL,
MVT::i1,
- Op, DAG.getConstantFP(0.0f, MVT::f32),
+ Op, DAG.getConstantFP(0.0f, DL, MVT::f32),
DAG.getCondCode(ISD::SETNE)
);
}
@@ -1097,7 +1100,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
assert(isInt<16>(ByteOffset));
return DAG.getLoad(VT, DL, DAG.getEntryNode(),
- DAG.getConstant(ByteOffset, MVT::i32), // PTR
+ DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
MachinePointerInfo(ConstantPointerNull::get(PtrType)),
false, false, false, 0);
}
@@ -1234,11 +1237,11 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
SDValue HWTrue, HWFalse;
if (CompareVT == MVT::f32) {
- HWTrue = DAG.getConstantFP(1.0f, CompareVT);
- HWFalse = DAG.getConstantFP(0.0f, CompareVT);
+ HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
+ HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
} else if (CompareVT == MVT::i32) {
- HWTrue = DAG.getConstant(-1, CompareVT);
- HWFalse = DAG.getConstant(0, CompareVT);
+ HWTrue = DAG.getConstant(-1, DL, CompareVT);
+ HWFalse = DAG.getConstant(0, DL, CompareVT);
}
else {
llvm_unreachable("Unhandled value type in LowerSELECT_CC");
@@ -1276,8 +1279,9 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
default: llvm_unreachable("Invalid stack width");
}
- return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr,
- DAG.getConstant(SRLPad, MVT::i32));
+ SDLoc DL(Ptr);
+ return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
+ DAG.getConstant(SRLPad, DL, MVT::i32));
}
void R600TargetLowering::getStackAddress(unsigned StackWidth,
@@ -1328,26 +1332,26 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
EVT MemVT = StoreNode->getMemoryVT();
SDValue MaskConstant;
if (MemVT == MVT::i8) {
- MaskConstant = DAG.getConstant(0xFF, MVT::i32);
+ MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
} else {
assert(MemVT == MVT::i16);
- MaskConstant = DAG.getConstant(0xFFFF, MVT::i32);
+ MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
}
SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, VT, Ptr,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
SDValue ByteIndex = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), Ptr,
- DAG.getConstant(0x00000003, VT));
+ DAG.getConstant(0x00000003, DL, VT));
SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
- DAG.getConstant(3, VT));
+ DAG.getConstant(3, DL, VT));
SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, Shift);
SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, Shift);
// XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
// vector instead.
SDValue Src[4] = {
ShiftedValue,
- DAG.getConstant(0, MVT::i32),
- DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
Mask
};
SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
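As a sketch of what the masked dword update above amounts to: the export hardware applies the mask, so emulateByteStore below is only an illustrative scalar model, not code from this patch:

    #include <cstdint>

    uint32_t emulateByteStore(uint32_t oldDword, uint32_t value, uint32_t ptr, bool isI8) {
      uint32_t maskConstant = isI8 ? 0xFFu : 0xFFFFu;   // MaskConstant above
      uint32_t byteIndex = ptr & 0x3;                   // ByteIndex = Ptr & 3
      uint32_t shift = byteIndex << 3;                  // Shift = ByteIndex * 8
      uint32_t shifted = (value & maskConstant) << shift;
      uint32_t mask = maskConstant << shift;
      // Exporting {ShiftedValue, 0, 0, Mask} behaves like a read-modify-write of
      // the containing dword that only touches the selected byte(s).
      return (oldDword & ~mask) | shifted;
    }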
@@ -1360,7 +1364,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// Convert pointer from byte address to dword address.
Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
- Ptr, DAG.getConstant(2, MVT::i32)));
+ Ptr, DAG.getConstant(2, DL, MVT::i32)));
if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
llvm_unreachable("Truncated and indexed stores not supported yet");
@@ -1402,13 +1406,13 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
unsigned Channel, PtrIncr;
getStackAddress(StackWidth, i, Channel, PtrIncr);
Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
- DAG.getConstant(PtrIncr, MVT::i32));
+ DAG.getConstant(PtrIncr, DL, MVT::i32));
SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT,
- Value, DAG.getConstant(i, MVT::i32));
+ Value, DAG.getConstant(i, DL, MVT::i32));
Stores[i] = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
Chain, Elem, Ptr,
- DAG.getTargetConstant(Channel, MVT::i32));
+ DAG.getTargetConstant(Channel, DL, MVT::i32));
}
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
} else {
@@ -1416,7 +1420,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
}
Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
- DAG.getTargetConstant(0, MVT::i32)); // Channel
+ DAG.getTargetConstant(0, DL, MVT::i32)); // Channel
}
return Chain;
@@ -1489,10 +1493,11 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
SDValue Ptr = DAG.getZExtOrTrunc(LoadNode->getBasePtr(), DL,
getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
LoadNode->getChain(), Ptr,
- DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
+ DAG.getTargetConstant(0, DL, MVT::i32),
+ Op.getOperand(2));
}
if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
@@ -1519,7 +1524,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
// Thus we add ((512 + (kc_bank << 12)) + chan) * 4 here and
// then divide by 4 at the ISel step
SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
- DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
+ DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
}
EVT NewVT = MVT::v4i32;
@@ -1533,15 +1538,16 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
} else {
// A non-constant ptr can't be folded, so keep it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
- DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
- DAG.getConstant(LoadNode->getAddressSpace() -
- AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
+ DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
+ DAG.getConstant(4, DL, MVT::i32)),
+ DAG.getConstant(LoadNode->getAddressSpace() -
+ AMDGPUAS::CONSTANT_BUFFER_0, DL, MVT::i32)
);
}
if (!VT.isVector()) {
Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
}
SDValue MergedValues[2] = {
@@ -1562,7 +1568,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
EVT MemVT = LoadNode->getMemoryVT();
assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
SDValue ShiftAmount =
- DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
+ DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), DL,
+ MVT::i32);
SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
LoadNode->getPointerInfo(), MemVT,
LoadNode->isVolatile(),
@@ -1600,10 +1607,10 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
unsigned Channel, PtrIncr;
getStackAddress(StackWidth, i, Channel, PtrIncr);
Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
- DAG.getConstant(PtrIncr, MVT::i32));
+ DAG.getConstant(PtrIncr, DL, MVT::i32));
Loads[i] = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, ElemVT,
Chain, Ptr,
- DAG.getTargetConstant(Channel, MVT::i32),
+ DAG.getTargetConstant(Channel, DL, MVT::i32),
Op.getOperand(2));
}
for (unsigned i = NumElemVT; i < 4; ++i) {
@@ -1614,7 +1621,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
} else {
LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
Chain, Ptr,
- DAG.getTargetConstant(0, MVT::i32), // Channel
+ DAG.getTargetConstant(0, DL, MVT::i32), // Channel
Op.getOperand(2));
}
@@ -1703,7 +1710,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
SDValue Arg = DAG.getLoad(ISD::UNINDEXED, Ext, VT, DL, Chain,
- DAG.getConstant(Offset, MVT::i32),
+ DAG.getConstant(Offset, DL, MVT::i32),
DAG.getUNDEF(MVT::i32),
PtrInfo,
MemVT, false, true, true, 4);
@@ -1804,7 +1811,8 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
-SDValue Swz[4], SelectionDAG &DAG) const {
+ SDValue Swz[4], SelectionDAG &DAG,
+ SDLoc DL) const {
assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
// Old -> New swizzle values
DenseMap<unsigned, unsigned> SwizzleRemap;
@@ -1813,7 +1821,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
for (unsigned i = 0; i < 4; i++) {
unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
- Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
SwizzleRemap.clear();
@@ -1821,7 +1829,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
for (unsigned i = 0; i < 4; i++) {
unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
- Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
return BuildVector;
@@ -1867,11 +1875,12 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
+ SDLoc dl(N);
+ return DAG.getNode(ISD::SELECT_CC, dl, N->getValueType(0),
SelectCC.getOperand(0), // LHS
SelectCC.getOperand(1), // RHS
- DAG.getConstant(-1, MVT::i32), // True
- DAG.getConstant(0, MVT::i32), // False
+ DAG.getConstant(-1, dl, MVT::i32), // True
+ DAG.getConstant(0, dl, MVT::i32), // False
SelectCC.getOperand(4)); // CC
break;
@@ -2014,7 +2023,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
N->getOperand(7) // SWZ_W
};
SDLoc DL(N);
- NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs);
}
case AMDGPUISD::TEXTURE_FETCH: {
@@ -2043,9 +2052,9 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
N->getOperand(17),
N->getOperand(18),
};
- NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
- return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
- NewArgs);
+ SDLoc DL(N);
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
}
}
@@ -2064,13 +2073,13 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
if (!Neg.getNode())
return false;
Src = Src.getOperand(0);
- Neg = DAG.getTargetConstant(1, MVT::i32);
+ Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
return true;
case AMDGPU::FABS_R600:
if (!Abs.getNode())
return false;
Src = Src.getOperand(0);
- Abs = DAG.getTargetConstant(1, MVT::i32);
+ Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
return true;
case AMDGPU::CONST_COPY: {
unsigned Opcode = ParentNode->getMachineOpcode();
@@ -2166,7 +2175,7 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
assert(C);
if (C->getZExtValue())
return false;
- Imm = DAG.getTargetConstant(ImmValue, MVT::i32);
+ Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
}
Src = DAG.getRegister(ImmReg, MVT::i32);
return true;
@@ -2249,10 +2258,11 @@ SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
AMDGPU::OpName::clamp);
if (ClampIdx < 0)
return Node;
+ SDLoc DL(Node);
std::vector<SDValue> Ops(Src->op_begin(), Src->op_end());
- Ops[ClampIdx - 1] = DAG.getTargetConstant(1, MVT::i32);
- return DAG.getMachineNode(Src.getMachineOpcode(), SDLoc(Node),
- Node->getVTList(), Ops);
+ Ops[ClampIdx - 1] = DAG.getTargetConstant(1, DL, MVT::i32);
+ return DAG.getMachineNode(Src.getMachineOpcode(), DL,
+ Node->getVTList(), Ops);
} else {
if (!TII->hasInstrModifiers(Opcode))
return Node;
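A note on the pattern repeated throughout this diff: the SelectionDAG constant helpers (getConstant, getConstantFP, getTargetConstant, getIntPtrConstant) now take the debug location explicitly, so each lowering routine hoists an SDLoc and threads it into every constant it creates. A minimal sketch of the updated call shape (lowerExample is an illustrative helper, written inside a TargetLowering member with the usual SelectionDAG headers available):

    SDValue lowerExample(SDValue Op, SelectionDAG &DAG) {
      SDLoc DL(Op);                                     // one location, reused for every node
      SDValue Four = DAG.getConstant(4, DL, MVT::i32);  // was: DAG.getConstant(4, MVT::i32)
      return DAG.getNode(ISD::SRL, DL, MVT::i32, Op, Four);
    }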
diff --git a/llvm/lib/Target/R600/R600ISelLowering.h b/llvm/lib/Target/R600/R600ISelLowering.h
index c54719574f9..1f570404254 100644
--- a/llvm/lib/Target/R600/R600ISelLowering.h
+++ b/llvm/lib/Target/R600/R600ISelLowering.h
@@ -50,7 +50,8 @@ private:
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
- SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const;
+ SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG,
+ SDLoc DL) const;
SDValue vectorToVerticalVector(SelectionDAG &DAG, SDValue Vector) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/R600/SIISelLowering.cpp b/llvm/lib/Target/R600/SIISelLowering.cpp
index 43507d8bf33..6ff2a9689e0 100644
--- a/llvm/lib/Target/R600/SIISelLowering.cpp
+++ b/llvm/lib/Target/R600/SIISelLowering.cpp
@@ -384,7 +384,7 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
MRI.getLiveInVirtReg(InputPtrReg), MVT::i64);
SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, BasePtr,
- DAG.getConstant(Offset, MVT::i64));
+ DAG.getConstant(Offset, SL, MVT::i64));
SDValue PtrOffset = DAG.getUNDEF(getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
@@ -826,14 +826,14 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
SDValue PtrLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
SDValue PtrHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, DL, MVT::i32));
SDValue Lo = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i32, MVT::Glue),
PtrLo, GA);
SDValue Hi = DAG.getNode(ISD::ADDE, DL, DAG.getVTList(MVT::i32, MVT::Glue),
- PtrHi, DAG.getConstant(0, MVT::i32),
+ PtrHi, DAG.getConstant(0, DL, MVT::i32),
SDValue(Lo.getNode(), 1));
return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
}
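The split 64-bit add above can be read as the following scalar sketch, assuming (as the code does) that the target global address contributes only to the low 32-bit half:

    #include <cstdint>

    uint64_t add64ViaCarry(uint64_t ptr, uint32_t gaLo) {
      uint32_t lo = (uint32_t)ptr;
      uint32_t hi = (uint32_t)(ptr >> 32);
      uint32_t sumLo = lo + gaLo;              // ADDC: low halves
      uint32_t carry = sumLo < lo ? 1 : 0;     // the glue value produced by ADDC
      uint32_t sumHi = hi + 0 + carry;         // ADDE: high half + constant 0 + carry
      return ((uint64_t)sumHi << 32) | sumLo;  // BUILD_PAIR
    }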
@@ -1018,8 +1018,8 @@ SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Cond = Op.getOperand(0);
- SDValue Zero = DAG.getConstant(0, MVT::i32);
- SDValue One = DAG.getConstant(1, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
+ SDValue One = DAG.getConstant(1, DL, MVT::i32);
SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
@@ -1094,12 +1094,12 @@ SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
const APFloat K0Val(BitsToFloat(0x6f800000));
- const SDValue K0 = DAG.getConstantFP(K0Val, MVT::f32);
+ const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
const APFloat K1Val(BitsToFloat(0x2f800000));
- const SDValue K1 = DAG.getConstantFP(K1Val, MVT::f32);
+ const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
- const SDValue One = DAG.getConstantFP(1.0, MVT::f32);
+ const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
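The two magic constants are 2^96 and 2^-32; the rest of the fast f32 division path is not visible in this hunk, but the idea is to pre-scale overly large denominators before taking the reciprocal. A hedged scalar sketch of that idea (an assumption, not the exact emitted DAG):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static float bitsToFloat(uint32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    float fdiv32Sketch(float lhs, float rhs) {
      const float K0 = bitsToFloat(0x6f800000); // 2^96: "denominator is huge" threshold
      const float K1 = bitsToFloat(0x2f800000); // 2^-32: pre-scale for huge denominators
      float r1 = std::fabs(rhs);
      float scale = (r1 > K0) ? K1 : 1.0f;      // select on the SETCC result
      float recip = 1.0f / (rhs * scale);       // stands in for the hardware RCP
      return lhs * recip * scale;               // undo the scale on the result
    }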
@@ -1124,7 +1124,7 @@ SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
SDValue X = Op.getOperand(0);
SDValue Y = Op.getOperand(1);
- const SDValue One = DAG.getConstantFP(1.0, MVT::f64);
+ const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
@@ -1154,7 +1154,7 @@ SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
// Work around a hardware bug on SI where the condition output from div_scale
// is not usable.
- const SDValue Hi = DAG.getConstant(1, MVT::i32);
+ const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
// Figure out which scale to use for div_fmas.
SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
@@ -1223,11 +1223,13 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
}
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Arg = Op.getOperand(0);
- SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
- DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
- DAG.getConstantFP(0.5 / M_PI, VT)));
+ SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
+ DAG.getNode(ISD::FMUL, DL, VT, Arg,
+ DAG.getConstantFP(0.5/M_PI, DL,
+ VT)));
switch (Op.getOpcode()) {
case ISD::FCOS:
@@ -1417,7 +1419,7 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
EVT VT = N->getValueType(0);
SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
- SDValue COffset = DAG.getConstant(Offset, MVT::i32);
+ SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}
@@ -1466,8 +1468,9 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
"mask not equal");
- return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1,
- X, DAG.getConstant(Mask, MVT::i32));
+ SDLoc DL(N);
+ return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
+ X, DAG.getConstant(Mask, DL, MVT::i32));
}
}
}
@@ -1497,8 +1500,9 @@ SDValue SITargetLowering::performOrCombine(SDNode *N,
static const uint32_t MaxMask = 0x3ff;
uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
- return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1,
- Src, DAG.getConstant(NewMask, MVT::i32));
+ SDLoc DL(N);
+ return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
+ Src, DAG.getConstant(NewMask, DL, MVT::i32));
}
return SDValue();
@@ -1512,7 +1516,7 @@ SDValue SITargetLowering::performClassCombine(SDNode *N,
// fp_class x, 0 -> false
if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
if (CMask->isNullValue())
- return DAG.getConstant(0, MVT::i1);
+ return DAG.getConstant(0, SDLoc(N), MVT::i1);
}
return SDValue();
@@ -1596,8 +1600,8 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
const APFloat &APF = CRHS->getValueAPF();
if (APF.isInfinity() && !APF.isNegative()) {
unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
- return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1,
- LHS.getOperand(0), DAG.getConstant(Mask, MVT::i32));
+ return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
+ DAG.getConstant(Mask, SL, MVT::i32));
}
}
@@ -1674,7 +1678,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (LHS.getOpcode() == ISD::FADD) {
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
- const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
+ const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
}
}
@@ -1683,7 +1687,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (RHS.getOpcode() == ISD::FADD) {
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
- const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
+ const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
}
}
@@ -1710,7 +1714,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
- const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
+ const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
@@ -1722,7 +1726,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
- const SDValue NegTwo = DAG.getConstantFP(-2.0, MVT::f32);
+ const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32);
return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
}
}
@@ -1865,14 +1869,15 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
// Adjust the writemask in the node
std::vector<SDValue> Ops;
- Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end());
Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
// If we only got one lane, replace it with a copy
// (if NewDmask has only one bit set...)
if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
- SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, MVT::i32);
+ SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
+ MVT::i32);
SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
SDLoc(), Users[Lane]->getValueType(0),
SDValue(Node, 0), RC);
@@ -1887,7 +1892,7 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
if (!User)
continue;
- SDValue Op = DAG.getTargetConstant(Idx, MVT::i32);
+ SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
switch (Idx) {
@@ -1982,7 +1987,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
}
static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) {
- SDValue K = DAG.getTargetConstant(Val, MVT::i32);
+ SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}
@@ -1997,11 +2002,11 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
// Build the half of the subregister with the constants.
const SDValue Ops0[] = {
- DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
buildSMovImm32(DAG, DL, 0),
- DAG.getTargetConstant(AMDGPU::sub0, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
- DAG.getTargetConstant(AMDGPU::sub1, MVT::i32)
+ DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
};
SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
@@ -2009,11 +2014,11 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
// Combine the constants and the pointer.
const SDValue Ops1[] = {
- DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Ptr,
- DAG.getTargetConstant(AMDGPU::sub0_sub1, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
SubRegHi,
- DAG.getTargetConstant(AMDGPU::sub2_sub3, MVT::i32)
+ DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
};
return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
@@ -2046,7 +2051,8 @@ MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
if (RsrcDword1) {
PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
- DAG.getConstant(RsrcDword1, MVT::i32)), 0);
+ DAG.getConstant(RsrcDword1, DL, MVT::i32)),
+ 0);
}
SDValue DataLo = buildSMovImm32(DAG, DL,
@@ -2054,15 +2060,15 @@ MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
const SDValue Ops[] = {
- DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
PtrLo,
- DAG.getTargetConstant(AMDGPU::sub0, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
PtrHi,
- DAG.getTargetConstant(AMDGPU::sub1, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
DataLo,
- DAG.getTargetConstant(AMDGPU::sub2, MVT::i32),
+ DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
DataHi,
- DAG.getTargetConstant(AMDGPU::sub3, MVT::i32)
+ DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
};
return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
diff --git a/llvm/lib/Target/R600/SIInstrInfo.td b/llvm/lib/Target/R600/SIInstrInfo.td
index 39f24423954..608240413ac 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.td
+++ b/llvm/lib/Target/R600/SIInstrInfo.td
@@ -126,7 +126,8 @@ def SIconstdata_ptr : SDNode<
// Transformation function: extract the lower 32 bits of a 64-bit immediate
def LO32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, SDLoc(N),
+ MVT::i32);
}]>;
def LO32f : SDNodeXForm<fpimm, [{
@@ -136,12 +137,13 @@ def LO32f : SDNodeXForm<fpimm, [{
// Transformation function: extract the upper 32 bits of a 64-bit immediate
def HI32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() >> 32, SDLoc(N), MVT::i32);
}]>;
def HI32f : SDNodeXForm<fpimm, [{
APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32);
- return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+ return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), SDLoc(N),
+ MVT::f32);
}]>;
def IMM8bitDWORD : PatLeaf <(imm),
@@ -149,39 +151,39 @@ def IMM8bitDWORD : PatLeaf <(imm),
>;
def as_dword_i32imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() >> 2, MVT::i32);
+ return CurDAG->getTargetConstant(N->getZExtValue() >> 2, SDLoc(N), MVT::i32);
}]>;
def as_i1imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i1);
+ return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
}]>;
def as_i8imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i8);
+ return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
}]>;
def as_i16imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i16);
+ return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
}]>;
def as_i32imm: SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;
def as_i64imm: SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i64);
+ return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
}]>;
// Copied from the AArch64 backend:
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
- N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i32);
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;
// Copied from the AArch64 backend:
def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
- N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i64);
+ N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def IMM8bit : PatLeaf <(imm),
diff --git a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 1cf5ccfdef6..53eb91a8812 100644
--- a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -74,7 +74,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), TLI->getPointerTy());
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
@@ -93,7 +93,8 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
} else {
Base = Addr.getOperand(0);
}
- Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr),
+ MVT::i32);
return true;
}
}
@@ -109,7 +110,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
}
}
Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
@@ -162,7 +163,8 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
SDValue TopPart;
if (N->getOpcode() == ISD::SDIV) {
TopPart = SDValue(CurDAG->getMachineNode(SP::SRAri, dl, MVT::i32, DivLHS,
- CurDAG->getTargetConstant(31, MVT::i32)), 0);
+ CurDAG->getTargetConstant(31, dl, MVT::i32)),
+ 0);
} else {
TopPart = CurDAG->getRegister(SP::G0, MVT::i32);
}
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 5b964af1959..7dce2e86c24 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -229,7 +229,7 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
}
RetOps[0] = Chain; // Update chain.
- RetOps[1] = DAG.getConstant(RetAddrOffset, MVT::i32);
+ RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
// Add the flag if we have it.
if (Flag.getNode())
@@ -261,7 +261,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
// The second operand on the return instruction is the return address offset.
// The return address is always %i7+8 with the 64-bit ABI.
- RetOps.push_back(DAG.getConstant(8, MVT::i32));
+ RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
// Copy the result values into the output registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -289,7 +289,7 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
// in the high bits of the register.
if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
// The next value may go in the low bits of the same register.
// Handle both at once.
@@ -471,7 +471,7 @@ LowerFormalArguments_32(SDValue Chain,
// Sparc is big endian, so add an offset based on the ObjectVT.
unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
- DAG.getConstant(Offset, MVT::i32));
+ DAG.getConstant(Offset, dl, MVT::i32));
Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
MachinePointerInfo(),
VA.getValVT(), false, false, false,0);
@@ -570,7 +570,7 @@ LowerFormalArguments_64(SDValue Chain,
// Get the high bits for i32 struct elements.
if (VA.getValVT() == MVT::i32 && VA.needsCustom())
Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
// The caller promoted the argument, so insert an Assert?ext SDNode so we
// won't promote the value again in this function.
@@ -723,7 +723,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
int FI = MFI->CreateStackObject(Size, Align, false);
SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
- SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
false, // isVolatile,
@@ -733,7 +733,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
ByValArgs.push_back(FIPtr);
}
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
@@ -776,7 +776,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
assert(VA.needsCustom());
// store SRet argument in %sp+64
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getIntPtrConstant(64);
+ SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(),
@@ -793,7 +793,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// If it is double-word aligned, just store.
if (Offset % 8 == 0) {
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(),
@@ -811,7 +811,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
MachinePointerInfo(), false, false, false, 0);
// Increment the pointer to the other half.
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, dl));
// Load the low part.
SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
MachinePointerInfo(), false, false, false, 0);
@@ -826,7 +826,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Store the low part in stack.
unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
MachinePointerInfo(),
@@ -836,13 +836,13 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
unsigned Offset = VA.getLocMemOffset() + StackOffset;
// Store the high part.
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getIntPtrConstant(Offset);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
MachinePointerInfo(),
false, false, 0));
// Store the low part.
- PtrOff = DAG.getIntPtrConstant(Offset+4);
+ PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
MachinePointerInfo(),
@@ -867,7 +867,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Create a store off the stack pointer for this argument.
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
- SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
+ dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(),
@@ -909,7 +910,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(Chain);
Ops.push_back(Callee);
if (hasStructRetAttr)
- Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
RegsToPass[i].second.getValueType()));
@@ -929,8 +930,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
// Assign locations to each value returned by this call.
@@ -1082,7 +1083,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Adjust the stack pointer to make room for the arguments.
// FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
// with more than 6 arguments.
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
DL);
// Collect the set of registers to pass to the function and their values.
@@ -1130,10 +1131,10 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
- SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset);
+ SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
HiPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
HiPtrOff);
- SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8);
+ SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
LoPtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
LoPtrOff);
@@ -1159,7 +1160,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// passed in the high bits of the register.
if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
// The next value may go in the low bits of the same register.
// Handle both at once.
@@ -1184,7 +1185,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// %sp+BIAS+128 in ours.
SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
Subtarget->getStackPointerBias() +
- 128);
+ 128, DL);
PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
MachinePointerInfo(),
@@ -1247,8 +1248,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
InGlue = Chain.getValue(1);
// Revert the stack pointer immediately after the call.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
- DAG.getIntPtrConstant(0, true), InGlue, DL);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
+ DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
InGlue = Chain.getValue(1);
// Now extract the return values. This is more or less the same as
@@ -1289,7 +1290,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Get the high bits for i32 struct elements.
if (VA.getValVT() == MVT::i32 && VA.needsCustom())
RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
// The callee promoted the return value, so insert an Assert?ext SDNode so
// we won't promote the value again in this function.
@@ -1831,7 +1832,7 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
// abs44.
SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
SparcMCExpr::VK_Sparc_M44, DAG);
- H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32));
+ H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
@@ -1840,7 +1841,7 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
// abs64.
SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
SparcMCExpr::VK_Sparc_HM, DAG);
- Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32));
+ Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
SparcMCExpr::VK_Sparc_LO, DAG);
return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
@@ -1895,7 +1896,7 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue Chain = DAG.getEntryNode();
SDValue InFlag;
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, true), DL);
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, DL, true), DL);
Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
InFlag = Chain.getValue(1);
SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
@@ -1914,8 +1915,8 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
Ops.push_back(InFlag);
Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, true),
- DAG.getIntPtrConstant(0, true), InFlag, DL);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
+ DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
InFlag = Chain.getValue(1);
SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
@@ -2098,54 +2099,54 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
switch(SPCC) {
default: {
- SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_UL : {
- SDValue Mask = DAG.getTargetConstant(1, Result.getValueType());
+ SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
- SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_ULE: {
- SDValue RHS = DAG.getTargetConstant(2, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_UG : {
- SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
SPCC = SPCC::ICC_G;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_UGE: {
- SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_U : {
- SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
SPCC = SPCC::ICC_E;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_O : {
- SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_LG : {
- SDValue Mask = DAG.getTargetConstant(3, Result.getValueType());
+ SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
- SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
SPCC = SPCC::ICC_NE;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
case SPCC::FCC_UE : {
- SDValue Mask = DAG.getTargetConstant(3, Result.getValueType());
+ SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
- SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
SPCC = SPCC::ICC_E;
return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
}
@@ -2317,7 +2318,7 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
}
}
return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
- DAG.getConstant(SPCC, MVT::i32), CompareFlag);
+ DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
@@ -2353,7 +2354,7 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
}
}
return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
- DAG.getConstant(SPCC, MVT::i32), CompareFlag);
+ DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
@@ -2370,7 +2371,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
SDValue Offset =
DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
DAG.getRegister(SP::I6, TLI.getPointerTy()),
- DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset()));
+ DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
MachinePointerInfo(SV), false, false, 0);
@@ -2388,7 +2389,8 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
MachinePointerInfo(SV), false, false, false, 0);
// Increment the pointer, VAList, to the next vaarg.
SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
- DAG.getIntPtrConstant(VT.getSizeInBits()/8));
+ DAG.getIntPtrConstant(VT.getSizeInBits()/8,
+ DL));
// Store the incremented VAList to the legalized pointer.
InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
VAListPtr, MachinePointerInfo(SV), false, false, 0);
@@ -2417,7 +2419,7 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
regSpillArea += Subtarget->getStackPointerBias();
SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
- DAG.getConstant(regSpillArea, VT));
+ DAG.getConstant(regSpillArea, dl, VT));
SDValue Ops[2] = { NewVal, Chain };
return DAG.getMergeValues(Ops, dl);
}
@@ -2446,7 +2448,7 @@ static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
if (Subtarget->is64Bit())
FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
- DAG.getIntPtrConstant(stackBias));
+ DAG.getIntPtrConstant(stackBias, dl));
return FrameAddr;
}
@@ -2458,13 +2460,13 @@ static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
while (depth--) {
SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, dl));
FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
false, false, false, 0);
}
if (Subtarget->is64Bit())
FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
- DAG.getIntPtrConstant(stackBias));
+ DAG.getIntPtrConstant(stackBias, dl));
return FrameAddr;
}
@@ -2507,7 +2509,7 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
SDValue Ptr = DAG.getNode(ISD::ADD,
dl, VT,
FrameAddr,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, dl));
RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr,
MachinePointerInfo(), false, false, false, 0);
@@ -2563,7 +2565,7 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
EVT addrVT = LdNode->getBasePtr().getValueType();
SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
LdNode->getBasePtr(),
- DAG.getConstant(8, addrVT));
+ DAG.getConstant(8, dl, addrVT));
SDValue Lo64 = DAG.getLoad(MVT::f64,
dl,
LdNode->getChain(),
@@ -2571,8 +2573,8 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
LdNode->getPointerInfo(),
false, false, false, alignment);
- SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
- SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, MVT::f128);
@@ -2599,8 +2601,8 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
&& "Unexpected node type");
- SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
- SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
dl,
@@ -2627,7 +2629,7 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
EVT addrVT = StNode->getBasePtr().getValueType();
SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
StNode->getBasePtr(),
- DAG.getConstant(8, addrVT));
+ DAG.getConstant(8, dl, addrVT));
OutChains[1] = DAG.getStore(StNode->getChain(),
dl,
SDValue(Lo64, 0),
@@ -2678,13 +2680,13 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
SDValue Src1 = Op.getOperand(0);
SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
SDValue Src2 = Op.getOperand(1);
SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
@@ -2711,7 +2713,7 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
SDValue Ops[2] = { Dst, Carry };
@@ -2735,7 +2737,7 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
if (LHS.getValueType() != VT)
return Op;
- SDValue ShiftAmt = DAG.getConstant(63, VT);
+ SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
SDValue RHS = Op.getOperand(1);
SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
@@ -2746,14 +2748,14 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
RTLIB::MUL_I128, WideVT,
Args, 4, isSigned, dl).first;
SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
- MulResult, DAG.getIntPtrConstant(0));
+ MulResult, DAG.getIntPtrConstant(0, dl));
SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
- MulResult, DAG.getIntPtrConstant(1));
+ MulResult, DAG.getIntPtrConstant(1, dl));
if (isSigned) {
SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
} else {
- TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, VT),
+ TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
ISD::SETNE);
}
// MulResult is a node with an illegal type. Because such things are not
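The overflow test that follows the libcall is the standard widen-and-compare trick; a 64x64->128 sketch of the same logic (an analogy only — the code above works on the two 64-bit halves of a 128-bit libcall result, and __int128 is a compiler extension used here just for illustration):

    #include <cstdint>

    bool smuloOverflows(int64_t a, int64_t b, int64_t &lo) {
      __int128 wide = (__int128)a * b;
      lo = (int64_t)wide;                    // BottomHalf
      int64_t hi = (int64_t)(wide >> 64);    // TopHalf
      // Signed: no overflow only if the top half equals the sign-extension of the
      // bottom half (the SRA by ShiftAmt = 63 above).
      return hi != (lo >> 63);
    }

    bool umuloOverflows(uint64_t a, uint64_t b, uint64_t &lo) {
      unsigned __int128 wide = (unsigned __int128)a * b;
      lo = (uint64_t)wide;
      // Unsigned: overflow iff the top half is non-zero (the SETNE against 0 above).
      return (uint64_t)(wide >> 64) != 0;
    }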
@@ -3119,7 +3121,8 @@ LowerAsmOperandForConstraint(SDValue Op,
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (isInt<13>(C->getSExtValue())) {
- Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
return;
diff --git a/llvm/lib/Target/Sparc/SparcInstr64Bit.td b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
index 54d824064fb..0eb6f08c8f3 100644
--- a/llvm/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/llvm/lib/Target/Sparc/SparcInstr64Bit.td
@@ -63,7 +63,7 @@ defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
// The ALU instructions want their simm13 operands as i32 immediates.
def as_i32imm : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i32);
+ return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
}]>;
def : Pat<(i64 simm13:$val), (ORri (i64 G0), (as_i32imm $val))>;
def : Pat<(i64 SETHIimm:$val), (SETHIi (HI22 $val))>;
@@ -83,11 +83,12 @@ def nimm33 : PatLeaf<(imm), [{
// Bits 10-31 inverted. Same as assembler's %hix.
def HIX22 : SDNodeXForm<imm, [{
uint64_t Val = (~N->getZExtValue() >> 10) & ((1u << 22) - 1);
- return CurDAG->getTargetConstant(Val, MVT::i32);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
}]>;
// Bits 0-9 with ones in bits 10-31. Same as assembler's %lox.
def LOX10 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~(~N->getZExtValue() & 0x3ff), MVT::i32);
+ return CurDAG->getTargetConstant(~(~N->getZExtValue() & 0x3ff), SDLoc(N),
+ MVT::i32);
}]>;
def : Pat<(i64 nimm33:$val), (XORri (SETHIi (HIX22 $val)), (LOX10 $val))>,
Requires<[Is64Bit]>;
@@ -121,12 +122,12 @@ def : Pat<(i64 nimm33:$val), (XORri (SETHIi (HIX22 $val)), (LOX10 $val))>,
// Bits 42-63, same as assembler's %hh.
def HH22 : SDNodeXForm<imm, [{
uint64_t Val = (N->getZExtValue() >> 42) & ((1u << 22) - 1);
- return CurDAG->getTargetConstant(Val, MVT::i32);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
}]>;
// Bits 32-41, same as assembler's %hm.
def HM10 : SDNodeXForm<imm, [{
uint64_t Val = (N->getZExtValue() >> 32) & ((1u << 10) - 1);
- return CurDAG->getTargetConstant(Val, MVT::i32);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
}]>;
def : Pat<(i64 imm:$val),
(ORrr (SLLXri (ORri (SETHIi (HH22 $val)), (HM10 $val)), (i32 32)),
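For reference, a plain C++ illustration (materializeImm64 is a made-up name) of how the HH22/HM10 fields above combine with the HI22/LO10 fields from SparcInstrInfo.td so that the SETHI/OR/SLLX/OR pattern reassembles an arbitrary 64-bit immediate:

    #include <cstdint>

    uint64_t materializeImm64(uint64_t val) {
      uint32_t hh22 = (uint32_t)((val >> 42) & ((1u << 22) - 1)); // HH22: bits 42-63
      uint32_t hm10 = (uint32_t)((val >> 32) & ((1u << 10) - 1)); // HM10: bits 32-41
      uint32_t hi22 = (uint32_t)(val >> 10) & ((1u << 22) - 1);   // HI22: bits 10-31
      uint32_t lo10 = (uint32_t)(val & 0x3ff);                    // LO10: bits 0-9
      uint64_t hiHalf = ((uint64_t)hh22 << 10) | hm10;            // SETHIi(HH22), ORri(HM10)
      uint64_t loHalf = ((uint64_t)hi22 << 10) | lo10;            // SETHIi(HI22), OR of LO10
      return (hiHalf << 32) | loHalf;                             // SLLXri by 32, final ORrr
    }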
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 17daecac9e0..c877758cfff 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -64,13 +64,14 @@ def simm11 : PatLeaf<(imm), [{ return isInt<11>(N->getSExtValue()); }]>;
def simm13 : PatLeaf<(imm), [{ return isInt<13>(N->getSExtValue()); }]>;
def LO10 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((unsigned)N->getZExtValue() & 1023,
+ return CurDAG->getTargetConstant((unsigned)N->getZExtValue() & 1023, SDLoc(N),
MVT::i32);
}]>;
def HI22 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
- return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, MVT::i32);
+ return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, SDLoc(N),
+ MVT::i32);
}]>;
def SETHIimm : PatLeaf<(imm), [{
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 1a58b531c03..80a98772db7 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -131,7 +131,7 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
// Used by SystemZOperands.td to create integer constants.
inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
- return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
+ return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
}
const SystemZTargetMachine &getTargetMachine() const {
@@ -596,7 +596,7 @@ void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
}
// Lower the displacement to a TargetConstant.
- Disp = CurDAG->getTargetConstant(AM.Disp, VT);
+ Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
}
void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
@@ -864,6 +864,7 @@ SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
}
SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
unsigned Count = 0;
@@ -889,7 +890,7 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
// Force the new mask into the DAG, since it may include known-one bits.
auto *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
if (MaskN->getZExtValue() != RISBG.Mask) {
- SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
+ SDValue NewMask = CurDAG->getConstant(RISBG.Mask, DL, VT);
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
return SelectCode(N);
}
@@ -909,14 +910,14 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
RISBG.End &= 31;
}
SDValue Ops[5] = {
- getUNDEF(SDLoc(N), OpcodeVT),
- convertTo(SDLoc(N), OpcodeVT, RISBG.Input),
- CurDAG->getTargetConstant(RISBG.Start, MVT::i32),
- CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32),
- CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32)
+ getUNDEF(DL, OpcodeVT),
+ convertTo(DL, OpcodeVT, RISBG.Input),
+ CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
+ CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
+ CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
};
- N = CurDAG->getMachineNode(Opcode, SDLoc(N), OpcodeVT, Ops);
- return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+ N = CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops);
+ return convertTo(DL, VT, SDValue(N, 0)).getNode();
}
SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
@@ -955,16 +956,17 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
Opcode = SystemZ::RISBGN;
}
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue Ops[5] = {
- convertTo(SDLoc(N), MVT::i64, Op0),
- convertTo(SDLoc(N), MVT::i64, RxSBG[I].Input),
- CurDAG->getTargetConstant(RxSBG[I].Start, MVT::i32),
- CurDAG->getTargetConstant(RxSBG[I].End, MVT::i32),
- CurDAG->getTargetConstant(RxSBG[I].Rotate, MVT::i32)
+ convertTo(DL, MVT::i64, Op0),
+ convertTo(DL, MVT::i64, RxSBG[I].Input),
+ CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
};
- N = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, Ops);
- return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+ N = CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops);
+ return convertTo(DL, VT, SDValue(N, 0)).getNode();
}
SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
@@ -972,12 +974,12 @@ SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
uint64_t LowerVal) {
EVT VT = Node->getValueType(0);
SDLoc DL(Node);
- SDValue Upper = CurDAG->getConstant(UpperVal, VT);
+ SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
if (Op0.getNode())
Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
Upper = SDValue(Select(Upper.getNode()), 0);
- SDValue Lower = CurDAG->getConstant(LowerVal, VT);
+ SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
return Or.getNode();
}
@@ -1111,7 +1113,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
uint64_t ConstCCMask =
cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
// Invert the condition.
- CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask,
+ CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node),
CCMask.getValueType());
SDValue Op4 = Node->getOperand(4);
Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 21882cb5cea..b1036a2e473 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -591,35 +591,35 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
case 'I': // Unsigned 8-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<8>(C->getZExtValue()))
- Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'J': // Unsigned 12-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<12>(C->getZExtValue()))
- Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'K': // Signed 16-bit constant
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<16>(C->getSExtValue()))
- Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
+ Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'L': // Signed 20-bit displacement (on all targets we support)
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<20>(C->getSExtValue()))
- Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
+ Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType()));
return;
case 'M': // 0x7fffffff
if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (C->getZExtValue() == 0x7fffffff)
- Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType()));
return;
}
@@ -753,7 +753,8 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
EVT PtrVT = getPointerTy();
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
- FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
+ FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
+ DAG.getIntPtrConstant(4, DL));
ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
false, false, false, 0);
@@ -854,7 +855,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Mark the start of the call.
if (!IsTailCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getConstant(NumBytes, DL, PtrVT, true),
DL);
// Copy argument values to their designated locations.
@@ -890,7 +892,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
Offset += 4;
SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, DL));
// Emit the store.
MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
@@ -956,8 +958,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Mark the end of the call, which is glued to the call itself.
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, PtrVT, true),
- DAG.getConstant(0, PtrVT, true),
+ DAG.getConstant(NumBytes, DL, PtrVT, true),
+ DAG.getConstant(0, DL, PtrVT, true),
Glue, DL);
Glue = Chain.getValue(1);
@@ -1179,7 +1181,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
-static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
+static void adjustZeroCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
if (C.ICmpType == SystemZICMP::UnsignedOnly)
return;
@@ -1193,13 +1195,13 @@ static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
(Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
(Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
- C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
+ C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
}
}
// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
-static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
+static void adjustSubwordCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
// For us to make any changes, it must be a comparison between a single-use
// load and a constant.
if (!C.Op0.hasOneUse() ||
@@ -1264,7 +1266,7 @@ static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
// Make sure that the second operand is an i32 with the right value.
if (C.Op1.getValueType() != MVT::i32 ||
Value != ConstOp1->getZExtValue())
- C.Op1 = DAG.getConstant(Value, MVT::i32);
+ C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
// Return true if Op is either an unextended load, or a load suitable
@@ -1360,7 +1362,7 @@ static unsigned reverseCCMask(unsigned CCMask) {
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
-static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
+static void adjustForSubtraction(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
C.CCMask == SystemZ::CCMASK_CMP_NE) {
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
@@ -1369,7 +1371,7 @@ static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
(N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
C.Op0 = SDValue(N, 0);
- C.Op1 = DAG.getConstant(0, N->getValueType(0));
+ C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
return;
}
}
@@ -1425,7 +1427,7 @@ static void adjustForLTGFR(Comparison &C) {
// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
-static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
+static void adjustICmpTruncate(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
if (C.Op0.getOpcode() == ISD::TRUNCATE &&
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
@@ -1437,7 +1439,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
(Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
C.Op0 = C.Op0.getOperand(0);
- C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
+ C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
}
}
}
@@ -1556,7 +1558,7 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
-static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
+static void adjustForTestUnderMask(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
// Check that we have a comparison with a constant.
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
if (!ConstOp1)
@@ -1634,7 +1636,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
if (Mask && Mask->getZExtValue() == MaskVal)
C.Op1 = SDValue(Mask, 0);
else
- C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType());
+ C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
C.CCValid = SystemZ::CCMASK_TM;
C.CCMask = NewCCMask;
}
@@ -1677,7 +1679,7 @@ static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
- ISD::CondCode Cond) {
+ ISD::CondCode Cond, SDLoc DL) {
if (CmpOp1.getOpcode() == ISD::Constant) {
uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
unsigned Opcode, CCValid;
@@ -1709,11 +1711,11 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
else
C.ICmpType = SystemZICMP::SignedOnly;
C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
- adjustZeroCmp(DAG, C);
- adjustSubwordCmp(DAG, C);
- adjustForSubtraction(DAG, C);
+ adjustZeroCmp(DAG, DL, C);
+ adjustSubwordCmp(DAG, DL, C);
+ adjustForSubtraction(DAG, DL, C);
adjustForLTGFR(C);
- adjustICmpTruncate(DAG, C);
+ adjustICmpTruncate(DAG, DL, C);
}
if (shouldSwapCmpOperands(C)) {
@@ -1721,7 +1723,7 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
C.CCMask = reverseCCMask(C.CCMask);
}
- adjustForTestUnderMask(DAG, C);
+ adjustForTestUnderMask(DAG, DL, C);
return C;
}
@@ -1740,12 +1742,12 @@ static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
}
if (C.Opcode == SystemZISD::ICMP)
return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
- DAG.getConstant(C.ICmpType, MVT::i32));
+ DAG.getConstant(C.ICmpType, DL, MVT::i32));
if (C.Opcode == SystemZISD::TM) {
bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
- DAG.getConstant(RegisterOnly, MVT::i32));
+ DAG.getConstant(RegisterOnly, DL, MVT::i32));
}
return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
}
@@ -1759,7 +1761,8 @@ static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
- Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
+ Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
+ DAG.getConstant(32, DL, MVT::i64));
Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
@@ -1791,18 +1794,18 @@ static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue,
if (Conversion.XORValue)
Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
- DAG.getConstant(Conversion.XORValue, MVT::i32));
+ DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
if (Conversion.AddValue)
Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
- DAG.getConstant(Conversion.AddValue, MVT::i32));
+ DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
// The SHR/AND sequence should get optimized to an RISBG.
Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
- DAG.getConstant(Conversion.Bit, MVT::i32));
+ DAG.getConstant(Conversion.Bit, DL, MVT::i32));
if (Conversion.Bit != 31)
Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, DL, MVT::i32));
return Result;
}
@@ -1813,7 +1816,7 @@ SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDLoc DL(Op);
- Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
SDValue Glue = emitCmp(DAG, DL, C);
return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
}
@@ -1825,11 +1828,11 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(4);
SDLoc DL(Op);
- Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
SDValue Glue = emitCmp(DAG, DL, C);
return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
- Op.getOperand(0), DAG.getConstant(C.CCValid, MVT::i32),
- DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue);
+ Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
+ DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue);
}
// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
@@ -1850,7 +1853,7 @@ static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op,
Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
if (IsNegative)
Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
- DAG.getConstant(0, Op.getValueType()), Op);
+ DAG.getConstant(0, DL, Op.getValueType()), Op);
return Op;
}
@@ -1863,7 +1866,7 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDLoc DL(Op);
- Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
// Check for absolute and negative-absolute selections, including those
// where the comparison value is sign-extended (for LPGFR and LNGFR).
@@ -1898,14 +1901,14 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
if (!is32Bit(VT))
Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
// Sign-extend from the low bit.
- SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32);
+ SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32);
SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
}
}
- SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, MVT::i32),
- DAG.getConstant(C.CCMask, MVT::i32), Glue};
+ SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
+ DAG.getConstant(C.CCMask, DL, MVT::i32), Glue};
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
@@ -1945,7 +1948,7 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
// addition for it.
if (Offset != 0)
Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
- DAG.getConstant(Offset, PtrVT));
+ DAG.getConstant(Offset, DL, PtrVT));
return Result;
}
@@ -2006,17 +2009,17 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
// The high part of the thread pointer is in access register 0.
SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
// The low part of the thread pointer is in access register 1.
SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, DL, MVT::i32));
TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
// Merge them into a single 64-bit address.
SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
- DAG.getConstant(32, PtrVT));
+ DAG.getConstant(32, DL, PtrVT));
SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
// Get the offset of GA from the thread pointer, based on the TLS model.
@@ -2153,7 +2156,7 @@ SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
} else {
In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, DL, MVT::i64));
}
SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
@@ -2168,7 +2171,7 @@ SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
MVT::i32, Out64);
SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, DL, MVT::i64));
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
}
llvm_unreachable("Unexpected bitcast combination");
@@ -2189,8 +2192,8 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
// The initial values of each field.
const unsigned NumFields = 4;
SDValue Fields[NumFields] = {
- DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
- DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
+ DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
+ DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
};
@@ -2202,7 +2205,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
SDValue FieldAddr = Addr;
if (Offset != 0)
FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, DL));
MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
MachinePointerInfo(SV, Offset),
false, false, 0);
@@ -2220,7 +2223,7 @@ SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
SDLoc DL(Op);
- return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
+ return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
/*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
/*isTailCall*/false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
@@ -2277,7 +2280,7 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
// multiplication:
//
// (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
- SDValue C63 = DAG.getConstant(63, MVT::i64);
+ SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
SDValue LL = Op.getOperand(0);
SDValue RL = Op.getOperand(1);
SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
@@ -2427,7 +2430,7 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
DAG.computeKnownBits(Op, KnownZero, KnownOne);
unsigned NumSignificantBits = (~KnownZero).getActiveBits();
if (NumSignificantBits == 0)
- return DAG.getConstant(0, VT);
+ return DAG.getConstant(0, DL, VT);
// Skip known-zero high parts of the operand.
int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
@@ -2441,16 +2444,17 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
// Add up per-byte counts in a binary tree. All bits of Op at
// position larger than BitSize remain zero throughout.
for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
- SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, VT));
+ SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
if (BitSize != OrigBitSize)
Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
- DAG.getConstant(((uint64_t)1 << BitSize) - 1, VT));
+ DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
}
// Extract overall result from high byte.
if (BitSize > 8)
- Op = DAG.getNode(ISD::SRL, DL, VT, Op, DAG.getConstant(BitSize - 8, VT));
+ Op = DAG.getNode(ISD::SRL, DL, VT, Op,
+ DAG.getConstant(BitSize - 8, DL, VT));
return Op;
}
@@ -2501,23 +2505,23 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
Opcode = SystemZISD::ATOMIC_LOADW_ADD;
- Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
+ Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
}
// Get the address of the containing word.
SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
- DAG.getConstant(-4, PtrVT));
+ DAG.getConstant(-4, DL, PtrVT));
// Get the number of bits that the word must be rotated left in order
// to bring the field to the top bits of a GR32.
SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
- DAG.getConstant(3, PtrVT));
+ DAG.getConstant(3, DL, PtrVT));
BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
// Get the complementing shift amount, for rotating a field in the top
// bits back to its proper position.
SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
- DAG.getConstant(0, WideVT), BitShift);
+ DAG.getConstant(0, DL, WideVT), BitShift);
// Extend the source operand to 32 bits and prepare it for the inner loop.
// ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
@@ -2526,23 +2530,23 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
// bits must be set, while for other opcodes they should be left clear.
if (Opcode != SystemZISD::ATOMIC_SWAPW)
Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
- DAG.getConstant(32 - BitSize, WideVT));
+ DAG.getConstant(32 - BitSize, DL, WideVT));
if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
Opcode == SystemZISD::ATOMIC_LOADW_NAND)
Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
- DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));
+ DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
// Construct the ATOMIC_LOADW_* node.
SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
- DAG.getConstant(BitSize, WideVT) };
+ DAG.getConstant(BitSize, DL, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
NarrowVT, MMO);
// Rotate the result of the final CS so that the field is in the lower
// bits of a GR32, then truncate it.
SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
- DAG.getConstant(BitSize, WideVT));
+ DAG.getConstant(BitSize, DL, WideVT));
SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
@@ -2568,10 +2572,10 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
// available or the negative value is in the range of A(G)FHI.
int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
- NegSrc2 = DAG.getConstant(Value, MemVT);
+ NegSrc2 = DAG.getConstant(Value, DL, MemVT);
} else if (Subtarget.hasInterlockedAccess1())
// Use LAA(G) if available.
- NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT),
+ NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
Src2);
if (NegSrc2.getNode())
@@ -2610,23 +2614,23 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
// Get the address of the containing word.
SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
- DAG.getConstant(-4, PtrVT));
+ DAG.getConstant(-4, DL, PtrVT));
// Get the number of bits that the word must be rotated left in order
// to bring the field to the top bits of a GR32.
SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
- DAG.getConstant(3, PtrVT));
+ DAG.getConstant(3, DL, PtrVT));
BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
// Get the complementing shift amount, for rotating a field in the top
// bits back to its proper position.
SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
- DAG.getConstant(0, WideVT), BitShift);
+ DAG.getConstant(0, DL, WideVT), BitShift);
// Construct the ATOMIC_CMP_SWAPW node.
SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
- NegBitShift, DAG.getConstant(BitSize, WideVT) };
+ NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
VTList, Ops, NarrowVT, MMO);
return AtomicOp;
@@ -2655,15 +2659,16 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
// Just preserve the chain.
return Op.getOperand(0);
+ SDLoc DL(Op);
bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
SDValue Ops[] = {
Op.getOperand(0),
- DAG.getConstant(Code, MVT::i32),
+ DAG.getConstant(Code, DL, MVT::i32),
Op.getOperand(1)
};
- return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
+ return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
}
@@ -2671,10 +2676,11 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
// Return an i32 that contains the value of CC immediately after After,
// whose final operand must be MVT::Glue.
static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
+ SDLoc DL(After);
SDValue Glue = SDValue(After, After->getNumValues() - 1);
- SDValue IPM = DAG.getNode(SystemZISD::IPM, SDLoc(After), MVT::i32, Glue);
- return DAG.getNode(ISD::SRL, SDLoc(After), MVT::i32, IPM,
- DAG.getConstant(SystemZ::IPM_CC, MVT::i32));
+ SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+ return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
+ DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}
SDValue
@@ -2851,9 +2857,10 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
Inner.getOperand(0));
SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
- DAG.getConstant(NewShlAmt, ShiftVT));
+ DAG.getConstant(NewShlAmt, SDLoc(Inner),
+ ShiftVT));
return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
- DAG.getConstant(NewSraAmt, ShiftVT));
+ DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
}
}
}
diff --git a/llvm/lib/Target/SystemZ/SystemZOperands.td b/llvm/lib/Target/SystemZ/SystemZOperands.td
index 1b5b7d7bf32..94a682d1465 100644
--- a/llvm/lib/Target/SystemZ/SystemZOperands.td
+++ b/llvm/lib/Target/SystemZ/SystemZOperands.td
@@ -134,72 +134,79 @@ class BDLMode<string type, string bitsize, string dispsize, string suffix,
// Bits 0-15 (counting from the lsb).
def LL16 : SDNodeXForm<imm, [{
uint64_t Value = N->getZExtValue() & 0x000000000000FFFFULL;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// Bits 16-31 (counting from the lsb).
def LH16 : SDNodeXForm<imm, [{
uint64_t Value = (N->getZExtValue() & 0x00000000FFFF0000ULL) >> 16;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// Bits 32-47 (counting from the lsb).
def HL16 : SDNodeXForm<imm, [{
uint64_t Value = (N->getZExtValue() & 0x0000FFFF00000000ULL) >> 32;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// Bits 48-63 (counting from the lsb).
def HH16 : SDNodeXForm<imm, [{
uint64_t Value = (N->getZExtValue() & 0xFFFF000000000000ULL) >> 48;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// Low 32 bits.
def LF32 : SDNodeXForm<imm, [{
uint64_t Value = N->getZExtValue() & 0x00000000FFFFFFFFULL;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// High 32 bits.
def HF32 : SDNodeXForm<imm, [{
uint64_t Value = N->getZExtValue() >> 32;
- return CurDAG->getTargetConstant(Value, MVT::i64);
+ return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
}]>;
// Truncate an immediate to an 8-bit signed quantity.
def SIMM8 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(int8_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(int8_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Truncate an immediate to an 8-bit unsigned quantity.
def UIMM8 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(uint8_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(uint8_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Truncate an immediate to a 16-bit signed quantity.
def SIMM16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(int16_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(int16_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Truncate an immediate to a 16-bit unsigned quantity.
def UIMM16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(uint16_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(uint16_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Truncate an immediate to a 32-bit signed quantity.
def SIMM32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(int32_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(int32_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Truncate an immediate to a 32-bit unsigned quantity.
def UIMM32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(uint32_t(N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(uint32_t(N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
// Negate and then truncate an immediate to a 32-bit unsigned quantity.
def NEGIMM32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(uint32_t(-N->getZExtValue()), MVT::i64);
+ return CurDAG->getTargetConstant(uint32_t(-N->getZExtValue()), SDLoc(N),
+ MVT::i64);
}]>;
//===----------------------------------------------------------------------===//
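The TableGen changes above follow the same rule inside SDNodeXForm bodies: each immediate transform now passes SDLoc(N), the location of the constant node being rewritten, to getTargetConstant. A sketch of what one such C++ body amounts to as a standalone function; xformLow8 is a made-up name used only for illustration:

    // Rebuild the low 8 bits of an immediate as a target constant that keeps
    // the original node's debug location (mirrors the UIMM8 change above).
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue xformLow8(SelectionDAG *CurDAG, SDNode *N) {
      uint64_t Value = cast<ConstantSDNode>(N)->getZExtValue() & 0xFF;
      return CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i64);
    }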
diff --git a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index 12fc1981d7d..e7e0268dbb8 100644
--- a/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -46,10 +46,10 @@ static SDValue emitMemMem(SelectionDAG &DAG, SDLoc DL, unsigned Sequence,
// number of straight-line MVCs as 6 * 256 - 1.
if (Size > 6 * 256)
return DAG.getNode(Loop, DL, MVT::Other, Chain, Dst, Src,
- DAG.getConstant(Size, PtrVT),
- DAG.getConstant(Size / 256, PtrVT));
+ DAG.getConstant(Size, DL, PtrVT),
+ DAG.getConstant(Size / 256, DL, PtrVT));
return DAG.getNode(Sequence, DL, MVT::Other, Chain, Dst, Src,
- DAG.getConstant(Size, PtrVT));
+ DAG.getConstant(Size, DL, PtrVT));
}
SDValue SystemZSelectionDAGInfo::
@@ -78,7 +78,8 @@ static SDValue memsetStore(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
for (unsigned I = 1; I < Size; ++I)
StoreVal |= ByteVal << (I * 8);
return DAG.getStore(Chain, DL,
- DAG.getConstant(StoreVal, MVT::getIntegerVT(Size * 8)),
+ DAG.getConstant(StoreVal, DL,
+ MVT::getIntegerVT(Size * 8)),
Dst, DstPtrInfo, false, false, Align);
}
@@ -112,7 +113,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (Size2 == 0)
return Chain1;
Dst = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
- DAG.getConstant(Size1, PtrVT));
+ DAG.getConstant(Size1, DL, PtrVT));
DstPtrInfo = DstPtrInfo.getWithOffset(Size1);
SDValue Chain2 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size2,
std::min(Align, Size1), DstPtrInfo);
@@ -126,7 +127,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (Bytes == 1)
return Chain1;
SDValue Dst2 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
- DAG.getConstant(1, PtrVT));
+ DAG.getConstant(1, DL, PtrVT));
SDValue Chain2 = DAG.getStore(Chain, DL, Byte, Dst2,
DstPtrInfo.getWithOffset(1),
false, false, 1);
@@ -146,7 +147,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
false, false, Align);
SDValue DstPlus1 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
- DAG.getConstant(1, PtrVT));
+ DAG.getConstant(1, DL, PtrVT));
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
Chain, DstPlus1, Dst, Bytes - 1);
}
@@ -169,10 +170,10 @@ static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// needs 2 branches, whereas a straight-line sequence would need 3 or more.
if (Size > 3 * 256)
return DAG.getNode(SystemZISD::CLC_LOOP, DL, VTs, Chain, Src1, Src2,
- DAG.getConstant(Size, PtrVT),
- DAG.getConstant(Size / 256, PtrVT));
+ DAG.getConstant(Size, DL, PtrVT),
+ DAG.getConstant(Size / 256, DL, PtrVT));
return DAG.getNode(SystemZISD::CLC, DL, VTs, Chain, Src1, Src2,
- DAG.getConstant(Size, PtrVT));
+ DAG.getConstant(Size, DL, PtrVT));
}
// Convert the current CC value into an integer that is 0 if CC == 0,
@@ -182,9 +183,9 @@ static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
static SDValue addIPMSequence(SDLoc DL, SDValue Glue, SelectionDAG &DAG) {
SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
- DAG.getConstant(SystemZ::IPM_CC, MVT::i32));
+ DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
SDValue ROTL = DAG.getNode(ISD::ROTL, DL, MVT::i32, SRL,
- DAG.getConstant(31, MVT::i32));
+ DAG.getConstant(31, DL, MVT::i32));
return ROTL;
}
@@ -213,7 +214,7 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
Length = DAG.getZExtOrTrunc(Length, DL, PtrVT);
Char = DAG.getZExtOrTrunc(Char, DL, MVT::i32);
Char = DAG.getNode(ISD::AND, DL, MVT::i32, Char,
- DAG.getConstant(255, MVT::i32));
+ DAG.getConstant(255, DL, MVT::i32));
SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, Length);
SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
Limit, Src, Char);
@@ -222,9 +223,10 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// Now select between End and null, depending on whether the character
// was found.
- SDValue Ops[] = {End, DAG.getConstant(0, PtrVT),
- DAG.getConstant(SystemZ::CCMASK_SRST, MVT::i32),
- DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32), Glue};
+ SDValue Ops[] = {End, DAG.getConstant(0, DL, PtrVT),
+ DAG.getConstant(SystemZ::CCMASK_SRST, DL, MVT::i32),
+ DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, DL, MVT::i32),
+ Glue};
VTs = DAG.getVTList(PtrVT, MVT::Glue);
End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
return std::make_pair(End, Chain);
@@ -237,7 +239,7 @@ EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
SDVTList VTs = DAG.getVTList(Dest.getValueType(), MVT::Other);
SDValue EndDest = DAG.getNode(SystemZISD::STPCPY, DL, VTs, Chain, Dest, Src,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
return std::make_pair(isStpcpy ? EndDest : Dest, EndDest.getValue(1));
}
@@ -248,7 +250,7 @@ EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
MachinePointerInfo Op2PtrInfo) const {
SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::Other, MVT::Glue);
SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src1, Src2,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
Chain = Unused.getValue(1);
SDValue Glue = Chain.getValue(2);
return std::make_pair(addIPMSequence(DL, Glue, DAG), Chain);
@@ -265,7 +267,7 @@ static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG, SDLoc DL,
EVT PtrVT = Src.getValueType();
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
- Limit, Src, DAG.getConstant(0, MVT::i32));
+ Limit, Src, DAG.getConstant(0, DL, MVT::i32));
Chain = End.getValue(1);
SDValue Len = DAG.getNode(ISD::SUB, DL, PtrVT, End, Src);
return std::make_pair(Len, Chain);
@@ -275,7 +277,7 @@ std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, MachinePointerInfo SrcPtrInfo) const {
EVT PtrVT = Src.getValueType();
- return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, PtrVT));
+ return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, DL, PtrVT));
}
std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 5da7acfc6ef..0219430f812 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -233,14 +233,15 @@ namespace {
void EmitSpecialCodeForMain();
- inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
- SDValue &Scale, SDValue &Index,
- SDValue &Disp, SDValue &Segment) {
+ inline void getAddressOperands(X86ISelAddressMode &AM, SDLoc DL,
+ SDValue &Base, SDValue &Scale,
+ SDValue &Index, SDValue &Disp,
+ SDValue &Segment) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
? CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
TLI->getPointerTy())
: AM.Base_Reg;
- Scale = getI8Imm(AM.Scale);
+ Scale = getI8Imm(AM.Scale, DL);
Index = AM.IndexReg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
@@ -261,7 +262,7 @@ namespace {
Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
AM.SymbolFlags);
else
- Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
+ Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
if (AM.Segment.getNode())
Segment = AM.Segment;
@@ -271,14 +272,14 @@ namespace {
/// getI8Imm - Return a target constant with the specified value, of type
/// i8.
- inline SDValue getI8Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i8);
+ inline SDValue getI8Imm(unsigned Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
}
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
+ return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
@@ -801,11 +802,11 @@ static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
MVT VT = N.getSimpleValueType();
SDLoc DL(N);
- SDValue Eight = DAG.getConstant(8, MVT::i8);
- SDValue NewMask = DAG.getConstant(0xff, VT);
+ SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
+ SDValue NewMask = DAG.getConstant(0xff, DL, VT);
SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
- SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
+ SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
// Insert the new nodes into the topological ordering. We must do this in
@@ -849,7 +850,7 @@ static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
MVT VT = N.getSimpleValueType();
SDLoc DL(N);
- SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
+ SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
@@ -955,9 +956,9 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
X = NewX;
}
SDLoc DL(N);
- SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
+ SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
- SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
+ SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
// Insert the new nodes into the topological ordering. We must do this in
@@ -1198,7 +1199,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
}
// Ok, the transformation is legal and appears profitable. Go for it.
- SDValue Zero = CurDAG->getConstant(0, N.getValueType());
+ SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
AM.IndexReg = Neg;
AM.Scale = 1;
@@ -1357,7 +1358,7 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
if (!AM.IndexReg.getNode())
AM.IndexReg = CurDAG->getRegister(0, VT);
- getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
+ getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1413,7 +1414,7 @@ bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
if ((uint32_t)ImmVal != (uint64_t)ImmVal)
return false;
- Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
+ Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
return true;
}
@@ -1449,9 +1450,9 @@ bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
// Base could already be %rip, particularly in the x32 ABI.
Base = SDValue(CurDAG->getMachineNode(
TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64),
+ CurDAG->getTargetConstant(0, DL, MVT::i64),
Base,
- CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
0);
}
@@ -1463,9 +1464,10 @@ bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
"Expect to be extending 32-bit registers for use in LEA");
Index = SDValue(CurDAG->getMachineNode(
TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64),
+ CurDAG->getTargetConstant(0, DL, MVT::i64),
Index,
- CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ CurDAG->getTargetConstant(X86::sub_32bit, DL,
+ MVT::i32)),
0);
}
@@ -1531,7 +1533,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
if (Complexity <= 2)
return false;
- getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
+ getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1555,7 +1557,7 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}
- getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
+ getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1725,7 +1727,7 @@ static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
// an immediate operand to sub. However, it still fits in 32 bits for the
// add (since it is not negated) so we can return target-constant.
if (CNVal == INT32_MIN)
- return CurDAG->getTargetConstant(CNVal, NVT);
+ return CurDAG->getTargetConstant(CNVal, dl, NVT);
// For atomic-load-add, we could do some optimizations.
if (Op == ADD) {
// Translate to INC/DEC if ADD by 1 or -1.
@@ -1740,7 +1742,7 @@ static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
CNVal = -CNVal;
}
}
- return CurDAG->getTargetConstant(CNVal, NVT);
+ return CurDAG->getTargetConstant(CNVal, dl, NVT);
}
// If the value operand is single-used, try to optimize it.
@@ -2053,12 +2055,14 @@ SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
MVT::Other);
+ SDLoc DL(Node);
+
// Memory Operands: Base, Scale, Index, Disp, Segment
- SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
SDValue Segment = CurDAG->getRegister(0, MVT::i32);
- const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
+ const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue(), DL), VIdx,
Disp, Segment, VMask, Chain};
- SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
+ SDNode *ResNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
// Node has 2 outputs: VDst and MVT::Other.
// ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
// We replace VDst of Node with VDst of ResNode, and Other of Node with Other
@@ -2232,13 +2236,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Emit the smaller op and the shift.
- SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
+ SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
if (ShlVal == 1)
return CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
SDValue(New, 0));
return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
- getI8Imm(ShlVal));
+ getI8Imm(ShlVal, dl));
}
case X86ISD::UMUL8:
case X86ISD::SMUL8: {
@@ -2402,7 +2406,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Shift AX down 8 bits.
Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
Result,
- CurDAG->getTargetConstant(8, MVT::i8)), 0);
+ CurDAG->getTargetConstant(8, dl, MVT::i8)),
+ 0);
// Then truncate it down to i8.
ReplaceUses(SDValue(Node, 1),
CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
@@ -2522,7 +2527,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ClrNode =
SDValue(CurDAG->getMachineNode(
TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
- CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
+ CurDAG->getTargetConstant(X86::sub_16bit, dl,
+ MVT::i32)),
0);
break;
case MVT::i32:
@@ -2531,8 +2537,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ClrNode =
SDValue(CurDAG->getMachineNode(
TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
- CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
+ CurDAG->getTargetConstant(X86::sub_32bit, dl,
+ MVT::i32)),
0);
break;
default:
@@ -2584,8 +2591,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
Result =
SDValue(CurDAG->getMachineNode(
TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64), Result,
- CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
+ CurDAG->getTargetConstant(0, dl, MVT::i64), Result,
+ CurDAG->getTargetConstant(X86::sub_32bit, dl,
+ MVT::i32)),
0);
}
} else {
@@ -2642,7 +2650,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
(!(C->getZExtValue() & 0x80) ||
HasNoSignedComparisonUses(Node))) {
- SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
+ SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl, MVT::i8);
SDValue Reg = N0.getNode()->getOperand(0);
// On x86-32, only the ABCD registers have 8-bit subregisters.
@@ -2653,7 +2661,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
default: llvm_unreachable("Unsupported TEST operand type!");
}
- SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
+ SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
Reg.getValueType(), Reg, RC), 0);
}
@@ -2678,7 +2686,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
HasNoSignedComparisonUses(Node))) {
// Shift the immediate right by 8 bits.
SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
- MVT::i8);
+ dl, MVT::i8);
SDValue Reg = N0.getNode()->getOperand(0);
// Put the value in an ABCD register.
@@ -2689,7 +2697,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
default: llvm_unreachable("Unsupported TEST operand type!");
}
- SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
+ SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
Reg.getValueType(), Reg, RC), 0);
@@ -2714,7 +2722,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
N0.getValueType() != MVT::i16 &&
(!(C->getZExtValue() & 0x8000) ||
HasNoSignedComparisonUses(Node))) {
- SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
+ SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
+ MVT::i16);
SDValue Reg = N0.getNode()->getOperand(0);
// Extract the 16-bit subregister.
@@ -2736,7 +2745,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
N0.getValueType() == MVT::i64 &&
(!(C->getZExtValue() & 0x80000000) ||
HasNoSignedComparisonUses(Node))) {
- SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
+ MVT::i32);
SDValue Reg = N0.getNode()->getOperand(0);
// Extract the 32-bit subregister.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 079880ef4a1..cf3165e7513 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1892,7 +1892,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
SmallVector<SDValue, 6> RetOps;
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
- RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
+ RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
MVT::i16));
// Copy the result values into the output registers.
@@ -2095,7 +2095,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
if (CopyVT != VA.getValVT())
Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
// This truncation won't change the value.
- DAG.getIntPtrConstant(1));
+ DAG.getIntPtrConstant(1, dl));
InFlag = Chain.getValue(2);
InVals.push_back(Val);
@@ -2155,7 +2155,7 @@ static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
SDLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
@@ -2503,7 +2503,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Offset = FuncInfo->getVarArgsGPOffset();
for (SDValue Val : LiveGPRs) {
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
- DAG.getIntPtrConstant(Offset));
+ DAG.getIntPtrConstant(Offset, dl));
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo::getFixedStack(
@@ -2519,9 +2519,9 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SaveXMMOps.push_back(Chain);
SaveXMMOps.push_back(ALVal);
SaveXMMOps.push_back(DAG.getIntPtrConstant(
- FuncInfo->getRegSaveFrameIndex()));
+ FuncInfo->getRegSaveFrameIndex(), dl));
SaveXMMOps.push_back(DAG.getIntPtrConstant(
- FuncInfo->getVarArgsFPOffset()));
+ FuncInfo->getVarArgsFPOffset(), dl));
SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
LiveXMMRegs.end());
MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
@@ -2621,7 +2621,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
int UnwindHelpFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
SDValue StackSlot = DAG.getFrameIndex(UnwindHelpFI, MVT::i64);
MMI.getWinEHFuncInfo(MF.getFunction()).UnwindHelpFrameIdx = UnwindHelpFI;
- SDValue Neg2 = DAG.getConstant(-2, MVT::i64);
+ SDValue Neg2 = DAG.getConstant(-2, dl, MVT::i64);
Chain = DAG.getStore(Chain, dl, Neg2, StackSlot,
MachinePointerInfo::getFixedStack(UnwindHelpFI),
/*isVolatile=*/true,
@@ -2638,7 +2638,7 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
if (Flags.isByVal())
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
@@ -2784,7 +2784,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!IsSibcall)
Chain = DAG.getCALLSEQ_START(
- Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
+ Chain, DAG.getIntPtrConstant(NumBytesToPush, dl, true), dl);
SDValue RetAddrFrIdx;
// Load return address for tail calls.
@@ -2916,7 +2916,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
&& "SSE registers cannot be used when SSE is disabled");
RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
- DAG.getConstant(NumXMMRegs, MVT::i8)));
+ DAG.getConstant(NumXMMRegs, dl,
+ MVT::i8)));
}
if (isVarArg && IsMustTail) {
@@ -2960,7 +2961,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Flags.isByVal()) {
// Copy relative to framepointer.
- SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
+ SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl,
RegInfo->getStackRegister(),
@@ -3086,8 +3087,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!IsSibcall && isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(NumBytesToPop, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ DAG.getIntPtrConstant(NumBytesToPop, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
@@ -3095,7 +3096,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(Callee);
if (isTailCall)
- Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
+ Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
// Add argument registers to the end of the list so that they are known live
// into the call.
@@ -3144,8 +3145,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(NumBytesToPop, true),
- DAG.getIntPtrConstant(NumBytesForCalleeToPop,
+ DAG.getIntPtrConstant(NumBytesToPop, dl, true),
+ DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
true),
InFlag, dl);
InFlag = Chain.getValue(1);
@@ -3532,7 +3533,8 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
case X86ISD::PSHUFLW:
case X86ISD::VPERMILPI:
case X86ISD::VPERMI:
- return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
+ return DAG.getNode(Opc, dl, VT, V1,
+ DAG.getConstant(TargetMask, dl, MVT::i8));
}
}
@@ -3642,13 +3644,13 @@ static bool isX86CCUnsigned(unsigned X86CC) {
/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
/// specific condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
-static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
+static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP,
SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
if (!isFP) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
// X > -1 -> X == 0, jump !sign.
- RHS = DAG.getConstant(0, RHS.getValueType());
+ RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_NS;
}
if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
@@ -3657,7 +3659,7 @@ static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
}
if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
// X < 1 -> X <= 0
- RHS = DAG.getConstant(0, RHS.getValueType());
+ RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_LE;
}
}
@@ -3960,26 +3962,26 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
SDValue Vec;
if (VT.is128BitVector()) { // SSE
if (Subtarget->hasSSE2()) { // SSE2
- SDValue Cst = DAG.getConstant(0, MVT::i32);
+ SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
} else { // SSE1
- SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
+ SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
}
} else if (VT.is256BitVector()) { // AVX
if (Subtarget->hasInt256()) { // AVX2
- SDValue Cst = DAG.getConstant(0, MVT::i32);
+ SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
} else {
// 256-bit logic and arithmetic instructions in AVX are all
// floating-point, no support for integer ops. Emit fp zeroed vectors.
- SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
+ SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
}
} else if (VT.is512BitVector()) { // AVX-512
- SDValue Cst = DAG.getConstant(0, MVT::i32);
+ SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
@@ -3989,7 +3991,7 @@ static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
&& "Unexpected vector type");
assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8)
&& "Unexpected vector type");
- SDValue Cst = DAG.getConstant(0, MVT::i1);
+ SDValue Cst = DAG.getConstant(0, dl, MVT::i1);
SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst);
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
} else
@@ -4027,7 +4029,7 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
ElemsPerChunk));
- SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
+ SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
@@ -4071,7 +4073,7 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec,
unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
* ElemsPerChunk);
- SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
+ SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal, dl);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
@@ -4093,7 +4095,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
if (IdxVal == 0 && Result.getValueType().is256BitVector() &&
Result.getOpcode() != ISD::UNDEF) {
EVT ResultVT = Result.getValueType();
- SDValue ZeroIndex = DAG.getIntPtrConstant(0);
+ SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl);
SDValue Undef = DAG.getUNDEF(ResultVT);
SDValue Vec256 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Undef,
Vec, ZeroIndex);
@@ -4105,7 +4107,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
unsigned ScalarSize = ScalarType.getSizeInBits();
assert((ScalarSize == 64 || ScalarSize == 32) && "Unknown float type");
unsigned MaskVal = (ScalarSize == 64) ? 0x03 : 0x0f;
- SDValue Mask = DAG.getConstant(MaskVal, MVT::i8);
+ SDValue Mask = DAG.getConstant(MaskVal, dl, MVT::i8);
return DAG.getNode(X86ISD::BLENDI, dl, ResultVT, Result, Vec256, Mask);
}
@@ -4121,7 +4123,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
// will be created by InsertSubVector().
MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
- SDValue Mask = DAG.getConstant(0x0f, MVT::i8);
+ SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8);
Vec256 = DAG.getNode(ISD::BITCAST, dl, CastVT, Vec256);
Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask);
return DAG.getNode(ISD::BITCAST, dl, ResultVT, Vec256);
@@ -4162,7 +4164,7 @@ static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
SDLoc dl) {
assert(VT.isVector() && "Expected a vector type");
- SDValue Cst = DAG.getConstant(~0U, MVT::i32);
+ SDValue Cst = DAG.getConstant(~0U, dl, MVT::i32);
SDValue Vec;
if (VT.is256BitVector()) {
if (HasInt256) { // AVX2
@@ -4492,7 +4494,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
MVT::v16i8, V, Op.getOperand(i),
- DAG.getIntPtrConstant(i));
+ DAG.getIntPtrConstant(i, dl));
}
}
@@ -4520,7 +4522,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
if (ThisIsNonZero) {
ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
- ThisElt, DAG.getConstant(8, MVT::i8));
+ ThisElt, DAG.getConstant(8, dl, MVT::i8));
if (LastIsNonZero)
ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
} else
@@ -4528,7 +4530,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
if (ThisElt.getNode())
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
- DAG.getIntPtrConstant(i/2));
+ DAG.getIntPtrConstant(i/2, dl));
}
}
@@ -4560,7 +4562,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
MVT::v8i16, V, Op.getOperand(i),
- DAG.getIntPtrConstant(i));
+ DAG.getIntPtrConstant(i, dl));
}
}
@@ -4667,9 +4669,10 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
- SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
- DAG.getIntPtrConstant(InsertPSMask));
- return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
+ SDLoc DL(Op);
+ SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
+ DAG.getIntPtrConstant(InsertPSMask, DL));
+ return DAG.getNode(ISD::BITCAST, DL, VT, Result);
}
/// Return a vector logical shift node.
@@ -4682,7 +4685,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
assert(NumBits % 8 == 0 && "Only support byte sized shifts");
- SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
+ SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, ScalarShiftTy);
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
@@ -4739,9 +4742,11 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
if ((Offset % RequiredAlign) & 3)
return SDValue();
int64_t StartOffset = Offset & ~(RequiredAlign-1);
- if (StartOffset)
- Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
- Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
+ if (StartOffset) {
+ SDLoc DL(Ptr);
+ Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
+ DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
+ }
int EltNo = (Offset - StartOffset) >> 2;
unsigned NumElems = VT.getVectorNumElements();
@@ -5134,7 +5139,7 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
unsigned Idx = InsertIndices[i];
NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
- DAG.getIntPtrConstant(Idx));
+ DAG.getIntPtrConstant(Idx, DL));
}
return NV;
@@ -5150,13 +5155,13 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
+ SDValue Cst = DAG.getTargetConstant(0, dl, MVT::i1);
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
if (ISD::isBuildVectorAllOnes(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
+ SDValue Cst = DAG.getTargetConstant(1, dl, MVT::i1);
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
@@ -5186,15 +5191,15 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
if (AllContants) {
SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
- DAG.getConstant(Immediate, MVT::i16));
+ DAG.getConstant(Immediate, dl, MVT::i16));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
if (NumNonConsts == 1 && NonConstIdx != 0) {
SDValue DstVec;
if (NumConsts) {
- SDValue VecAsImm = DAG.getConstant(Immediate,
+ SDValue VecAsImm = DAG.getConstant(Immediate, dl,
MVT::getIntegerVT(VT.getSizeInBits()));
DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
}
@@ -5202,7 +5207,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
DstVec = DAG.getUNDEF(VT);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(NonConstIdx),
- DAG.getIntPtrConstant(NonConstIdx));
+ DAG.getIntPtrConstant(NonConstIdx, dl));
}
if (!IsSplat && (NonConstIdx != 0))
llvm_unreachable("Unsupported BUILD_VECTOR operation");
@@ -5210,12 +5215,12 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
SDValue Select;
if (IsSplat)
Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
- DAG.getConstant(-1, SelectVT),
- DAG.getConstant(0, SelectVT));
+ DAG.getConstant(-1, dl, SelectVT),
+ DAG.getConstant(0, dl, SelectVT));
else
Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
- DAG.getConstant((Immediate | 1), SelectVT),
- DAG.getConstant(Immediate, SelectVT));
+ DAG.getConstant((Immediate | 1), dl, SelectVT),
+ DAG.getConstant(Immediate, dl, SelectVT));
return DAG.getNode(ISD::BITCAST, dl, VT, Select);
}
@@ -5733,7 +5738,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (VT.is512BitVector()) {
SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
- Item, DAG.getIntPtrConstant(0));
+ Item, DAG.getIntPtrConstant(0, dl));
}
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Expected an SSE value type!");
@@ -5927,7 +5932,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
for (unsigned i = 1; i < NumElems; ++i) {
if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
- Op.getOperand(i), DAG.getIntPtrConstant(i));
+ Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
}
return Result;
}
@@ -6025,10 +6030,10 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
if (IsZeroV1 && IsZeroV2)
return getZeroVector(ResVT, Subtarget, DAG, dl);
- SDValue ZeroIdx = DAG.getIntPtrConstant(0);
+ SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
SDValue Undef = DAG.getUNDEF(ResVT);
unsigned NumElems = ResVT.getVectorNumElements();
- SDValue ShiftBits = DAG.getConstant(NumElems/2, MVT::i8);
+ SDValue ShiftBits = DAG.getConstant(NumElems/2, dl, MVT::i8);
V2 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V2, ZeroIdx);
V2 = DAG.getNode(X86ISD::VSHLI, dl, ResVT, V2, ShiftBits);
@@ -6196,7 +6201,7 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
-static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
SelectionDAG &DAG) {
assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
@@ -6209,7 +6214,7 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
- return DAG.getConstant(Imm, MVT::i8);
+ return DAG.getConstant(Imm, DL, MVT::i8);
}
/// \brief Try to emit a blend instruction for a shuffle using bit math.
@@ -6223,8 +6228,9 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
assert(VT.isInteger() && "Only supports integer vector types!");
MVT EltVT = VT.getScalarType();
int NumEltBits = EltVT.getSizeInBits();
- SDValue Zero = DAG.getConstant(0, EltVT);
- SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
+ SDValue Zero = DAG.getConstant(0, DL, EltVT);
+ SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
+ EltVT);
SmallVector<SDValue, 16> MaskOps;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
@@ -6270,7 +6276,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
case MVT::v4f64:
case MVT::v8f32:
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
- DAG.getConstant(BlendMask, MVT::i8));
+ DAG.getConstant(BlendMask, DL, MVT::i8));
case MVT::v4i64:
case MVT::v8i32:
@@ -6294,7 +6300,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
- DAG.getConstant(BlendMask, MVT::i8)));
+ DAG.getConstant(BlendMask, DL, MVT::i8)));
}
// FALLTHROUGH
case MVT::v8i16: {
@@ -6311,7 +6317,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
- DAG.getConstant(BlendMask, MVT::i8)));
+ DAG.getConstant(BlendMask, DL, MVT::i8)));
}
case MVT::v16i16: {
@@ -6325,7 +6331,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
if (RepeatedMask[i] >= 16)
BlendMask |= 1u << i;
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
- DAG.getConstant(BlendMask, MVT::i8));
+ DAG.getConstant(BlendMask, DL, MVT::i8));
}
}
// FALLTHROUGH
@@ -6357,7 +6363,8 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
for (int j = 0; j < Scale; ++j)
VSELECTMask.push_back(
Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
- : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
+ : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
+ MVT::i8));
V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
@@ -6551,7 +6558,8 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
- DAG.getConstant(Rotation * Scale, MVT::i8)));
+ DAG.getConstant(Rotation * Scale, DL,
+ MVT::i8)));
}
assert(VT.getSizeInBits() == 128 &&
@@ -6568,9 +6576,9 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
- DAG.getConstant(LoByteShift, MVT::i8));
+ DAG.getConstant(LoByteShift, DL, MVT::i8));
SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
- DAG.getConstant(HiByteShift, MVT::i8));
+ DAG.getConstant(HiByteShift, DL, MVT::i8));
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
}
@@ -6629,8 +6637,9 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
MVT EltVT = VT.getScalarType();
int NumEltBits = EltVT.getSizeInBits();
MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
- SDValue Zero = DAG.getConstant(0, IntEltVT);
- SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
+ SDValue Zero = DAG.getConstant(0, DL, IntEltVT);
+ SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
+ IntEltVT);
if (EltVT.isFloatingPoint()) {
Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
@@ -6727,7 +6736,8 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
"Illegal integer vector type");
V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
- V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
+ V = DAG.getNode(OpCode, DL, ShiftVT, V,
+ DAG.getConstant(ShiftAmt, DL, MVT::i8));
return DAG.getNode(ISD::BITCAST, DL, VT, V);
};
@@ -6781,19 +6791,19 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
- getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
if (AnyExt && EltBits == 16 && Scale > 2) {
int PSHUFDMask[4] = {0, -1, 0, -1};
InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
- getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
int PSHUFHWMask[4] = {1, -1, -1, -1};
return DAG.getNode(
ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
- getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
+ getV4X86ShuffleImm8ForMask(PSHUFHWMask, DL, DAG)));
}
// If this would require more than 2 unpack instructions to expand, use
@@ -6804,7 +6814,7 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
SDValue PSHUFBMask[16];
for (int i = 0; i < 16; ++i)
PSHUFBMask[i] =
- DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
+ DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, DL, MVT::i8);
InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
return DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
@@ -7062,7 +7072,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
V2 = DAG.getNode(
X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
DAG.getConstant(
- V2Index * EltVT.getSizeInBits()/8,
+ V2Index * EltVT.getSizeInBits()/8, DL,
DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
}
@@ -7222,7 +7232,7 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
// Insert the V2 element into the desired position.
SDLoc DL(Op);
return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
- DAG.getConstant(InsertPSMask, MVT::i8));
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
/// \brief Try to lower a shuffle as a permute of the inputs followed by an
@@ -7372,11 +7382,11 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
- DAG.getConstant(SHUFPDMask, MVT::i8));
+ DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
- return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
- DAG.getConstant(SHUFPDMask, MVT::i8));
+ return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V1,
+ DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
assert(Mask[1] >= 2 && "Non-canonicalized blend!");
@@ -7419,8 +7429,8 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
- return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
- DAG.getConstant(SHUFPDMask, MVT::i8));
+ return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
+ DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
/// \brief Handle lowering of 2-lane 64-bit integer shuffles.
@@ -7455,8 +7465,8 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
return DAG.getNode(
ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
- getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
+ DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
+ getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
}
assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
@@ -7590,7 +7600,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
int V1Index = V2AdjIndex;
int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
- getV4X86ShuffleImm8ForMask(BlendMask, DAG));
+ getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now proceed to reconstruct the final blend as we have the necessary
// high or low half formed.
@@ -7629,7 +7639,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
(Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
(Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
- getV4X86ShuffleImm8ForMask(BlendMask, DAG));
+ getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now we do a normal shuffle of V1 by giving V1 as both operands to
// a blend.
@@ -7641,7 +7651,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
}
}
return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
- getV4X86ShuffleImm8ForMask(NewMask, DAG));
+ getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}
/// \brief Lower 4-lane 32-bit floating point shuffles.
@@ -7681,13 +7691,13 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Otherwise, use a straight shuffle of a single input vector. We pass the
// input vector to both operands to simulate this with a SHUFPS.
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// There are special ways we can lower some single-element blends. However, we
@@ -7773,7 +7783,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask = UnpackHiMask;
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
@@ -7990,7 +8000,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
MVT::v8i16, V,
- getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
+ getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
for (int &M : Mask)
if (M != -1 && M == FixIdx)
@@ -8017,7 +8027,8 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
V = DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT,
DAG.getNode(ISD::BITCAST, DL, PSHUFDVT, V),
- getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL,
+ DAG)));
// Adjust the mask to match the new locations of A and B.
for (int &M : Mask)
@@ -8253,15 +8264,16 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// target half.
if (!isNoopShuffleMask(PSHUFLMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
- getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
+ getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFHMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
- getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
+ getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFDMask))
V = DAG.getNode(ISD::BITCAST, DL, VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT,
DAG.getNode(ISD::BITCAST, DL, PSHUFDVT, V),
- getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL,
+ DAG)));
// At this point, each half should contain all its inputs, and we can then
// just shuffle them into their final position.
@@ -8275,7 +8287,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// Do a half shuffle for the low mask.
if (!isNoopShuffleMask(LoMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
- getV4X86ShuffleImm8ForMask(LoMask, DAG));
+ getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
// Do a half shuffle with the high mask after shifting its values down.
for (int &M : HiMask)
@@ -8283,7 +8295,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
M -= 4;
if (!isNoopShuffleMask(HiMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
- getV4X86ShuffleImm8ForMask(HiMask, DAG));
+ getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
return V;
}
@@ -8313,8 +8325,8 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
: (Mask[i / Scale] - Size) * Scale + i % Scale;
if (Zeroable[i / Scale])
V1Idx = V2Idx = ZeroMask;
- V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
- V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
+ V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
+ V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
V1InUse |= (ZeroMask != V1Idx);
V2InUse |= (ZeroMask != V2Idx);
}
@@ -8757,7 +8769,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
SDValue ByteClearMask =
DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
- DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
+ DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
if (!IsSingleInput)
V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
@@ -8803,7 +8815,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Use a mask to drop the high bytes.
VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
- DAG.getConstant(0x00FF, MVT::v8i16));
+ DAG.getConstant(0x00FF, DL, MVT::v8i16));
// This will be a single vector shuffle instead of a blend so nuke VHiHalf.
VHiHalf = DAG.getUNDEF(MVT::v8i16);
@@ -8949,9 +8961,9 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
auto *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV) {
LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
- DAG.getIntPtrConstant(OrigSplitNumElements));
+ DAG.getIntPtrConstant(OrigSplitNumElements, DL));
} else {
SmallVector<SDValue, 16> LoOps, HiOps;
@@ -9140,7 +9152,7 @@ static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
// allow folding it into a memory operand.
unsigned PERMMask = 3 | 2 << 4;
SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
- V1, DAG.getConstant(PERMMask, MVT::i8));
+ V1, DAG.getConstant(PERMMask, DL, MVT::i8));
return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
}
@@ -9177,9 +9189,10 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
- OnlyUsesV1 ? V1 : V2, DAG.getIntPtrConstant(0));
+ OnlyUsesV1 ? V1 : V2,
+ DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
}
}
@@ -9228,7 +9241,7 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
}
return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
- DAG.getConstant(PermMask, MVT::i8));
+ DAG.getConstant(PermMask, DL, MVT::i8));
}
/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
@@ -9364,13 +9377,13 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
- DAG.getConstant(VPERMILPMask, MVT::i8));
+ DAG.getConstant(VPERMILPMask, DL, MVT::i8));
}
// With AVX2 we have direct support for this permutation.
if (Subtarget->hasAVX2())
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
@@ -9400,7 +9413,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
- DAG.getConstant(SHUFPDMask, MVT::i8));
+ DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
(Mask[1] == -1 || Mask[1] < 2) &&
@@ -9409,7 +9422,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
- DAG.getConstant(SHUFPDMask, MVT::i8));
+ DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@@ -9476,7 +9489,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ISD::BITCAST, DL, MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
- getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
}
@@ -9484,7 +9497,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// lanes.
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Try to use shift instructions.
if (SDValue Shift =
@@ -9554,7 +9567,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
- getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13}))
@@ -9581,7 +9594,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
- : DAG.getConstant(Mask[i], MVT::i32);
+ : DAG.getConstant(Mask[i], DL, MVT::i32);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
return DAG.getNode(
X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
@@ -9654,7 +9667,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
- getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 1, 9, 4, 12, 5, 13}))
@@ -9682,7 +9695,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
- : DAG.getConstant(Mask[i], MVT::i32);
+ : DAG.getConstant(Mask[i], DL, MVT::i32);
return DAG.getNode(
X86ISD::VPERMV, DL, MVT::v8i32,
DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
@@ -9779,8 +9792,8 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
int M = i < 8 ? Mask[i] : Mask[i] - 8;
assert(M >= 0 && M < 8 && "Invalid single-input mask!");
- PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
- PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
+ PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8);
+ PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8);
}
return DAG.getNode(
ISD::BITCAST, DL, MVT::v16i16,
@@ -9871,7 +9884,8 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
PSHUFBMask[i] =
Mask[i] < 0
? DAG.getUNDEF(MVT::i8)
- : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
+ : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL,
+ MVT::i8);
return DAG.getNode(
X86ISD::PSHUFB, DL, MVT::v32i8, V1,
@@ -10469,11 +10483,11 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
rc = getRegClassFor(MVT::v16i1);
unsigned MaxSift = rc->getSize()*8 - 1;
Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
- DAG.getConstant(MaxSift - IdxVal, MVT::i8));
+ DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
- DAG.getConstant(MaxSift, MVT::i8));
+ DAG.getConstant(MaxSift, dl, MVT::i8));
return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
SDValue
@@ -10500,10 +10514,10 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
getZeroVector(MaskVT, Subtarget, DAG, dl),
- Idx, DAG.getConstant(0, getPointerTy()));
+ Idx, DAG.getConstant(0, dl, getPointerTy()));
SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
- Perm, DAG.getConstant(0, getPointerTy()));
+ Perm, DAG.getConstant(0, dl, getPointerTy()));
}
return SDValue();
}
@@ -10523,7 +10537,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// IdxVal -= NumElems/2;
IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
- DAG.getConstant(IdxVal, MVT::i32));
+ DAG.getConstant(IdxVal, dl, MVT::i32));
}
assert(VecVT.is128BitVector() && "Unexpected vector length");
@@ -10565,7 +10579,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
if (VT.getSizeInBits() == 64) {
@@ -10584,7 +10598,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
DAG.getUNDEF(VVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
return SDValue();
@@ -10615,13 +10629,13 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
if (Vec.getOpcode() == ISD::UNDEF)
return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
- DAG.getConstant(IdxVal, MVT::i8));
+ DAG.getConstant(IdxVal, dl, MVT::i8));
const TargetRegisterClass* rc = getRegClassFor(VecVT);
unsigned MaxSift = rc->getSize()*8 - 1;
EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
- DAG.getConstant(MaxSift, MVT::i8));
+ DAG.getConstant(MaxSift, dl, MVT::i8));
EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
- DAG.getConstant(MaxSift - IdxVal, MVT::i8));
+ DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
@@ -10654,7 +10668,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
(Subtarget->hasAVX2() && EltVT == MVT::i32)) {
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
- N2 = DAG.getIntPtrConstant(1);
+ N2 = DAG.getIntPtrConstant(1, dl);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
}
}
@@ -10667,7 +10681,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
- DAG.getConstant(IdxIn128, MVT::i32));
+ DAG.getConstant(IdxIn128, dl, MVT::i32));
// Insert the changed part back into the bigger vector
return Insert128BitVector(N0, V, IdxVal, DAG, dl);
@@ -10689,7 +10703,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
- N2 = DAG.getIntPtrConstant(IdxVal);
+ N2 = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(Opc, dl, VT, N0, N1, N2);
}
@@ -10713,11 +10727,11 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// But if optimizing for size and there's a load folding opportunity,
// generate insertps because blendps does not have a 32-bit memory
// operand form.
- N2 = DAG.getIntPtrConstant(1);
+ N2 = DAG.getIntPtrConstant(1, dl);
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
}
- N2 = DAG.getIntPtrConstant(IdxVal << 4);
+ N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
// Create this as a scalar to vector..
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
@@ -10738,7 +10752,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
- N2 = DAG.getIntPtrConstant(IdxVal);
+ N2 = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
}
return SDValue();
@@ -10847,10 +10861,10 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
if (OpVT.getVectorElementType() == MVT::i1) {
if (IdxVal == 0 && Vec.getOpcode() == ISD::UNDEF) // the operation is legal
return Op;
- SDValue ZeroIdx = DAG.getIntPtrConstant(0);
+ SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
SDValue Undef = DAG.getUNDEF(OpVT);
unsigned NumElems = OpVT.getVectorNumElements();
- SDValue ShiftBits = DAG.getConstant(NumElems/2, MVT::i8);
+ SDValue ShiftBits = DAG.getConstant(NumElems/2, dl, MVT::i8);
if (IdxVal == OpVT.getVectorNumElements() / 2) {
// Zero upper bits of the Vec
@@ -11065,7 +11079,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
// addition for it.
if (Offset != 0)
Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
- DAG.getConstant(Offset, getPointerTy()));
+ DAG.getConstant(Offset, dl, getPointerTy()));
return Result;
}
@@ -11180,7 +11194,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
is64Bit ? 257 : 256));
SDValue ThreadPointer =
- DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
+ DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
MachinePointerInfo(Ptr), false, false, false, 0);
unsigned char OperandFlags = 0;
@@ -11322,9 +11336,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
SDValue TlsArray =
Subtarget->is64Bit()
- ? DAG.getIntPtrConstant(0x58)
+ ? DAG.getIntPtrConstant(0x58, dl)
: (Subtarget->isTargetWindowsGNU()
- ? DAG.getIntPtrConstant(0x2C)
+ ? DAG.getIntPtrConstant(0x2C, dl)
: DAG.getExternalSymbol("_tls_array", getPointerTy()));
SDValue ThreadPointer =
@@ -11341,7 +11355,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
false, false, false, 0);
- SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
+ SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), dl,
getPointerTy());
IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
@@ -11378,10 +11392,10 @@ static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
// generic ISD nodes haven't. Insert an AND to be safe, it's optimized away
// during isel.
SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
- DAG.getConstant(VTBits - 1, MVT::i8));
+ DAG.getConstant(VTBits - 1, dl, MVT::i8));
SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
- DAG.getConstant(VTBits - 1, MVT::i8))
- : DAG.getConstant(0, VT);
+ DAG.getConstant(VTBits - 1, dl, MVT::i8))
+ : DAG.getConstant(0, dl, VT);
SDValue Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
@@ -11396,12 +11410,12 @@ static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
// rely on the results of shld/shrd. Insert a test and select the appropriate
// values for large shift amounts.
SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
- DAG.getConstant(VTBits, MVT::i8));
+ DAG.getConstant(VTBits, dl, MVT::i8));
SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
- AndNode, DAG.getConstant(0, MVT::i8));
+ AndNode, DAG.getConstant(0, dl, MVT::i8));
SDValue Hi, Lo;
- SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ SDValue CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
@@ -11580,7 +11594,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
}
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
@@ -11588,7 +11602,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
// FP constant to bias correct the final result.
- SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
+ SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
MVT::f64);
// Load the 32-bit value into an XMM register.
@@ -11600,7 +11614,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
// Or the load with the bias.
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
@@ -11612,7 +11626,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
MVT::v2f64, Bias)));
Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
// Subtract the bias.
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
@@ -11622,7 +11636,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
if (DestVT.bitsLT(MVT::f64))
return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
if (DestVT.bitsGT(MVT::f64))
return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
@@ -11667,20 +11681,20 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// -- v >> 16
// Create the splat vector for 0x4b000000.
- SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
+ SDValue CstLow = DAG.getConstant(0x4b000000, DL, MVT::i32);
SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
CstLow, CstLow, CstLow, CstLow};
SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
makeArrayRef(&CstLowArray[0], NumElts));
// Create the splat vector for 0x53000000.
- SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
+ SDValue CstHigh = DAG.getConstant(0x53000000, DL, MVT::i32);
SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
CstHigh, CstHigh, CstHigh, CstHigh};
SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
makeArrayRef(&CstHighArray[0], NumElts));
// Create the right shift.
- SDValue CstShift = DAG.getConstant(16, MVT::i32);
+ SDValue CstShift = DAG.getConstant(16, DL, MVT::i32);
SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
CstShift, CstShift, CstShift, CstShift};
SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
@@ -11697,7 +11711,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// Low will be bitcasted right away, so do not bother bitcasting back to its
// original type.
Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
- VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
+ VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
// uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
// (uint4) 0x53000000, 0xaa);
SDValue VecCstHighBitcast =
@@ -11707,9 +11721,9 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// High will be bitcasted right away, so do not bother bitcasting back to
// its original type.
High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
- VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
+ VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
} else {
- SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
+ SDValue CstMask = DAG.getConstant(0xffff, DL, MVT::i32);
SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
CstMask, CstMask, CstMask);
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
@@ -11722,7 +11736,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
SDValue CstFAdd = DAG.getConstantFP(
- APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
+ APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, MVT::f32);
SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
CstFAdd, CstFAdd, CstFAdd, CstFAdd};
SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
@@ -11787,13 +11801,13 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
if (SrcVT == MVT::i32) {
- SDValue WordOff = DAG.getConstant(4, getPointerTy());
+ SDValue WordOff = DAG.getConstant(4, dl, getPointerTy());
SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
getPointerTy(), StackSlot, WordOff);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
StackSlot, MachinePointerInfo(),
false, false, 0);
- SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
+ SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
OffsetSlot, MachinePointerInfo(),
false, false, 0);
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
@@ -11825,8 +11839,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// Check whether the sign bit is set.
SDValue SignSet = DAG.getSetCC(dl,
getSetCCResultType(*DAG.getContext(), MVT::i64),
- Op.getOperand(0), DAG.getConstant(0, MVT::i64),
- ISD::SETLT);
+ Op.getOperand(0),
+ DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
SDValue FudgePtr = DAG.getConstantPool(
@@ -11834,8 +11848,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
getPointerTy());
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
- SDValue Zero = DAG.getIntPtrConstant(0);
- SDValue Four = DAG.getIntPtrConstant(4);
+ SDValue Zero = DAG.getIntPtrConstant(0, dl);
+ SDValue Four = DAG.getIntPtrConstant(4, dl);
SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
Zero, Four);
FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
@@ -11847,7 +11861,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
MVT::f32, false, false, false, 4);
// Extend everything to 80 bits to force it to be done on x87.
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
- return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
+ return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
+ DAG.getIntPtrConstant(0, dl));
}
std::pair<SDValue,SDValue>
@@ -12003,7 +12018,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Now we have only mask extension
assert(InVT.getVectorElementType() == MVT::i1);
- SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
+ SDValue Cst = DAG.getTargetConstant(1, DL, ExtVT.getScalarType());
const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
@@ -12096,7 +12111,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
InVT = ExtVT;
}
- SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
+ SDValue Cst = DAG.getTargetConstant(1, DL, InVT.getVectorElementType());
const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
SDValue CP = DAG.getConstantPool(C, getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
@@ -12116,13 +12131,13 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
ShufMask);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
- DAG.getIntPtrConstant(2));
+ DAG.getIntPtrConstant(2, DL));
OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
static const int ShufMask[] = {0, 2, 4, 6};
@@ -12136,16 +12151,16 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SmallVector<SDValue,32> pshufbMask;
for (unsigned i = 0; i < 2; ++i) {
- pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
- pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x0, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x1, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x4, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x5, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x8, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x9, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0xc, DL, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0xd, DL, MVT::i8));
for (unsigned j = 0; j < 8; ++j)
- pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+ pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
@@ -12155,15 +12170,15 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
&ShufMask[0]);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::BITCAST, DL, VT, In);
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
- DAG.getIntPtrConstant(4));
+ DAG.getIntPtrConstant(4, DL));
OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
@@ -12202,7 +12217,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
DAG.getNode(ISD::BITCAST, DL, NVT, In),
DAG.getUNDEF(NVT), &MaskVec[0]);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
}
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
@@ -12339,7 +12354,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
}
// And if it is bigger, shrink it first.
if (SrcVT.bitsGT(VT)) {
- Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
+ Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1, dl));
SrcVT = VT;
}
@@ -12398,8 +12413,8 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
// Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
- DAG.getConstant(1, VT));
- return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
+ DAG.getConstant(1, dl, VT));
+ return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, dl, VT));
}
// Check whether an OR'd tree is PTEST-able.
@@ -12520,7 +12535,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
if (Op.getValueType() == MVT::i1) {
SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
- DAG.getConstant(0, MVT::i8));
+ DAG.getConstant(0, dl, MVT::i8));
}
// CF and OF aren't always set the way we want. Determine which
// of these we need.
@@ -12564,7 +12579,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
// return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
// DAG.getConstant(0, MVT::i1));
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
- DAG.getConstant(0, Op.getValueType()));
+ DAG.getConstant(0, dl, Op.getValueType()));
}
unsigned Opcode = 0;
unsigned NumOperands = 0;
@@ -12652,7 +12667,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
if (!Mask.isSignedIntN(32)) // Avoid large immediates.
break;
SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
- DAG.getConstant(Mask, VT));
+ DAG.getConstant(Mask, dl, VT));
DAG.ReplaceAllUsesWith(Op, New);
Op = New;
}
@@ -12738,7 +12753,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
if (Opcode == 0)
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
- DAG.getConstant(0, Op.getValueType()));
+ DAG.getConstant(0, dl, Op.getValueType()));
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
@@ -12803,7 +12818,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
- DAG.getConstant(8, MVT::i8));
+ DAG.getConstant(8, dl, MVT::i8));
SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
@@ -12926,7 +12941,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
// Use BT if the immediate can't be encoded in a TEST instruction.
if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
LHS = AndLHS;
- RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
+ RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
}
}
@@ -12948,7 +12963,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(Cond, MVT::i8), BT);
+ DAG.getConstant(Cond, dl, MVT::i8), BT);
}
return SDValue();
@@ -13040,16 +13055,16 @@ static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
"Unexpected type for boolean compare operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0,
- DAG.getConstant(-1, VT));
+ DAG.getConstant(-1, dl, VT));
SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1,
- DAG.getConstant(-1, VT));
+ DAG.getConstant(-1, dl, VT));
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE:
// (x != y) -> ~(x ^ y)
return DAG.getNode(ISD::XOR, dl, VT,
DAG.getNode(ISD::XOR, dl, VT, Op0, Op1),
- DAG.getConstant(-1, VT));
+ DAG.getConstant(-1, dl, VT));
case ISD::SETEQ:
// (x == y) -> (x ^ y)
return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1);
@@ -13109,7 +13124,7 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(Opc, dl, VT, Op0, Op1);
Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
return DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(SSECC, MVT::i8));
+ DAG.getConstant(SSECC, dl, MVT::i8));
}
/// \brief Try to turn a VSETULT into a VSETULE by modifying its second
@@ -13136,7 +13151,7 @@ static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
if (Val == 0)
return SDValue();
- ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
+ ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
@@ -13176,14 +13191,14 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
}
SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(CC0, MVT::i8));
+ DAG.getConstant(CC0, dl, MVT::i8));
SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(CC1, MVT::i8));
+ DAG.getConstant(CC1, dl, MVT::i8));
return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
}
// Handle all other FP comparisons here.
return DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(SSECC, MVT::i8));
+ DAG.getConstant(SSECC, dl, MVT::i8));
}
// Break 256-bit integer vector compare into smaller ones.
@@ -13304,10 +13319,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// compare is always unsigned.
SDValue SB;
if (FlipSigns) {
- SB = DAG.getConstant(0x80000000U, MVT::v4i32);
+ SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32);
} else {
- SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
- SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
+ SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
+ SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Sign, Zero, Sign, Zero);
}
@@ -13362,7 +13377,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// bits of the inputs before performing those operations.
if (FlipSigns) {
EVT EltVT = VT.getVectorElementType();
- SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
+ SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl,
+ VT);
Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
}
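The FlipSigns path above relies on a standard identity: XORing both operands with the sign bit biases every lane by 2^(bits-1), which turns a signed comparison into an unsigned one (and vice versa). A scalar sketch of the identity, not part of the patch:

    #include <cstdint>

    // Scalar check of the FlipSigns trick used for the vector compare above.
    bool signedLtViaUnsigned(uint32_t a, uint32_t b) {
      const uint32_t SignBit = 0x80000000u;
      return (a ^ SignBit) < (b ^ SignBit);   // equals (int32_t)a < (int32_t)b
    }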
@@ -13430,7 +13446,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
CCode = X86::GetOppositeBranchCondition(CCode);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(CCode, MVT::i8),
+ DAG.getConstant(CCode, dl, MVT::i8),
Op0.getOperand(1));
if (VT == MVT::i1)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
@@ -13442,18 +13458,18 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
- return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
+ return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
}
bool isFP = Op1.getSimpleValueType().isFloatingPoint();
- unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
+ unsigned X86CC = TranslateX86CC(CC, dl, isFP, Op0, Op1, DAG);
if (X86CC == X86::COND_INVALID)
return SDValue();
SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), EFLAGS);
+ DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS);
if (VT == MVT::i1)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
return SetCC;
@@ -13518,12 +13534,12 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (SSECC != 8) {
if (Subtarget->hasAVX512()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
- DAG.getConstant(SSECC, MVT::i8));
+ DAG.getConstant(SSECC, DL, MVT::i8));
return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
}
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
- DAG.getConstant(SSECC, MVT::i8));
+ DAG.getConstant(SSECC, DL, MVT::i8));
// If we have AVX, we can use a variable vector select (VBLENDV) instead
// of 3 logic instructions for size savings and potentially speed.
@@ -13555,7 +13571,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue VSel = DAG.getNode(ISD::VSELECT, DL, VecVT, VCmp, VOp1, VOp2);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
- VSel, DAG.getIntPtrConstant(0));
+ VSel, DAG.getIntPtrConstant(0, DL));
}
SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
@@ -13593,21 +13609,22 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
(isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
- DAG.getConstant(0, CmpOp0.getValueType()),
+ DAG.getConstant(0, DL,
+ CmpOp0.getValueType()),
CmpOp0);
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
- DAG.getConstant(X86::COND_B, MVT::i8),
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
SDValue(Neg.getNode(), 1));
return Res;
}
Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
- CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
+ CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
SDValue Res = // Res = 0 or -1.
DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
- DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
+ DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp);
if (isAllOnes(Op1) != (CondCode == X86::COND_E))
Res = DAG.getNOT(DL, Res, Res.getValueType());
@@ -13679,7 +13696,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
else
Cond = X86Op.getValue(1);
- CC = DAG.getConstant(X86Cond, MVT::i8);
+ CC = DAG.getConstant(X86Cond, DL, MVT::i8);
addTest = false;
}
@@ -13701,7 +13718,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
if (addTest) {
- CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
}
@@ -13716,7 +13733,8 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
- DAG.getConstant(X86::COND_B, MVT::i8), Cond);
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
+ Cond);
if (isAllOnes(Op1) != (CondCode == X86::COND_B))
return DAG.getNOT(DL, Res, Res.getValueType());
return Res;
@@ -13976,7 +13994,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
SmallVector<SDValue, 8> Chains;
SDValue Ptr = Ld->getBasePtr();
SDValue Increment =
- DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
+ DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl, TLI.getPointerTy());
SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
for (unsigned i = 0; i < NumLoads; ++i) {
@@ -13992,7 +14010,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
else
Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
- ScalarLoad, DAG.getIntPtrConstant(i));
+ ScalarLoad, DAG.getIntPtrConstant(i, dl));
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
}
@@ -14032,7 +14050,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
MemVT.getVectorElementType().getSizeInBits();
Shuff =
- DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
+ DAG.getNode(ISD::SRA, dl, RegVT, Shuff,
+ DAG.getConstant(Amt, dl, RegVT));
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Shuff;
@@ -14199,7 +14218,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
else
Cond = X86Op.getValue(1);
- CC = DAG.getConstant(X86Cond, MVT::i8);
+ CC = DAG.getConstant(X86Cond, dl, MVT::i8);
addTest = false;
} else {
unsigned CondOpc;
@@ -14230,7 +14249,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
- CC = DAG.getConstant(CCode, MVT::i8);
+ CC = DAG.getConstant(CCode, dl, MVT::i8);
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
@@ -14248,7 +14267,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
- CC = DAG.getConstant(CCode, MVT::i8);
+ CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
@@ -14261,7 +14280,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
- CC = DAG.getConstant(CCode, MVT::i8);
+ CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cond.getOperand(0).getOperand(1);
addTest = false;
} else if (Cond.getOpcode() == ISD::SETCC &&
@@ -14287,10 +14306,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
- CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
- CC = DAG.getConstant(X86::COND_P, MVT::i8);
+ CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
@@ -14317,10 +14336,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
- CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
- CC = DAG.getConstant(X86::COND_NP, MVT::i8);
+ CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
Cond = Cmp;
addTest = false;
Dest = FalseBB;
@@ -14348,7 +14367,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
- CC = DAG.getConstant(X86Cond, MVT::i8);
+ CC = DAG.getConstant(X86Cond, dl, MVT::i8);
Cond = EmitTest(Cond, X86Cond, dl, DAG);
}
Cond = ConvertCmpIfNecessary(Cond, DAG);
@@ -14385,7 +14404,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// Chain the dynamic stack allocation so that it doesn't modify the stack
// pointer when other instructions are using the stack.
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true),
SDLoc(Node));
SDValue Size = Tmp2.getOperand(1);
@@ -14397,11 +14416,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
- DAG.getConstant(-(uint64_t)Align, VT));
+ DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
- Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
- DAG.getIntPtrConstant(0, true), SDValue(),
+ Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), SDValue(),
SDLoc(Node));
SDValue Ops[2] = { Tmp1, Tmp2 };
@@ -14457,7 +14476,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
if (Align) {
SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
- DAG.getConstant(-(uint64_t)Align, VT));
+ DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
}
@@ -14492,22 +14511,22 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// Store gp_offset
SDValue Store = DAG.getStore(Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
- MVT::i32),
+ DL, MVT::i32),
FIN, MachinePointerInfo(SV), false, false, 0);
MemOps.push_back(Store);
// Store fp_offset
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
- FIN, DAG.getIntPtrConstant(4));
+ FIN, DAG.getIntPtrConstant(4, DL));
Store = DAG.getStore(Op.getOperand(0), DL,
- DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
+ DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL,
MVT::i32),
FIN, MachinePointerInfo(SV, 4), false, false, 0);
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
- FIN, DAG.getIntPtrConstant(4));
+ FIN, DAG.getIntPtrConstant(4, DL));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
@@ -14517,7 +14536,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
- FIN, DAG.getIntPtrConstant(8));
+ FIN, DAG.getIntPtrConstant(8, DL));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
getPointerTy());
Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
@@ -14567,9 +14586,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// Insert VAARG_64 node into the DAG
// VAARG_64 returns two values: Variable Argument Address, Chain
- SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32),
- DAG.getConstant(ArgMode, MVT::i8),
- DAG.getConstant(Align, MVT::i32)};
+ SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
+ DAG.getConstant(ArgMode, dl, MVT::i8),
+ DAG.getConstant(Align, dl, MVT::i32)};
SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
VTs, InstOps, MVT::i64,
@@ -14600,7 +14619,7 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
SDLoc DL(Op);
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
- DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
+ DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
false, false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
@@ -14621,7 +14640,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
if (Opc == X86ISD::VSRAI)
ShiftAmt = ElementType.getSizeInBits() - 1;
else
- return DAG.getConstant(0, VT);
+ return DAG.getConstant(0, dl, VT);
}
assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
@@ -14646,7 +14665,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
- Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
+ Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRLI:
@@ -14658,7 +14677,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
- Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
+ Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRAI:
@@ -14670,7 +14689,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
- Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
+ Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
}
break;
}
@@ -14678,7 +14697,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
}
- return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
+ return DAG.getNode(Opc, dl, VT, SrcOp,
+ DAG.getConstant(ShiftAmt, dl, MVT::i8));
}
// getTargetVShiftNode - Handle vector element shifts where the shift amount
@@ -14716,7 +14736,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
SmallVector<SDValue, 4> ShOps;
ShOps.push_back(ShAmt);
if (SVT == MVT::i32) {
- ShOps.push_back(DAG.getConstant(0, SVT));
+ ShOps.push_back(DAG.getConstant(0, dl, SVT));
ShOps.push_back(DAG.getUNDEF(SVT));
}
ShOps.push_back(DAG.getUNDEF(SVT));
@@ -14757,7 +14777,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
// are extracted by EXTRACT_SUBVECTOR.
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
switch (Op.getOpcode()) {
default: break;
@@ -14915,22 +14935,23 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Op.getOperand(2));
}
SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
- DAG.getTargetConstant(0, MaskVT),
+ DAG.getTargetConstant(0, dl,
+ MaskVT),
Subtarget, DAG);
SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
DAG.getUNDEF(BitcastVT), CmpMask,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
+ unsigned X86CC = TranslateX86CC(CC, dl, true, LHS, RHS, DAG);
assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond);
+ DAG.getConstant(X86CC, dl, MVT::i8), Cond);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case VSHIFT:
@@ -14957,7 +14978,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDLoc dl(Op);
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
PassThru);
@@ -14972,7 +14993,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDLoc dl(Op);
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
Op.getOperand(2));
}
@@ -15062,7 +15083,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDValue RHS = Op.getOperand(2);
unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
- SDValue CC = DAG.getConstant(X86CC, MVT::i8);
+ SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
@@ -15071,7 +15092,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
- SDValue CC = DAG.getConstant(X86CC, MVT::i8);
+ SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
@@ -15136,7 +15157,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8),
+ DAG.getConstant(X86CC, dl, MVT::i8),
SDValue(PCMP.getNode(), 1));
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
@@ -15163,17 +15184,17 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDLoc dl(Op);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
assert(C && "Invalid scale type");
- SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
EVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
- MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else
MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
- SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
if (Src.getOpcode() == ISD::UNDEF)
Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
@@ -15189,15 +15210,15 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDLoc dl(Op);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
assert(C && "Invalid scale type");
- SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
- SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
+ SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
EVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
- MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else
MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
@@ -15212,15 +15233,15 @@ static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDLoc dl(Op);
ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
assert(C && "Invalid scale type");
- SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
- SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
+ SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
EVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
SDValue MaskInReg;
ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
if (MaskC)
- MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
+ MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), dl, MaskVT);
else
MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
//SDVTList VTs = DAG.getVTList(MVT::Other);
@@ -15261,7 +15282,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
// The EAX register is loaded with the low-order 32 bits. The EDX register
// is loaded with the supported high-order bits of the counter.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
- DAG.getConstant(32, MVT::i8));
+ DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
@@ -15315,7 +15336,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
// The EDX register is loaded with the high-order 32 bits of the MSR, and
// the EAX register is loaded with the low-order 32 bits.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
- DAG.getConstant(32, MVT::i8));
+ DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
@@ -15360,8 +15381,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, casted to i32.
SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
- DAG.getConstant(1, Op->getValueType(1)),
- DAG.getConstant(X86::COND_B, MVT::i32),
+ DAG.getConstant(1, dl, Op->getValueType(1)),
+ DAG.getConstant(X86::COND_B, dl, MVT::i32),
SDValue(Result.getNode(), 1) };
SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
DAG.getVTList(Op->getValueType(1), MVT::Glue),
@@ -15421,7 +15442,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86::COND_NE, MVT::i8),
+ DAG.getConstant(X86::COND_NE, dl, MVT::i8),
InTrans);
SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
@@ -15433,14 +15454,14 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
- DAG.getConstant(-1, MVT::i8));
+ DAG.getConstant(-1, dl, MVT::i8));
SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
Op.getOperand(4), GenCF.getValue(1));
SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
Op.getOperand(5), MachinePointerInfo(),
false, false, 0);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86::COND_B, MVT::i8),
+ DAG.getConstant(X86::COND_B, dl, MVT::i8),
Res.getValue(1));
Results.push_back(SetCC);
Results.push_back(Store);
@@ -15464,7 +15485,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
Mask.getValueType().getSizeInBits());
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
DataToCompress, DAG.getUNDEF(VT));
@@ -15488,7 +15509,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
Mask.getValueType().getSizeInBits());
SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
false, false, false, 0);
@@ -15516,7 +15537,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
+ SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT,
FrameAddr, Offset),
@@ -15584,7 +15605,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName,
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
+ return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -15603,7 +15624,8 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
- DAG.getIntPtrConstant(RegInfo->getSlotSize()));
+ DAG.getIntPtrConstant(RegInfo->getSlotSize(),
+ dl));
StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
@@ -15658,12 +15680,12 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// Load the pointer to the nested function into R11.
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
- OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+ OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr),
false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
- DAG.getConstant(2, MVT::i64));
+ DAG.getConstant(2, dl, MVT::i64));
OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
MachinePointerInfo(TrmpAddr, 2),
false, false, 2);
@@ -15672,13 +15694,13 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// R10 is specified in X86CallingConv.td
OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
- DAG.getConstant(10, MVT::i64));
- OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+ DAG.getConstant(10, dl, MVT::i64));
+ OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 10),
false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
- DAG.getConstant(12, MVT::i64));
+ DAG.getConstant(12, dl, MVT::i64));
OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
MachinePointerInfo(TrmpAddr, 12),
false, false, 2);
@@ -15686,16 +15708,16 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// Jump to the nested function.
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
- DAG.getConstant(20, MVT::i64));
- OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+ DAG.getConstant(20, dl, MVT::i64));
+ OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 20),
false, false, 0);
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
- DAG.getConstant(22, MVT::i64));
- OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
- MachinePointerInfo(TrmpAddr, 22),
+ DAG.getConstant(22, dl, MVT::i64));
+ OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
+ Addr, MachinePointerInfo(TrmpAddr, 22),
false, false, 0);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
@@ -15748,32 +15770,32 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDValue Addr, Disp;
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(10, MVT::i32));
+ DAG.getConstant(10, dl, MVT::i32));
Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
OutChains[0] = DAG.getStore(Root, dl,
- DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
+ DAG.getConstant(MOV32ri|N86Reg, dl, MVT::i8),
Trmp, MachinePointerInfo(TrmpAddr),
false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
MachinePointerInfo(TrmpAddr, 1),
false, false, 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(5, MVT::i32));
- OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
- MachinePointerInfo(TrmpAddr, 5),
+ DAG.getConstant(5, dl, MVT::i32));
+ OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
+ Addr, MachinePointerInfo(TrmpAddr, 5),
false, false, 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(6, MVT::i32));
+ DAG.getConstant(6, dl, MVT::i32));
OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
MachinePointerInfo(TrmpAddr, 6),
false, false, 1);
@@ -15830,20 +15852,20 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SDValue CWD1 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
- CWD, DAG.getConstant(0x800, MVT::i16)),
- DAG.getConstant(11, MVT::i8));
+ CWD, DAG.getConstant(0x800, DL, MVT::i16)),
+ DAG.getConstant(11, DL, MVT::i8));
SDValue CWD2 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
- CWD, DAG.getConstant(0x400, MVT::i16)),
- DAG.getConstant(9, MVT::i8));
+ CWD, DAG.getConstant(0x400, DL, MVT::i16)),
+ DAG.getConstant(9, DL, MVT::i8));
SDValue RetVal =
DAG.getNode(ISD::AND, DL, MVT::i16,
DAG.getNode(ISD::ADD, DL, MVT::i16,
DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
- DAG.getConstant(1, MVT::i16)),
- DAG.getConstant(3, MVT::i16));
+ DAG.getConstant(1, DL, MVT::i16)),
+ DAG.getConstant(3, DL, MVT::i16));
return DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
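The bit shuffling above maps the x87 rounding-control field (bits 10-11 of the control word: 0 = nearest, 1 = down, 2 = up, 3 = toward zero) onto the FLT_ROUNDS encoding (1 = nearest, 3 = down, 2 = up, 0 = toward zero). A scalar sketch of the same mapping, for reference only:

    #include <cstdint>

    // Mirror of the CWD1/CWD2/RetVal computation above on a plain integer.
    int fltRoundsFromX87(uint16_t cw) {
      uint16_t hiRC = (cw & 0x800) >> 11;   // high RC bit -> result bit 0
      uint16_t loRC = (cw & 0x400) >> 9;    // low RC bit  -> result bit 1
      return ((hiRC | loRC) + 1) & 3;       // RC 0->1, 1->3, 2->2, 3->0
    }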
@@ -15869,14 +15891,15 @@ static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
// If src is zero (i.e. bsr sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
- DAG.getConstant(NumBits+NumBits-1, OpVT),
- DAG.getConstant(X86::COND_E, MVT::i8),
+ DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
+ DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
// Finally xor with NumBits-1.
- Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
+ Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
+ DAG.getConstant(NumBits - 1, dl, OpVT));
if (VT == MVT::i8)
Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
@@ -15901,7 +15924,8 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
// And xor with NumBits-1.
- Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
+ Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
+ DAG.getConstant(NumBits - 1, dl, OpVT));
if (VT == MVT::i8)
Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
@@ -15921,8 +15945,8 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
// If src is zero (i.e. bsf sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
- DAG.getConstant(NumBits, VT),
- DAG.getConstant(X86::COND_E, MVT::i8),
+ DAG.getConstant(NumBits, dl, VT),
+ DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
@@ -15989,8 +16013,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
if (Subtarget->hasInt256()) {
if (VT == MVT::v32i8) {
MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2);
- SDValue Lo = DAG.getIntPtrConstant(0);
- SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2);
+ SDValue Lo = DAG.getIntPtrConstant(0, dl);
+ SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo);
SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo);
SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi);
@@ -16024,8 +16048,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
ALo = DAG.getNode(ISD::BITCAST, dl, ExVT, ALo);
BLo = DAG.getNode(ISD::BITCAST, dl, ExVT, BLo);
- ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, ExVT));
- BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, ExVT));
+ ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
+ BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
}
// Extract the hi parts and sign extend to i16
@@ -16044,15 +16068,15 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
AHi = DAG.getNode(ISD::BITCAST, dl, ExVT, AHi);
BHi = DAG.getNode(ISD::BITCAST, dl, ExVT, BHi);
- AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, ExVT));
- BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, ExVT));
+ AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
+ BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
}
// Multiply, mask the lower 8bits of the lo/hi results and pack
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
- RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, ExVT));
- RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, ExVT));
+ RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
+ RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
@@ -16228,7 +16252,8 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// unsigned multiply.
if (IsSigned && !Subtarget->hasSSE41()) {
SDValue ShAmt =
- DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
+ DAG.getConstant(31, dl,
+ DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
@@ -16283,7 +16308,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
// Zero out the rightmost bits.
SmallVector<SDValue, 32> V(
- NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), MVT::i8));
+ NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SHL,
DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
@@ -16294,7 +16319,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
// Zero out the leftmost bits.
SmallVector<SDValue, 32> V(
- NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, MVT::i8));
+ NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SRL,
DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
}
@@ -16308,7 +16333,8 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
// R s>> a === ((R u>> a) ^ m) - m
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
SmallVector<SDValue, 32> V(NumElts,
- DAG.getConstant(128 >> ShiftAmt, MVT::i8));
+ DAG.getConstant(128 >> ShiftAmt, dl,
+ MVT::i8));
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
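The v16i8 SRA path above uses the sign-extension identity stated in its comment: after a logical right shift, XORing with the shifted-down sign bit m = 0x80 >> a and then subtracting m restores the sign. A scalar sketch of the identity (not from the patch):

    #include <cstdint>

    // (x >>s a) == (((x >>u a) ^ m) - m), with m = 0x80 >> a, for 0 <= a < 8.
    int8_t sraViaSrl(uint8_t x, unsigned a) {
      uint8_t m = uint8_t(0x80u >> a);
      uint8_t logical = uint8_t(x >> a);
      return int8_t(uint8_t((logical ^ m) - m));
    }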
@@ -16413,7 +16439,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
if (!BaseShAmt)
// Avoid introducing an extract element from a shuffle.
BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
- DAG.getIntPtrConstant(SplatIdx));
+ DAG.getIntPtrConstant(SplatIdx, dl));
}
}
@@ -16571,7 +16597,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
Elts.push_back(DAG.getUNDEF(SVT));
continue;
}
- Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
+ Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
return DAG.getNode(ISD::MUL, dl, VT, R, BV);
@@ -16579,9 +16605,10 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// Lower SHL with variable shift amount.
if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
- Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
+ Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op,
+ DAG.getConstant(0x3f800000U, dl, VT));
Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
return DAG.getNode(ISD::MUL, dl, VT, Op, R);
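The v4i32 SHL lowering above builds 2^amt per lane through the float exponent field: shifting amt left by 23 and adding 0x3f800000 (the bit pattern of 1.0f) produces the float 2^amt, which is converted back to an integer and multiplied in. A scalar sketch of the trick, for illustration only:

    #include <cstdint>
    #include <cstring>

    // x << amt computed as x * 2^amt, with 2^amt built in the float exponent.
    uint32_t shlViaFloatExponent(uint32_t x, uint32_t amt) {   // amt in [0, 31]
      uint32_t bits = (amt << 23) + 0x3f800000u;   // exponent = 127 + amt, mantissa 0
      float p;
      std::memcpy(&p, &bits, sizeof p);            // bitcast: p == 2^amt exactly
      return x * static_cast<uint32_t>(p);         // wraps mod 2^32 like the shift
    }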
@@ -16645,10 +16672,10 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// Replace this node with two shifts followed by a MOVSS/MOVSD.
EVT CastVT = MVT::v4i32;
SDValue Splat1 =
- DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
+ DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT);
SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
SDValue Splat2 =
- DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
+ DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT);
SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
if (TargetOpcode == X86ISD::MOVSD)
CastVT = MVT::v2i64;
@@ -16664,16 +16691,16 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
// a = a << 5;
- Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
+ Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT));
Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
// Turn 'a' into a mask suitable for VSELECT
- SDValue VSelM = DAG.getConstant(0x80, VT);
+ SDValue VSelM = DAG.getConstant(0x80, dl, VT);
SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
- SDValue CM1 = DAG.getConstant(0x0f, VT);
- SDValue CM2 = DAG.getConstant(0x3f, VT);
+ SDValue CM1 = DAG.getConstant(0x0f, dl, VT);
+ SDValue CM2 = DAG.getConstant(0x3f, dl, VT);
// r = VSELECT(r, psllw(r & (char16)15, 4), a);
SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
@@ -16814,7 +16841,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue SetCC =
DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
- DAG.getConstant(X86::COND_O, MVT::i32),
+ DAG.getConstant(X86::COND_O, DL, MVT::i32),
SDValue(Sum.getNode(), 2));
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
@@ -16827,7 +16854,7 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue SetCC =
DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
- DAG.getConstant(Cond, MVT::i32),
+ DAG.getConstant(Cond, DL, MVT::i32),
SDValue(Sum.getNode(), 1));
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
@@ -16979,13 +17006,13 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
- SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
SDValue Ops[] = {
- DAG.getRegister(X86::ESP, MVT::i32), // Base
- DAG.getTargetConstant(1, MVT::i8), // Scale
- DAG.getRegister(0, MVT::i32), // Index
- DAG.getTargetConstant(0, MVT::i32), // Disp
- DAG.getRegister(0, MVT::i32), // Segment.
+ DAG.getRegister(X86::ESP, MVT::i32), // Base
+ DAG.getTargetConstant(1, dl, MVT::i8), // Scale
+ DAG.getRegister(0, MVT::i32), // Index
+ DAG.getTargetConstant(0, dl, MVT::i32), // Disp
+ DAG.getRegister(0, MVT::i32), // Segment.
Zero,
Chain
};
@@ -17018,7 +17045,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
SDValue Ops[] = { cpIn.getValue(0),
Op.getOperand(1),
Op.getOperand(3),
- DAG.getTargetConstant(size, MVT::i8),
+ DAG.getTargetConstant(size, DL, MVT::i8),
cpIn.getValue(1) };
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
@@ -17030,7 +17057,8 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
MVT::i32, cpOut.getValue(2));
SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
- DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
+ DAG.getConstant(X86::COND_E, DL, MVT::i8),
+ EFLAGS);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
@@ -17059,7 +17087,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
SmallVector<SDValue, 16> Elts;
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
- DAG.getIntPtrConstant(i)));
+ DAG.getIntPtrConstant(i, dl)));
// Explicitly mark the extra elements as Undef.
Elts.append(NumElts, DAG.getUNDEF(SVT));
@@ -17068,7 +17096,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, dl));
}
assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
@@ -17123,12 +17151,15 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
bool NeedsBitcast = EltVT == MVT::i32;
MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
- SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
- SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
- SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
+ SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl,
+ EltVT);
+ SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl,
+ EltVT);
+ SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl,
+ EltVT);
// v = v - ((v >> 1) & 0x55555555...)
- SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
+ SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, dl, EltVT));
SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
if (NeedsBitcast)
@@ -17147,7 +17178,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
// v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
- SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
+ SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, dl, EltVT));
SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
@@ -17166,7 +17197,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
// v = (v + (v >> 4)) & 0x0F0F0F0F...
- SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
+ SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, dl, EltVT));
SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
@@ -17199,7 +17230,7 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
Add = And;
SmallVector<SDValue, 8> Csts;
for (unsigned i = 8; i <= Len/2; i *= 2) {
- Csts.assign(NumElts, DAG.getConstant(i, EltVT));
+ Csts.assign(NumElts, DAG.getConstant(i, dl, EltVT));
SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
@@ -17207,7 +17238,8 @@ static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
}
// The result is on the least significant 6-bits on i32 and 7-bits on i64.
- SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
+ SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), dl,
+ EltVT);
SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
if (NeedsBitcast) {
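The LowerCTPOP sequence above is the classic SWAR population count, vectorized lane by lane: 2-bit, then 4-bit, then 8-bit partial sums, followed by folding and masking down to the final 6- or 7-bit result. The scalar i32 version, for reference only:

    #include <cstdint>

    // Scalar equivalent of the masked-add sequence built above for each lane.
    uint32_t popcount32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);                  // 2-bit partial sums
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit partial sums
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // 8-bit partial sums
      v = v + (v >> 8);                                  // fold bytes
      v = v + (v >> 16);                                 // fold halfwords
      return v & 0x3F;                                   // result fits in 6 bits
    }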
@@ -17226,7 +17258,7 @@ static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Node);
EVT T = Node->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
- DAG.getConstant(0, T), Node->getOperand(2));
+ DAG.getConstant(0, dl, T), Node->getOperand(2));
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
@@ -17332,9 +17364,9 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
// Returned in bits 0:31 and 32:64 xmm0.
SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
- CallResult.first, DAG.getIntPtrConstant(0));
+ CallResult.first, DAG.getIntPtrConstant(0, dl));
SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
- CallResult.first, DAG.getIntPtrConstant(1));
+ CallResult.first, DAG.getIntPtrConstant(1, dl));
SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
@@ -17501,7 +17533,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
return;
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
N->getOperand(0));
- SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
+ SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
MVT::f64);
SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
@@ -17544,9 +17576,9 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
SDValue cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
- DAG.getConstant(0, HalfT));
+ DAG.getConstant(0, dl, HalfT));
cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
- DAG.getConstant(1, HalfT));
+ DAG.getConstant(1, dl, HalfT));
cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
cpInL, SDValue());
@@ -17555,9 +17587,9 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
cpInH, cpInL.getValue(1));
SDValue swapInL, swapInH;
swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
- DAG.getConstant(0, HalfT));
+ DAG.getConstant(0, dl, HalfT));
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
- DAG.getConstant(1, HalfT));
+ DAG.getConstant(1, dl, HalfT));
swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
Regs64bit ? X86::RBX : X86::EBX,
swapInL, cpInH.getValue(1));
@@ -17584,7 +17616,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
MVT::i32, cpOutH.getValue(2));
SDValue Success =
DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
+ DAG.getConstant(X86::COND_E, dl, MVT::i8), EFLAGS);
Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
@@ -17634,7 +17666,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVector<SDValue, 8> Elts;
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
- ToVecInt, DAG.getIntPtrConstant(i)));
+ ToVecInt, DAG.getIntPtrConstant(i, dl)));
Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
}
@@ -19845,7 +19877,7 @@ static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
int M = Mask[i / Ratio] != SM_SentinelZero
? Ratio * Mask[i / Ratio] + i % Ratio
: 255;
- PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
+ PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Input);
@@ -20151,7 +20183,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
for (int &M : Mask)
M = VMask[M];
V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Rebuild the chain around this new shuffle.
while (!Chain.empty()) {
@@ -20238,7 +20270,7 @@ static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
for (int &M : Mask)
M = VMask[M];
V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
- getV4X86ShuffleImm8ForMask(Mask, DAG));
+ getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Check that the shuffles didn't cancel each other out. If not, we need to
// combine to the new one.
@@ -20297,7 +20329,7 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
V = DAG.getNode(ISD::BITCAST, DL, DVT, V);
DCI.AddToWorklist(V.getNode());
V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
- getV4X86ShuffleImm8ForMask(DMask, DAG));
+ getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
DCI.AddToWorklist(V.getNode());
return DAG.getNode(ISD::BITCAST, DL, VT, V);
}
@@ -20727,11 +20759,11 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
- DAG.getConstant(0, VecIdxTy));
+ DAG.getConstant(0, dl, VecIdxTy));
SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
- DAG.getConstant(1, VecIdxTy));
+ DAG.getConstant(1, dl, VecIdxTy));
- SDValue ShAmt = DAG.getConstant(32,
+ SDValue ShAmt = DAG.getConstant(32, dl,
DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
@@ -20751,7 +20783,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Replace each use (extract) with a load of the appropriate element.
for (unsigned i = 0; i < 4; ++i) {
uint64_t Offset = EltSize * i;
- SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
+ SDValue OffsetVal = DAG.getConstant(Offset, dl, TLI.getPointerTy());
SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
StackPtr, OffsetVal);
@@ -21112,21 +21144,21 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
TrueC->getAPIntValue().isPowerOf2()) {
if (NeedsCondInvert) // Invert the condition if needed.
Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
- DAG.getConstant(1, Cond.getValueType()));
+ DAG.getConstant(1, DL, Cond.getValueType()));
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
unsigned ShAmt = TrueC->getAPIntValue().logBase2();
return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
- DAG.getConstant(ShAmt, MVT::i8));
+ DAG.getConstant(ShAmt, DL, MVT::i8));
}
// Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst.
if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
if (NeedsCondInvert) // Invert the condition if needed.
Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
- DAG.getConstant(1, Cond.getValueType()));
+ DAG.getConstant(1, DL, Cond.getValueType()));
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
@@ -21161,7 +21193,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
if (NeedsCondInvert) // Invert the condition if needed.
Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
- DAG.getConstant(1, Cond.getValueType()));
+ DAG.getConstant(1, DL, Cond.getValueType()));
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
@@ -21169,7 +21201,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Scale the condition by the difference.
if (Diff != 1)
Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
- DAG.getConstant(Diff, Cond.getValueType()));
+ DAG.getConstant(Diff, DL,
+ Cond.getValueType()));
// Add the base if non-zero.
if (FalseC->getAPIntValue() != 0)
@@ -21257,7 +21290,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
(-OpRHSConst->getAPIntValue() - 1))
return DAG.getNode(
X86ISD::SUBUS, DL, VT, OpLHS,
- DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
+ DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT));
// Another special case: If C was a sign bit, the sub has been
// canonicalized into a xor.
@@ -21271,7 +21304,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// don't rely on particular values of undef lanes.
return DAG.getNode(
X86ISD::SUBUS, DL, VT, OpLHS,
- DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
+ DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT));
}
}
}
@@ -21665,7 +21698,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
// Extra check as FCMOV only supports a subset of X86 cond.
(FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
SDValue Ops[] = { FalseOp, TrueOp,
- DAG.getConstant(CC, MVT::i8), Flags };
+ DAG.getConstant(CC, DL, MVT::i8), Flags };
return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
}
@@ -21687,14 +21720,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
// shift amount.
if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
- DAG.getConstant(CC, MVT::i8), Cond);
+ DAG.getConstant(CC, DL, MVT::i8), Cond);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
unsigned ShAmt = TrueC->getAPIntValue().logBase2();
Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
- DAG.getConstant(ShAmt, MVT::i8));
+ DAG.getConstant(ShAmt, DL, MVT::i8));
if (N->getNumValues() == 2) // Dead flag value?
return DCI.CombineTo(N, Cond, SDValue());
return Cond;
@@ -21704,7 +21737,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
// for any integer data type, including i8/i16.
if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
- DAG.getConstant(CC, MVT::i8), Cond);
+ DAG.getConstant(CC, DL, MVT::i8), Cond);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
@@ -21742,14 +21775,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
if (isFastMultiplier) {
APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
- DAG.getConstant(CC, MVT::i8), Cond);
+ DAG.getConstant(CC, DL, MVT::i8), Cond);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
Cond);
// Scale the condition by the difference.
if (Diff != 1)
Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
- DAG.getConstant(Diff, Cond.getValueType()));
+ DAG.getConstant(Diff, DL, Cond.getValueType()));
// Add the base if non-zero.
if (FalseC->getAPIntValue() != 0)
@@ -21795,7 +21828,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
if (CC == X86::COND_E &&
CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
SDValue Ops[] = { FalseOp, Cond.getOperand(0),
- DAG.getConstant(CC, MVT::i8), Cond };
+ DAG.getConstant(CC, DL, MVT::i8), Cond };
return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
}
}
@@ -21829,10 +21862,10 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
CC1 = X86::GetOppositeBranchCondition(CC1);
}
- SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, MVT::i8),
+ SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
Flags};
SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), LOps);
- SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, MVT::i8), Flags};
+ SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(CMOV.getNode(), 1));
return CMOV;
@@ -21930,8 +21963,9 @@ static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
// Replace this packed shift intrinsic with a target independent
// shift dag node.
- SDValue Splat = DAG.getConstant(C, VT);
- return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
+ SDLoc DL(N);
+ SDValue Splat = DAG.getConstant(C, DL, VT);
+ return DAG.getNode(ISD::SRA, DL, VT, Op0, Splat);
}
}
}
@@ -21981,17 +22015,17 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
SDValue NewMul;
if (isPowerOf2_64(MulAmt1))
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
- DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
+ DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
- DAG.getConstant(MulAmt1, VT));
+ DAG.getConstant(MulAmt1, DL, VT));
if (isPowerOf2_64(MulAmt2))
NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
- DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
+ DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
- DAG.getConstant(MulAmt2, VT));
+ DAG.getConstant(MulAmt2, DL, VT));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, NewMul, false);
@@ -22018,9 +22052,11 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
APInt ShAmt = N1C->getAPIntValue();
Mask = Mask.shl(ShAmt);
- if (Mask != 0)
- return DAG.getNode(ISD::AND, SDLoc(N), VT,
- N00, DAG.getConstant(Mask, VT));
+ if (Mask != 0) {
+ SDLoc DL(N);
+ return DAG.getNode(ISD::AND, DL, VT,
+ N00, DAG.getConstant(Mask, DL, VT));
+ }
}
}
@@ -22150,7 +22186,8 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
if (Subtarget->hasAVX512()) {
SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
- CMP01, DAG.getConstant(x86cc, MVT::i8));
+ CMP01,
+ DAG.getConstant(x86cc, DL, MVT::i8));
if (N->getValueType(0) != MVT::i1)
return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
FSetCC);
@@ -22158,7 +22195,8 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
}
SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
CMP00.getValueType(), CMP00, CMP01,
- DAG.getConstant(x86cc, MVT::i8));
+ DAG.getConstant(x86cc, DL,
+ MVT::i8));
bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
@@ -22174,14 +22212,16 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
Vector64);
OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
- Vector32, DAG.getIntPtrConstant(0));
+ Vector32, DAG.getIntPtrConstant(0, DL));
IntVT = MVT::i32;
}
- SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
+ SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
+ OnesOrZeroesF);
SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
- DAG.getConstant(1, IntVT));
- SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
+ DAG.getConstant(1, DL, IntVT));
+ SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ ANDed);
return OneBitOfTruth;
}
}
@@ -22293,7 +22333,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
APInt Mask = APInt::getAllOnesValue(InBits);
Mask = Mask.zext(VT.getScalarType().getSizeInBits());
return DAG.getNode(ISD::AND, DL, VT,
- Op, DAG.getConstant(Mask, VT));
+ Op, DAG.getConstant(Mask, DL, VT));
}
case ISD::SIGN_EXTEND:
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
@@ -22389,8 +22429,8 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
Mask.push_back(i / ZextRatio);
SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
- Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
- return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
+ Shuffle->getOperand(0), DAG.getConstant(0, DL, SrcType), Mask);
+ return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
}
static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
@@ -22425,7 +22465,8 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
uint64_t MaskSize = countPopulation(Mask);
if (Shift + MaskSize <= VT.getSizeInBits())
return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
- DAG.getConstant(Shift | (MaskSize << 8), VT));
+ DAG.getConstant(Shift | (MaskSize << 8), DL,
+ VT));
}
}
} // BEXTR
@@ -22640,10 +22681,10 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
// Generate SUB & CMOV.
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
- DAG.getConstant(0, VT), N0.getOperand(0));
+ DAG.getConstant(0, DL, VT), N0.getOperand(0));
SDValue Ops[] = { N0.getOperand(0), Neg,
- DAG.getConstant(X86::COND_GE, MVT::i8),
+ DAG.getConstant(X86::COND_GE, DL, MVT::i8),
SDValue(Neg.getNode(), 1) };
return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
}
@@ -22688,7 +22729,7 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue Ptr = Ld->getBasePtr();
- SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
+ SDValue Increment = DAG.getConstant(16, dl, TLI.getPointerTy());
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems/2);
@@ -22767,7 +22808,7 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
- DAG.getConstant(0, WideVecVT),
+ DAG.getConstant(0, dl, WideVecVT),
&ShuffleVec[0]);
}
else {
@@ -22779,7 +22820,7 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
unsigned NumConcat = WidenNumElts / MaskNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
- SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
+ SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
Ops[0] = Mask;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = ZeroVal;
@@ -22851,7 +22892,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
- DAG.getConstant(0, WideVecVT),
+ DAG.getConstant(0, dl, WideVecVT),
&ShuffleVec[0]);
}
else {
@@ -22863,7 +22904,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
unsigned NumConcat = WidenNumElts / MaskNumElts;
SmallVector<SDValue, 16> Ops(NumConcat);
- SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
+ SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
Ops[0] = Mask;
for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = ZeroVal;
@@ -22897,7 +22938,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
- SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
+ SDValue Stride = DAG.getConstant(16, dl, TLI.getPointerTy());
SDValue Ptr0 = St->getBasePtr();
SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
@@ -22970,7 +23011,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
- SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
+ SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, dl,
TLI.getPointerTy());
SDValue Ptr = St->getBasePtr();
@@ -22978,7 +23019,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
- DAG.getIntPtrConstant(i));
+ DAG.getIntPtrConstant(i, dl));
SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
St->getPointerInfo(), St->isVolatile(),
St->isNonTemporal(), St->getAlignment());
@@ -23062,7 +23103,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// Otherwise, lower to two pairs of 32-bit loads / stores.
SDValue LoAddr = Ld->getBasePtr();
SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
- DAG.getConstant(4, MVT::i32));
+ DAG.getConstant(4, LdDL, MVT::i32));
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
Ld->getPointerInfo(),
@@ -23083,7 +23124,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
LoAddr = St->getBasePtr();
HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
- DAG.getConstant(4, MVT::i32));
+ DAG.getConstant(4, StDL, MVT::i32));
SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
St->getPointerInfo(),
@@ -23507,7 +23548,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, dl, VT));
}
}
@@ -23519,7 +23560,7 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, dl, VT));
}
}
if (VT.is256BitVector()) {
@@ -23558,18 +23599,18 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
- SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), LHS.getValueType(), RHS,
+ SDValue addV = DAG.getNode(ISD::ADD, DL, LHS.getValueType(), RHS,
LHS.getOperand(1));
- return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV,
- DAG.getConstant(0, addV.getValueType()), CC);
+ return DAG.getSetCC(DL, N->getValueType(0), addV,
+ DAG.getConstant(0, DL, addV.getValueType()), CC);
}
if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
- SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), RHS.getValueType(), LHS,
+ SDValue addV = DAG.getNode(ISD::ADD, DL, RHS.getValueType(), LHS,
RHS.getOperand(1));
- return DAG.getSetCC(SDLoc(N), N->getValueType(0), addV,
- DAG.getConstant(0, addV.getValueType()), CC);
+ return DAG.getSetCC(DL, N->getValueType(0), addV,
+ DAG.getConstant(0, DL, addV.getValueType()), CC);
}
if (VT.getScalarType() == MVT::i1 &&
@@ -23593,9 +23634,9 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
assert(VT == LHS.getOperand(0).getValueType() &&
"Uexpected operand type");
if (CC == ISD::SETGT)
- return DAG.getConstant(0, VT);
+ return DAG.getConstant(0, DL, VT);
if (CC == ISD::SETLE)
- return DAG.getConstant(1, VT);
+ return DAG.getConstant(1, DL, VT);
if (CC == ISD::SETEQ || CC == ISD::SETGE)
return DAG.getNOT(DL, LHS.getOperand(0), VT);
@@ -23616,7 +23657,8 @@ static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
SDValue Addr = Load->getOperand(1);
SDValue NewAddr = DAG.getNode(
ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
- DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
+ DAG.getConstant(Index * EVT.getStoreSize(), dl,
+ Addr.getSimpleValueType()));
SDValue NewLoad =
DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
@@ -23672,7 +23714,7 @@ static SDValue PerformBLENDICombine(SDNode *N, SelectionDAG &DAG) {
if (VT == MVT::v2f64)
if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
- SDValue NewMask = DAG.getConstant(1, MVT::i8);
+ SDValue NewMask = DAG.getConstant(1, DL, MVT::i8);
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
}
@@ -23687,12 +23729,14 @@ static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
if (VT == MVT::i8)
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
- DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
- DAG.getConstant(1, VT));
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
+ EFLAGS),
+ DAG.getConstant(1, DL, VT));
assert (VT == MVT::i1 && "Unexpected type for SECCC node");
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
- DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
+ DAG.getConstant(X86::COND_B, DL, MVT::i8),
+ EFLAGS));
}
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
@@ -23731,7 +23775,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
if (Flags.getNode()) {
- SDValue Cond = DAG.getConstant(CC, MVT::i8);
+ SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
}
@@ -23753,7 +23797,7 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
if (Flags.getNode()) {
- SDValue Cond = DAG.getConstant(CC, MVT::i8);
+ SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
Flags);
}
@@ -23857,12 +23901,13 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
SDValue(N, 1).use_empty()) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
- SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
+ SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
- DAG.getConstant(X86::COND_B,MVT::i8),
+ DAG.getConstant(X86::COND_B, DL,
+ MVT::i8),
N->getOperand(2)),
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, DL, VT));
return DCI.CombineTo(N, Res1, CarryOut);
}
@@ -23897,16 +23942,17 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
SDValue CmpOp0 = Cmp.getOperand(0);
SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
- DAG.getConstant(1, CmpOp0.getValueType()));
+ DAG.getConstant(1, DL, CmpOp0.getValueType()));
SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
if (CC == X86::COND_NE)
return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
DL, OtherVal.getValueType(), OtherVal,
- DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
+ DAG.getConstant(-1ULL, DL, OtherVal.getValueType()),
+ NewCmp);
return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
DL, OtherVal.getValueType(), OtherVal,
- DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
+ DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
}
/// PerformADDCombine - Do target-specific dag combines on integer adds.
@@ -23942,9 +23988,9 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
EVT VT = Op0.getValueType();
SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
Op1.getOperand(0),
- DAG.getConstant(~XorC, VT));
+ DAG.getConstant(~XorC, SDLoc(Op1), VT));
return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
- DAG.getConstant(C->getAPIntValue()+1, VT));
+ DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
}
}
@@ -24014,7 +24060,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
OrigVT.getVectorNumElements() / Ratio);
OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
- DAG.getIntPtrConstant(0));
+ DAG.getIntPtrConstant(0, DL));
}
Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
@@ -24495,7 +24541,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 31) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24503,7 +24550,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'J':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 63) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24511,7 +24559,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'K':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (isInt<8>(C->getSExtValue())) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24520,7 +24569,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
(Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
- Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24528,7 +24578,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'M':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 3) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24536,7 +24587,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 255) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24544,7 +24596,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'O':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 127) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24555,7 +24608,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
- Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
// FIXME gcc accepts some relocatable values here too, but only in certain
@@ -24568,7 +24621,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getZExtValue())) {
- Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
+ Op.getValueType());
break;
}
}
@@ -24580,7 +24634,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// Literal immediates are always ok.
if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
// Widen to 64 bits here to get it sign extended.
- Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
+ Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
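
The X86ISelLowering.cpp hunks above all follow one mechanical pattern: every call that materializes a constant node (DAG.getConstant, DAG.getTargetConstant, DAG.getIntPtrConstant) now receives the debug location of the node being rewritten, so the new constant carries a meaningful SDLoc instead of an empty one. A minimal sketch of the updated call shape, using a hypothetical combine helper that is not part of this patch:

    // Sketch only: illustrates the post-patch SelectionDAG constant API,
    // where the SDLoc now precedes the value type.
    static SDValue maskLowBit(SDNode *N, SelectionDAG &DAG) {
      SDLoc DL(N);                                // location taken from the combined node
      EVT VT = N->getValueType(0);
      SDValue One = DAG.getConstant(1, DL, VT);   // was: DAG.getConstant(1, VT)
      return DAG.getNode(ISD::AND, DL, VT, SDValue(N, 0), One);
    }
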
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 45e6d0a7f4d..6abb035f688 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -17,12 +17,12 @@
def GetLo32XForm : SDNodeXForm<imm, [{
// Transformation function: get the low 32 bits.
- return getI32Imm((unsigned)N->getZExtValue());
+ return getI32Imm((unsigned)N->getZExtValue(), SDLoc(N));
}]>;
def GetLo8XForm : SDNodeXForm<imm, [{
// Transformation function: get the low 8 bits.
- return getI8Imm((uint8_t)N->getZExtValue());
+ return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 5094aa459b9..b53a8f83b30 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -566,7 +566,7 @@ def fp32imm0 : PatLeaf<(f32 fpimm), [{
def I8Imm : SDNodeXForm<imm, [{
// Transformation function: get the low 8 bits.
- return getI8Imm((uint8_t)N->getZExtValue());
+ return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;
def FROUND_NO_EXC : ImmLeaf<i32, [{ return Imm == 8; }]>;
@@ -577,31 +577,31 @@ def FROUND_CURRENT : ImmLeaf<i32, [{
// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
// Transformation function: imm >> 3
- return getI32Imm(N->getZExtValue() >> 3);
+ return getI32Imm(N->getZExtValue() >> 3, SDLoc(N));
}]>;
// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
- return getI8Imm(X86::getExtractVEXTRACT128Immediate(N));
+ return getI8Imm(X86::getExtractVEXTRACT128Immediate(N), SDLoc(N));
}]>;
// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
- return getI8Imm(X86::getInsertVINSERT128Immediate(N));
+ return getI8Imm(X86::getInsertVINSERT128Immediate(N), SDLoc(N));
}]>;
// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
- return getI8Imm(X86::getExtractVEXTRACT256Immediate(N));
+ return getI8Imm(X86::getExtractVEXTRACT256Immediate(N), SDLoc(N));
}]>;
// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
- return getI8Imm(X86::getInsertVINSERT256Immediate(N));
+ return getI8Imm(X86::getInsertVINSERT256Immediate(N), SDLoc(N));
}]>;
def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index e9a04318be3..8e270bf2266 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -2223,7 +2223,7 @@ let Predicates = [HasBMI2], Defs = [EFLAGS] in {
def CountTrailingOnes : SDNodeXForm<imm, [{
// Count the trailing ones in the immediate.
- return getI8Imm(countTrailingOnes(N->getZExtValue()));
+ return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
}]>;
def BZHIMask : ImmLeaf<i64, [{
diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index c706d43c9f5..caecf7001ef 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -850,12 +850,12 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
def ROT32L2R_imm8 : SDNodeXForm<imm, [{
// Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
- return getI8Imm(32 - N->getZExtValue());
+ return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
}]>;
def ROT64L2R_imm8 : SDNodeXForm<imm, [{
// Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
- return getI8Imm(64 - N->getZExtValue());
+ return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
}]>;
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
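
The TableGen changes in the preceding .td files adjust SDNodeXForm bodies to pass SDLoc(N) into the getI8Imm/getI32Imm helpers, which in turn forward the location to CurDAG->getTargetConstant. As a sketch, the helper presumably takes this shape after the change (the XCore equivalent appears verbatim in the XCoreISelDAGToDAG.cpp hunk further below):

    // Assumed post-patch shape of the per-target immediate helper; not quoted
    // from this patch, but it mirrors the XCore getI32Imm change shown later.
    inline SDValue getI8Imm(unsigned Imm, SDLoc DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }
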
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 4bfc7f9861d..5ca40bc0091 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -138,22 +138,22 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
default: // Byte aligned
AVT = MVT::i8;
ValReg = X86::AL;
- Count = DAG.getIntPtrConstant(SizeVal);
+ Count = DAG.getIntPtrConstant(SizeVal, dl);
break;
}
if (AVT.bitsGT(MVT::i8)) {
unsigned UBytes = AVT.getSizeInBits() / 8;
- Count = DAG.getIntPtrConstant(SizeVal / UBytes);
+ Count = DAG.getIntPtrConstant(SizeVal / UBytes, dl);
BytesLeft = SizeVal % UBytes;
}
- Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, AVT),
+ Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, dl, AVT),
InFlag);
InFlag = Chain.getValue(1);
} else {
AVT = MVT::i8;
- Count = DAG.getIntPtrConstant(SizeVal);
+ Count = DAG.getIntPtrConstant(SizeVal, dl);
Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag);
InFlag = Chain.getValue(1);
}
@@ -174,7 +174,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
Count = Size;
EVT CVT = Count.getValueType();
SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
- DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
+ DAG.getConstant((AVT == MVT::i64) ? 7 : 3, dl,
+ CVT));
Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
X86::ECX,
Left, InFlag);
@@ -190,9 +191,9 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
Chain = DAG.getMemset(Chain, dl,
DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
- DAG.getConstant(Offset, AddrVT)),
+ DAG.getConstant(Offset, dl, AddrVT)),
Src,
- DAG.getConstant(BytesLeft, SizeVT),
+ DAG.getConstant(BytesLeft, dl, SizeVT),
Align, isVolatile, false,
DstPtrInfo.getWithOffset(Offset));
}
@@ -248,7 +249,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
unsigned UBytes = AVT.getSizeInBits() / 8;
unsigned CountVal = SizeVal / UBytes;
- SDValue Count = DAG.getIntPtrConstant(CountVal);
+ SDValue Count = DAG.getIntPtrConstant(CountVal, dl);
unsigned BytesLeft = SizeVal % UBytes;
SDValue InFlag;
@@ -279,10 +280,12 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
EVT SizeVT = Size.getValueType();
Results.push_back(DAG.getMemcpy(Chain, dl,
DAG.getNode(ISD::ADD, dl, DstVT, Dst,
- DAG.getConstant(Offset, DstVT)),
+ DAG.getConstant(Offset, dl,
+ DstVT)),
DAG.getNode(ISD::ADD, dl, SrcVT, Src,
- DAG.getConstant(Offset, SrcVT)),
- DAG.getConstant(BytesLeft, SizeVT),
+ DAG.getConstant(Offset, dl,
+ SrcVT)),
+ DAG.getConstant(BytesLeft, dl, SizeVT),
Align, isVolatile, AlwaysInline, false,
DstPtrInfo.getWithOffset(Offset),
SrcPtrInfo.getWithOffset(Offset)));
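
In the X86SelectionDAGInfo.cpp memset/memcpy lowering above, the residual-byte handling builds offset and byte-count constants that now carry the call's dl. A minimal sketch of the recurring "advance pointer by constant offset" step, written as a hypothetical helper rather than code from the patch:

    // Sketch only: advance a pointer by a byte offset, with the constant
    // tagged with the caller's debug location.
    static SDValue advancePtr(SelectionDAG &DAG, SDLoc dl, SDValue Ptr, uint64_t Offset) {
      EVT VT = Ptr.getValueType();
      return DAG.getNode(ISD::ADD, dl, VT, Ptr, DAG.getConstant(Offset, dl, VT));
    }
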
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 5c7ea5e3640..f5b180b1ac0 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -47,8 +47,8 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
inline bool immMskBitp(SDNode *inN) const {
@@ -90,7 +90,7 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
FrameIndexSDNode *FIN = nullptr;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
@@ -100,7 +100,8 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
&& (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
// Constant positive word offset from frame index
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(CN->getSExtValue(), SDLoc(Addr),
+ MVT::i32);
return true;
}
}
@@ -138,7 +139,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit
- SDValue MskSize = getI32Imm(32 - countLeadingZeros((uint32_t)Val));
+ SDValue MskSize = getI32Imm(32 - countLeadingZeros((uint32_t)Val), dl);
return CurDAG->getMachineNode(XCore::MKMSK_rus, dl,
MVT::i32, MskSize);
}
@@ -256,7 +257,7 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
// after with clrsr 1. If any resources owned by the thread are ready an event
// will be taken. If no resource is ready we branch to the address which was
// the operand to the checkevent intrinsic.
- SDValue constOne = getI32Imm(1);
+ SDValue constOne = getI32Imm(1, dl);
SDValue Glue =
SDValue(CurDAG->getMachineNode(XCore::SETSR_branch_u6, dl, MVT::Glue,
constOne, Chain), 0);
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index c4e3bb8da6d..cf1822d0938 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -298,7 +298,7 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
GA = getGlobalAddressWrapper(GA, GV, DAG);
// Handle the rest of the offset.
if (Offset != FoldedOffset) {
- SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
+ SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
}
return GA;
@@ -368,7 +368,7 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
}
assert((NumEntries >> 31) == 0);
SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
- DAG.getConstant(1, MVT::i32));
+ DAG.getConstant(1, dl, MVT::i32));
return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
ScaledIndex);
}
@@ -393,12 +393,12 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
HighOffset);
} else {
LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
- DAG.getConstant(LowOffset, MVT::i32));
+ DAG.getConstant(LowOffset, DL, MVT::i32));
HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
- DAG.getConstant(HighOffset, MVT::i32));
+ DAG.getConstant(HighOffset, DL, MVT::i32));
}
- SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
- SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);
+ SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
+ SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
LowAddr, MachinePointerInfo(),
@@ -469,14 +469,14 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
LD->isVolatile(), LD->isNonTemporal(),
LD->isInvariant(), 2);
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, DL, MVT::i32));
SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
HighAddr,
LD->getPointerInfo().getWithOffset(2),
MVT::i16, LD->isVolatile(),
LD->isNonTemporal(), LD->isInvariant(), 2);
SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(16, DL, MVT::i32));
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
High.getValue(1));
@@ -529,13 +529,13 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
if (ST->getAlignment() == 2) {
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(16, dl, MVT::i32));
SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
ST->getPointerInfo(), MVT::i16,
ST->isVolatile(), ST->isNonTemporal(),
2);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, dl, MVT::i32));
SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
ST->getPointerInfo().getWithOffset(2),
MVT::i16, ST->isVolatile(),
@@ -573,7 +573,7 @@ LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
LHS, RHS);
@@ -590,7 +590,7 @@ LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
Zero, Zero);
@@ -675,13 +675,13 @@ TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
SDLoc dl(N);
SDValue LL, RL, AddendL, AddendH;
LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
+ Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
+ Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Other, DAG.getConstant(0, MVT::i32));
+ Other, DAG.getConstant(0, dl, MVT::i32));
AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Other, DAG.getConstant(1, MVT::i32));
+ Other, DAG.getConstant(1, dl, MVT::i32));
APInt HighMask = APInt::getHighBitsSet(64, 32);
unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
@@ -704,9 +704,9 @@ TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
}
SDValue LH, RH;
LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
+ Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
+ Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
DAG.getVTList(MVT::i32, MVT::i32), AddendH,
AddendL, LL, RL);
@@ -735,18 +735,22 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
// Extract components
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- N->getOperand(0), DAG.getConstant(0, MVT::i32));
+ N->getOperand(0),
+ DAG.getConstant(0, dl, MVT::i32));
SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- N->getOperand(0), DAG.getConstant(1, MVT::i32));
+ N->getOperand(0),
+ DAG.getConstant(1, dl, MVT::i32));
SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- N->getOperand(1), DAG.getConstant(0, MVT::i32));
+ N->getOperand(1),
+ DAG.getConstant(0, dl, MVT::i32));
SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- N->getOperand(1), DAG.getConstant(1, MVT::i32));
+ N->getOperand(1),
+ DAG.getConstant(1, dl, MVT::i32));
// Expand
unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
XCoreISD::LSUB;
- SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSL, RHSL, Zero);
SDValue Carry(Lo.getNode(), 1);
@@ -775,7 +779,8 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
false, false, false, 0);
// Increment the pointer, VAList, to the next vararg
SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
- DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
+ DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
+ dl));
// Store the incremented VAList to the legalized pointer
InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
MachinePointerInfo(SV), false, false, 0);
@@ -910,30 +915,30 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue Addr = Trmp;
SDLoc dl(Op);
- OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
- Addr, MachinePointerInfo(TrmpAddr), false, false,
- 0);
+ OutChains[0] = DAG.getStore(Chain, dl,
+ DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr), false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(4, MVT::i32));
- OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
- Addr, MachinePointerInfo(TrmpAddr, 4), false,
- false, 0);
+ DAG.getConstant(4, dl, MVT::i32));
+ OutChains[1] = DAG.getStore(Chain, dl,
+ DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 4), false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(8, MVT::i32));
- OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
- Addr, MachinePointerInfo(TrmpAddr, 8), false,
- false, 0);
+ DAG.getConstant(8, dl, MVT::i32));
+ OutChains[2] = DAG.getStore(Chain, dl,
+ DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 8), false, false, 0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(12, MVT::i32));
+ DAG.getConstant(12, dl, MVT::i32));
OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
MachinePointerInfo(TrmpAddr, 12), false, false,
0);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
- DAG.getConstant(16, MVT::i32));
+ DAG.getConstant(16, dl, MVT::i32));
OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
MachinePointerInfo(TrmpAddr, 16), false, false,
0);
@@ -1096,7 +1101,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag,
int offset = ResultMemLocs[i].first;
unsigned index = ResultMemLocs[i].second;
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
- SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, MVT::i32) };
+ SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
InVals[index] = load;
MemOpChains.push_back(load.getValue(1));
@@ -1145,7 +1150,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = RetCCInfo.getNextStackOffset();
- Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
+ Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, dl,
getPointerTy(), true), dl);
SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
@@ -1182,7 +1187,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
Chain, Arg,
- DAG.getConstant(Offset/4, MVT::i32)));
+ DAG.getConstant(Offset/4, dl,
+ MVT::i32)));
}
}
@@ -1233,8 +1239,9 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy(), true),
- DAG.getConstant(0, getPointerTy(), true),
+ DAG.getConstant(NumBytes, dl, getPointerTy(),
+ true),
+ DAG.getConstant(0, dl, getPointerTy(), true),
InFlag, dl);
InFlag = Chain.getValue(1);
@@ -1422,7 +1429,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(FIN);
MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
- DAG.getConstant(Size, MVT::i32),
+ DAG.getConstant(Size, dl, MVT::i32),
Align, false, false, false,
MachinePointerInfo(),
MachinePointerInfo()));
@@ -1487,7 +1494,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
SmallVector<SDValue, 4> RetOps(1, Chain);
// Return on XCore is always a "retsp 0"
- RetOps.push_back(DAG.getConstant(0, MVT::i32));
+ RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
SmallVector<SDValue, 4> MemOpChains;
// Handle return values that must be copied to memory.
@@ -1671,9 +1678,9 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// fold (ladd 0, 0, x) -> 0, x & 1
if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
- SDValue Carry = DAG.getConstant(0, VT);
+ SDValue Carry = DAG.getConstant(0, dl, VT);
SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
- DAG.getConstant(1, VT));
+ DAG.getConstant(1, dl, VT));
SDValue Ops[] = { Result, Carry };
return DAG.getMergeValues(Ops, dl);
}
@@ -1686,7 +1693,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
VT.getSizeInBits() - 1);
DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
- SDValue Carry = DAG.getConstant(0, VT);
+ SDValue Carry = DAG.getConstant(0, dl, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
SDValue Ops[] = { Result, Carry };
return DAG.getMergeValues(Ops, dl);
@@ -1711,7 +1718,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
- DAG.getConstant(0, VT), N2);
+ DAG.getConstant(0, dl, VT), N2);
SDValue Ops[] = { Result, Borrow };
return DAG.getMergeValues(Ops, dl);
}
@@ -1725,7 +1732,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
VT.getSizeInBits() - 1);
DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
- SDValue Borrow = DAG.getConstant(0, VT);
+ SDValue Borrow = DAG.getConstant(0, dl, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
SDValue Ops[] = { Result, Borrow };
return DAG.getMergeValues(Ops, dl);
@@ -1791,13 +1798,13 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
DAG.MaskedValueIsZero(Addend0, HighMask) &&
DAG.MaskedValueIsZero(Addend1, HighMask)) {
SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul0, DAG.getConstant(0, MVT::i32));
+ Mul0, DAG.getConstant(0, dl, MVT::i32));
SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Mul1, DAG.getConstant(0, MVT::i32));
+ Mul1, DAG.getConstant(0, dl, MVT::i32));
SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Addend0, DAG.getConstant(0, MVT::i32));
+ Addend0, DAG.getConstant(0, dl, MVT::i32));
SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Addend1, DAG.getConstant(0, MVT::i32));
+ Addend1, DAG.getConstant(0, dl, MVT::i32));
SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
Addend0L, Addend1L);
@@ -1837,7 +1844,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
bool isTail = isInTailCallPosition(DAG, ST, Chain);
return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
LD->getBasePtr(),
- DAG.getConstant(StoreBits/8, MVT::i32),
+ DAG.getConstant(StoreBits/8, dl, MVT::i32),
Alignment, false, isTail, ST->getPointerInfo(),
LD->getPointerInfo());
}
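
The XCoreISelLowering.cpp hunks repeatedly split i64 values into i32 halves with ISD::EXTRACT_ELEMENT, and the 0/1 selector constants (low and high half) now take the expansion's dl as well. A minimal sketch of that idiom, factored into a hypothetical helper for illustration:

    // Sketch only: split an i64 SDValue into its low and high i32 halves,
    // as done in TryExpandADDWithMul/ExpandADDSUB above.
    static void splitI64(SelectionDAG &DAG, SDLoc dl, SDValue V,
                         SDValue &Lo, SDValue &Hi) {
      Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, V,
                       DAG.getConstant(0, dl, MVT::i32));
      Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, V,
                       DAG.getConstant(1, dl, MVT::i32));
    }
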
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.td b/llvm/lib/Target/XCore/XCoreInstrInfo.td
index 8e9bb452560..8110b918c22 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -94,33 +94,34 @@ def XCoreMemBarrier : SDNode<"XCoreISD::MEMBARRIER", SDT_XCoreMEMBARRIER,
def div4_xform : SDNodeXForm<imm, [{
// Transformation function: imm/4
assert(N->getZExtValue() % 4 == 0);
- return getI32Imm(N->getZExtValue()/4);
+ return getI32Imm(N->getZExtValue()/4, SDLoc(N));
}]>;
def msksize_xform : SDNodeXForm<imm, [{
// Transformation function: get the size of a mask
assert(isMask_32(N->getZExtValue()));
// look for the first non-zero bit
- return getI32Imm(32 - countLeadingZeros((uint32_t)N->getZExtValue()));
+ return getI32Imm(32 - countLeadingZeros((uint32_t)N->getZExtValue()),
+ SDLoc(N));
}]>;
def neg_xform : SDNodeXForm<imm, [{
// Transformation function: -imm
uint32_t value = N->getZExtValue();
- return getI32Imm(-value);
+ return getI32Imm(-value, SDLoc(N));
}]>;
def bpwsub_xform : SDNodeXForm<imm, [{
// Transformation function: 32-imm
uint32_t value = N->getZExtValue();
- return getI32Imm(32-value);
+ return getI32Imm(32 - value, SDLoc(N));
}]>;
def div4neg_xform : SDNodeXForm<imm, [{
// Transformation function: -imm/4
uint32_t value = N->getZExtValue();
assert(-value % 4 == 0);
- return getI32Imm(-value/4);
+ return getI32Imm(-value/4, SDLoc(N));
}]>;
def immUs4Neg : PatLeaf<(imm), [{