Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td            37
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp   5
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h    25
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp       8
-rw-r--r--  llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp   87
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td    4
6 files changed, 127 insertions(+), 39 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 88d419046b9..c2d2a0b768f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -238,6 +238,36 @@ def FeatureSDWA : SubtargetFeature<"sdwa",
"Support SDWA (Sub-DWORD Addressing) extension"
>;
+def FeatureSDWAOmod : SubtargetFeature<"sdwa-omod",
+ "HasSDWAOmod",
+ "true",
+ "Support OMod with SDWA (Sub-DWORD Addressing) extension"
+>;
+
+def FeatureSDWAScalar : SubtargetFeature<"sdwa-scalar",
+ "HasSDWAScalar",
+ "true",
+ "Support scalar register with SDWA (Sub-DWORD Addressing) extension"
+>;
+
+def FeatureSDWASdst : SubtargetFeature<"sdwa-sdst",
+ "HasSDWASdst",
+ "true",
+ "Support scalar dst for VOPC with SDWA (Sub-DWORD Addressing) extension"
+>;
+
+def FeatureSDWAMac : SubtargetFeature<"sdwa-mav",
+ "HasSDWAMac",
+ "true",
+ "Support v_mac_f32/f16 with SDWA (Sub-DWORD Addressing) extension"
+>;
+
+def FeatureSDWAClampVOPC : SubtargetFeature<"sdwa-clamp-vopc",
+ "HasSDWAClampVOPC",
+ "true",
+ "Support clamp for VOPC with SDWA (Sub-DWORD Addressing) extension"
+>;
+
def FeatureDPP : SubtargetFeature<"dpp",
"HasDPP",
"true",
@@ -421,8 +451,8 @@ def FeatureVolcanicIslands : SubtargetFeatureGeneration<"VOLCANIC_ISLANDS",
FeatureWavefrontSize64, FeatureFlatAddressSpace, FeatureGCN,
FeatureGCN3Encoding, FeatureCIInsts, Feature16BitInsts,
FeatureSMemRealTime, FeatureVGPRIndexMode, FeatureMovrel,
- FeatureScalarStores, FeatureInv2PiInlineImm, FeatureSDWA,
- FeatureDPP
+ FeatureScalarStores, FeatureInv2PiInlineImm,
+ FeatureSDWA, FeatureSDWAClampVOPC, FeatureSDWAMac, FeatureDPP
]
>;
@@ -432,7 +462,8 @@ def FeatureGFX9 : SubtargetFeatureGeneration<"GFX9",
FeatureGCN3Encoding, FeatureCIInsts, Feature16BitInsts,
FeatureSMemRealTime, FeatureScalarStores, FeatureInv2PiInlineImm,
FeatureApertureRegs, FeatureGFX9Insts, FeatureVOP3P, FeatureVGPRIndexMode,
- FeatureFastFMAF32, FeatureSDWA, FeatureDPP,
+ FeatureFastFMAF32, FeatureDPP,
+ FeatureSDWA, FeatureSDWAOmod, FeatureSDWAScalar, FeatureSDWASdst,
FeatureFlatInstOffsets, FeatureFlatGlobalInsts, FeatureFlatScratchInsts
]
>;
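Note: the five new subtarget features split what used to be a single generation check into independent capability bits. Per the lists above, VOLCANIC_ISLANDS keeps v_mac and clamp-on-VOPC support, while GFX9 instead gains omod, one scalar source, and scalar VOPC destinations. A minimal standalone sketch of how the generation presets compose these bits (plain C++ stand-ins, not the actual TableGen/SubtargetFeature machinery):

```cpp
#include <iostream>

// Simplified model of the per-subtarget SDWA capability bits added above.
// Field names mirror the features in the diff; the struct itself is ours.
struct SDWACaps {
  bool Omod = false;      // FeatureSDWAOmod
  bool Scalar = false;    // FeatureSDWAScalar
  bool Sdst = false;      // FeatureSDWASdst
  bool Mac = false;       // FeatureSDWAMac
  bool ClampVOPC = false; // FeatureSDWAClampVOPC
};

// VOLCANIC_ISLANDS: SDWA with clamp-on-VOPC and v_mac, nothing else.
SDWACaps makeVICaps() {
  SDWACaps C;
  C.ClampVOPC = true;
  C.Mac = true;
  return C;
}

// GFX9: omod, one scalar source, and scalar dst for VOPC instead.
SDWACaps makeGFX9Caps() {
  SDWACaps C;
  C.Omod = true;
  C.Scalar = true;
  C.Sdst = true;
  return C;
}

int main() {
  SDWACaps VI = makeVICaps(), GFX9 = makeGFX9Caps();
  std::cout << "VI clamp-on-VOPC: " << VI.ClampVOPC
            << ", GFX9 omod: " << GFX9.Omod << '\n';
}
```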
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 8d157e2f98f..ab5abf2039a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -124,6 +124,11 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
HasScalarStores(false),
HasInv2PiInlineImm(false),
HasSDWA(false),
+ HasSDWAOmod(false),
+ HasSDWAScalar(false),
+ HasSDWASdst(false),
+ HasSDWAMac(false),
+ HasSDWAClampVOPC(false),
HasDPP(false),
FlatAddressSpace(false),
FlatInstOffsets(false),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 5f4f20316a6..2b16289c723 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -149,6 +149,11 @@ protected:
bool HasScalarStores;
bool HasInv2PiInlineImm;
bool HasSDWA;
+ bool HasSDWAOmod;
+ bool HasSDWAScalar;
+ bool HasSDWASdst;
+ bool HasSDWAMac;
+ bool HasSDWAClampVOPC;
bool HasDPP;
bool FlatAddressSpace;
bool FlatInstOffsets;
@@ -431,6 +436,26 @@ public:
return HasSDWA;
}
+ bool hasSDWAOmod() const {
+ return HasSDWAOmod;
+ }
+
+ bool hasSDWAScalar() const {
+ return HasSDWAScalar;
+ }
+
+ bool hasSDWASdst() const {
+ return HasSDWASdst;
+ }
+
+ bool hasSDWAMac() const {
+ return HasSDWAMac;
+ }
+
+ bool hasSDWAClampVOPC() const {
+ return HasSDWAClampVOPC;
+ }
+
/// \brief Returns the offset in bytes from the start of the input buffer
/// of the first explicit kernel argument.
unsigned getExplicitKernelArgOffset(const MachineFunction &MF) const {
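Note: the new accessors follow the header's existing pattern of trivial getters over feature bits initialized in the AMDGPUSubtarget constructor. A self-contained model of the caller-facing pattern (the class below is a simplified stand-in; only the accessor names come from the diff):

```cpp
#include <iostream>

// Standalone model: getters over feature bits set at construction time,
// as in AMDGPUSubtarget.cpp above. Not the real LLVM class.
class SubtargetModel {
  bool HasSDWA;
  bool HasSDWAOmod;
  bool HasSDWAScalar;

public:
  SubtargetModel(bool SDWA, bool Omod, bool Scalar)
      : HasSDWA(SDWA), HasSDWAOmod(Omod), HasSDWAScalar(Scalar) {}
  bool hasSDWA() const { return HasSDWA; }
  bool hasSDWAOmod() const { return HasSDWAOmod; }
  bool hasSDWAScalar() const { return HasSDWAScalar; }
};

int main() {
  SubtargetModel VI(true, false, false), GFX9(true, true, true);
  // Callers ask about the capability, not the chip generation:
  std::cout << "VI omod: " << VI.hasSDWAOmod()
            << ", GFX9 omod: " << GFX9.hasSDWAOmod() << '\n';
}
```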
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 35c1c3ed3f5..c9b48fea722 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2454,7 +2454,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
continue;
const MachineOperand &MO = MI.getOperand(OpIdx);
- if (AMDGPU::isVI(ST)) {
+ if (!ST.hasSDWAScalar()) {
// Only VGPRS on VI
if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
@@ -2469,7 +2469,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
}
}
- if (AMDGPU::isVI(ST)) {
+ if (!ST.hasSDWAOmod()) {
// No omod allowed on VI
const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
if (OMod != nullptr &&
@@ -2481,14 +2481,14 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
if (isVOPC(BasicOpcode)) {
- if (AMDGPU::isVI(ST) && DstIdx != -1) {
+ if (!ST.hasSDWASdst() && DstIdx != -1) {
// Only vcc allowed as dst on VI for VOPC
const MachineOperand &Dst = MI.getOperand(DstIdx);
if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
return false;
}
- } else if (AMDGPU::isGFX9(ST)) {
+ } else if (!ST.hasSDWAClampVOPC()) {
// No clamp allowed on GFX9 for VOPC
const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
if (Clamp != nullptr &&
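Note: each verifier diagnostic above now keys on a capability predicate rather than a generation test (isVI/isGFX9), so a future subtarget that mixes these capabilities verifies correctly without touching this code. A standalone before/after sketch of the pattern (illustrative names, not LLVM's real verifier):

```cpp
#include <iostream>

enum class Gen { VI, GFX9 };

struct Caps { bool SDWAScalar, SDWAOmod, SDWASdst, SDWAClampVOPC; };

// Before this patch: the rule was tied to a chip generation.
bool sgprOperandsAllowedOld(Gen G) { return G != Gen::VI; }

// After: the rule reads straight off the capability bit, so a new
// generation only needs the right feature bits in its .td definition.
bool sgprOperandsAllowedNew(const Caps &C) { return C.SDWAScalar; }

int main() {
  Caps VI{false, false, false, true};  // no SDWA scalar/omod/sdst on VI
  Caps GFX9{true, true, true, false};  // scalar/omod/sdst, no clamp-VOPC
  std::cout << sgprOperandsAllowedOld(Gen::VI)
            << sgprOperandsAllowedNew(VI) << ' '
            << sgprOperandsAllowedOld(Gen::GFX9)
            << sgprOperandsAllowedNew(GFX9) << '\n'; // 00 11
}
```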
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index e756c86e35d..4ac23ef03cb 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -67,9 +67,9 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
void matchSDWAOperands(MachineFunction &MF);
- bool isConvertibleToSDWA(const MachineInstr &MI) const;
+ bool isConvertibleToSDWA(const MachineInstr &MI, const SISubtarget &ST) const;
bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
- void legalizeScalarOperands(MachineInstr &MI) const;
+ void legalizeScalarOperands(MachineInstr &MI, const SISubtarget &ST) const;
StringRef getPassName() const override { return "SI Peephole SDWA"; }
@@ -607,24 +607,38 @@ void SIPeepholeSDWA::matchSDWAOperands(MachineFunction &MF) {
}
}
-bool SIPeepholeSDWA::isConvertibleToSDWA(const MachineInstr &MI) const {
+bool SIPeepholeSDWA::isConvertibleToSDWA(const MachineInstr &MI,
+ const SISubtarget &ST) const {
// Check if this instruction has opcode that supports SDWA
- unsigned Opc = MI.getOpcode();
- if (AMDGPU::getSDWAOp(Opc) != -1)
- return true;
- int Opc32 = AMDGPU::getVOPe32(Opc);
- if (Opc32 != -1 && AMDGPU::getSDWAOp(Opc32) != -1) {
- if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
- return false;
+ int Opc = MI.getOpcode();
+ if (AMDGPU::getSDWAOp(Opc) == -1)
+ Opc = AMDGPU::getVOPe32(Opc);
+
+ if (Opc == -1 || AMDGPU::getSDWAOp(Opc) == -1)
+ return false;
+
+ if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
+ return false;
- if (TII->isVOPC(Opc)) {
+ if (TII->isVOPC(Opc)) {
+ if (!ST.hasSDWASdst()) {
const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
- return SDst && SDst->getReg() == AMDGPU::VCC;
- } else {
- return !TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
+ if (SDst && SDst->getReg() != AMDGPU::VCC)
+ return false;
}
+
+ if (!ST.hasSDWAClampVOPC() && TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
+ return false;
+
+ } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
+ return false;
}
- return false;
+
+ if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_MAC_F16_e32 ||
+ Opc == AMDGPU::V_MAC_F32_e32))
+ return false;
+
+ return true;
}
bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
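Note: the rewritten isConvertibleToSDWA above first canonicalizes to an opcode that has an SDWA form (falling back to the VOP32 encoding), then applies each capability gate in turn instead of branching per generation. A self-contained model of that decision flow, with opcode and operand details reduced to booleans (not the real MachineInstr API):

```cpp
#include <iostream>

// Inputs to the convertibility decision, reduced to flags.
struct InstModel {
  bool HasSDWAForm;  // getSDWAOp(Opc) != -1, possibly via getVOPe32(Opc)
  bool UsesOmod;     // omod output modifier is set
  bool IsVOPC;       // a compare instruction
  bool SDstNotVCC;   // VOPC has an sdst operand other than VCC
  bool UsesClamp;    // clamp modifier is set
  bool HasSDst;      // non-VOPC instruction writes an sdst (carry-out)
  bool IsMac;        // v_mac_f16 / v_mac_f32
};

struct Caps { bool Omod, Sdst, ClampVOPC, Mac; };

// Mirrors the control flow of isConvertibleToSDWA after this patch.
bool convertible(const InstModel &MI, const Caps &ST) {
  if (!MI.HasSDWAForm)
    return false;                  // no SDWA encoding at all
  if (!ST.Omod && MI.UsesOmod)
    return false;                  // omod needs FeatureSDWAOmod
  if (MI.IsVOPC) {
    if (!ST.Sdst && MI.SDstNotVCC)
      return false;                // VI: only VCC as VOPC dst
    if (!ST.ClampVOPC && MI.UsesClamp)
      return false;                // GFX9: no clamp on VOPC
  } else if (MI.HasSDst) {
    return false;                  // carry-out writers never convert
  }
  if (!ST.Mac && MI.IsMac)
    return false;                  // v_mac needs FeatureSDWAMac
  return true;
}

int main() {
  Caps VI{false, false, true, true}, GFX9{true, true, false, false};
  InstModel MacOmod{true, true, false, false, false, false, true};
  std::cout << convertible(MacOmod, VI) << ' '
            << convertible(MacOmod, GFX9) << '\n'; // 0 0: omod blocks VI,
                                                   // mac blocks GFX9
}
```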
@@ -690,13 +704,23 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
SDWAInst.add(*Src2);
}
- // Initialize clamp.
- if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1)
+ // Copy clamp if present, initialize otherwise
+ assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
+ MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
+ if (Clamp) {
+ SDWAInst.add(*Clamp);
+ } else {
SDWAInst.addImm(0);
+ }
- // Initialize omod.
- if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1)
+ // Copy omod if present, initialize otherwise if needed
+ MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
+ if (OMod) {
+ assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1);
+ SDWAInst.add(*OMod);
+ } else if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
SDWAInst.addImm(0);
+ }
// Initialize dst_sel and dst_unused if present
if (Dst) {
@@ -750,16 +774,25 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
}
// If an instruction was converted to SDWA it should not have immediates or SGPR
-// operands. Copy its scalar operands into VGPRs.
-void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI) const {
+// operands (one SGPR operand is allowed on GFX9). Copy its scalar operands into VGPRs.
+void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI, const SISubtarget &ST) const {
const MCInstrDesc &Desc = TII->get(MI.getOpcode());
- for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
- MachineOperand &Op = MI.getOperand(I);
+ unsigned ConstantBusCount = 0;
+ for (MachineOperand &Op: MI.explicit_uses()) {
if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
continue;
+
+ unsigned I = MI.getOperandNo(&Op);
if (Desc.OpInfo[I].RegClass == -1 ||
!TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
continue;
+
+ if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
+ TRI->isSGPRReg(*MRI, Op.getReg())) {
+ ++ConstantBusCount;
+ continue;
+ }
+
unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
@@ -775,10 +808,8 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI) const {
bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- if (!ST.hasSDWA() ||
- !AMDGPU::isVI(ST)) { // TODO: Add support for SDWA on gfx9
+ if (!ST.hasSDWA())
return false;
- }
MRI = &MF.getRegInfo();
TRI = ST.getRegisterInfo();
@@ -790,7 +821,7 @@ bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
for (const auto &OperandPair : SDWAOperands) {
const auto &Operand = OperandPair.second;
MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
- if (PotentialMI && isConvertibleToSDWA(*PotentialMI)) {
+ if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
PotentialMatches[PotentialMI].push_back(Operand.get());
}
}
@@ -805,7 +836,7 @@ bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
bool Ret = !ConvertedInstructions.empty();
while (!ConvertedInstructions.empty())
- legalizeScalarOperands(*ConvertedInstructions.pop_back_val());
+ legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
return Ret;
}
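Note: on targets with FeatureSDWAScalar, an SDWA instruction may use the constant bus once, so legalizeScalarOperands above now lets the first SGPR source through and still copies every other scalar operand into a fresh VGPR via V_MOV_B32. A standalone model of that counting loop (operand representation simplified; the helper is hypothetical, not the real pass):

```cpp
#include <iostream>
#include <vector>

enum class Kind { VGPR, SGPR, Imm };

// Models legalizeScalarOperands after this patch: with FeatureSDWAScalar,
// the first SGPR source may stay (it occupies the one constant-bus slot);
// every other scalar operand needs a V_MOV into a fresh VGPR.
int countCopiesNeeded(const std::vector<Kind> &Uses, bool HasSDWAScalar) {
  int Copies = 0;
  unsigned ConstantBusCount = 0;
  for (Kind K : Uses) {
    if (K == Kind::VGPR)
      continue; // already legal
    if (HasSDWAScalar && ConstantBusCount == 0 && K == Kind::SGPR) {
      ++ConstantBusCount; // one SGPR rides the constant bus on GFX9
      continue;
    }
    ++Copies; // needs a V_MOV_B32 into a VGPR
  }
  return Copies;
}

int main() {
  std::vector<Kind> Uses{Kind::SGPR, Kind::SGPR, Kind::VGPR};
  std::cout << countCopiesNeeded(Uses, /*VI*/ false) << ' '    // 2
            << countCopiesNeeded(Uses, /*GFX9*/ true) << '\n'; // 1
}
```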
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 2a7747c28d7..e386f21c2ba 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -401,10 +401,6 @@ class VOP_SDWA_Real <VOP_SDWA_Pseudo ps> :
let Constraints = ps.Constraints;
let DisableEncoding = ps.DisableEncoding;
- // string Mnemonic = ps.Mnemonic;
- // string AsmOperands = ps.AsmOperands;
- // string AsmOperands9 = ps.AsmOperands9;
-
// Copy relevant pseudo op flags
let SubtargetPredicate = ps.SubtargetPredicate;
let AssemblerPredicate = ps.AssemblerPredicate;