diff options
| author | Valery Pykhtin <Valery.Pykhtin@amd.com> | 2019-02-08 11:59:48 +0000 |
|---|---|---|
| committer | Valery Pykhtin <Valery.Pykhtin@amd.com> | 2019-02-08 11:59:48 +0000 |
| commit | 7fe97f8c7ca1c6dc380d7499b008808590450efb (patch) | |
| tree | ba7440a5af933f6142e607993f082d0e71e65384 /llvm/lib | |
| parent | 08dc50f2fb824dbfb909e9a9babb0f5cdbfe69c2 (diff) | |
| download | bcm5719-llvm-7fe97f8c7ca1c6dc380d7499b008808590450efb.tar.gz bcm5719-llvm-7fe97f8c7ca1c6dc380d7499b008808590450efb.zip | |
[AMDGPU] Fix DPP combiner
Differential revision: https://reviews.llvm.org/D55444
dpp move with uses and old reg initializer should be in the same BB.
bound_ctrl:0 is only considered when bank_mask and row_mask are fully enabled (0xF). Otherwise the old register value is checked for identity.
Added add, subrev, and, or instructions to the old folding function.
Kill flag is cleared for the src0 (DPP register) as it may be copied into more than one user.
The pass is still disabled by default.
llvm-svn: 353513
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp | 239 | ||||
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 26 | ||||
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.h | 6 |
3 files changed, 175 insertions, 96 deletions
diff --git a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp index a77bb8afb19..430cf480eb1 100644 --- a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp +++ b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp @@ -6,31 +6,35 @@ // //===----------------------------------------------------------------------===// // The pass combines V_MOV_B32_dpp instruction with its VALU uses as a DPP src0 -// operand.If any of the use instruction cannot be combined with the mov the +// operand. If any of the use instruction cannot be combined with the mov the // whole sequence is reverted. // // $old = ... // $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane, -// dpp_controls..., $bound_ctrl -// $res = VALU $dpp_value, ... +// dpp_controls..., $row_mask, $bank_mask, $bound_ctrl +// $res = VALU $dpp_value [, src1] // // to // -// $res = VALU_DPP $folded_old, $vgpr_to_be_read_from_other_lane, ..., -// dpp_controls..., $folded_bound_ctrl +// $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,] +// dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl // // Combining rules : // -// $bound_ctrl is DPP_BOUND_ZERO, $old is any -// $bound_ctrl is DPP_BOUND_OFF, $old is 0 +// if $row_mask and $bank_mask are fully enabled (0xF) and +// $bound_ctrl==DPP_BOUND_ZERO or $old==0 +// -> $combined_old = undef, +// $combined_bound_ctrl = DPP_BOUND_ZERO // -// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_ZERO -// $bound_ctrl is DPP_BOUND_OFF, $old is undef +// if the VALU op is binary and +// $bound_ctrl==DPP_BOUND_OFF and +// $old==identity value (immediate) for the VALU op +// -> $combined_old = src1, +// $combined_bound_ctrl = DPP_BOUND_OFF // -// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_OFF -// $bound_ctrl is DPP_BOUND_OFF, $old is foldable +// Otherwise cancel. 
// -// ->$folded_old = folded value, $folded_bound_ctrl = DPP_BOUND_OFF +// The mov_dpp instruction should reside in the same BB as all its uses //===----------------------------------------------------------------------===// #include "AMDGPU.h" @@ -66,20 +70,16 @@ class GCNDPPCombine : public MachineFunctionPass { MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const; - RegSubRegPair foldOldOpnd(MachineInstr &OrigMI, - RegSubRegPair OldOpndVGPR, - MachineOperand &OldOpndValue) const; - MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI, - RegSubRegPair OldOpndVGPR, + RegSubRegPair CombOldVGPR, MachineOperand *OldOpnd, - bool BoundCtrlZero) const; + bool CombBCZ) const; MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI, - RegSubRegPair OldOpndVGPR, - bool BoundCtrlZero) const; + RegSubRegPair CombOldVGPR, + bool CombBCZ) const; bool hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName, @@ -152,8 +152,8 @@ MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const { MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI, - RegSubRegPair OldOpndVGPR, - bool BoundCtrlZero) const { + RegSubRegPair CombOldVGPR, + bool CombBCZ) const { assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp); assert(TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg() == TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)->getReg()); @@ -177,9 +177,15 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old); if (OldIdx != -1) { assert(OldIdx == NumOperands); - assert(isOfRegClass(OldOpndVGPR, AMDGPU::VGPR_32RegClass, *MRI)); - DPPInst.addReg(OldOpndVGPR.Reg, 0, OldOpndVGPR.SubReg); + assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI)); + DPPInst.addReg(CombOldVGPR.Reg, 0, CombOldVGPR.SubReg); ++NumOperands; + } else { + // TODO: this discards MAC/FMA instructions for now, let's add it later + 
LLVM_DEBUG(dbgs() << " failed: no old operand in DPP instruction," + " TBD\n"); + Fail = true; + break; } if (auto *Mod0 = TII->getNamedOperand(OrigMI, @@ -198,6 +204,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, break; } DPPInst.add(*Src0); + DPPInst->getOperand(NumOperands).setIsKill(false); ++NumOperands; if (auto *Mod1 = TII->getNamedOperand(OrigMI, @@ -230,7 +237,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl)); DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask)); DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask)); - DPPInst.addImm(BoundCtrlZero ? 1 : 0); + DPPInst.addImm(CombBCZ ? 1 : 0); } while (false); if (Fail) { @@ -241,64 +248,68 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, return DPPInst.getInstr(); } -GCNDPPCombine::RegSubRegPair -GCNDPPCombine::foldOldOpnd(MachineInstr &OrigMI, - RegSubRegPair OldOpndVGPR, - MachineOperand &OldOpndValue) const { - assert(OldOpndValue.isImm()); - switch (OrigMI.getOpcode()) { +static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) { + assert(OldOpnd->isImm()); + switch (OrigMIOp) { default: break; + case AMDGPU::V_ADD_U32_e32: + case AMDGPU::V_ADD_I32_e32: + case AMDGPU::V_OR_B32_e32: + case AMDGPU::V_SUBREV_U32_e32: + case AMDGPU::V_SUBREV_I32_e32: case AMDGPU::V_MAX_U32_e32: - if (OldOpndValue.getImm() == std::numeric_limits<uint32_t>::max()) - return OldOpndVGPR; + case AMDGPU::V_XOR_B32_e32: + if (OldOpnd->getImm() == 0) + return true; break; - case AMDGPU::V_MAX_I32_e32: - if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::max()) - return OldOpndVGPR; + case AMDGPU::V_AND_B32_e32: + case AMDGPU::V_MIN_U32_e32: + if (static_cast<uint32_t>(OldOpnd->getImm()) == + std::numeric_limits<uint32_t>::max()) + return true; break; case AMDGPU::V_MIN_I32_e32: - if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::min()) - 
return OldOpndVGPR; + if (static_cast<int32_t>(OldOpnd->getImm()) == + std::numeric_limits<int32_t>::max()) + return true; + break; + case AMDGPU::V_MAX_I32_e32: + if (static_cast<int32_t>(OldOpnd->getImm()) == + std::numeric_limits<int32_t>::min()) + return true; break; - case AMDGPU::V_MUL_I32_I24_e32: case AMDGPU::V_MUL_U32_U24_e32: - if (OldOpndValue.getImm() == 1) { - auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1); - assert(Src1 && Src1->isReg()); - return getRegSubRegPair(*Src1); - } + if (OldOpnd->getImm() == 1) + return true; break; } - return RegSubRegPair(); + return false; } -// Cases to combine: -// $bound_ctrl is DPP_BOUND_ZERO, $old is any -// $bound_ctrl is DPP_BOUND_OFF, $old is 0 -// -> $old = undef, $bound_ctrl = DPP_BOUND_ZERO - -// $bound_ctrl is DPP_BOUND_OFF, $old is undef -// -> $old = undef, $bound_ctrl = DPP_BOUND_OFF - -// $bound_ctrl is DPP_BOUND_OFF, $old is foldable -// -> $old = folded value, $bound_ctrl = DPP_BOUND_OFF - MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI, - RegSubRegPair OldOpndVGPR, + RegSubRegPair CombOldVGPR, MachineOperand *OldOpndValue, - bool BoundCtrlZero) const { - assert(OldOpndVGPR.Reg); - if (!BoundCtrlZero && OldOpndValue) { - assert(OldOpndValue->isImm()); - OldOpndVGPR = foldOldOpnd(OrigMI, OldOpndVGPR, *OldOpndValue); - if (!OldOpndVGPR.Reg) { - LLVM_DEBUG(dbgs() << " failed: old immediate cannot be folded\n"); + bool CombBCZ) const { + assert(CombOldVGPR.Reg); + if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) { + auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1); + if (!Src1 || !Src1->isReg()) { + LLVM_DEBUG(dbgs() << " failed: no src1 or it isn't a register\n"); + return nullptr; + } + if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) { + LLVM_DEBUG(dbgs() << " failed: old immediate ins't an identity\n"); + return nullptr; + } + CombOldVGPR = getRegSubRegPair(*Src1); + if (!isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, 
*MRI)) { + LLVM_DEBUG(dbgs() << " failed: src1 isn't a VGPR32 register\n"); return nullptr; } } - return createDPPInst(OrigMI, MovMI, OldOpndVGPR, BoundCtrlZero); + return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ); } // returns true if MI doesn't have OpndName immediate operand or the @@ -315,31 +326,64 @@ bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName, bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const { assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp); + LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI); + + auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst); + assert(DstOpnd && DstOpnd->isReg()); + auto DPPMovReg = DstOpnd->getReg(); + if (!isEXECMaskConstantBetweenDefAndUses(DPPMovReg, *MRI)) { + LLVM_DEBUG(dbgs() << " failed: EXEC mask should remain the same" + " for all uses\n"); + return false; + } + + auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask); + assert(RowMaskOpnd && RowMaskOpnd->isImm()); + auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask); + assert(BankMaskOpnd && BankMaskOpnd->isImm()); + const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF && + BankMaskOpnd->getImm() == 0xF; + auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl); assert(BCZOpnd && BCZOpnd->isImm()); - bool BoundCtrlZero = 0 != BCZOpnd->getImm(); - - LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI); + bool BoundCtrlZero = BCZOpnd->getImm(); auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old); assert(OldOpnd && OldOpnd->isReg()); - auto OldOpndVGPR = getRegSubRegPair(*OldOpnd); - auto *OldOpndValue = getOldOpndValue(*OldOpnd); + + auto * const OldOpndValue = getOldOpndValue(*OldOpnd); + // OldOpndValue is either undef (IMPLICIT_DEF) or immediate or something else + // We could use: assert(!OldOpndValue || OldOpndValue->isImm()) + // but the third option is used to distinguish undef from non-immediate + // to reuse IMPLICIT_DEF instruction 
later assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd); - if (OldOpndValue) { - if (BoundCtrlZero) { - OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef, ignore old opnd - OldOpndValue = nullptr; - } else { - if (!OldOpndValue->isImm()) { - LLVM_DEBUG(dbgs() << " failed: old operand isn't an imm or undef\n"); - return false; - } - if (OldOpndValue->getImm() == 0) { - OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef - OldOpndValue = nullptr; - BoundCtrlZero = true; + + bool CombBCZ = false; + + if (MaskAllLanes && BoundCtrlZero) { // [1] + CombBCZ = true; + } else { + if (!OldOpndValue || !OldOpndValue->isImm()) { + LLVM_DEBUG(dbgs() << " failed: the DPP mov isn't combinable\n"); + return false; + } + + if (OldOpndValue->getParent()->getParent() != MovMI.getParent()) { + LLVM_DEBUG(dbgs() << + " failed: old reg def and mov should be in the same BB\n"); + return false; + } + + if (OldOpndValue->getImm() == 0) { + if (MaskAllLanes) { + assert(!BoundCtrlZero); // by check [1] + CombBCZ = true; } + } else if (BoundCtrlZero) { + assert(!MaskAllLanes); // by check [1] + LLVM_DEBUG(dbgs() << + " failed: old!=0 and bctrl:0 and not all lanes isn't combinable\n"); + return false; } } @@ -347,25 +391,28 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const { if (!OldOpndValue) dbgs() << "undef"; else - dbgs() << OldOpndValue->getImm(); - dbgs() << ", bound_ctrl=" << BoundCtrlZero << '\n'); - - std::vector<MachineInstr*> OrigMIs, DPPMIs; - if (!OldOpndVGPR.Reg) { // OldOpndVGPR = undef - OldOpndVGPR = RegSubRegPair( + dbgs() << *OldOpndValue; + dbgs() << ", bound_ctrl=" << CombBCZ << '\n'); + + SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs; + auto CombOldVGPR = getRegSubRegPair(*OldOpnd); + // try to reuse previous old reg if its undefined (IMPLICIT_DEF) + if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef + CombOldVGPR = RegSubRegPair( MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass)); auto UndefInst = 
BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(), - TII->get(AMDGPU::IMPLICIT_DEF), OldOpndVGPR.Reg); + TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg); DPPMIs.push_back(UndefInst.getInstr()); } OrigMIs.push_back(&MovMI); bool Rollback = true; - for (auto &Use : MRI->use_nodbg_operands( - TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg())) { + for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) { Rollback = true; auto &OrigMI = *Use.getParent(); + LLVM_DEBUG(dbgs() << " try: " << OrigMI); + auto OrigOp = OrigMI.getOpcode(); if (TII->isVOP3(OrigOp)) { if (!TII->hasVALU32BitEncoding(OrigOp)) { @@ -388,8 +435,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const { LLVM_DEBUG(dbgs() << " combining: " << OrigMI); if (&Use == TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)) { - if (auto *DPPInst = createDPPInst(OrigMI, MovMI, OldOpndVGPR, - OldOpndValue, BoundCtrlZero)) { + if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR, + OldOpndValue, CombBCZ)) { DPPMIs.push_back(DPPInst); Rollback = false; } @@ -400,8 +447,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const { BB->insert(OrigMI, NewMI); if (TII->commuteInstruction(*NewMI)) { LLVM_DEBUG(dbgs() << " commuted: " << *NewMI); - if (auto *DPPInst = createDPPInst(*NewMI, MovMI, OldOpndVGPR, - OldOpndValue, BoundCtrlZero)) { + if (auto *DPPInst = createDPPInst(*NewMI, MovMI, CombOldVGPR, + OldOpndValue, CombBCZ)) { DPPMIs.push_back(DPPInst); Rollback = false; } diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 06c080c8af8..c6cd7a1a4a6 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -5646,3 +5646,29 @@ MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, } return nullptr; } + +bool llvm::isEXECMaskConstantBetweenDefAndUses(unsigned VReg, + MachineRegisterInfo &MRI) { + assert(MRI.isSSA() && "Must be run on SSA"); + auto *TRI = 
MRI.getTargetRegisterInfo(); + + auto *DefI = MRI.getVRegDef(VReg); + auto *BB = DefI->getParent(); + + DenseSet<MachineInstr*> Uses; + for (auto &Use : MRI.use_nodbg_operands(VReg)) { + auto *I = Use.getParent(); + if (I->getParent() != BB) + return false; + Uses.insert(I); + } + + auto E = BB->end(); + for (auto I = std::next(DefI->getIterator()); I != E; ++I) { + Uses.erase(&*I); + // don't check the last use + if (Uses.empty() || I->modifiesRegister(AMDGPU::EXEC, TRI)) + break; + } + return Uses.empty(); +} diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index 2aa9a33c5aa..b96c40def00 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -953,6 +953,12 @@ TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI, MachineInstr *getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, MachineRegisterInfo &MRI); +/// \brief Return true if EXEC mask isn't changed between the def and +/// all uses of VReg. Currently if def and uses are in different BBs - +/// simply return false. Should be run on SSA. +bool isEXECMaskConstantBetweenDefAndUses(unsigned VReg, + MachineRegisterInfo &MRI); + namespace AMDGPU { LLVM_READONLY |

