From a9cc92c247ce5d0ecc3399e7af6e40a3d59bbf6c Mon Sep 17 00:00:00 2001
From: Nicolai Haehnle
Date: Fri, 30 Nov 2018 22:55:29 +0000
Subject: AMDGPU: Fix various issues around the VirtReg2Value mapping

Summary:
The VirtReg2Value mapping is crucial for getting consistently reliable
divergence information into the SelectionDAG. This patch fixes a bunch
of issues that lead to incorrect divergence info and introduces tight
assertions to ensure we don't regress:

1. VirtReg2Value is generated lazily; there were some cases where a
   lookup was performed before all relevant virtual registers were
   created, leading to an out-of-sync mapping. Those cases were:

   - Complex code to lower formal arguments that generated CopyFromReg
     nodes from live-in registers (fixed by never querying the mapping
     for live-in registers).

   - Code that generates CopyToReg for formal arguments that are used
     outside the entry basic block (fixed by never querying the mapping
     for Register nodes, which don't need the divergence info anyway).

2. For complex values that are lowered to a sequence of registers, all
   registers must be reflected in the VirtReg2Value mapping.

I am not adding any new tests, since I'm not actually aware of any bugs
that these problems are causing with trunk as-is. However, I recently
added a test case (in r346423) which fails when D53283 is applied
without this change. Also, the new assertions should provide most of
the effective test coverage.

There is one test change in sdwa-peephole.ll. The underlying issue is
that since the divergence info is now correct, the DAGISel will select
V_OR_B32 directly instead of S_OR_B32. This leads to an extra COPY
which affects the behavior of MachineLICM in a way that ends up with
the S_MOV_B32 with the constant in a different basic block than the
V_OR_B32, which is presumably what defeats the peephole.

Reviewers: alex-t, arsenm, rampitec

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D54340

llvm-svn: 348049
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 65 +++++++++++++++++--------------
 1 file changed, 36 insertions(+), 29 deletions(-)

(limited to 'llvm/lib/Target')

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9f5198042e4..dbcd1bf0c76 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9303,42 +9303,49 @@ void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
   Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
 }
 
+LLVM_ATTRIBUTE_UNUSED
+static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
+  assert(N->getOpcode() == ISD::CopyFromReg);
+  do {
+    // Follow the chain until we find an INLINEASM node.
+    N = N->getOperand(0).getNode();
+    if (N->getOpcode() == ISD::INLINEASM)
+      return true;
+  } while (N->getOpcode() == ISD::CopyFromReg);
+  return false;
+}
+
 bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
   FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
 {
   switch (N->getOpcode()) {
-    case ISD::Register:
     case ISD::CopyFromReg:
     {
-      const RegisterSDNode *R = nullptr;
-      if (N->getOpcode() == ISD::Register) {
-        R = dyn_cast<RegisterSDNode>(N);
-      }
-      else {
-        R = dyn_cast<RegisterSDNode>(N->getOperand(1));
-      }
-      if (R)
-      {
-        const MachineFunction * MF = FLI->MF;
-        const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-        const MachineRegisterInfo &MRI = MF->getRegInfo();
-        const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
-        unsigned Reg = R->getReg();
-        if (TRI.isPhysicalRegister(Reg))
-          return TRI.isVGPR(MRI, Reg);
-
-        if (MRI.isLiveIn(Reg)) {
-          // workitem.id.x workitem.id.y workitem.id.z
-          // Any VGPR formal argument is also considered divergent
-          if (TRI.isVGPR(MRI, Reg))
-            return true;
-          // Formal arguments of non-entry functions
-          // are conservatively considered divergent
-          else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
-            return true;
-        }
-        return !KDA || KDA->isDivergent(FLI->getValueFromVirtualReg(Reg));
+      const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
+      const MachineFunction * MF = FLI->MF;
+      const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+      const MachineRegisterInfo &MRI = MF->getRegInfo();
+      const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
+      unsigned Reg = R->getReg();
+      if (TRI.isPhysicalRegister(Reg))
+        return !TRI.isSGPRReg(MRI, Reg);
+
+      if (MRI.isLiveIn(Reg)) {
+        // workitem.id.x workitem.id.y workitem.id.z
+        // Any VGPR formal argument is also considered divergent
+        if (!TRI.isSGPRReg(MRI, Reg))
+          return true;
+        // Formal arguments of non-entry functions
+        // are conservatively considered divergent
+        else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
+          return true;
+        return false;
       }
+      const Value *V = FLI->getValueFromVirtualReg(Reg);
+      if (V)
+        return KDA->isDivergent(V);
+      assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
+      return !TRI.isSGPRReg(MRI, Reg);
     }
     break;
     case ISD::LOAD: {
--
cgit v1.2.3