Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIISelLowering.cpp'):
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 107 +++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ba637d4e5a6..18098170c9b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10944,3 +10944,110 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}
+
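+// Overrides TargetLoweringBase::getRegClassFor: uniform (non-divergent)
+// values are assigned scalar (SGPR) register classes and divergent values
+// vector (VGPR) classes. Uniform i1 values get a wave-size-dependent SGPR
+// class in place of VReg_1.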
+const TargetRegisterClass *
+SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
+ const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);
+ const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ if (RC == &AMDGPU::VReg_1RegClass && !isDivergent)
+ return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass
+ : &AMDGPU::SReg_32RegClass;
+ if (!TRI->isSGPRClass(RC) && !isDivergent)
+ return TRI->getEquivalentSGPRClass(RC);
+ else if (TRI->isSGPRClass(RC) && isDivergent)
+ return TRI->getEquivalentVGPRClass(RC);
+
+ return RC;
+}
+
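+// Returns true if V is consumed, directly or through its users, by one of
+// the structured control-flow intrinsics (if, else, if_break, end_cf,
+// loop) at the checked operand position, in which case V must be available
+// in a scalar register. Visited guards the recursion against revisiting
+// the same value.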
+static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited) {
+ if (!Visited.insert(V).second)
+ return false;
+ bool Result = false;
+ for (auto U : V->users()) {
+ if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
+ if (V == U->getOperand(1)) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ Result = false;
+ break;
+ case Intrinsic::amdgcn_if_break:
+ case Intrinsic::amdgcn_if:
+ case Intrinsic::amdgcn_else:
+ Result = true;
+ break;
+ }
+ }
+ if (V == U->getOperand(0)) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ Result = false;
+ break;
+ case Intrinsic::amdgcn_end_cf:
+ case Intrinsic::amdgcn_loop:
+ Result = true;
+ break;
+ }
+ }
+ } else {
+ Result = hasCFUser(U, Visited);
+ }
+ if (Result)
+ break;
+ }
+ return Result;
+}
+
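+// Returns true if V must be assigned a uniform (scalar) register even if
+// divergence analysis considers it divergent: results of amdgcn_if_break,
+// the mask element (index 1) extracted from amdgcn_if/amdgcn_else results,
+// inline-asm outputs constrained to SGPRs, and values consumed by the
+// control-flow intrinsics (via hasCFUser).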
+bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
+ const Value *V) const {
+ if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::amdgcn_if_break:
+ return true;
+ }
+ }
+ if (const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V)) {
+ if (const IntrinsicInst *Intrinsic =
+ dyn_cast<IntrinsicInst>(ExtValue->getOperand(0))) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::amdgcn_if:
+ case Intrinsic::amdgcn_else: {
+ ArrayRef<unsigned> Indices = ExtValue->getIndices();
+ if (Indices.size() == 1 && Indices[0] == 1) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ if (const CallInst *CI = dyn_cast<CallInst>(V)) {
+ if (isa<InlineAsm>(CI->getCalledValue())) {
+ const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo();
+ ImmutableCallSite CS(CI);
+ TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints(
+ MF.getDataLayout(), Subtarget->getRegisterInfo(), CS);
+ for (auto &TC : TargetConstraints) {
+ if (TC.Type == InlineAsm::isOutput) {
+ ComputeConstraintToUse(TC, SDValue());
+ unsigned AssignedReg;
+ const TargetRegisterClass *RC;
+ std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint(
+ SIRI, TC.ConstraintCode, TC.ConstraintVT);
+ if (RC) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg))
+ return true;
+ else if (SIRI->isSGPRClass(RC))
+ return true;
+ }
+ }
+ }
+ }
+ }
+ SmallPtrSet<const Value *, 16> Visited;
+ return hasCFUser(V, Visited);
+}
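
Note: below is a minimal sketch of how the two hooks above are intended to
compose during instruction selection. The helper name pickRegClassFor and its
parameters are illustrative assumptions, not part of the patch; in-tree, an
equivalent query is made when virtual registers are created for cross-block
values (FunctionLoweringInfo).

  // Illustrative sketch only (not from the patch). `TLI` is the target's
  // SITargetLowering instance; `IsDivergent` is the divergence-analysis
  // verdict for V.
  const TargetRegisterClass *
  pickRegClassFor(const SITargetLowering &TLI, MachineFunction &MF,
                  const Value *V, MVT VT, bool IsDivergent) {
    // requiresUniformRegister() overrides divergence: values feeding the
    // structured control-flow intrinsics, and inline-asm outputs constrained
    // to SGPRs, must stay in scalar registers regardless of the analysis.
    if (TLI.requiresUniformRegister(MF, V))
      IsDivergent = false;
    return TLI.getRegClassFor(VT, IsDivergent);
  }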