diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-06-18 12:23:45 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-06-18 12:23:45 +0000 |
| commit | e75e197ad879177bc643d4c5d5596bbe173555e7 (patch) | |
| tree | 2cc398aacb24239e8ca7e42a8dfcba3fd951e1c1 | |
| parent | 23f03f5059c85ef70134d4f774a372a8b5f7f341 (diff) | |
| download | bcm5719-llvm-e75e197ad879177bc643d4c5d5596bbe173555e7.tar.gz bcm5719-llvm-e75e197ad879177bc643d4c5d5596bbe173555e7.zip | |
AMDGPU: Remove unnecessary check for virtual register
The copy was found by searching the uses of a virtual register, so
it's already known to be virtual.
llvm-svn: 363669
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 21 |
1 file changed, 4 insertions(+), 17 deletions(-)
```diff
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 7fdbf97fe40..61a4407cbcf 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -271,17 +271,9 @@ static bool updateOperand(FoldCandidate &Fold,
   }
 
   MachineOperand *New = Fold.OpToFold;
-  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
-      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
-    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
-
-    Old.setIsUndef(New->isUndef());
-    return true;
-  }
-
-  // FIXME: Handle physical registers.
-
-  return false;
+  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
+  Old.setIsUndef(New->isUndef());
+  return true;
 }
 
 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
@@ -503,7 +495,6 @@ void SIFoldOperands::foldOperand(
   } else {
     if (UseMI->isCopy() && OpToFold.isReg() &&
         TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
-        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
         !UseMI->getOperand(1).getSubReg()) {
@@ -539,14 +530,10 @@ void SIFoldOperands::foldOperand(
   const TargetRegisterClass *FoldRC =
     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
 
-
   // Split 64-bit constants into 32-bits for folding.
   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
     unsigned UseReg = UseOp.getReg();
-    const TargetRegisterClass *UseRC
-      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
-      MRI->getRegClass(UseReg) :
-      TRI->getPhysRegClass(UseReg);
+    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
 
     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
       return;
```

