| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-09-14 15:51:33 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-09-14 15:51:33 +0000 |
| commit | 2bc198a333a3e6cf6b65e6a4d28018ab8a4efe18 (patch) | |
| tree | 3764b5853bacfa9e6349f5c039d4c7bcde33fccc /llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | |
| parent | 5f6bb6cd24ee981fa50bdd4c424eead3e1429e76 (diff) | |
| download | bcm5719-llvm-2bc198a333a3e6cf6b65e6a4d28018ab8a4efe18.tar.gz, bcm5719-llvm-2bc198a333a3e6cf6b65e6a4d28018ab8a4efe18.zip | |
AMDGPU: Support folding FrameIndex operands
This avoids test regressions in a future commit.
llvm-svn: 281491
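For context, the core of the change below is to widen `FoldCandidate` from a register-or-immediate pair into a small discriminated union keyed by the operand kind. A minimal standalone sketch of that pattern, using hypothetical stand-in types (`Operand`, `OperandKind`) rather than the real `MachineOperand` API:

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>

// Stand-in types; names here are illustrative, not the real LLVM API.
enum class OperandKind { Register, Immediate, FrameIndex };

struct Operand {
  OperandKind Kind;
  uint64_t Imm;
  int FI;
};

// Mirrors the shape of the patch's FoldCandidate: one union slot,
// discriminated by an explicit Kind field.
struct FoldCandidate {
  union {
    Operand *OpToFold;     // valid when Kind == Register
    uint64_t ImmToFold;    // valid when Kind == Immediate
    int FrameIndexToFold;  // valid when Kind == FrameIndex
  };
  OperandKind Kind;

  explicit FoldCandidate(Operand *FoldOp)
      : OpToFold(nullptr), Kind(FoldOp->Kind) {
    if (FoldOp->Kind == OperandKind::Immediate)
      ImmToFold = FoldOp->Imm;
    else if (FoldOp->Kind == OperandKind::FrameIndex)
      FrameIndexToFold = FoldOp->FI;
    else
      OpToFold = FoldOp;
  }

  bool isImm() const { return Kind == OperandKind::Immediate; }
  bool isFI() const { return Kind == OperandKind::FrameIndex; }
  bool isReg() const { return Kind == OperandKind::Register; }
};

int main() {
  Operand FrameIndexOp{OperandKind::FrameIndex, 0, /*FI=*/2};
  FoldCandidate C(&FrameIndexOp);
  assert(C.isFI() && !C.isImm() && !C.isReg());
  std::cout << "candidate folds frame index #" << C.FrameIndexToFold << "\n";
}
```

Storing the kind explicitly is what makes the `isImm()`/`isFI()`/`isReg()` queries reliable: the old sentinel test (`isImm()` was `return !OpToFold;`) cannot distinguish a third operand kind. The patch also narrows `UseOpNo` to `unsigned char`, presumably to keep the candidate struct compact.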
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFoldOperands.cpp')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 35 |

1 file changed, 26 insertions, 9 deletions
```diff
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index b55dee68d51..e1257b1d33e 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -48,24 +48,36 @@ public:
 
 struct FoldCandidate {
   MachineInstr *UseMI;
-  unsigned UseOpNo;
-  MachineOperand *OpToFold;
-  uint64_t ImmToFold;
+  union {
+    MachineOperand *OpToFold;
+    uint64_t ImmToFold;
+    int FrameIndexToFold;
+  };
+  unsigned char UseOpNo;
+  MachineOperand::MachineOperandType Kind;
 
   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
-    UseMI(MI), UseOpNo(OpNo) {
-
+    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
     if (FoldOp->isImm()) {
-      OpToFold = nullptr;
       ImmToFold = FoldOp->getImm();
+    } else if (FoldOp->isFI()) {
+      FrameIndexToFold = FoldOp->getIndex();
     } else {
       assert(FoldOp->isReg());
       OpToFold = FoldOp;
     }
   }
 
+  bool isFI() const {
+    return Kind == MachineOperand::MO_FrameIndex;
+  }
+
   bool isImm() const {
-    return !OpToFold;
+    return Kind == MachineOperand::MO_Immediate;
+  }
+
+  bool isReg() const {
+    return Kind == MachineOperand::MO_Register;
   }
 };
 
@@ -107,6 +119,11 @@ static bool updateOperand(FoldCandidate &Fold,
     return true;
   }
 
+  if (Fold.isFI()) {
+    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
+    return true;
+  }
+
   MachineOperand *New = Fold.OpToFold;
   if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
       TargetRegisterInfo::isVirtualRegister(New->getReg())) {
@@ -448,7 +465,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
 
       unsigned OpSize = TII->getOpSize(MI, 1);
       MachineOperand &OpToFold = MI.getOperand(1);
-      bool FoldingImm = OpToFold.isImm();
+      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
 
       // FIXME: We could also be folding things like FrameIndexes and
       // TargetIndexes.
@@ -500,7 +517,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
       for (FoldCandidate &Fold : FoldList) {
         if (updateOperand(Fold, TRI)) {
           // Clear kill flags.
-          if (!Fold.isImm()) {
+          if (Fold.isReg()) {
             assert(Fold.OpToFold && Fold.OpToFold->isReg());
             // FIXME: Probably shouldn't bother trying to fold if not an
             // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
```
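The other half of the patch is the dispatch in `updateOperand()`, which now rewrites a use operand in place when the candidate is a frame index. A hedged sketch of that control flow with simplified stand-in types (`ChangeToImmediate` and `ChangeToFrameIndex` are real `MachineOperand` methods; everything else here is illustrative):

```cpp
#include <cstdint>
#include <iostream>

// Simplified stand-ins; the real code rewrites an llvm::MachineOperand.
enum class Kind { Register, Immediate, FrameIndex };

struct UseOperand {
  Kind K = Kind::Register;
  uint64_t Imm = 0;
  int FI = 0;
  void ChangeToImmediate(uint64_t V) { K = Kind::Immediate; Imm = V; }
  void ChangeToFrameIndex(int Idx) { K = Kind::FrameIndex; FI = Idx; }
};

struct Fold {
  Kind K;
  uint64_t ImmToFold;
  int FrameIndexToFold;
  bool isImm() const { return K == Kind::Immediate; }
  bool isFI() const { return K == Kind::FrameIndex; }
};

// Mirrors the shape of the patched updateOperand(): immediate and
// frame-index folds rewrite the use operand directly; register folds
// need further checks and are elided in this sketch.
bool updateOperand(const Fold &F, UseOperand &Old) {
  if (F.isImm()) {
    Old.ChangeToImmediate(F.ImmToFold);
    return true;
  }
  if (F.isFI()) {
    Old.ChangeToFrameIndex(F.FrameIndexToFold);
    return true;
  }
  return false; // register case elided
}

int main() {
  UseOperand Use;
  Fold F{Kind::FrameIndex, 0, /*FrameIndexToFold=*/3};
  if (updateOperand(F, Use))
    std::cout << "use now refers to frame index #" << Use.FI << "\n";
}
```

This is also why the final hunk tightens `!Fold.isImm()` to `Fold.isReg()` before clearing kill flags: once frame indexes are a third candidate kind, "not an immediate" no longer implies "a register".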