| author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2019-06-16 17:13:09 +0000 |
|---|---|---|
| committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2019-06-16 17:13:09 +0000 |
| commit | 52500216727e39fa90b33d914e93338c88b0abeb (patch) | |
| tree | f86e334dba0b0d62b25aef7b0329d0b934593af7 /llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | |
| parent | e20b388e2f923bfc98f63a13fea9fc19aeaec425 (diff) | |
[AMDGPU] gfx10 conditional registers handling
This is the cpp source part of wave32 support, excluding the overridden
getRegClass().
Differential Revision: https://reviews.llvm.org/D63351
llvm-svn: 363513
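
For orientation, the sketch below models the pattern this patch introduces: pick the wave-size-dependent scalar opcodes and exec register once per function, store them in member fields, and let every emission site use those fields instead of hard-coded 64-bit forms. It is a standalone illustration rather than LLVM code: the struct, the selectOpcodes() helper, and the mnemonic strings are stand-ins for the real member fields (AndOpc, OrOpc, ..., Exec) and opcode enums (AMDGPU::S_AND_B32, AMDGPU::EXEC_LO, and friends) that appear in the diff below.

```cpp
#include <cstdio>
#include <initializer_list>

// Stand-ins for the wave-size-dependent opcodes and exec register that the
// patch caches in SILowerControlFlow member fields (AndOpc, OrOpc, ..., Exec).
struct WaveOpcodes {
  const char *AndOpc;
  const char *OrOpc;
  const char *XorOpc;
  const char *MovTermOpc;
  const char *Andn2TermOpc;
  const char *XorTermOpc;
  const char *OrSaveExecOpc;
  const char *Exec;
};

// Mirrors the `if (ST.isWave32()) { ... } else { ... }` block the patch adds
// to SILowerControlFlow::runOnMachineFunction (see the diff below).
WaveOpcodes selectOpcodes(bool IsWave32) {
  if (IsWave32)
    return {"s_and_b32", "s_or_b32", "s_xor_b32", "s_mov_b32_term",
            "s_andn2_b32_term", "s_xor_b32_term", "s_or_saveexec_b32",
            "exec_lo"};
  return {"s_and_b64", "s_or_b64", "s_xor_b64", "s_mov_b64_term",
          "s_andn2_b64_term", "s_xor_b64_term", "s_or_saveexec_b64",
          "exec"};
}

int main() {
  for (bool IsWave32 : {true, false}) {
    WaveOpcodes Ops = selectOpcodes(IsWave32);
    // emitLoop, for example, now builds "<Andn2TermOpc> <Exec>, <Exec>, <mask>"
    // from these fields instead of hard-coding the 64-bit terminator form.
    std::printf("wave%d loop exit: %s %s, %s, <break mask>\n",
                IsWave32 ? 32 : 64, Ops.Andn2TermOpc, Ops.Exec, Ops.Exec);
  }
  return 0;
}
```

The only point of the sketch is that a single wave32/wave64 decision parameterizes every later exec-mask operation; the real selection in the pass is shown in the diff below.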
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | 81 |
1 file changed, 57 insertions, 24 deletions
```diff
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index a399f7715a2..6aed25bfff7 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -82,6 +82,16 @@ private:
   LiveIntervals *LIS = nullptr;
   MachineRegisterInfo *MRI = nullptr;
 
+  const TargetRegisterClass *BoolRC = nullptr;
+  unsigned AndOpc;
+  unsigned OrOpc;
+  unsigned XorOpc;
+  unsigned MovTermOpc;
+  unsigned Andn2TermOpc;
+  unsigned XorTermrOpc;
+  unsigned OrSaveExecOpc;
+  unsigned Exec;
+
   void emitIf(MachineInstr &MI);
   void emitElse(MachineInstr &MI);
   void emitIfBreak(MachineInstr &MI);
@@ -188,16 +198,16 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
   // Add an implicit def of exec to discourage scheduling VALU after this which
   // will interfere with trying to form s_and_saveexec_b64 later.
   unsigned CopyReg = SimpleIf ? SaveExecReg
-                    : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+                    : MRI->createVirtualRegister(BoolRC);
   MachineInstr *CopyExec =
     BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
-    .addReg(AMDGPU::EXEC)
-    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);
+    .addReg(Exec)
+    .addReg(Exec, RegState::ImplicitDefine);
 
-  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+  unsigned Tmp = MRI->createVirtualRegister(BoolRC);
 
   MachineInstr *And =
-    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
+    BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
     .addReg(CopyReg)
     .add(Cond);
 
@@ -206,7 +216,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
   MachineInstr *Xor = nullptr;
   if (!SimpleIf) {
     Xor =
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
+      BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
       .addReg(Tmp)
       .addReg(CopyReg);
     setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
@@ -215,7 +225,7 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
   // Use a copy that is a terminator to get correct spill code placement it with
   // fast regalloc.
   MachineInstr *SetExec =
-    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
+    BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
     .addReg(Tmp, RegState::Kill);
 
   // Insert a pseudo terminator to help keep the verifier happy. This will also
@@ -265,7 +275,7 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
   // We are running before TwoAddressInstructions, and si_else's operands are
   // tied. In order to correctly tie the registers, split this into a copy of
   // the src like it does.
-  unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+  unsigned CopyReg = MRI->createVirtualRegister(BoolRC);
   MachineInstr *CopyExec =
     BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
     .add(MI.getOperand(1)); // Saved EXEC
@@ -273,9 +283,9 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
   // This must be inserted before phis and any spill code inserted before the
   // else.
   unsigned SaveReg = ExecModified ?
-    MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg;
+    MRI->createVirtualRegister(BoolRC) : DstReg;
   MachineInstr *OrSaveExec =
-    BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg)
+    BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
     .addReg(CopyReg);
 
   MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
@@ -284,8 +294,8 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
 
   if (ExecModified) {
     MachineInstr *And =
-      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
-      .addReg(AMDGPU::EXEC)
+      BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
+      .addReg(Exec)
       .addReg(SaveReg);
 
     if (LIS)
@@ -293,8 +303,8 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
   }
 
   MachineInstr *Xor =
-    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
-    .addReg(AMDGPU::EXEC)
+    BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
+    .addReg(Exec)
     .addReg(DstReg);
 
   MachineInstr *Branch =
@@ -347,14 +357,14 @@ void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
   // exit" mask.
   MachineInstr *And = nullptr, *Or = nullptr;
   if (!SkipAnding) {
-    And = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
-             .addReg(AMDGPU::EXEC)
+    And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Dst)
+             .addReg(Exec)
              .add(MI.getOperand(1));
-    Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
+    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
              .addReg(Dst)
              .add(MI.getOperand(2));
   } else
-    Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
+    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2));
 
@@ -372,8 +382,8 @@ void SILowerControlFlow::emitLoop(MachineInstr &MI) {
   const DebugLoc &DL = MI.getDebugLoc();
 
   MachineInstr *AndN2 =
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
-          .addReg(AMDGPU::EXEC)
+      BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
+          .addReg(Exec)
          .add(MI.getOperand(0));
 
   MachineInstr *Branch =
@@ -394,8 +404,8 @@ void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
 
   MachineBasicBlock::iterator InsPt = MBB.begin();
   MachineInstr *NewMI =
-      BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
-          .addReg(AMDGPU::EXEC)
+      BuildMI(MBB, InsPt, DL, TII->get(OrOpc), Exec)
+          .addReg(Exec)
          .add(MI.getOperand(0));
 
   if (LIS)
@@ -427,13 +437,13 @@ void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
   // does not really modify exec.
   for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
     if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
-        !(I->isCopy() && I->getOperand(0).getReg() != AMDGPU::EXEC))
+        !(I->isCopy() && I->getOperand(0).getReg() != Exec))
       return;
 
   for (const auto &SrcOp : Def->explicit_operands())
     if (SrcOp.isReg() && SrcOp.isUse() &&
        (TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
-        SrcOp.getReg() == AMDGPU::EXEC))
+        SrcOp.getReg() == Exec))
       Src.push_back(SrcOp);
 }
 
@@ -471,6 +481,27 @@ bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
   // This doesn't actually need LiveIntervals, but we can preserve them.
   LIS = getAnalysisIfAvailable<LiveIntervals>();
   MRI = &MF.getRegInfo();
+  BoolRC = TRI->getBoolRC();
+
+  if (ST.isWave32()) {
+    AndOpc = AMDGPU::S_AND_B32;
+    OrOpc = AMDGPU::S_OR_B32;
+    XorOpc = AMDGPU::S_XOR_B32;
+    MovTermOpc = AMDGPU::S_MOV_B32_term;
+    Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
+    XorTermrOpc = AMDGPU::S_XOR_B32_term;
+    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
+    Exec = AMDGPU::EXEC_LO;
+  } else {
+    AndOpc = AMDGPU::S_AND_B64;
+    OrOpc = AMDGPU::S_OR_B64;
+    XorOpc = AMDGPU::S_XOR_B64;
+    MovTermOpc = AMDGPU::S_MOV_B64_term;
+    Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
+    XorTermrOpc = AMDGPU::S_XOR_B64_term;
+    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
+    Exec = AMDGPU::EXEC;
+  }
 
   MachineFunction::iterator NextBB;
   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
@@ -507,6 +538,8 @@ bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
 
     case AMDGPU::S_AND_B64:
     case AMDGPU::S_OR_B64:
+    case AMDGPU::S_AND_B32:
+    case AMDGPU::S_OR_B32:
       // Cleanup bit manipulations on exec mask
       combineMasks(MI);
       Last = I;
```
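
Design note: the selection is computed once per function in runOnMachineFunction(), so emitIf, emitElse, emitIfBreak, emitLoop and emitEndCf stay wave-size agnostic and only reference the cached AndOpc/OrOpc/XorOpc/terminator opcodes and Exec. BoolRC comes from TRI->getBoolRC(), so the virtual registers created for condition masks get the matching 32-bit or 64-bit class, and the exec-mask cleanup now also matches the 32-bit S_AND_B32/S_OR_B32 forms in combineMasks. The overridden getRegClass() mentioned in the commit message is handled in a separate change.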

