| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-03-24 20:08:13 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-03-24 20:08:13 +0000 |
| commit | 684dc80b6d0de8404d592ecaf83b0a4706366f86 (patch) | |
| tree | a0c421f34666b37a86a79183dc0118a035431170 | |
| parent | 248b7b6ba1c2a69373c9923a316baa4ed5bd19a3 (diff) | |
R600/SI: Fix extra mov from legalizing 64-bit SALU ops.
Check the register class of each operand individually
to avoid an extra copy to a vgpr.
llvm-svn: 204662
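
Before this patch, `splitScalar64BitOp` derived a single register class from `Src0` and reused it for the other source and for the destination; when an operand was an immediate or lived in a different class, legalization inserted an extra mov/copy into a VGPR. The diff below instead resolves each operand's class on its own. Here is a minimal sketch of that per-operand lookup, assuming the 2014-era LLVM `MachineOperand`/`MachineRegisterInfo` API and the R600 backend's generated register definitions; the helper name is hypothetical, since the patch inlines this logic once per operand (`Src0RC`, `Src1RC`):

```cpp
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
// AMDGPU::SGPR_32RegClass comes from the R600 backend's generated
// register info (AMDGPUGenRegisterInfo.inc).

using namespace llvm;

// Hypothetical helper illustrating the patch's pattern: look up each
// operand's register class individually instead of sharing Src0's class.
static const TargetRegisterClass *
regClassForOperand(const MachineRegisterInfo &MRI, const MachineOperand &MO) {
  if (MO.isReg())
    return MRI.getRegClass(MO.getReg()); // a register operand carries its own class
  // An immediate has no register class; assuming a 32-bit SGPR class for it
  // avoids forcing an extra copy of the value into a VGPR.
  return &AMDGPU::SGPR_32RegClass;
}
```

The destination class is likewise taken from `Dest` itself rather than from `Src0`. The updated checks in `or.ll` pin down the effect: the immediate halves stay in SGPRs (`S_MOV_B32`) and feed `V_OR_B32_e32` directly as its scalar source operand, with no intervening `V_MOV`.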
| -rw-r--r-- | llvm/lib/Target/R600/SIInstrInfo.cpp | 40 |
| -rw-r--r-- | llvm/test/CodeGen/R600/or.ll | 10 |
2 files changed, 31 insertions, 19 deletions
```diff
diff --git a/llvm/lib/Target/R600/SIInstrInfo.cpp b/llvm/lib/Target/R600/SIInstrInfo.cpp
index eb5172c896e..336f6aa5667 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.cpp
+++ b/llvm/lib/Target/R600/SIInstrInfo.cpp
@@ -1028,29 +1028,41 @@ void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
   MachineBasicBlock::iterator MII = Inst;
 
   const MCInstrDesc &InstDesc = get(Opcode);
-  const TargetRegisterClass *RC = MRI.getRegClass(Src0.getReg());
-  const TargetRegisterClass *SubRC = RI.getSubRegClass(RC, AMDGPU::sub0);
-  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub0, SubRC);
-  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub0, SubRC);
-
-  unsigned DestSub0 = MRI.createVirtualRegister(SubRC);
+  const TargetRegisterClass *Src0RC = Src0.isReg() ?
+    MRI.getRegClass(Src0.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+  const TargetRegisterClass *Src1RC = Src1.isReg() ?
+    MRI.getRegClass(Src1.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
+
+  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub0, Src0SubRC);
+  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub0, Src1SubRC);
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
+
+  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
     .addOperand(SrcReg0Sub0)
     .addOperand(SrcReg1Sub0);
 
-  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub1, SubRC);
-  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub1, SubRC);
+  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub1, Src0SubRC);
+  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub1, Src1SubRC);
 
-  unsigned DestSub1 = MRI.createVirtualRegister(SubRC);
+  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
     .addOperand(SrcReg0Sub1)
     .addOperand(SrcReg1Sub1);
 
-  unsigned FullDestReg = MRI.createVirtualRegister(RC);
+  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
     .addReg(DestSub0)
     .addImm(AMDGPU::sub0)
diff --git a/llvm/test/CodeGen/R600/or.ll b/llvm/test/CodeGen/R600/or.ll
index 8e985c75cbd..be984b27122 100644
--- a/llvm/test/CodeGen/R600/or.ll
+++ b/llvm/test/CodeGen/R600/or.ll
@@ -89,11 +89,11 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
 }
 
 ; SI-LABEL: @vector_or_i64_loadimm
-; SI-DAG: S_MOV_B32
-; SI-DAG: S_MOV_B32
-; SI-DAG: BUFFER_LOAD_DWORDX2
-; SI: V_OR_B32_e32
-; SI: V_OR_B32_e32
+; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], -545810305
+; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 5231
+; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: S_ENDPGM
 define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
   %loada = load i64 addrspace(1)* %a, align 8
```

