author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2015-08-29 06:48:46 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2015-08-29 06:48:46 +0000 |
commit | 5c004a7c610396fe2503380d46adb88da1de9df8 (patch) | |
tree | b247b44e9664fb8ed9a11609c29f3f35474e3fea /llvm | |
parent | a15a35e55274ec76b7fb606aea08788e3f21dcbc (diff) | |
download | bcm5719-llvm-5c004a7c610396fe2503380d46adb88da1de9df8.tar.gz bcm5719-llvm-5c004a7c610396fe2503380d46adb88da1de9df8.zip | |
AMDGPU: Fix dropping mem operands when moving to VALU
Without a memory operand, mayLoad or mayStore instructions
are conservatively treated as ordered memory references
(MachineInstr::hasOrderedMemoryRef() returns true when the
memory operand list is empty), which results in much worse
scheduling.
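
For context, a simplified sketch of the kind of conservative test the
scheduler's dependence analysis relies on is shown below. It is modeled on
MachineInstr::hasOrderedMemoryRef() but is illustrative only, not the exact
upstream source; the helper name is made up for this sketch.

```cpp
// Simplified sketch (not the exact upstream implementation) of the
// conservative test used when deciding whether two memory instructions
// may be reordered by the scheduler.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

static bool mustTreatAsOrderedMemRef(const MachineInstr &MI) {
  // Instructions that cannot touch memory never need ordering.
  if (!MI.mayLoad() && !MI.mayStore() && !MI.hasUnmodeledSideEffects())
    return false;

  // No memory operands attached: assume the worst (an ordered, volatile-like
  // access). This is the state a BUFFER instruction ends up in if
  // legalizeOperands rebuilds it without copying the MachineMemOperands.
  if (MI.memoperands_empty())
    return true;

  // Otherwise, only non-unordered (e.g. volatile) accesses force ordering.
  for (auto I = MI.memoperands_begin(), E = MI.memoperands_end(); I != E; ++I)
    if (!(*I)->isUnordered())
      return true;

  return false;
}
```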
We really should have a verifier check that any
non-side-effecting mayLoad or mayStore instruction has a memory operand.
There are a few instructions (interp and image instructions)
for which I'm not sure what memory operands to add or where to add them.
llvm-svn: 246356
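
The verifier check suggested above could look roughly like the sketch below.
It is hypothetical; no such check existed in MachineVerifier at the time of
this commit, and the function name is an assumption made for illustration.

```cpp
// Hypothetical verifier-style rule: report mayLoad/mayStore instructions
// that have no unmodeled side effects and carry no memory operand.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool checkMemOperandsPresent(const MachineFunction &MF) {
  bool AllPresent = true;
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if ((MI.mayLoad() || MI.mayStore()) && !MI.hasUnmodeledSideEffects() &&
          MI.memoperands_empty()) {
        errs() << "missing memory operand on: " << MI;
        AllPresent = false;
      }
    }
  }
  return AllPresent;
}
```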
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 23 |
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll | 52 |
2 files changed, 64 insertions, 11 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index ff077064240..ecb749e37ae 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1888,17 +1888,18 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
       // Create the new instruction.
       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
       MachineInstr *Addr64 =
-          BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
-                  .addOperand(*VData)
-                  .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
-                  // This will be replaced later
-                  // with the new value of vaddr.
-                  .addOperand(*SRsrc)
-                  .addOperand(*SOffset)
-                  .addOperand(*Offset)
-                  .addImm(0) // glc
-                  .addImm(0) // slc
-                  .addImm(0); // tfe
+          BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
+              .addOperand(*VData)
+              .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
+              // This will be replaced later
+              // with the new value of vaddr.
+              .addOperand(*SRsrc)
+              .addOperand(*SOffset)
+              .addOperand(*Offset)
+              .addImm(0) // glc
+              .addImm(0) // slc
+              .addImm(0) // tfe
+              .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

       MI->removeFromParent();
       MI = Addr64;
diff --git a/llvm/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll b/llvm/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
new file mode 100644
index 00000000000..8f63a587a27
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/drop-mem-operand-move-smrd.ll
@@ -0,0 +1,52 @@
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+
+; The memory operand was dropped from the buffer_load_dword_offset
+; when replaced with the addr64 during operand legalization, resulting
+; in the global loads not being scheduled together.
+
+; GCN-LABEL: {{^}}reschedule_global_load_lds_store:
+; GCN: buffer_load_dword
+; GCN: buffer_load_dword
+; GCN: ds_write_b32
+; GCN: ds_write_b32
+; GCN: s_endpgm
+define void @reschedule_global_load_lds_store(i32 addrspace(1)* noalias %gptr0, i32 addrspace(1)* noalias %gptr1, i32 addrspace(3)* noalias %lptr, i32 %c) #0 {
+entry:
+  %tid = tail call i32 @llvm.r600.read.tidig.x() #1
+  %idx = shl i32 %tid, 2
+  %gep0 = getelementptr i32, i32 addrspace(1)* %gptr0, i32 %idx
+  %gep1 = getelementptr i32, i32 addrspace(1)* %gptr1, i32 %idx
+  %gep2 = getelementptr i32, i32 addrspace(3)* %lptr, i32 %tid
+  %cmp0 = icmp eq i32 %c, 0
+  br i1 %cmp0, label %for.body, label %exit
+
+for.body:                                         ; preds = %for.body, %entry
+  %i = phi i32 [ 0, %entry ], [ %i.inc, %for.body ]
+  %gptr0.phi = phi i32 addrspace(1)* [ %gep0, %entry ], [ %gep0.inc, %for.body ]
+  %gptr1.phi = phi i32 addrspace(1)* [ %gep1, %entry ], [ %gep1.inc, %for.body ]
+  %lptr0.phi = phi i32 addrspace(3)* [ %gep2, %entry ], [ %gep2.inc, %for.body ]
+  %lptr1 = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 1
+  %val0 = load i32, i32 addrspace(1)* %gep0
+  store i32 %val0, i32 addrspace(3)* %lptr0.phi
+  %val1 = load i32, i32 addrspace(1)* %gep1
+  store i32 %val1, i32 addrspace(3)* %lptr1
+  %gep0.inc = getelementptr i32, i32 addrspace(1)* %gptr0.phi, i32 4
+  %gep1.inc = getelementptr i32, i32 addrspace(1)* %gptr1.phi, i32 4
+  %gep2.inc = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 4
+  %i.inc = add nsw i32 %i, 1
+  %cmp1 = icmp ne i32 %i, 256
+  br i1 %cmp1, label %for.body, label %exit
+
+exit:                                             ; preds = %for.body, %entry
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.r600.read.tgid.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noduplicate nounwind }