author     Nicolai Haehnle <nhaehnle@gmail.com>  2017-11-22 12:25:21 +0000
committer  Nicolai Haehnle <nhaehnle@gmail.com>  2017-11-22 12:25:21 +0000
commit     dd059c161d6fbb2caf9bba34fbfaf66cdec7c6ce (patch)
tree       af31fa7c3d984732b8752a26302d8a77c6585e4c /llvm
parent     f70c5beb228e44861557f6757bbd2f983c964b6b (diff)
AMDGPU: Consider memory dependencies with moved instructions in SILoadStoreOptimizer
Summary:
This bug seems to have gone unnoticed because critical cases with LDS
instructions are eliminated by the peephole optimizer. However, equivalent
situations arise with buffer loads and stores as well, so this fixes
regressions since r317751 ("AMDGPU: Merge S_BUFFER_LOAD_DWORD_IMM into
x2, x4").

Fixes at least:
KHR-GL45.shader_storage_buffer_object.basic-operations-case1-cs
KHR-GL45.cull_distance.functional
piglit tes-input-gl_ClipDistance.shader_test
... and probably more

Change-Id: I0e371536288eb8e6afeaa241a185266fd45d129d

Reviewers: arsenm, mareko, rampitec

Subscribers: kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D40303

llvm-svn: 318829
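The heart of the patch is the second clause added to the scan loop in
SILoadStoreOptimizer::findMatchingInst: an intervening memory access now joins
the move list not only when it conflicts with the first instruction of the
candidate pair, but also when it cannot be moved across instructions already
queued for moving. The following is a minimal, self-contained C++ sketch of
that logic. Instr, the one-address aliasing model, and scanForward are toy
stand-ins; memAccessesCanBeReordered and canMoveInstsAcrossMemOp borrow their
names from the patch, but their bodies here are simplified assumptions, not
LLVM's real implementation.

    #include <vector>

    // Toy stand-in for MachineInstr: one abstract address per memory access.
    struct Instr {
      bool MayLoadOrStore = false;
      bool IsStore = false;
      int Addr = 0;
    };

    // Two accesses commute if one is not a memory access, both are loads,
    // or they touch provably different addresses (toy aliasing check).
    static bool memAccessesCanBeReordered(const Instr &A, const Instr &B) {
      if (!A.MayLoadOrStore || !B.MayLoadOrStore)
        return true;
      if (!A.IsStore && !B.IsStore)
        return true;
      return A.Addr != B.Addr;
    }

    // MemOp may be moved across the queued instructions only if it commutes
    // with every one of them.
    static bool canMoveInstsAcrossMemOp(const Instr &MemOp,
                                        const std::vector<Instr *> &InstsToMove) {
      for (const Instr *I : InstsToMove)
        if (!memAccessesCanBeReordered(MemOp, *I))
          return false;
      return true;
    }

    // Simplified scan loop: walk forward from First looking for a merge
    // partner. A memory op that conflicts with First *or* with anything
    // already queued must itself be queued, so the whole group moves together
    // and the relative order of dependent instructions is preserved.
    static void scanForward(Instr &First, std::vector<Instr> &Block) {
      std::vector<Instr *> InstsToMove;
      for (Instr &MBBI : Block) {
        if (MBBI.MayLoadOrStore &&
            (!memAccessesCanBeReordered(First, MBBI) ||
             // This second check is the fix: without it, MBBI could stay in
             // place while an instruction it depends on is moved past it.
             !canMoveInstsAcrossMemOp(MBBI, InstsToMove))) {
          InstsToMove.push_back(&MBBI);
          continue;
        }
        // ... try to merge First with MBBI here; before committing, the real
        // pass re-checks that everything in InstsToMove can move past the
        // matched instruction (condition #2), elided in this sketch.
      }
    }

Queuing a dependent instruction rather than bailing out is safe because the
queued instructions are moved as one group, which preserves their relative
order.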
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp |  3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/merge-load-store.mir   | 70
2 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index d08caa7f173..14c9c8ff728 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -344,7 +344,8 @@ bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
     }
 
     if (MBBI->mayLoadOrStore() &&
-        !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
+        (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
+         !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))) {
       // We fail condition #1, but we may still be able to satisfy condition
       // #2. Add this instruction to the move list and then we will check
       // if condition #2 holds once we have selected the matching instruction.
diff --git a/llvm/test/CodeGen/AMDGPU/merge-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-load-store.mir
new file mode 100644
index 00000000000..d61cefd5c73
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-load-store.mir
@@ -0,0 +1,70 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck %s
+
+# Check that SILoadStoreOptimizer honors memory dependencies between moved
+# instructions.
+#
+# The following IR snippet would usually be optimized by the peephole optimizer.
+# However, an equivalent situation can occur with buffer instructions as well.
+
+# CHECK-LABEL: name: mem_dependency
+# CHECK: DS_READ2_B32 %0, 0, 1,
+# CHECK: DS_WRITE_B32 %0, killed %1, 64,
+# CHECK: DS_READ2_B32 %0, 16, 17,
+# CHECK: DS_WRITE_B32 killed %0, %5, 0
+
+--- |
+ define amdgpu_kernel void @mem_dependency(i32 addrspace(3)* %ptr.0) nounwind {
+ %ptr.4 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 1
+ %ptr.64 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 16
+ %1 = load i32, i32 addrspace(3)* %ptr.0
+ store i32 %1, i32 addrspace(3)* %ptr.64
+ %2 = load i32, i32 addrspace(3)* %ptr.64
+ %3 = load i32, i32 addrspace(3)* %ptr.4
+ %4 = add i32 %2, %3
+ store i32 %4, i32 addrspace(3)* %ptr.0
+ ret void
+ }
+...
+---
+name: mem_dependency
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%vgpr0', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0:
+ liveins: %vgpr0
+
+ %1:vgpr_32 = COPY %vgpr0
+ %m0 = S_MOV_B32 -1
+ %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.0)
+ DS_WRITE_B32 %1, killed %2, 64, 0, implicit %m0, implicit %exec :: (store 4 into %ir.ptr.64)
+
+ ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the
+ ; other two loads.
+ %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit %m0, implicit %exec :: (load 8 from %ir.ptr.64, align 4)
+ %3:vgpr_32 = COPY %6.sub0
+ %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.4)
+ %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def %vcc, implicit %exec
+ DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed %m0, implicit %exec :: (store 4 into %ir.ptr.0)
+ S_ENDPGM
+
+...