author     Tim Renouf <tpr.llvm@botech.co.uk>  2018-01-09 21:34:43 +0000
committer  Tim Renouf <tpr.llvm@botech.co.uk>  2018-01-09 21:34:43 +0000
commit     6eaad1e5397dc84c7dbb78be4fa433bcd6fb137f (patch)
tree       913b778f0fc2c78591c1a0fe7460de17ba98ff1b /llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
parent     f0ef137bd0c34f984090638df8c62ad55d790694 (diff)
[AMDGPU] Fixed incorrect uniform branch condition
Summary:
I had a case where multiple nested uniform ifs resulted in code that did
v_cmp comparisons, combining the results with s_and_b64, s_or_b64 and
s_xor_b64 and using the resulting mask in s_cbranch_vccnz, without first
ensuring that bits for inactive lanes were clear.

There was already code for inserting an "s_and_b64 vcc, exec, vcc" to clear
bits for inactive lanes in the case that the branch is instruction selected
as s_cbranch_scc1 and is then changed to s_cbranch_vccnz in SIFixSGPRCopies.
I have added the same code into SILowerControlFlow for the case that the
branch is instruction selected as s_cbranch_vccnz.

This de-optimizes the code in some cases where the s_and is not needed,
because vcc is the result of a v_cmp, or multiple v_cmp instructions
combined by s_and/s_or. We should add a pass to re-optimize those cases.

Reviewers: arsenm, kzhuravl

Subscribers: wdng, yaxunl, t-tye, llvm-commits, dstuttard, timcorringham, nhaehnle

Differential Revision: https://reviews.llvm.org/D41292

llvm-svn: 322119
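The shape of the problem and of the inserted fix, as a minimal sketch (register
operands, the use of -1 to invert a condition, and the branch label are
hypothetical illustrations, not taken from the actual codegen output):

  ; Before the fix: a uniform branch whose condition was built by combining
  ; v_cmp results with scalar bit ops. s_xor_b64 can set bits for lanes that
  ; are inactive in exec, so the vccnz test may see a nonzero mask even when
  ; the condition is false for every active lane.
  v_cmp_lt_f32_e32  vcc, v0, v1      ; per-lane compare, inactive-lane bits are 0
  s_xor_b64         vcc, vcc, -1     ; invert condition: inactive-lane bits become 1
  s_cbranch_vccnz   BB0_2            ; may branch on inactive-lane bits alone

  ; After the fix, SILowerControlFlow inserts the masking s_and before the branch:
  s_and_b64         vcc, exec, vcc   ; clear bits for inactive lanes
  s_cbranch_vccnz   BB0_2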
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll | 54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll b/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
new file mode 100644
index 00000000000..70ee24f0b22
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx800 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This checks for a bug where uniform control flow can result in multiple
+; v_cmp results being combined together with s_and_b64, s_or_b64 and s_xor_b64,
+; using the resulting mask in s_cbranch_vccnz
+; without ensuring that the resulting mask has bits clear for inactive lanes.
+; The problematic case is s_xor_b64, as, unlike the other ops, it can actually
+; set bits for inactive lanes.
+;
+; The check for an s_xor_b64 is just to check that this test tests what it is
+; supposed to test. If the s_xor_b64 disappears due to some other case, it does
+; not necessarily mean that the bug has reappeared.
+;
+; The check for "s_and_b64 vcc, exec, something" checks that the bug is fixed.
+
+; CHECK: {{^}}main:
+; CHECK: s_xor_b64
+; CHECK: s_and_b64 vcc, exec,
+
+define amdgpu_cs void @main(i32 inreg %arg) {
+.entry:
+ %tmp44 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
+ %tmp16 = load volatile float, float addrspace(1)* undef
+ %tmp22 = load volatile float, float addrspace(1)* undef
+ %tmp25 = load volatile float, float addrspace(1)* undef
+ %tmp31 = fcmp olt float %tmp16, 0x3FA99999A0000000
+ br i1 %tmp31, label %bb, label %.exit.thread
+
+bb: ; preds = %.entry
+ %tmp42 = fcmp olt float %tmp25, 0x3FA99999A0000000
+ br i1 %tmp42, label %bb43, label %.exit.thread
+
+bb43:
+ %tmp46 = fcmp olt <2 x float> %tmp44, <float 0x3FA99999A0000000, float 0x3FA99999A0000000>
+ %tmp47 = extractelement <2 x i1> %tmp46, i32 0
+ %tmp48 = extractelement <2 x i1> %tmp46, i32 1
+ %tmp49 = and i1 %tmp47, %tmp48
+ br i1 %tmp49, label %bb50, label %.exit.thread
+
+bb50:
+ %tmp53 = fcmp olt float %tmp22, 0x3FA99999A0000000
+ br i1 %tmp53, label %.exit3.i, label %.exit.thread
+
+.exit3.i:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %.exit.thread
+
+.exit.thread:
+ ret void
+}
+
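A concrete walk-through of the s_xor_b64 hazard described in the test's
comments, using hypothetical mask values and assuming only lanes 0 and 1 of
the wave are active:

  ; exec                    = 0x0000000000000003  ; lanes 0 and 1 active
  ; v_cmp result            = 0x0000000000000003  ; both active lanes true, inactive bits 0
  ; s_xor_b64 with -1 (condition inversion):
  ;   result                = 0xFFFFFFFFFFFFFFFC  ; active-lane bits 0, inactive bits 1
  ; s_cbranch_vccnz sees a nonzero mask and takes the branch, even though the
  ; inverted condition is false for every active lane.
  ; With the fix, s_and_b64 vcc, exec, vcc masks out the inactive lanes:
  ;   0xFFFFFFFFFFFFFFFC & 0x0000000000000003 = 0  -> branch correctly not taken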