summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/AMDGPU/else.ll
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2016-08-22 19:33:16 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2016-08-22 19:33:16 +0000
commit78fc9daf8d1825d32f170f8e60f9158550f93e93 (patch)
tree0fabfdfbe326321516366f98a99cf7de6c0b2e38 /llvm/test/CodeGen/AMDGPU/else.ll
parent88d7da01ca7af18ed6bd446d388999bf9668a3cf (diff)
downloadbcm5719-llvm-78fc9daf8d1825d32f170f8e60f9158550f93e93.tar.gz
bcm5719-llvm-78fc9daf8d1825d32f170f8e60f9158550f93e93.zip
AMDGPU: Split SILowerControlFlow into two pieces
Do most of the lowering in a pre-RA pass. Keep the skip jump insertion late, plus a few other things that require more work to move out. One concern I have is now there may be COPY instructions which do not have the necessary implicit exec uses if they will be lowered to v_mov_b32. This has a positive effect on SGPR usage in shader-db. llvm-svn: 279464
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/else.ll')
-rw-r--r--llvm/test/CodeGen/AMDGPU/else.ll8
1 file changed, 5 insertions, 3 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/else.ll b/llvm/test/CodeGen/AMDGPU/else.ll
index bb885ac3884..ef1e64763d4 100644
--- a/llvm/test/CodeGen/AMDGPU/else.ll
+++ b/llvm/test/CodeGen/AMDGPU/else.ll
@@ -25,11 +25,13 @@ end:
}
; CHECK-LABEL: {{^}}else_execfix_leave_wqm:
+; CHECK: ; BB#0:
+; CHECK-NEXT: s_mov_b64 [[INIT_EXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: ; %Flow
; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]],
-; CHECK-NEXT: s_and_b64 exec, exec,
-; CHECK-NEXT: s_and_b64 [[DST]], exec, [[DST]]
-; CHECK-NEXT: s_xor_b64 exec, exec, [[DST]]
+; CHECK-NEXT: s_and_b64 exec, exec, [[INIT_EXEC]]
+; CHECK-NEXT: s_and_b64 [[AND_INIT:s\[[0-9]+:[0-9]+\]]], exec, [[DST]]
+; CHECK-NEXT: s_xor_b64 exec, exec, [[AND_INIT]]
; CHECK-NEXT: ; mask branch
define amdgpu_ps void @else_execfix_leave_wqm(i32 %z, float %v) {
main_body:
OpenPOWER on IntegriCloud