Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir | 48 ++++++++++++++++++++++++------------------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index b5dc9d9dac8..24e8ed8e29c 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -184,8 +184,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -241,8 +241,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -298,8 +298,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -359,8 +359,8 @@ body: |
BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -384,7 +384,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_wrong_reg
alignment: 0
exposesReturnsTwice: false
@@ -420,8 +420,8 @@ body: |
%sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr0_sgpr1
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
@@ -443,7 +443,7 @@ body: |
# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_and_saveexec_xor_modify_copy_to_exec
alignment: 0
@@ -479,8 +479,8 @@ body: |
%sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -540,8 +540,8 @@ body: |
%sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
@@ -565,7 +565,7 @@ body: |
# CHECK: %sgpr0_sgpr1 = COPY %exec
# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_unknown_saveexec
alignment: 0
@@ -599,8 +599,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -656,8 +656,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1
@@ -680,7 +680,7 @@ body: |
# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
name: optimize_if_andn2_saveexec_no_commute
alignment: 0
exposesReturnsTwice: false
@@ -713,8 +713,8 @@ body: |
%vgpr0 = V_MOV_B32_e32 4, implicit %exec
%sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
%exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2.end, implicit %exec
- S_BRANCH %bb.1.if
+ SI_MASK_BRANCH %bb.2, implicit %exec
+ S_BRANCH %bb.1
bb.1.if:
liveins: %sgpr0_sgpr1