Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll            |  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/loop_break.ll                        | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll       | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/multilevel-break.ll                  |  8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll            | 32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll        |  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll | 12
7 files changed, 81 insertions(+), 81 deletions(-)
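
The check-line updates below are mechanical: each AMDGPU control-flow intrinsic now carries a type-mangling suffix because the intrinsics are overloaded on the mask type, and the i64 variants matched here correspond to the 64-bit exec mask. As a reading aid, the declarations implied by the call sites in this diff are sketched below; the trailing comments summarize the usual role of each intrinsic in annotated divergent control flow and are a gloss, not part of the diff.

declare { i1, i64 } @llvm.amdgcn.if.i64(i1)          ; divergent if: returns (branch condition, saved exec mask)
declare { i1, i64 } @llvm.amdgcn.else.i64.i64(i64)   ; else arm: consumes the saved mask, returns (condition, new saved mask)
declare i64 @llvm.amdgcn.if.break.i64.i64(i1, i64)   ; ORs lanes whose break condition holds into the mask carried by %phi.broken
declare i1 @llvm.amdgcn.loop.i64(i64)                ; true once every active lane has broken out; drives the loop-exit branch
declare void @llvm.amdgcn.end.cf.i64(i64)            ; restores exec at the join point after divergent control flow
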
diff --git a/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll b/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
index e44f99cd950..c8e9a469109 100644
--- a/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
+++ b/llvm/test/CodeGen/AMDGPU/diverge-switch-default.ll
@@ -38,8 +38,8 @@ sw.epilog:
; CHECK: load i8
; CHECK-NOT: {{ br }}
; CHECK: [[ICMP:%[a-zA-Z0-9._]+]] = icmp eq
-; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break(i1 [[ICMP]], i64 [[PHI]])
-; CHECK: [[LOOP:%[a-zA-Z0-9._]+]] = call i1 @llvm.amdgcn.loop(i64 [[IF]])
+; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[ICMP]], i64 [[PHI]])
+; CHECK: [[LOOP:%[a-zA-Z0-9._]+]] = call i1 @llvm.amdgcn.loop.i64(i64 [[IF]])
; CHECK: br i1 [[LOOP]]
sw.while:
diff --git a/llvm/test/CodeGen/AMDGPU/loop_break.ll b/llvm/test/CodeGen/AMDGPU/loop_break.ll
index f37b3a3637a..b4436fc42fa 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_break.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -15,12 +15,12 @@
; OPT: br label %Flow
; OPT: Flow:
-; OPT: call i64 @llvm.amdgcn.if.break(
-; OPT: call i1 @llvm.amdgcn.loop(i64
+; OPT: call i64 @llvm.amdgcn.if.break.i64.i64(
+; OPT: call i1 @llvm.amdgcn.loop.i64(i64
; OPT: br i1 %{{[0-9]+}}, label %bb9, label %bb1
; OPT: bb9:
-; OPT: call void @llvm.amdgcn.end.cf(i64
+; OPT: call void @llvm.amdgcn.end.cf.i64(i64
; GCN-LABEL: {{^}}break_loop:
; GCN: s_mov_b64 [[OUTER_MASK:s\[[0-9]+:[0-9]+\]]], 0{{$}}
@@ -84,12 +84,12 @@ bb9:
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken)
-; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0)
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @undef_phi_cond_break_loop(i32 %arg) #0 {
@@ -138,12 +138,12 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken)
-; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0)
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
@@ -189,12 +189,12 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken)
-; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0)
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @true_phi_cond_break_loop(i32 %arg) #0 {
@@ -239,12 +239,12 @@ bb9: ; preds = %Flow
; OPT: Flow:
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ]
-; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken)
-; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0)
+; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken)
+; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0)
; OPT-NEXT: br i1 %1, label %bb9, label %bb1
; OPT: bb9: ; preds = %Flow
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0)
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0)
; OPT-NEXT: store volatile i32 7
; OPT-NEXT: ret void
define amdgpu_kernel void @false_phi_cond_break_loop(i32 %arg) #0 {
@@ -294,12 +294,12 @@ bb9: ; preds = %Flow
; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
; OPT-NEXT: %0 = xor i1 %tmp3, true
-; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %0, i64 %phi.broken)
-; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %1)
+; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %0, i64 %phi.broken)
+; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop.i64(i64 %1)
; OPT-NEXT: br i1 %2, label %bb9, label %bb1
; OPT: bb9:
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %1)
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %1)
; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef
; OPT-NEXT: ret void
define amdgpu_kernel void @invert_true_phi_cond_break_loop(i32 %arg) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll b/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
index 7988a246a0f..fafc38dbc78 100644
--- a/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
+++ b/llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll
@@ -9,7 +9,7 @@
; StructurizeCFG.
; IR-LABEL: @multi_divergent_region_exit_ret_ret(
-; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0)
; IR: %2 = extractvalue { i1, i64 } %1, 0
; IR: %3 = extractvalue { i1, i64 } %1, 1
; IR: br i1 %2, label %LeafBlock1, label %Flow
@@ -17,7 +17,7 @@
; IR: Flow:
; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
-; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3)
; IR: %7 = extractvalue { i1, i64 } %6, 0
; IR: %8 = extractvalue { i1, i64 } %6, 1
; IR: br i1 %7, label %LeafBlock, label %Flow1
@@ -30,8 +30,8 @@
; IR: Flow2:
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
-; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11)
; IR: %13 = extractvalue { i1, i64 } %12, 0
; IR: %14 = extractvalue { i1, i64 } %12, 1
; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
@@ -43,8 +43,8 @@
; IR: Flow1:
; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ]
; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %8)
-; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16)
; IR: %18 = extractvalue { i1, i64 } %17, 0
; IR: %19 = extractvalue { i1, i64 } %17, 1
; IR: br i1 %18, label %exit1, label %Flow2
@@ -54,7 +54,7 @@
; IR: br label %Flow2
; IR: UnifiedReturnBlock:
-; IR: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14)
; IR: ret void
@@ -141,13 +141,13 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
}
; IR-LABEL: @multi_divergent_region_exit_unreachable_unreachable(
-; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0)
-; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3)
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
-; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11)
; IR: br i1 %13, label %exit0, label %UnifiedUnreachableBlock
@@ -203,7 +203,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR: {{^}}Flow:
; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
-; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3)
; IR: br i1 %7, label %LeafBlock, label %Flow1
; IR: {{^}}LeafBlock:
@@ -218,8 +218,8 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR: Flow2:
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
-; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11)
; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
; IR: exit0:
@@ -229,8 +229,8 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR: {{^}}Flow1:
; IR: %15 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %4, %Flow ]
; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %8)
-; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16)
; IR: %18 = extractvalue { i1, i64 } %17, 0
; IR: %19 = extractvalue { i1, i64 } %17, 1
; IR: br i1 %18, label %exit1, label %Flow2
@@ -240,7 +240,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR: br label %Flow2
; IR: UnifiedReturnBlock:
-; IR: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14)
; IR: ret void
define amdgpu_kernel void @multi_exit_region_divergent_ret_uniform_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 {
entry:
@@ -279,17 +279,17 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
}
; IR-LABEL: @multi_exit_region_uniform_ret_divergent_ret(
-; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0)
; IR: br i1 %2, label %LeafBlock1, label %Flow
; IR: Flow:
; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
-; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3)
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
-; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11)
define amdgpu_kernel void @multi_exit_region_uniform_ret_divergent_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 {
entry:
@@ -330,11 +330,11 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR-LABEL: @multi_divergent_region_exit_ret_ret_return_value(
; IR: Flow2:
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
; IR: UnifiedReturnBlock:
; IR: %UnifiedRetVal = phi float [ 2.000000e+00, %Flow2 ], [ 1.000000e+00, %exit0 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14)
; IR: ret float %UnifiedRetVal
define amdgpu_ps float @multi_divergent_region_exit_ret_ret_return_value(i32 %vgpr) #0 {
entry:
@@ -402,17 +402,17 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
}
; IR-LABEL: @multi_divergent_region_exit_ret_unreachable(
-; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0)
+; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0)
; IR: Flow:
; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ]
; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ]
-; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3)
+; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3)
; IR: Flow2:
; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %19)
-; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19)
+; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11)
; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock
; IR: exit0:
@@ -422,8 +422,8 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR: Flow1:
; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ]
; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ]
-; IR: call void @llvm.amdgcn.end.cf(i64 %8)
-; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16)
+; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8)
+; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16)
; IR: %18 = extractvalue { i1, i64 } %17, 0
; IR: %19 = extractvalue { i1, i64 } %17, 1
; IR: br i1 %18, label %exit1, label %Flow2
@@ -434,7 +434,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR-NEXT: br label %Flow2
; IR: UnifiedReturnBlock:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %14)
; IR-NEXT: ret void
define amdgpu_kernel void @multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
entry:
@@ -490,7 +490,7 @@ exit1: ; preds = %LeafBlock, %LeafBlock1
; IR-NEXT: br label %Flow2
; IR: UnifiedReturnBlock: ; preds = %exit0, %Flow2
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %14)
; IR-NEXT: ret void
define amdgpu_kernel void @indirect_multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 {
entry:
@@ -645,7 +645,7 @@ uniform.ret:
; IR: br i1 %11, label %uniform.endif, label %uniform.ret0
; IR: UnifiedReturnBlock: ; preds = %Flow3, %Flow2
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %6)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %6)
; IR-NEXT: ret void
define amdgpu_kernel void @uniform_complex_multi_ret_nest_in_divergent_triangle(i32 %arg0) #0 {
entry:
@@ -691,7 +691,7 @@ divergent.ret:
; IR-NEXT: br label %UnifiedReturnBlock
; IR: UnifiedReturnBlock:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64
; IR-NEXT: ret void
define amdgpu_kernel void @multi_divergent_unreachable_exit() #0 {
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
index 4c1a769d599..d243233119b 100644
--- a/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/multilevel-break.ll
@@ -5,7 +5,7 @@
; OPT: main_body:
; OPT: LOOP.outer:
; OPT: LOOP:
-; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if(
+; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if.i64(
; OPT: [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
;
; OPT: Flow:
@@ -13,9 +13,9 @@
; Ensure two if.break calls, for both the inner and outer loops
; OPT: call void @llvm.amdgcn.end.cf
-; OPT-NEXT: call i64 @llvm.amdgcn.if.break(i1
-; OPT-NEXT: call i1 @llvm.amdgcn.loop(i64
-; OPT-NEXT: call i64 @llvm.amdgcn.if.break(i1
+; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
+; OPT-NEXT: call i1 @llvm.amdgcn.loop.i64(i64
+; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1
;
; OPT: Flow1:
diff --git a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
index a007c965f94..48dbb28b4a2 100644
--- a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -13,7 +13,7 @@
; IR-NEXT: %phi.broken = phi i64 [ %3, %bb10 ], [ 0, %bb ]
; IR-NEXT: %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
; IR-NEXT: %tmp7 = icmp eq i32 %tmp6, 1
-; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp7)
+; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp7)
; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
; IR-NEXT: br i1 %1, label %bb8, label %Flow
@@ -24,14 +24,14 @@
; IR: bb10:
; IR-NEXT: %tmp11 = phi i32 [ %6, %Flow ]
; IR-NEXT: %tmp12 = phi i1 [ %5, %Flow ]
-; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break(i1 %tmp12, i64 %phi.broken)
-; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop(i64 %3)
+; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp12, i64 %phi.broken)
+; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop.i64(i64 %3)
; IR-NEXT: br i1 %4, label %bb23, label %bb5
; IR: Flow:
; IR-NEXT: %5 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ]
; IR-NEXT: %6 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2)
; IR-NEXT: br label %bb10
; IR: bb13:
@@ -51,7 +51,7 @@
; IR-NEXT: br label %bb9
; IR: bb23:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %3)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %3)
; IR-NEXT: ret void
; GCN-LABEL: {{^}}reduced_nested_loop_conditions:
@@ -121,27 +121,27 @@ bb23: ; preds = %bb10
; IR-LABEL: @nested_loop_conditions(
; IR: Flow3:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %21)
-; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %14)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %21)
+; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %14)
; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
; IR-NEXT: br i1 %1, label %bb4.bb13_crit_edge, label %Flow4
; IR: Flow4:
; IR-NEXT: %3 = phi i1 [ true, %bb4.bb13_crit_edge ], [ false, %Flow3 ]
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2)
; IR-NEXT: br label %Flow
; IR: Flow:
; IR-NEXT: %4 = phi i1 [ %3, %Flow4 ], [ true, %bb ]
-; IR-NEXT: %5 = call { i1, i64 } @llvm.amdgcn.if(i1 %4)
+; IR-NEXT: %5 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %4)
; IR-NEXT: %6 = extractvalue { i1, i64 } %5, 0
; IR-NEXT: %7 = extractvalue { i1, i64 } %5, 1
; IR-NEXT: br i1 %6, label %bb13, label %bb31
; IR: bb14:
; IR: %tmp15 = icmp eq i32 %tmp1037, 1
-; IR-NEXT: %8 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp15)
+; IR-NEXT: %8 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp15)
; IR: Flow1:
; IR-NEXT: %11 = phi <4 x i32> [ %tmp9, %bb21 ], [ undef, %bb14 ]
@@ -149,9 +149,9 @@ bb23: ; preds = %bb10
; IR-NEXT: %13 = phi i1 [ %18, %bb21 ], [ true, %bb14 ]
; IR-NEXT: %14 = phi i1 [ %18, %bb21 ], [ false, %bb14 ]
; IR-NEXT: %15 = phi i1 [ false, %bb21 ], [ true, %bb14 ]
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %10)
-; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break(i1 %13, i64 %phi.broken)
-; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop(i64 %16)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %10)
+; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %13, i64 %phi.broken)
+; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop.i64(i64 %16)
; IR-NEXT: br i1 %17, label %Flow2, label %bb14
; IR: bb21:
@@ -160,14 +160,14 @@ bb23: ; preds = %bb10
; IR-NEXT: br label %Flow1
; IR: Flow2:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %16)
-; IR-NEXT: %19 = call { i1, i64 } @llvm.amdgcn.if(i1 %15)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %16)
+; IR-NEXT: %19 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %15)
; IR-NEXT: %20 = extractvalue { i1, i64 } %19, 0
; IR-NEXT: %21 = extractvalue { i1, i64 } %19, 1
; IR-NEXT: br i1 %20, label %bb31.loopexit, label %Flow3
; IR: bb31:
-; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %7)
+; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %7)
; IR-NEXT: store volatile i32 0, i32 addrspace(1)* undef
; IR-NEXT: ret void
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll
index e50c595bc6c..9c385191941 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll
@@ -3,8 +3,8 @@
; OPT-LABEL: @annotate_unreachable(
-; OPT: call { i1, i64 } @llvm.amdgcn.if(
-; OPT-NOT: call void @llvm.amdgcn.end.cf(
+; OPT: call { i1, i64 } @llvm.amdgcn.if.i64(
+; OPT-NOT: call void @llvm.amdgcn.end.cf
; GCN-LABEL: {{^}}annotate_unreachable:
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll b/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
index abdc8b90bcc..1d0cb626f01 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll
@@ -17,17 +17,17 @@ define amdgpu_kernel void @multiple_backedges(i32 %arg, i32* %arg1) {
; OPT-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP5:%.*]], [[LOOP]] ], [ 0, [[LOOP_END]] ]
; OPT-NEXT: [[TMP5]] = add nsw i32 [[TMP4]], [[TMP]]
; OPT-NEXT: [[TMP6:%.*]] = icmp slt i32 [[ARG]], [[TMP5]]
-; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break(i1 [[TMP6]], i64 [[PHI_BROKEN]])
-; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop(i64 [[TMP0]])
+; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
+; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]])
; OPT-NEXT: br i1 [[TMP1]], label [[LOOP_END]], label [[LOOP]]
; OPT: loop_end:
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 [[TMP0]])
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]])
; OPT-NEXT: [[EXIT:%.*]] = icmp sgt i32 [[TMP5]], [[TMP2]]
-; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break(i1 [[EXIT]], i64 [[PHI_BROKEN1]])
-; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop(i64 [[TMP7]])
+; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[EXIT]], i64 [[PHI_BROKEN1]])
+; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP7]])
; OPT-NEXT: br i1 [[TMP3]], label [[LOOP_EXIT:%.*]], label [[LOOP]]
; OPT: loop_exit:
-; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 [[TMP7]])
+; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]])
; OPT-NEXT: [[TMP12:%.*]] = zext i32 [[TMP]] to i64
; OPT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[ARG1:%.*]], i64 [[TMP12]]
; OPT-NEXT: [[TMP14:%.*]] = addrspacecast i32* [[TMP13]] to i32 addrspace(1)*
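
For context on how these patterns are exercised: each file above pipes its IR through the structurizer and the SI control-flow annotator with opt and matches the printed output with FileCheck. A representative RUN line, assuming the legacy pass names these tests used at the time (the exact line and check prefixes vary per file), would be:

; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=OPT %s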