author     Matt Arsenault <Matthew.Arsenault@amd.com>    2020-01-12 17:10:18 -0500
committer  Matt Arsenault <arsenm2@gmail.com>            2020-01-12 22:44:51 -0500
commit     555e7ee04cb5c44e0b11a2eda999e6910b4b27e1 (patch)
tree       72f1d2809df3e21b8d40e89b2a612e7b6828a78b
parent     a10527cd3731e2ef246c4797fb099385a948f62f (diff)
AMDGPU/GlobalISel: Don't use XEXEC class for SGPRs
We don't use the xexec register classes for arbitrary values anymore. This avoids a test variance between GlobalISel and SelectionDAG.
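For context, the functional change is confined to the 64-bit case of SIRegisterInfo::getRegClassForSizeOnBank (see the SIRegisterInfo.cpp hunk below); every other hunk is a mechanical update of GlobalISel test expectations. A condensed sketch of that case follows; the enclosing function signature and the other size cases are abbreviated here and should be read from the diff itself, not from this sketch.

// Condensed sketch of the affected selection logic in
// SIRegisterInfo::getRegClassForSizeOnBank (other cases elided).
switch (Size) {
// ...
case 64:
  // Previously the SGPR bank got &AMDGPU::SReg_64_XEXECRegClass here;
  // arbitrary 64-bit SGPR values now use the plain SReg_64 class.
  return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass
                                             : &AMDGPU::SReg_64RegClass;
// ...
}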
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir  54
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir  32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir  28
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir  20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-inttoptr.mir  7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir  16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir  34
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-add.mir  30
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir  22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir  8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir  14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir  4
39 files changed, 194 insertions(+), 197 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 863308c76f0..fbadad3c84a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1784,7 +1784,7 @@ SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
&AMDGPU::SReg_32RegClass;
case 64:
return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass :
- &AMDGPU::SReg_64_XEXECRegClass;
+ &AMDGPU::SReg_64RegClass;
case 96:
return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_96RegClass :
&AMDGPU::SReg_96RegClass;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir
index e1abb29c204..77c372e93a7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.class.mir
@@ -97,14 +97,14 @@ body: |
liveins: $sgpr0_sgpr1, $vgpr0
; WAVE64-LABEL: name: class_s64_vcc_sv
; WAVE64: liveins: $sgpr0_sgpr1, $vgpr0
- ; WAVE64: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE64: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec
; WAVE64: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]]
; WAVE32-LABEL: name: class_s64_vcc_sv
; WAVE32: liveins: $sgpr0_sgpr1, $vgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
- ; WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[V_CMP_CLASS_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_CLASS_F64_e64 0, [[COPY]], [[COPY1]], implicit $exec
; WAVE32: S_ENDPGM 0, implicit [[V_CMP_CLASS_F64_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
index 479f14661ee..f9e9978e9ca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
@@ -54,7 +54,7 @@ body: |
; CHECK-LABEL: name: fract_s64_vs
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F64_e64_]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
index 6c4b5d59670..7371dbb998f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
@@ -75,7 +75,7 @@ body: |
liveins: $sgpr0_sgpr1, $vgpr0
; GCN-LABEL: name: ldexp_s64_vsv
; GCN: liveins: $sgpr0_sgpr1, $vgpr0
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
index cd061fce1b8..6dfdec77927 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
@@ -54,7 +54,7 @@ body: |
; CHECK-LABEL: name: rcp_s64_vs
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: S_ENDPGM 0, implicit [[V_RCP_F64_e64_]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
index ae476d159ce..65fcb5deb44 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
@@ -54,7 +54,7 @@ body: |
; CHECK-LABEL: name: rsq_s64_vs
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F64_e64_]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
index fa0c07c53f7..0c91352ba1c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
@@ -51,7 +51,7 @@ body: |
; GCN-LABEL: name: anyext_sgpr_s16_to_sgpr_s64
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]]
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]]
; GCN: $sgpr0_sgpr1 = COPY [[COPY1]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
index 2c48572f50b..e3b0d4d1879 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
@@ -214,28 +214,28 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0
; GFX6-LABEL: name: ashr_s64_sv
- ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
; GFX7-LABEL: name: ashr_s64_sv
- ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
; GFX8-LABEL: name: ashr_s64_sv
- ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
; GFX9-LABEL: name: ashr_s64_sv
- ; GFX9: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
; GFX10-LABEL: name: ashr_s64_sv
; GFX10: $vcc_hi = IMPLICIT_DEF
- ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX10: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
index a63f475a13f..b69dcf20dd3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-build-vector.mir
@@ -81,7 +81,7 @@ body: |
; GCN: liveins: $sgpr0, $sgpr1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
@@ -102,7 +102,7 @@ body: |
; GCN-LABEL: name: test_build_vector_s_v2s32_undef_s_s32_s_s32
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%1:sgpr(s32) = COPY $sgpr0
%2:sgpr(<2 x s32>) = G_BUILD_VECTOR undef %0:sgpr(s32), %1
@@ -122,7 +122,7 @@ body: |
; GCN-LABEL: name: test_build_vector_s_v2s32_s_s32_undef_s_s32
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
%2:sgpr(<2 x s32>) = G_BUILD_VECTOR %0, undef %1:sgpr(s32),
@@ -141,8 +141,8 @@ body: |
; GCN-LABEL: name: test_build_vector_s_v2s64_s_s64_s_s64
; GCN: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
index 838edbb8222..3b90f296238 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-concat-vectors.mir
@@ -80,7 +80,7 @@ body: |
; GCN-LABEL: name: test_concat_vectors_s_v4s16_s_v2s16_s_v2s16
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: $sgpr0_sgpr1 = COPY [[REG_SEQUENCE]]
%0:sgpr(<2 x s16>) = COPY $sgpr0
%1:sgpr(<2 x s16>) = COPY $sgpr1
@@ -190,8 +190,8 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GCN-LABEL: name: test_concat_vectors_s_v8s16_s_v4s16_s_v4s16
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
@@ -326,10 +326,10 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7
; GCN-LABEL: name: test_concat_vectors_s_v16s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
%0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
@@ -390,14 +390,14 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr14_sgpr15
; GCN-LABEL: name: test_concat_vectors_s_v32s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16_s_v4s16
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
- ; GCN: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr8_sgpr9
- ; GCN: [[COPY5:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
- ; GCN: [[COPY6:%[0-9]+]]:sreg_64_xexec = COPY $sgpr12_sgpr13
- ; GCN: [[COPY7:%[0-9]+]]:sreg_64_xexec = COPY $sgpr14_sgpr15
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr8_sgpr9
+ ; GCN: [[COPY5:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
+ ; GCN: [[COPY6:%[0-9]+]]:sreg_64 = COPY $sgpr12_sgpr13
+ ; GCN: [[COPY7:%[0-9]+]]:sreg_64 = COPY $sgpr14_sgpr15
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
%0:sgpr(<4 x s16>) = COPY $sgpr0_sgpr1
@@ -459,8 +459,8 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GCN-LABEL: name: test_concat_vectors_s_v4s32_s_v2s32_s_v2s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(<2 x s32>) = COPY $sgpr0_sgpr1
@@ -499,10 +499,10 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7
; GCN-LABEL: name: test_concat_vectors_s_v8s32_s_v2s32_s_v2s32_s_v2s32_s_v2s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
%0:sgpr(<2 x s32>) = COPY $sgpr0_sgpr1
@@ -700,8 +700,8 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GCN-LABEL: name: test_concat_vectors_s_v4p3_s_v2p3_s_v2p3
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(<2 x p3>) = COPY $sgpr0_sgpr1
@@ -720,10 +720,10 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7
; GCN-LABEL: name: test_concat_vectors_s_v8p3_s_v2p3_s_v2p3_v2p3_s_v2p3
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
%0:sgpr(<2 x p3>) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
index 065cf9e9555..db37495f052 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
@@ -20,18 +20,18 @@ body: |
; GCN: [[LO0:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: [[HI0:%[0-9]+]]:sreg_32 = S_MOV_B32 1
- ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO0]], %subreg.sub0, [[HI0]], %subreg.sub1
+ ; GCN: %{{[0-9]+}}:sreg_64 = REG_SEQUENCE [[LO0]], %subreg.sub0, [[HI0]], %subreg.sub1
%3:sgpr(s64) = G_CONSTANT i64 4294967296
; GCN: %{{[0-9]+}}:sreg_32 = S_MOV_B32 1065353216
%4:sgpr(s32) = G_FCONSTANT float 1.0
- ; GCN: %5:sreg_64_xexec = S_MOV_B64 4607182418800017408
+ ; GCN: %5:sreg_64 = S_MOV_B64 4607182418800017408
%5:sgpr(s64) = G_FCONSTANT double 1.0
; GCN: [[LO1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: [[HI1:%[0-9]+]]:sreg_32 = S_MOV_B32 1076101120
- ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO1]], %subreg.sub0, [[HI1]], %subreg.sub1
+ ; GCN: %{{[0-9]+}}:sreg_64 = REG_SEQUENCE [[LO1]], %subreg.sub0, [[HI1]], %subreg.sub1
%6:sgpr(s64) = G_FCONSTANT double 10.0
; GCN: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
index 4fbec4113b4..1afa71ec4a3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
@@ -14,13 +14,13 @@ body: |
liveins: $sgpr2_sgpr3
; WAVE64-LABEL: name: copy
- ; WAVE64: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[COPY]]
; WAVE64: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; WAVE64: FLAT_STORE_DWORD [[COPY1]], [[DEF]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; WAVE32-LABEL: name: copy
; WAVE32: $vcc_hi = IMPLICIT_DEF
- ; WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[COPY]]
; WAVE32: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; WAVE32: GLOBAL_STORE_DWORD [[COPY1]], [[DEF]], 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
index f2d53090f87..e76927138f4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract-vector-elt.mir
@@ -14,13 +14,13 @@ body: |
liveins: $sgpr0_sgpr1, $sgpr2
; MOVREL-LABEL: name: extract_vector_elt_s_s32_v2s32
- ; MOVREL: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; MOVREL: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
; MOVREL: $m0 = COPY [[COPY1]]
; MOVREL: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B32_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s32_v2s32
- ; GPRIDX: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GPRIDX: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
; GPRIDX: $m0 = COPY [[COPY1]]
; GPRIDX: [[S_MOVRELS_B32_:%[0-9]+]]:sreg_32 = S_MOVRELS_B32 [[COPY]].sub0, implicit $m0, implicit [[COPY]]
@@ -179,13 +179,13 @@ body: |
; MOVREL: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
; MOVREL: $m0 = COPY [[COPY1]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v2s64
; GPRIDX: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr4
; GPRIDX: $m0 = COPY [[COPY1]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<2 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr(s32) = COPY $sgpr4
@@ -206,13 +206,13 @@ body: |
; MOVREL: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
; MOVREL: $m0 = COPY [[COPY1]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v4s64
; GPRIDX: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
; GPRIDX: $m0 = COPY [[COPY1]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<4 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
%1:sgpr(s32) = COPY $sgpr8
@@ -233,13 +233,13 @@ body: |
; MOVREL: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
; MOVREL: $m0 = COPY [[COPY1]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64
; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr8
; GPRIDX: $m0 = COPY [[COPY1]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<8 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
%1:sgpr(s32) = COPY $sgpr8
@@ -260,13 +260,13 @@ body: |
; MOVREL: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
; MOVREL: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
; MOVREL: $m0 = COPY [[COPY1]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v16s64
; GPRIDX: [[COPY:%[0-9]+]]:sreg_1024 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
; GPRIDX: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr40
; GPRIDX: $m0 = COPY [[COPY1]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<16 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31
%1:sgpr(s32) = COPY $sgpr40
@@ -421,7 +421,7 @@ body: |
; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
; MOVREL: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; MOVREL: $m0 = COPY [[S_ADD_U32_]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_1
; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -429,7 +429,7 @@ body: |
; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
; GPRIDX: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; GPRIDX: $m0 = COPY [[S_ADD_U32_]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<8 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
%1:sgpr(s32) = COPY $sgpr8
@@ -454,7 +454,7 @@ body: |
; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2
; MOVREL: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; MOVREL: $m0 = COPY [[S_ADD_U32_]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_2
; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -462,7 +462,7 @@ body: |
; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2
; GPRIDX: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; GPRIDX: $m0 = COPY [[S_ADD_U32_]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<8 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
%1:sgpr(s32) = COPY $sgpr8
@@ -487,7 +487,7 @@ body: |
; MOVREL: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
; MOVREL: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; MOVREL: $m0 = COPY [[S_ADD_U32_]]
- ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; MOVREL: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; MOVREL: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
; GPRIDX-LABEL: name: extract_vector_elt_s_s64_v8s64_idx_offset_m1
; GPRIDX: [[COPY:%[0-9]+]]:sreg_512 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -495,7 +495,7 @@ body: |
; GPRIDX: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967295
; GPRIDX: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
; GPRIDX: $m0 = COPY [[S_ADD_U32_]]
- ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64_xexec = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
+ ; GPRIDX: [[S_MOVRELS_B64_:%[0-9]+]]:sreg_64 = S_MOVRELS_B64 [[COPY]].sub0_sub1, implicit $m0, implicit [[COPY]]
; GPRIDX: S_ENDPGM 0, implicit [[S_MOVRELS_B64_]]
%0:sgpr(<8 x s64>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
%1:sgpr(s32) = COPY $sgpr8
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
index cbcdc7dd6a1..d0cca04784b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-extract.mir
@@ -168,8 +168,8 @@ body: |
bb.0:
; CHECK-LABEL: name: extract_sgpr_s64_from_s128
; CHECK: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY [[DEF]].sub0_sub1
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[DEF]].sub2_sub3
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY [[DEF]].sub0_sub1
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY [[DEF]].sub2_sub3
; CHECK: S_ENDPGM 0, implicit [[COPY]], implicit [[COPY1]]
%0:sgpr(s128) = G_IMPLICIT_DEF
%1:sgpr(s64) = G_EXTRACT %0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
index 8bdb565997e..586ec579246 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fabs.mir
@@ -201,7 +201,7 @@ body: |
; GCN-LABEL: name: fabs_s64_ss
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
- ; GCN: [[FABS:%[0-9]+]]:sreg_64_xexec(s64) = G_FABS [[COPY]]
+ ; GCN: [[FABS:%[0-9]+]]:sreg_64(s64) = G_FABS [[COPY]]
; GCN: $sgpr0_sgpr1 = COPY [[FABS]](s64)
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_FABS %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
index 537464f0940..70c5b76d758 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
@@ -53,7 +53,7 @@ body: |
; CHECK-LABEL: name: fceil_s64_sv
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY [[V_CEIL_F64_e64_]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
index 1ab91dbd3cd..b69776bb7b3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
@@ -18,7 +18,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
@@ -86,7 +86,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
index 5c886e031d1..0b82dd159d9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
@@ -19,7 +19,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
@@ -85,7 +85,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
index eee3130867f..b5d9c8851bf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
@@ -18,7 +18,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
@@ -86,7 +86,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
index 3d4a09e6307..2f7319c57ac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
@@ -19,7 +19,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
@@ -85,7 +85,7 @@ body: |
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
- ; GFX7: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
+ ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
index 34d0d5f58a3..c7dbeada2ca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -18,9 +18,9 @@ body: |
; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
- ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
+ ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
@@ -50,7 +50,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
; GCN-LABEL: name: fmul_f64
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
@@ -133,16 +133,16 @@ body: |
; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_3]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_4]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_5]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_6]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_7]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_8]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
- ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_9]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_3]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_4]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_5]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_6]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_7]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_8]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+ ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_9]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(p1) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
index 4a39f47fa7f..ddea2d9f08e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fneg.mir
@@ -201,7 +201,7 @@ body: |
; GCN-LABEL: name: fneg_s64_ss
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
- ; GCN: [[FNEG:%[0-9]+]]:sreg_64_xexec(s64) = G_FNEG [[COPY]]
+ ; GCN: [[FNEG:%[0-9]+]]:sreg_64(s64) = G_FNEG [[COPY]]
; GCN: $sgpr0_sgpr1 = COPY [[FNEG]](s64)
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_FNEG %0
@@ -464,7 +464,7 @@ body: |
; GCN: liveins: $sgpr0_sgpr1
; GCN: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
; GCN: [[FABS:%[0-9]+]]:sgpr(s64) = G_FABS [[COPY]]
- ; GCN: [[FNEG:%[0-9]+]]:sreg_64_xexec(s64) = G_FNEG [[FABS]]
+ ; GCN: [[FNEG:%[0-9]+]]:sreg_64(s64) = G_FNEG [[FABS]]
; GCN: $sgpr0_sgpr1 = COPY [[FNEG]](s64)
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_FABS %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
index a278b249090..17da10515a3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
@@ -40,7 +40,7 @@ regBankSelected: true
body: |
bb.0:
; GCN-LABEL: name: implicit_def_s64_sgpr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; GCN: S_ENDPGM 0, implicit [[DEF]]
%0:sgpr(s64) = G_IMPLICIT_DEF
S_ENDPGM 0, implicit %0
@@ -69,7 +69,7 @@ regBankSelected: true
body: |
bb.0:
; GCN-LABEL: name: implicit_def_p0_sgpr
- ; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GCN: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; GCN: S_ENDPGM 0, implicit [[DEF]]
%0:sgpr(p0) = G_IMPLICIT_DEF
S_ENDPGM 0, implicit %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
index ffa909c1f35..9284fcd5f49 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-insert.mir
@@ -96,9 +96,9 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: insert_s_s64_s_s32_0
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64_xexec = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0
+ ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:sgpr(s32) = COPY $sgpr2
@@ -116,9 +116,9 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: insert_s_s64_s_s32_32
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64_xexec = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1
+ ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_64 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:sgpr(s32) = COPY $sgpr2
@@ -136,7 +136,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0
; CHECK-LABEL: name: insert_s_s64_v_s32_32
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:vreg_64 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
@@ -217,7 +217,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2, $sgpr4_sgpr5
; CHECK-LABEL: name: insert_s_s96_s_s64_0
; CHECK: [[COPY:%[0-9]+]]:sgpr_96_with_sub0_sub1 = COPY $sgpr0_sgpr1_sgpr2
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
@@ -237,7 +237,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2, $sgpr4_sgpr5
; CHECK-LABEL: name: insert_s_s96_s_s64_32
; CHECK: [[COPY:%[0-9]+]]:sgpr_96_with_sub1_sub2 = COPY $sgpr0_sgpr1_sgpr2
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_96 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub1_sub2
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
@@ -257,7 +257,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5
; CHECK-LABEL: name: insert_s_s128_s_s64_0
; CHECK: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_128 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub0_sub1
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -292,7 +292,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5
; CHECK-LABEL: name: insert_s_s128_s_s64_64
; CHECK: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sgpr_128 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub2_sub3
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
@@ -332,7 +332,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, $sgpr8_sgpr9
; CHECK-LABEL: name: insert_s_s256_s_s64_128
; CHECK: [[COPY:%[0-9]+]]:sreg_256 = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; CHECK: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
; CHECK: [[INSERT_SUBREG:%[0-9]+]]:sreg_256 = INSERT_SUBREG [[COPY]], [[COPY1]], %subreg.sub4_sub5
; CHECK: S_ENDPGM 0, implicit [[INSERT_SUBREG]]
%0:sgpr(s256) = COPY $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
index 22b3ad8e121..550f47c5471 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
@@ -53,7 +53,7 @@ body: |
; CHECK-LABEL: name: intrinsic_trunc_s64_sv
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-inttoptr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-inttoptr.mir
index 410190e7bc1..1920b6b9f3b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-inttoptr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-inttoptr.mir
@@ -7,12 +7,9 @@ legalized: true
regBankSelected: true
# GCN-LABEL: name: inttoptr
-# GCN: [[S64:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+# GCN: [[S64:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
# GCN: [[V64:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-# FIXME: This extra copy is unnecessary, but is it the instruction selector's
-# job to clean this up?
-# GCN: [[S64_COPY:%[0-9]+]]:sreg_64 = COPY [[S64]]
-# GCN: [[VAL:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S64_COPY]], 0, 0
+# GCN: [[VAL:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S64]], 0, 0
# GCN: [[V_VAL:%[0-9]+]]:vgpr_32 = COPY [[VAL]]
# GCN: FLAT_STORE_DWORD [[V64]], [[V_VAL]]
#
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
index 725744f7e25..720b0de8280 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-constant.mir
@@ -510,22 +510,22 @@ body: |
; GFX6-LABEL: name: load_constant_p999_from_8
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX6: [[LOAD:%[0-9]+]]:sreg_64_xexec(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX6: [[LOAD:%[0-9]+]]:sreg_64(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX6: $sgpr0_sgpr1 = COPY [[LOAD]](p999)
; GFX7-LABEL: name: load_constant_p999_from_8
; GFX7: liveins: $sgpr0_sgpr1
; GFX7: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX7: [[LOAD:%[0-9]+]]:sreg_64_xexec(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX7: [[LOAD:%[0-9]+]]:sreg_64(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX7: $sgpr0_sgpr1 = COPY [[LOAD]](p999)
; GFX8-LABEL: name: load_constant_p999_from_8
; GFX8: liveins: $sgpr0_sgpr1
; GFX8: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX8: [[LOAD:%[0-9]+]]:sreg_64_xexec(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX8: [[LOAD:%[0-9]+]]:sreg_64(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX8: $sgpr0_sgpr1 = COPY [[LOAD]](p999)
; GFX10-LABEL: name: load_constant_p999_from_8
; GFX10: liveins: $sgpr0_sgpr1
; GFX10: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX10: [[LOAD:%[0-9]+]]:sreg_64_xexec(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX10: [[LOAD:%[0-9]+]]:sreg_64(p999) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX10: $sgpr0_sgpr1 = COPY [[LOAD]](p999)
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(p999) = G_LOAD %0 :: (load 8, align 8, addrspace 4)
@@ -547,22 +547,22 @@ body: |
; GFX6-LABEL: name: load_constant_v2p3
; GFX6: liveins: $sgpr0_sgpr1
; GFX6: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX6: [[LOAD:%[0-9]+]]:sreg_64_xexec(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX6: [[LOAD:%[0-9]+]]:sreg_64(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX6: $sgpr0_sgpr1 = COPY [[LOAD]](<2 x p3>)
; GFX7-LABEL: name: load_constant_v2p3
; GFX7: liveins: $sgpr0_sgpr1
; GFX7: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX7: [[LOAD:%[0-9]+]]:sreg_64_xexec(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX7: [[LOAD:%[0-9]+]]:sreg_64(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX7: $sgpr0_sgpr1 = COPY [[LOAD]](<2 x p3>)
; GFX8-LABEL: name: load_constant_v2p3
; GFX8: liveins: $sgpr0_sgpr1
; GFX8: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX8: [[LOAD:%[0-9]+]]:sreg_64_xexec(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX8: [[LOAD:%[0-9]+]]:sreg_64(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX8: $sgpr0_sgpr1 = COPY [[LOAD]](<2 x p3>)
; GFX10-LABEL: name: load_constant_v2p3
; GFX10: liveins: $sgpr0_sgpr1
; GFX10: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
- ; GFX10: [[LOAD:%[0-9]+]]:sreg_64_xexec(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
+ ; GFX10: [[LOAD:%[0-9]+]]:sreg_64(<2 x p3>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
; GFX10: $sgpr0_sgpr1 = COPY [[LOAD]](<2 x p3>)
%0:sgpr(p4) = COPY $sgpr0_sgpr1
%1:sgpr(<2 x p3>) = G_LOAD %0 :: (load 8, align 8, addrspace 4)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
index e988b0a729b..925339ad7db 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
@@ -44,7 +44,7 @@ regBankSelected: true
# Max immediate for CI
# SIVI: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 4294967292
# SIVI: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 3
-# SIVI: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
+# SIVI: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# SIVI-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# SIVI-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
# SIVI-DAG: [[ADD_PTR_LO:%[0-9]+]]:sreg_32 = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
@@ -58,7 +58,7 @@ regBankSelected: true
# Immediate overflow for CI
# GCN: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 0
# GCN: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 4
-# GCN: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
+# GCN: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# GCN-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# GCN-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
# GCN-DAG: [[ADD_PTR_LO:%[0-9]+]]:sreg_32 = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
@@ -76,7 +76,7 @@ regBankSelected: true
# Overflow 32-bit byte offset
# SIVI: [[K_LO:%[0-9]+]]:sreg_32 = S_MOV_B32 0
# SIVI: [[K_HI:%[0-9]+]]:sreg_32 = S_MOV_B32 1
-# SIVI: [[K:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
+# SIVI: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
# SIVI-DAG: [[K_SUB0:%[0-9]+]]:sreg_32 = COPY [[K]].sub0
# SIVI-DAG: [[PTR_LO:%[0-9]+]]:sreg_32 = COPY [[PTR]].sub0
# SIVI-DAG: [[ADD_PTR_LO:%[0-9]+]]:sreg_32 = S_ADD_U32 [[PTR_LO]], [[K_SUB0]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
index da8fe2728d2..1923c824dc1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
@@ -214,28 +214,28 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0
; GFX6-LABEL: name: lshr_s64_sv
- ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
; GFX7-LABEL: name: lshr_s64_sv
- ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
; GFX8-LABEL: name: lshr_s64_sv
- ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
; GFX9-LABEL: name: lshr_s64_sv
- ; GFX9: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
; GFX10-LABEL: name: lshr_s64_sv
; GFX10: $vcc_hi = IMPLICIT_DEF
- ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX10: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
index d2fa37f05d9..f0bcd376fe4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-merge-values.mir
@@ -88,7 +88,7 @@ body: |
; GCN: liveins: $sgpr0, $sgpr1
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
@@ -109,7 +109,7 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s64_undef_s_s32_s_s32
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE undef %2:sreg_32, %subreg.sub0, [[COPY]], %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%1:sgpr(s32) = COPY $sgpr0
%2:sgpr(s64) = G_MERGE_VALUES undef %0:sgpr(s32), %1
@@ -129,7 +129,7 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s64_s_s32_undef_s_s32
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
+ ; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, undef %2:sreg_32, %subreg.sub1
; GCN: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(s32) = COPY $sgpr0
%2:sgpr(s64) = G_MERGE_VALUES %0, undef %1:sgpr(s32),
@@ -246,8 +246,8 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s128_s_s64_s_s64
; GCN: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[REG_SEQUENCE]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -394,10 +394,10 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s256_s_s64_s_s64_s_s64_s_s64
; GCN: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_256 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7 = COPY [[REG_SEQUENCE]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -464,14 +464,14 @@ body: |
; GCN-LABEL: name: test_merge_values_s_s512_s_s64_s_s64_s_s64_s_s64_s_s64_s_s64_s_s64_s_s64
; GCN: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr14_sgpr15
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY $sgpr4_sgpr5
- ; GCN: [[COPY3:%[0-9]+]]:sreg_64_xexec = COPY $sgpr6_sgpr7
- ; GCN: [[COPY4:%[0-9]+]]:sreg_64_xexec = COPY $sgpr8_sgpr9
- ; GCN: [[COPY5:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
- ; GCN: [[COPY6:%[0-9]+]]:sreg_64_xexec = COPY $sgpr12_sgpr13
- ; GCN: [[COPY7:%[0-9]+]]:sreg_64_xexec = COPY $sgpr14_sgpr15
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY $sgpr4_sgpr5
+ ; GCN: [[COPY3:%[0-9]+]]:sreg_64 = COPY $sgpr6_sgpr7
+ ; GCN: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr8_sgpr9
+ ; GCN: [[COPY5:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
+ ; GCN: [[COPY6:%[0-9]+]]:sreg_64 = COPY $sgpr12_sgpr13
+ ; GCN: [[COPY7:%[0-9]+]]:sreg_64 = COPY $sgpr14_sgpr15
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_512 = REG_SEQUENCE [[COPY]], %subreg.sub0_sub1, [[COPY1]], %subreg.sub2_sub3, [[COPY2]], %subreg.sub4_sub5, [[COPY3]], %subreg.sub6_sub7, [[COPY4]], %subreg.sub8_sub9, [[COPY5]], %subreg.sub10_sub11, [[COPY6]], %subreg.sub12_sub13, [[COPY7]], %subreg.sub14_sub15
; GCN: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15 = COPY [[REG_SEQUENCE]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
index 8f75d13021c..c0bfa388122 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-phi.mir
@@ -210,8 +210,8 @@ body: |
; GCN: bb.0:
; GCN: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GCN: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN: S_CMP_EQ_U32 [[COPY2]], [[S_MOV_B32_]], implicit-def $scc
@@ -223,7 +223,7 @@ body: |
; GCN: successors: %bb.2(0x80000000)
; GCN: S_BRANCH %bb.2
; GCN: bb.2:
- ; GCN: [[PHI:%[0-9]+]]:sreg_64_xexec = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
+ ; GCN: [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; GCN: $sgpr0_sgpr1 = COPY [[PHI]]
; GCN: S_SETPC_B64 undef $sgpr30_sgpr31
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-add.mir
index 1418c461c14..01f43cd5a27 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-add.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-add.mir
@@ -14,8 +14,8 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GFX6-LABEL: name: gep_p0_sgpr_sgpr
- ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GFX6: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GFX6: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GFX6: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
; GFX6: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
@@ -26,8 +26,8 @@ body: |
; GFX6: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX8-LABEL: name: gep_p0_sgpr_sgpr
; GFX8: $vcc_hi = IMPLICIT_DEF
- ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GFX8: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX8: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GFX8: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
; GFX8: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
@@ -38,8 +38,8 @@ body: |
; GFX8: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX9-LABEL: name: gep_p0_sgpr_sgpr
; GFX9: $vcc_hi = IMPLICIT_DEF
- ; GFX9: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GFX9: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX9: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GFX9: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
; GFX9: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
@@ -49,8 +49,8 @@ body: |
; GFX9: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
; GFX9: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX10-WAVE64-LABEL: name: gep_p0_sgpr_sgpr
- ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GFX10-WAVE64: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-WAVE64: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GFX10-WAVE64: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GFX10-WAVE64: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
; GFX10-WAVE64: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
@@ -61,8 +61,8 @@ body: |
; GFX10-WAVE64: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX10-WAVE32-LABEL: name: gep_p0_sgpr_sgpr
; GFX10-WAVE32: $vcc_hi = IMPLICIT_DEF
- ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
- ; GFX10-WAVE32: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY $sgpr2_sgpr3
+ ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+ ; GFX10-WAVE32: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
; GFX10-WAVE32: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GFX10-WAVE32: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
; GFX10-WAVE32: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
@@ -160,7 +160,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
; GFX6-LABEL: name: gep_p0_sgpr_vgpr
- ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]].sub0
@@ -172,7 +172,7 @@ body: |
; GFX6: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX8-LABEL: name: gep_p0_sgpr_vgpr
; GFX8: $vcc_hi = IMPLICIT_DEF
- ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX8: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX8: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GFX8: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]].sub0
@@ -184,7 +184,7 @@ body: |
; GFX8: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX9-LABEL: name: gep_p0_sgpr_vgpr
; GFX9: $vcc_hi = IMPLICIT_DEF
- ; GFX9: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX9: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX9: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GFX9: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]].sub0
@@ -195,7 +195,7 @@ body: |
; GFX9: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_I32_e64_]], %subreg.sub0, %8, %subreg.sub1
; GFX9: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX10-WAVE64-LABEL: name: gep_p0_sgpr_vgpr
- ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX10-WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX10-WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX10-WAVE64: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GFX10-WAVE64: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]].sub0
@@ -207,7 +207,7 @@ body: |
; GFX10-WAVE64: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
; GFX10-WAVE32-LABEL: name: gep_p0_sgpr_vgpr
; GFX10-WAVE32: $vcc_hi = IMPLICIT_DEF
- ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX10-WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX10-WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GFX10-WAVE32: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GFX10-WAVE32: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]].sub0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
index 9a234e498ff..fcc9565ce9e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptr-mask.mir
@@ -111,12 +111,12 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_sgpr_sgpr_1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -2
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
- ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(p0) = G_PTR_MASK %0, 1
@@ -134,12 +134,12 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_sgpr_sgpr_2
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -4
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
- ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(p0) = G_PTR_MASK %0, 2
@@ -157,12 +157,12 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_sgpr_sgpr_3
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -8
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
- ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(p0) = G_PTR_MASK %0, 3
@@ -180,12 +180,12 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_sgpr_sgpr_4
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
- ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(p0) = G_PTR_MASK %0, 4
@@ -203,12 +203,12 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_sgpr_sgpr_29
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; CHECK: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def $scc
- ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[COPY2]], %subreg.sub1
; CHECK: S_ENDPGM 0, implicit [[REG_SEQUENCE]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(p0) = G_PTR_MASK %0, 4
@@ -461,7 +461,7 @@ body: |
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: ptr_mask_p0_vgpr_sgpr_2
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 -4, implicit $exec
; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
index 8777db58950..38d25829933 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ptrtoint.mir
@@ -53,7 +53,7 @@ body: |
; CHECK-LABEL: name: ptrtoint_s_p0_to_s_s64
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: S_ENDPGM 0, implicit [[COPY]]
%0:sgpr(p0) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_PTRTOINT %0
@@ -73,7 +73,7 @@ body: |
; CHECK-LABEL: name: ptrtoint_s_p1_to_s_s64
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: S_ENDPGM 0, implicit [[COPY]]
%0:sgpr(p1) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_PTRTOINT %0
@@ -93,7 +93,7 @@ body: |
; CHECK-LABEL: name: ptrtoint_s_p999_to_s_s64
; CHECK: liveins: $sgpr0_sgpr1
- ; CHECK: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: S_ENDPGM 0, implicit [[COPY]]
%0:sgpr(p999) = COPY $sgpr0_sgpr1
%1:sgpr(s64) = G_PTRTOINT %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
index d6ae78a24f2..d25a74ce1ab 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
@@ -33,7 +33,7 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
- ; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64_xexec = S_BFE_I64 [[REG_SEQUENCE]], 65536, implicit-def $scc
+ ; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64 = S_BFE_I64 [[REG_SEQUENCE]], 65536, implicit-def $scc
; GCN: $sgpr0_sgpr1 = COPY [[S_BFE_I64_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s1) = G_TRUNC %0
@@ -93,7 +93,7 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
- ; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64_xexec = S_BFE_I64 [[REG_SEQUENCE]], 1048576, implicit-def $scc
+ ; GCN: [[S_BFE_I64_:%[0-9]+]]:sreg_64 = S_BFE_I64 [[REG_SEQUENCE]], 1048576, implicit-def $scc
; GCN: $sgpr0_sgpr1 = COPY [[S_BFE_I64_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
index 491f2ef939c..c60c0e40744 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
@@ -214,28 +214,28 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1, $vgpr0
; GFX6-LABEL: name: shl_s64_sv
- ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
; GFX7-LABEL: name: shl_s64_sv
- ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX7: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
; GFX8-LABEL: name: shl_s64_sv
- ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX8: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX8: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX8: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
; GFX9-LABEL: name: shl_s64_sv
- ; GFX9: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX9: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
; GFX10-LABEL: name: shl_s64_sv
; GFX10: $vcc_hi = IMPLICIT_DEF
- ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GFX10: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
index e1265210a05..28dd145e007 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-trunc.mir
@@ -43,7 +43,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1
; GCN-LABEL: name: trunc_sgpr_s64_to_s32
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GCN: S_ENDPGM 0, implicit [[COPY1]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -60,7 +60,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1
; GCN-LABEL: name: trunc_sgpr_s64_to_s16
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GCN: S_ENDPGM 0, implicit [[COPY1]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -77,7 +77,7 @@ body: |
bb.0:
liveins: $sgpr0_sgpr1
; GCN-LABEL: name: trunc_sgpr_s64_to_s1
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GCN: S_ENDPGM 0, implicit [[COPY1]]
%0:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -112,7 +112,7 @@ body: |
liveins: $sgpr0_sgpr1_sgpr2
; GCN-LABEL: name: trunc_sgpr_s96_to_s64
; GCN: [[COPY:%[0-9]+]]:sgpr_96_with_sub0_sub1 = COPY $sgpr0_sgpr1_sgpr2
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]].sub0_sub1
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]].sub0_sub1
; GCN: S_ENDPGM 0, implicit [[COPY1]]
%0:sgpr(s96) = COPY $sgpr0_sgpr1_sgpr2
%1:sgpr(s64) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
index 0eef7c3dd40..30270d22b1a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-unmerge-values.mir
@@ -39,7 +39,7 @@ body: |
; GCN-LABEL: name: test_unmerge_values_s_s32_s_s32_s_s64
; GCN: liveins: $sgpr0_sgpr1
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
@@ -60,7 +60,7 @@ body: |
; GCN-LABEL: name: test_unmerge_values_v_s32_s_s32_s_s64
; GCN: liveins: $sgpr0_sgpr1
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub1
; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
@@ -81,7 +81,7 @@ body: |
; GCN-LABEL: name: test_unmerge_values_s_s32_v_s32_s_s64
; GCN: liveins: $sgpr0_sgpr1
- ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
+ ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
@@ -100,8 +100,8 @@ body: |
bb.0:
; GCN-LABEL: name: test_unmerge_values_s_s32_v_s32_s_s64_undef_src
- ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY undef %2.sub0:sreg_64_xexec
- ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY undef %2.sub1:sreg_64_xexec
+ ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY undef %2.sub0:sreg_64
+ ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY undef %2.sub1:sreg_64
; GCN: S_ENDPGM 0, implicit [[COPY]], implicit [[COPY1]]
%1:sgpr(s32), %2:vgpr(s32) = G_UNMERGE_VALUES undef %0:sgpr(s64)
S_ENDPGM 0, implicit %1, implicit %2
@@ -165,8 +165,8 @@ body: |
; GCN-LABEL: name: test_unmerge_values_s_s64_s_s64_s_s128
; GCN: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
; GCN: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
- ; GCN: [[COPY1:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]].sub0_sub1
- ; GCN: [[COPY2:%[0-9]+]]:sreg_64_xexec = COPY [[COPY]].sub2_sub3
+ ; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]].sub0_sub1
+ ; GCN: [[COPY2:%[0-9]+]]:sreg_64 = COPY [[COPY]].sub2_sub3
; GCN: S_ENDPGM 0, implicit [[COPY1]], implicit [[COPY2]]
%0:sgpr(s128) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:sgpr(s64), %2:sgpr(s64) = G_UNMERGE_VALUES %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
index fac8f44f223..0ae30e3e54b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
@@ -33,7 +33,7 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
- ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64_xexec = S_BFE_U64 [[REG_SEQUENCE]], 65536, implicit-def $scc
+ ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64 = S_BFE_U64 [[REG_SEQUENCE]], 65536, implicit-def $scc
; GCN: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s1) = G_TRUNC %0
@@ -94,7 +94,7 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[DEF]], %subreg.sub1
- ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64_xexec = S_BFE_U64 [[REG_SEQUENCE]], 1048576, implicit-def $scc
+ ; GCN: [[S_BFE_U64_:%[0-9]+]]:sreg_64 = S_BFE_U64 [[REG_SEQUENCE]], 1048576, implicit-def $scc
; GCN: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]]
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0