| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-10-07 03:55:04 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-10-07 03:55:04 +0000 |
| commit | 93401f4b5ec75b6e70ec53bc0716a7b1e29880d9 | |
| tree | 1d5fa4008c77e52110f2f285e8259aa7542c8bf2 /llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll | |
| parent | 752c5bab0397997b26cad5757161f700969faa04 | |
| download | bcm5719-llvm-93401f4b5ec75b6e70ec53bc0716a7b1e29880d9.tar.gz, bcm5719-llvm-93401f4b5ec75b6e70ec53bc0716a7b1e29880d9.zip | |
AMDGPU: Change check prefix in test
llvm-svn: 283521
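
Editorial note for context: this commit only renames the FileCheck prefix the test matches against. Each RUN line now passes `-check-prefix=GCN`, so FileCheck looks for `; GCN:`, `; GCN-LABEL:`, and `; GCN-DAG:` directives instead of the default `CHECK` family; sharing one `GCN` prefix lets both the SI and Tonga RUN lines reuse a single set of checks while leaving room for target-specific prefixes later. A minimal sketch of the pattern (hypothetical test, not part of this commit):

```llvm
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Both RUN lines above validate against the same GCN-prefixed directives.
; GCN-LABEL: {{^}}example_kernel:
; GCN: s_endpgm
define void @example_kernel() {
entry:
  ret void
}
```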
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll | 494 |
1 file changed, 247 insertions(+), 247 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 5c217ff421f..d5f53a384fc 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -1,17 +1,17 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
 ; Tests for indirect addressing on SI, which is implemented using dynamic
 ; indexing of vectors.
 
-; CHECK-LABEL: {{^}}extract_w_offset:
-; CHECK-DAG: s_load_dword [[IN:s[0-9]+]]
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; CHECK-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
-; CHECK-DAG: s_mov_b32 m0, [[IN]]
-; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
+; GCN-LABEL: {{^}}extract_w_offset:
+; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
+; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
+; GCN-DAG: s_mov_b32 m0, [[IN]]
+; GCN: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
 define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
 entry:
   %idx = add i32 %in, 1
@@ -21,17 +21,17 @@ entry:
 }
 
 ; XXX: Could do v_or_b32 directly
-; CHECK-LABEL: {{^}}extract_w_offset_salu_use_vector:
-; CHECK: s_mov_b32 m0
-; CHECK-DAG: s_or_b32
-; CHECK-DAG: s_or_b32
-; CHECK-DAG: s_or_b32
-; CHECK-DAG: s_or_b32
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: v_movrels_b32_e32
+; GCN-LABEL: {{^}}extract_w_offset_salu_use_vector:
+; GCN: s_mov_b32 m0
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
+; GCN-DAG: s_or_b32
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN: v_movrels_b32_e32
 define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
 entry:
   %idx = add i32 %in, 1
@@ -41,14 +41,14 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_wo_offset:
-; CHECK-DAG: s_load_dword [[IN:s[0-9]+]]
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
-; CHECK-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0
-; CHECK-DAG: s_mov_b32 m0, [[IN]]
-; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
+; GCN-LABEL: {{^}}extract_wo_offset:
+; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
+; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0
+; GCN-DAG: s_mov_b32 m0, [[IN]]
+; GCN: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
 define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
 entry:
   %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
@@ -56,10 +56,10 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_neg_offset_sgpr:
+; GCN-LABEL: {{^}}extract_neg_offset_sgpr:
 ; The offset depends on the register that holds the first element of the vector.
-; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
-; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
+; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
+; GCN: v_movrels_b32_e32 v{{[0-9]}}, v0
 define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
@@ -68,10 +68,10 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
+; GCN-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
 ; The offset depends on the register that holds the first element of the vector.
-; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
-; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
+; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
+; GCN: v_movrels_b32_e32 v{{[0-9]}}, v0
 define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
@@ -81,19 +81,19 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_neg_offset_vgpr:
+; GCN-LABEL: {{^}}extract_neg_offset_vgpr:
 ; The offset depends on the register that holds the first element of the vector.
 
 ; FIXME: The waitcnt for the argument load can go after the loop
-; CHECK: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
-; CHECK: s_waitcnt lgkmcnt(0)
+; GCN: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
+; GCN: s_waitcnt lgkmcnt(0)
 
-; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: s_add_i32 m0, [[READLANE]], 0xfffffe0
-; CHECK: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1
-; CHECK: s_cbranch_execnz
+; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}
+; GCN: s_add_i32 m0, [[READLANE]], 0xfffffe0
+; GCN: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1
+; GCN: s_cbranch_execnz
 
-; CHECK: buffer_store_dword [[RESULT]]
+; GCN: buffer_store_dword [[RESULT]]
 define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -103,7 +103,7 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_undef_offset_sgpr:
+; GCN-LABEL: {{^}}extract_undef_offset_sgpr:
 define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
 entry:
   %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -112,10 +112,10 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
-; CHECK-DAG: buffer_load_dwordx4
-; CHECK-DAG: s_mov_b32 m0,
-; CHECK: v_movreld_b32
+; GCN-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
+; GCN-DAG: buffer_load_dwordx4
+; GCN-DAG: s_mov_b32 m0,
+; GCN: v_movreld_b32
 define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
 entry:
   %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -124,16 +124,16 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_w_offset:
-; CHECK-DAG: s_load_dword [[IN:s[0-9]+]]
-; CHECK-DAG: s_mov_b32 m0, [[IN]]
-; CHECK-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
-; CHECK-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
-; CHECK-DAG: v_mov_b32_e32 v[[ELT2:[0-9]+]], 0x40400000
-; CHECK-DAG: v_mov_b32_e32 v[[ELT3:[0-9]+]], 4.0
-; CHECK-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x40a00000
-; CHECK: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
-; CHECK: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
+; GCN-LABEL: {{^}}insert_w_offset:
+; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
+; GCN-DAG: s_mov_b32 m0, [[IN]]
+; GCN-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
+; GCN-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
+; GCN-DAG: v_mov_b32_e32 v[[ELT2:[0-9]+]], 0x40400000
+; GCN-DAG: v_mov_b32_e32 v[[ELT3:[0-9]+]], 4.0
+; GCN-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x40a00000
+; GCN: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
+; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
 define void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
 entry:
   %0 = add i32 %in, 1
@@ -142,11 +142,11 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_wo_offset:
-; CHECK: s_load_dword [[IN:s[0-9]+]]
-; CHECK: s_mov_b32 m0, [[IN]]
-; CHECK: v_movreld_b32_e32 v[[ELT0:[0-9]+]]
-; CHECK: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
+; GCN-LABEL: {{^}}insert_wo_offset:
+; GCN: s_load_dword [[IN:s[0-9]+]]
+; GCN: s_mov_b32 m0, [[IN]]
+; GCN: v_movreld_b32_e32 v[[ELT0:[0-9]+]]
+; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
 define void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
 entry:
   %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
@@ -154,10 +154,10 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_neg_offset_sgpr:
+; GCN-LABEL: {{^}}insert_neg_offset_sgpr:
 ; The offset depends on the register that holds the first element of the vector.
-; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
-; CHECK: v_movreld_b32_e32 v0, 5
+; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
+; GCN: v_movreld_b32_e32 v0, 5
 define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
@@ -169,10 +169,10 @@ entry:
 
 ; The vector indexed into is originally loaded into an SGPR rather
 ; than built with a reg_sequence
-; CHECK-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
+; GCN-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
 ; The offset depends on the register that holds the first element of the vector.
-; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
-; CHECK: v_movreld_b32_e32 v0, 5
+; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
+; GCN: v_movreld_b32_e32 v0, 5
 define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
@@ -181,25 +181,25 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_neg_offset_vgpr:
+; GCN-LABEL: {{^}}insert_neg_offset_vgpr:
 ; The offset depends on the register that holds the first element of the vector.
 
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
 
-; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
-; CHECK: s_waitcnt lgkmcnt(0)
+; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; GCN: s_waitcnt lgkmcnt(0)
 
-; CHECK: [[LOOPBB:BB[0-9]+_[0-9]+]]:
-; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
-; CHECK: s_add_i32 m0, [[READLANE]], 0xfffffe00
-; CHECK: v_movreld_b32_e32 [[VEC_ELT0]], 5
-; CHECK: s_cbranch_execnz [[LOOPBB]]
+; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
+; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
+; GCN: s_add_i32 m0, [[READLANE]], 0xfffffe00
+; GCN: v_movreld_b32_e32 [[VEC_ELT0]], 5
+; GCN: s_cbranch_execnz [[LOOPBB]]
 
-; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
-; CHECK: buffer_store_dword
+; GCN: s_mov_b64 exec, [[SAVEEXEC]]
+; GCN: buffer_store_dword
 define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -209,22 +209,22 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_neg_inline_offset_vgpr:
+; GCN-LABEL: {{^}}insert_neg_inline_offset_vgpr:
 
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
-; CHECK-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
+; GCN-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}
 
-; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
-; CHECK: s_waitcnt lgkmcnt(0)
+; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; GCN: s_waitcnt lgkmcnt(0)
 
 ; The offset depends on the register that holds the first element of the vector.
-; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
-; CHECK: s_add_i32 m0, [[READLANE]], -16
-; CHECK: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]
-; CHECK: s_cbranch_execnz
+; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
+; GCN: s_add_i32 m0, [[READLANE]], -16
+; GCN: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]
+; GCN: s_cbranch_execnz
 define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -237,44 +237,44 @@ entry:
   ret void
 }
 
 ; When the block is split to insert the loop, make sure any other
 ; places that need to be expanded in the same block are also handled.
-; CHECK-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:
+; GCN-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:
 
 ; FIXME: Why is vector copied in between?
 
-; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
-; CHECK-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
-; CHECK-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
-; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]
+; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
+; GCN-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
+; GCN-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
+; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]
 
-; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
-; CHECK: s_waitcnt vmcnt(0)
+; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
+; GCN: s_waitcnt vmcnt(0)
 
-; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; CHECK: s_mov_b32 m0, [[READLANE]]
-; CHECK: s_and_saveexec_b64 vcc, vcc
-; CHECK: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
-; CHECK-NEXT: s_xor_b64 exec, exec, vcc
-; CHECK-NEXT: s_cbranch_execnz [[LOOP0]]
+; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN: s_mov_b32 m0, [[READLANE]]
+; GCN: s_and_saveexec_b64 vcc, vcc
+; GCN: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
+; GCN-NEXT: s_xor_b64 exec, exec, vcc
+; GCN-NEXT: s_cbranch_execnz [[LOOP0]]
 
 ; FIXME: Redundant copy
-; CHECK: s_mov_b64 exec, [[MASK]]
-; CHECK: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]
-; CHECK: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec
-
-; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; CHECK: s_mov_b32 m0, [[READLANE]]
-; CHECK: s_and_saveexec_b64 vcc, vcc
-; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
-; CHECK-NEXT: s_xor_b64 exec, exec, vcc
-; CHECK: s_cbranch_execnz [[LOOP1]]
-
-; CHECK: buffer_store_dword [[MOVREL0]]
-; CHECK: buffer_store_dword [[MOVREL1]]
+; GCN: s_mov_b64 exec, [[MASK]]
+; GCN: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]
+; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec
+
+; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN: s_mov_b32 m0, [[READLANE]]
+; GCN: s_and_saveexec_b64 vcc, vcc
+; GCN-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
+; GCN-NEXT: s_xor_b64 exec, exec, vcc
+; GCN: s_cbranch_execnz [[LOOP1]]
+
+; GCN: buffer_store_dword [[MOVREL0]]
+; GCN: buffer_store_dword [[MOVREL1]]
 define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -298,41 +298,41 @@ bb2:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
-; CHECK-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
-; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
-; CHECK-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62
-
-; CHECK-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
-; CHECK: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
-; CHECK: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
-; CHECK: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
-
-; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; CHECK: s_mov_b32 m0, [[READLANE]]
-; CHECK: s_and_saveexec_b64 vcc, vcc
-; CHECK-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
-; CHECK-NEXT: s_xor_b64 exec, exec, vcc
-; CHECK: s_cbranch_execnz [[LOOP0]]
+; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
+; GCN-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
+; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
+; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62
+
+; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
+; GCN: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
+; GCN: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
+; GCN: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
+
+; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN: s_mov_b32 m0, [[READLANE]]
+; GCN: s_and_saveexec_b64 vcc, vcc
+; GCN-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
+; GCN-NEXT: s_xor_b64 exec, exec, vcc
+; GCN: s_cbranch_execnz [[LOOP0]]
 
 ; FIXME: Redundant copy
-; CHECK: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
-; CHECK: s_mov_b64 [[MASK]], exec
+; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
+; GCN: s_mov_b64 [[MASK]], exec
 
-; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
-; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
-; CHECK: s_mov_b32 m0, [[READLANE]]
-; CHECK: s_and_saveexec_b64 vcc, vcc
-; CHECK-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63
-; CHECK-NEXT: s_xor_b64 exec, exec, vcc
-; CHECK: s_cbranch_execnz [[LOOP1]]
+; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
+; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
+; GCN: s_mov_b32 m0, [[READLANE]]
+; GCN: s_and_saveexec_b64 vcc, vcc
+; GCN-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63
+; GCN-NEXT: s_xor_b64 exec, exec, vcc
+; GCN: s_cbranch_execnz [[LOOP1]]
 
-; CHECK: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:
+; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:
 
-; CHECK: buffer_store_dword [[INS0]]
+; GCN: buffer_store_dword [[INS0]]
 define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -355,24 +355,24 @@ bb2:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_adjacent_blocks:
-; CHECK: s_load_dword [[ARG:s[0-9]+]]
-; CHECK: s_cmp_lg_u32
-; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]
+; GCN-LABEL: {{^}}extract_adjacent_blocks:
+; GCN: s_load_dword [[ARG:s[0-9]+]]
+; GCN: s_cmp_lg_u32
+; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]
 
-; CHECK: buffer_load_dwordx4
-; CHECK: s_mov_b32 m0,
-; CHECK: v_movrels_b32_e32
-; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]
+; GCN: buffer_load_dwordx4
+; GCN: s_mov_b32 m0,
+; GCN: v_movrels_b32_e32
+; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]
 
-; CHECK: [[BB4]]:
-; CHECK: buffer_load_dwordx4
-; CHECK: s_mov_b32 m0,
-; CHECK: v_movrels_b32_e32
+; GCN: [[BB4]]:
+; GCN: buffer_load_dwordx4
+; GCN: s_mov_b32 m0,
+; GCN: v_movrels_b32_e32
 
-; CHECK: [[ENDBB]]:
-; CHECK: buffer_store_dword
-; CHECK: s_endpgm
+; GCN: [[ENDBB]]:
+; GCN: buffer_store_dword
+; GCN: s_endpgm
 define void @extract_adjacent_blocks(i32 %arg) #0 {
 bb:
   %tmp = icmp eq i32 %arg, 0
@@ -394,24 +394,24 @@ bb7:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insert_adjacent_blocks:
-; CHECK: s_load_dword [[ARG:s[0-9]+]]
-; CHECK: s_cmp_lg_u32
-; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]
+; GCN-LABEL: {{^}}insert_adjacent_blocks:
+; GCN: s_load_dword [[ARG:s[0-9]+]]
+; GCN: s_cmp_lg_u32
+; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]
 
-; CHECK: buffer_load_dwordx4
-; CHECK: s_mov_b32 m0,
-; CHECK: v_movreld_b32_e32
-; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]
+; GCN: buffer_load_dwordx4
+; GCN: s_mov_b32 m0,
+; GCN: v_movreld_b32_e32
+; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]
 
-; CHECK: [[BB4]]:
-; CHECK: buffer_load_dwordx4
-; CHECK: s_mov_b32 m0,
-; CHECK: v_movreld_b32_e32
+; GCN: [[BB4]]:
+; GCN: buffer_load_dwordx4
+; GCN: s_mov_b32 m0,
+; GCN: v_movreld_b32_e32
 
-; CHECK: [[ENDBB]]:
-; CHECK: buffer_store_dword
-; CHECK: s_endpgm
+; GCN: [[ENDBB]]:
+; GCN: buffer_store_dword
+; GCN: s_endpgm
 define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
 bb:
   %tmp = icmp eq i32 %arg, 0
@@ -435,32 +435,32 @@ bb7: ; preds = %bb4, %bb1
 
 ; FIXME: Should be able to fold zero input to movreld to inline imm?
-; CHECK-LABEL: {{^}}multi_same_block:
-
-; CHECK-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
-; CHECK-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
-; CHECK-DAG: s_load_dword [[ARG:s[0-9]+]]
-
-; CHECK-DAG: s_add_i32 m0, [[ARG]], -16
-; CHECK: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
-; CHECK-NOT: m0
-
-; CHECK: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
-; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd
-; CHECK: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0
-
-; CHECK: s_mov_b32 m0, -1
-; CHECK: ds_write_b32
-; CHECK: ds_write_b32
-; CHECK: s_endpgm
+; GCN-LABEL: {{^}}multi_same_block:
+
+; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
+; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
+; GCN-DAG: s_load_dword [[ARG:s[0-9]+]]
+
+; GCN-DAG: s_add_i32 m0, [[ARG]], -16
+; GCN: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
+; GCN-NOT: m0
+
+; GCN: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd
+; GCN: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0
+
+; GCN: s_mov_b32 m0, -1
+; GCN: ds_write_b32
+; GCN: ds_write_b32
+; GCN: s_endpgm
 define void @multi_same_block(i32 %arg) #0 {
 bb:
   %tmp1 = add i32 %arg, -16
@@ -477,12 +477,12 @@ bb:
 }
 
 ; offset puts outside of superegister bounaries, so clamp to 1st element.
-; CHECK-LABEL: {{^}}extract_largest_inbounds_offset:
-; CHECK-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
-; CHECK-DAG: s_load_dword [[IDX:s[0-9]+]]
-; CHECK: s_mov_b32 m0, [[IDX]]
-; CHECK: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
-; CHECK: buffer_store_dword [[EXTRACT]]
+; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
+; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
+; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_mov_b32 m0, [[IDX]]
+; GCN: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
+; GCN: buffer_store_dword [[EXTRACT]]
 define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
 entry:
   %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -492,12 +492,12 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}extract_out_of_bounds_offset:
-; CHECK-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
-; CHECK-DAG: s_load_dword [[IDX:s[0-9]+]]
-; CHECK: s_add_i32 m0, [[IDX]], 4
-; CHECK: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
-; CHECK: buffer_store_dword [[EXTRACT]]
+; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
+; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
+; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_add_i32 m0, [[IDX]], 4
+; GCN: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
+; GCN: buffer_store_dword [[EXTRACT]]
 define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
 entry:
   %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -510,12 +510,12 @@ entry:
   ret void
 }
 
 ; Test that the or is folded into the base address register instead of
 ; added to m0
-; CHECK-LABEL: {{^}}extractelement_v4i32_or_index:
-; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
-; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
-; CHECK-NOT: [[IDX_SHL]]
-; CHECK: s_mov_b32 m0, [[IDX_SHL]]
-; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}extractelement_v4i32_or_index:
+; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
+; GCN-NOT: [[IDX_SHL]]
+; GCN: s_mov_b32 m0, [[IDX_SHL]]
+; GCN: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 define void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
 entry:
   %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -526,12 +526,12 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: {{^}}insertelement_v4f32_or_index:
-; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
-; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
-; CHECK-NOT: [[IDX_SHL]]
-; CHECK: s_mov_b32 m0, [[IDX_SHL]]
-; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-LABEL: {{^}}insertelement_v4f32_or_index:
+; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
+; GCN-NOT: [[IDX_SHL]]
+; GCN: s_mov_b32 m0, [[IDX_SHL]]
+; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
   %idx.shl = shl i32 %idx.in, 2
   %idx = or i32 %idx.shl, 1
@@ -540,21 +540,21 @@ define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x f
   ret void
 }
 
-; CHECK-LABEL: {{^}}broken_phi_bb:
-; CHECK: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8
+; GCN-LABEL: {{^}}broken_phi_bb:
+; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8
 
-; CHECK: s_branch [[BB2:BB[0-9]+_[0-9]+]]
+; GCN: s_branch [[BB2:BB[0-9]+_[0-9]+]]
 
-; CHECK: {{^BB[0-9]+_[0-9]+}}:
-; CHECK: s_mov_b64 exec,
+; GCN: {{^BB[0-9]+_[0-9]+}}:
+; GCN: s_mov_b64 exec,
 
-; CHECK: [[BB2]]:
-; CHECK: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
-; CHECK: buffer_load_dword
+; GCN: [[BB2]]:
+; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
+; GCN: buffer_load_dword
 
-; CHECK: [[REGLOOP:BB[0-9]+_[0-9]+]]:
-; CHECK: v_movreld_b32_e32
-; CHECK: s_cbranch_execnz [[REGLOOP]]
+; GCN: [[REGLOOP:BB[0-9]+_[0-9]+]]:
+; GCN: v_movreld_b32_e32
+; GCN: s_cbranch_execnz [[REGLOOP]]
 define void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
 bb:
   br label %bb2