Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll | 70
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 8e02303377c..2013fca503e 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -22,7 +22,7 @@
 define amdgpu_kernel void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
 entry:
   %idx = add i32 %in, 1
-  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
+  %elt = extractelement <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, i32 %idx
   store float %elt, float addrspace(1)* %out
   ret void
 }
@@ -44,11 +44,11 @@ entry:
 ; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0{{$}}
 ; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE-NEXT: s_set_gpr_idx_off
-define amdgpu_kernel void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
+define amdgpu_kernel void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <16 x i32> %or.val) {
 entry:
   %idx = add i32 %in, 1
-  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
-  %elt = extractelement <4 x i32> %vec, i32 %idx
+  %vec = or <16 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
+  %elt = extractelement <16 x i32> %vec, i32 %idx
   store i32 %elt, i32 addrspace(1)* %out
   ret void
 }
@@ -68,7 +68,7 @@ entry:
 ; IDXMODE-NEXT: s_set_gpr_idx_off
 define amdgpu_kernel void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
 entry:
-  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
+  %elt = extractelement <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, i32 %in
   store float %elt, float addrspace(1)* %out
   ret void
 }
@@ -79,15 +79,15 @@ entry:
 ; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0
 
 ; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
-; IDXMODE: v_mov_b32_e32 v2, 2
-; IDXMODE: v_mov_b32_e32 v3, 3
+; IDXMODE: v_mov_b32_e32 v14, 15
+; IDXMODE: v_mov_b32_e32 v15, 16
 ; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
 ; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE-NEXT: s_set_gpr_idx_off
 define amdgpu_kernel void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
-  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
+  %value = extractelement <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, i32 %index
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -102,14 +102,26 @@ entry:
 ; IDXMODE: v_mov_b32_e32 v1,
 ; IDXMODE: v_mov_b32_e32 v2,
 ; IDXMODE: v_mov_b32_e32 v3,
+; IDXMODE: v_mov_b32_e32 v4,
+; IDXMODE: v_mov_b32_e32 v5,
+; IDXMODE: v_mov_b32_e32 v6,
+; IDXMODE: v_mov_b32_e32 v7,
+; IDXMODE: v_mov_b32_e32 v8,
+; IDXMODE: v_mov_b32_e32 v9,
+; IDXMODE: v_mov_b32_e32 v10,
+; IDXMODE: v_mov_b32_e32 v11,
+; IDXMODE: v_mov_b32_e32 v12,
+; IDXMODE: v_mov_b32_e32 v13,
+; IDXMODE: v_mov_b32_e32 v14,
+; IDXMODE: v_mov_b32_e32 v15,
 ; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
 ; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE-NEXT: s_set_gpr_idx_off
-define amdgpu_kernel void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
+define amdgpu_kernel void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <16 x i32> %vec0, <16 x i32> %vec1, i32 %offset) {
 entry:
   %index = add i32 %offset, -512
-  %or = or <4 x i32> %vec0, %vec1
-  %value = extractelement <4 x i32> %or, i32 %index
+  %or = or <16 x i32> %vec0, %vec1
+  %value = extractelement <16 x i32> %or, i32 %index
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -138,7 +150,7 @@ define amdgpu_kernel void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
 entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
   %index = add i32 %id, -512
-  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
+  %value = extractelement <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, i32 %index
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -364,9 +376,9 @@ entry:
   %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
   %idx0 = load volatile i32, i32 addrspace(1)* %gep
   %idx1 = add i32 %idx0, 1
-  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
+  %val0 = extractelement <16 x i32> <i32 7, i32 9, i32 11, i32 13, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, i32 %idx0
   %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={s4}" ()
-  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
+  %val1 = extractelement <16 x i32> <i32 7, i32 9, i32 11, i32 13, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, i32 %idx1
   store volatile i32 %val0, i32 addrspace(1)* %out0
   store volatile i32 %val1, i32 addrspace(1)* %out0
   %cmp = icmp eq i32 %id, 0
@@ -522,7 +534,7 @@ bb:
 
 ; offset puts outside of superegister bounaries, so clamp to 1st element.
 ; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
-; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\].* offset:48}}
 ; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
 ; MOVREL: s_mov_b32 m0, [[IDX]]
 ; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
@@ -532,11 +544,11 @@ bb:
 ; IDXMODE: s_set_gpr_idx_off
 ; GCN: buffer_store_dword [[EXTRACT]]
 
-define amdgpu_kernel void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
+define amdgpu_kernel void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <16 x i32> addrspace(1)* %in, i32 %idx) {
 entry:
-  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
-  %offset = add i32 %idx, 3
-  %value = extractelement <4 x i32> %ld, i32 %offset
+  %ld = load volatile <16 x i32>, <16 x i32> addrspace(1)* %in
+  %offset = add i32 %idx, 15
+  %value = extractelement <16 x i32> %ld, i32 %offset
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -544,20 +556,20 @@ entry:
 ; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
 ; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
 ; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
-; MOVREL: s_add_i32 m0, [[IDX]], 4
+; MOVREL: s_add_i32 m0, [[IDX]], 16
 ; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 
-; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 4
+; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 16
 ; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], src0
 ; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
 ; IDXMODE: s_set_gpr_idx_off
 ; GCN: buffer_store_dword [[EXTRACT]]
 
-define amdgpu_kernel void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
+define amdgpu_kernel void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <16 x i32> addrspace(1)* %in, i32 %idx) {
 entry:
-  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
-  %offset = add i32 %idx, 4
-  %value = extractelement <4 x i32> %ld, i32 %offset
+  %ld = load volatile <16 x i32>, <16 x i32> addrspace(1)* %in
+  %offset = add i32 %idx, 16
+  %value = extractelement <16 x i32> %ld, i32 %offset
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
@@ -565,7 +577,7 @@ entry:
 
 ; Test that the or is folded into the base address register instead of
 ; added to m0
-; GCN-LABEL: {{^}}extractelement_v4i32_or_index:
+; GCN-LABEL: {{^}}extractelement_v16i32_or_index:
 ; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
 ; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
 ; GCN-NOT: [[IDX_SHL]]
@@ -576,12 +588,12 @@ entry:
 ; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], src0
 ; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
 ; IDXMODE: s_set_gpr_idx_off
-define amdgpu_kernel void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
+define amdgpu_kernel void @extractelement_v16i32_or_index(i32 addrspace(1)* %out, <16 x i32> addrspace(1)* %in, i32 %idx.in) {
 entry:
-  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
+  %ld = load volatile <16 x i32>, <16 x i32> addrspace(1)* %in
   %idx.shl = shl i32 %idx.in, 2
   %idx = or i32 %idx.shl, 1
-  %value = extractelement <4 x i32> %ld, i32 %idx
+  %value = extractelement <16 x i32> %ld, i32 %idx
   store i32 %value, i32 addrspace(1)* %out
   ret void
 }
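The operation these tests exercise is an extractelement whose index is only known at run time. As a minimal, hypothetical illustration (a sketch, not part of this patch; the kernel name and argument list are invented for the example), the IR pattern looks like this:

; Sketch only: dynamically indexed extract from a <16 x i32> vector.
; %idx is not a compile-time constant, so the element cannot be selected
; with a plain register-to-register copy.
define amdgpu_kernel void @dyn_extract_sketch(i32 addrspace(1)* %out, <16 x i32> %vec, i32 %idx) {
entry:
  %elt = extractelement <16 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

On GCN subtargets the backend lowers such an extract either by writing the index to m0 and issuing v_movrels_b32 (the MOVREL check prefix) or by bracketing a v_mov_b32 with s_set_gpr_idx_on/s_set_gpr_idx_off (the IDXMODE prefix), which is what the check lines in this diff verify; widening the test vectors from four to sixteen elements means each indexed value now spans sixteen VGPRs instead of four.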