author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-09-08 15:07:31 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-09-08 15:07:31 +0000 |
commit | 7ac9c4a0745b33879001ed0ac0ab7a57be540f6d (patch) | |
tree | 969214e11d52344046b787bbb2a2048f13169fe0 /llvm/test/CodeGen/R600/local-atomics.ll | |
parent | 9903ccf7ee56f911eb0aee34c81f6962dec79c1b (diff) | |
download | bcm5719-llvm-7ac9c4a0745b33879001ed0ac0ab7a57be540f6d.tar.gz bcm5719-llvm-7ac9c4a0745b33879001ed0ac0ab7a57be540f6d.zip |
R600/SI: Replace LDS atomics with no return versions
llvm-svn: 217379
Diffstat (limited to 'llvm/test/CodeGen/R600/local-atomics.ll')
-rw-r--r-- | llvm/test/CodeGen/R600/local-atomics.ll | 250 |
1 file changed, 250 insertions, 0 deletions
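The new tests below cover the case this patch targets: an LDS atomicrmw whose result is never read should be selected to the no-return DS instruction (e.g. DS_ADD_U32) rather than the returning form (e.g. DS_ADD_RTN_U32), since there is no use for the returned value. As a minimal sketch of the distinction (not part of the patch; the function names here are made up for illustration), the only difference between the two cases is whether the atomic's result is used afterwards:

; Result is stored, so a returning DS atomic (e.g. DS_ADD_RTN_U32) is expected.
define void @lds_add_result_used(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
  %old = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
  store i32 %old, i32 addrspace(1)* %out, align 4
  ret void
}

; Result is dead, so with this patch the no-return DS_ADD_U32 should be selected instead.
define void @lds_add_result_unused(i32 addrspace(3)* %ptr) nounwind {
  %old = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
  ret void
}

The existing *_ret_* tests earlier in this file already cover the first case; the added *_noret_* tests in the diff cover the second.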
diff --git a/llvm/test/CodeGen/R600/local-atomics.ll b/llvm/test/CodeGen/R600/local-atomics.ll
index 043269f4017..f85afd2a3da 100644
--- a/llvm/test/CodeGen/R600/local-atomics.ll
+++ b/llvm/test/CodeGen/R600/local-atomics.ll
@@ -279,3 +279,253 @@ define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspac
   store i32 %result, i32 addrspace(1)* %out, align 4
   ret void
 }
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_WRXCHG_RTN_B32 [[RESULT:v[0-9]+]], [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xchg_noret_i32_offset:
+; SI: DS_WRXCHG_RTN_B32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; XXX - Is it really necessary to load 4 into VGPR?
+; FUNC-LABEL: @lds_atomic_add_noret_i32:
+; SI: S_LOAD_DWORD [[SPTR:s[0-9]+]],
+; SI: V_MOV_B32_e32 [[DATA:v[0-9]+]], 4
+; SI: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
+; SI: DS_ADD_U32 [[VPTR]], [[DATA]], 0x0, [M0]
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i32_offset:
+; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_add_noret_i32_bad_si_offset
+; SI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
+; CI: DS_ADD_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+  %sub = sub i32 %a, %b
+  %add = add i32 %sub, 4
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw add i32 addrspace(3)* %ptr, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_INC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_inc_noret_i32_bad_si_offset:
+; SI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0
+; CI: DS_INC_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
+  %sub = sub i32 %a, %b
+  %add = add i32 %sub, 4
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i32:
+; SI: DS_SUB_U32
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_sub_noret_i32_offset:
+; SI: DS_SUB_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i32:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x0
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw sub i32 addrspace(3)* %ptr, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_dec_noret_i32_offset:
+; SI: S_MOV_B32 [[SNEGONE:s[0-9]+]], -1
+; SI: V_MOV_B32_e32 [[NEGONE:v[0-9]+]], [[SNEGONE]]
+; SI: DS_DEC_U32 v{{[0-9]+}}, [[NEGONE]], 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i32:
+; SI: DS_AND_B32
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw and i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_and_noret_i32_offset:
+; SI: DS_AND_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i32:
+; SI: DS_OR_B32
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw or i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_or_noret_i32_offset:
+; SI: DS_OR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i32:
+; SI: DS_XOR_B32
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw xor i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_xor_noret_i32_offset:
+; SI: DS_XOR_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
+; XFUNC-LABEL: @lds_atomic_nand_noret_i32:
+; define void @lds_atomic_nand_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+;   %result = atomicrmw nand i32 addrspace(3)* %ptr, i32 4 seq_cst
+;   ret void
+; }
+
+; FUNC-LABEL: @lds_atomic_min_noret_i32:
+; SI: DS_MIN_I32
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw min i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_min_noret_i32_offset:
+; SI: DS_MIN_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i32:
+; SI: DS_MAX_I32
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw max i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_max_noret_i32_offset:
+; SI: DS_MAX_I32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i32:
+; SI: DS_MIN_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umin i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umin_noret_i32_offset:
+; SI: DS_MIN_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i32:
+; SI: DS_MAX_U32
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i32(i32 addrspace(3)* %ptr) nounwind {
+  %result = atomicrmw umax i32 addrspace(3)* %ptr, i32 4 seq_cst
+  ret void
+}
+
+; FUNC-LABEL: @lds_atomic_umax_noret_i32_offset:
+; SI: DS_MAX_U32 v{{[0-9]+}}, v{{[0-9]+}}, 0x10
+; SI: S_ENDPGM
+define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
+  %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+  %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
+  ret void
+}