diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-24 00:26:40 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-11-24 00:26:40 +0000 |
| commit | 9e5c7b10316aca49605dd7cf5f4e6e4a3ab76cd6 (patch) | |
| tree | b163ccaf77708f0e9a1e3082c207e49e2d8b6d7b /llvm/test/CodeGen/AMDGPU/spill-m0.ll | |
| parent | 8812f28f47a74d32bdd14181ebfe906cbfcd7346 (diff) | |
| download | bcm5719-llvm-9e5c7b10316aca49605dd7cf5f4e6e4a3ab76cd6.tar.gz bcm5719-llvm-9e5c7b10316aca49605dd7cf5f4e6e4a3ab76cd6.zip | |
AMDGPU: Make m0 unallocatable
m0 may need to be written for spill code, so
we don't want general code uses relying on the
value stored in it.
This introduces a few code quality regressions where copies
from m0 are not coalesced into copies of a copy of m0.
llvm-svn: 287841
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/spill-m0.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/spill-m0.ll | 31 |
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/spill-m0.ll b/llvm/test/CodeGen/AMDGPU/spill-m0.ll index c5ef75e5fb7..548735f1e78 100644 --- a/llvm/test/CodeGen/AMDGPU/spill-m0.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-m0.ll @@ -9,38 +9,39 @@ ; GCN-LABEL: {{^}}spill_m0: ; TOSMEM: s_mov_b32 s84, SCRATCH_RSRC_DWORD0 -; GCN: s_cmp_lg_u32 +; GCN-DAG: s_cmp_lg_u32 -; TOVGPR: s_mov_b32 vcc_hi, m0 -; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], vcc_hi, 0 +; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0 +; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0 -; TOVMEM: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], m0 +; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0 +; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]] ; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Spill ; TOVMEM: s_waitcnt vmcnt(0) -; TOSMEM: s_mov_b32 vcc_hi, m0 +; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0 ; TOSMEM: s_mov_b32 m0, s3{{$}} -; TOSMEM-NOT: vcc_hi -; TOSMEM: s_buffer_store_dword vcc_hi, s[84:87], m0 ; 4-byte Folded Spill +; TOSMEM-NOT: [[M0_COPY]] +; TOSMEM: s_buffer_store_dword [[M0_COPY]], s[84:87], m0 ; 4-byte Folded Spill ; TOSMEM: s_waitcnt lgkmcnt(0) ; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]] ; GCN: [[ENDIF]]: -; TOVGPR: v_readlane_b32 vcc_hi, [[SPILL_VREG]], 0 -; TOVGPR: s_mov_b32 m0, vcc_hi +; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0 +; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]] ; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Reload ; TOVMEM: s_waitcnt vmcnt(0) -; TOVMEM: v_readfirstlane_b32 vcc_hi, [[RELOAD_VREG]] -; TOVMEM: s_mov_b32 m0, vcc_hi +; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]] +; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]] ; TOSMEM: s_mov_b32 m0, s3{{$}} -; TOSMEM: s_buffer_load_dword vcc_hi, s[84:87], m0 ; 4-byte Folded Reload -; TOSMEM-NOT: vcc_hi -; TOSMEM: s_mov_b32 m0, vcc_hi +; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s[84:87], m0 ; 4-byte Folded Reload +; TOSMEM-NOT: [[M0_RESTORE]] +; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]] -; GCN: s_add_i32 m0, m0, 1 +; GCN: s_add_i32 s{{[0-9]+}}, m0, 1 define void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 { entry: %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0 |

