| author    | Geoff Berry <gberry@codeaurora.org>      | 2017-08-16 20:50:01 +0000 |
|-----------|------------------------------------------|---------------------------|
| committer | Geoff Berry <gberry@codeaurora.org>      | 2017-08-16 20:50:01 +0000 |
| commit    | 87f8d25150facee0e5ae1884871db420c925877c |                           |
| tree      | b770d78ed491fed130a9d01d63675f48a3638a43 /llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll | |
| parent    | 0fb5b788920b6039b13197fd11212c3cf8d9d714 |                           |
[MachineCopyPropagation] Extend pass to do COPY source forwarding
This change extends MachineCopyPropagation to do COPY source forwarding: when a later instruction reads the destination register of a COPY, the read is rewritten to use the COPY's source register directly.
It also extends the pass so that it can run during register allocation, after physical registers have been assigned but before the virtual registers have been rewritten. Running at that point lets the pass remove virtual-register COPYs whose LiveIntervals become dead once all of their uses have been forwarded.
Reviewers: qcolombet, javed.absar, MatzeB, jonpa
Subscribers: jyknight, nemanjai, llvm-commits, nhaehnle, mcrosier, mgorny
Differential Revision: https://reviews.llvm.org/D30751
llvm-svn: 311038
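To make the forwarding idea concrete: after physical registers have been assigned, an instruction that reads the destination of an earlier `dst = COPY src` can usually be rewritten to read `src` directly, as long as neither register has been clobbered in between; once every reader is rewritten, the COPY itself is dead. The following is a minimal, self-contained C++ sketch of that bookkeeping. It is a toy model, not LLVM's actual MachineCopyPropagation code, and the `Inst` struct and register names are invented for illustration.

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// One instruction in a toy, already-register-allocated block.
struct Inst {
  std::string op;                 // opcode, e.g. "COPY" or "BUFFER_STORE_BYTE"
  std::string def;                // register written (empty if none)
  std::vector<std::string> uses;  // registers read
};

int main() {
  // Toy block mirroring the pattern in this test: the scratch offset arrives
  // in s1, is copied into s2, and the store then reads s2.
  std::vector<Inst> block = {
      {"COPY", "s2", {"s1"}},
      {"BUFFER_STORE_BYTE", "", {"v0", "s2"}},
  };

  // Live COPYs seen so far: destination register -> source register.
  std::map<std::string, std::string> copySrc;

  for (Inst &I : block) {
    // Forward each use through a still-live COPY, if one defines it.
    for (std::string &U : I.uses) {
      auto It = copySrc.find(U);
      if (It != copySrc.end())
        U = It->second;  // read the COPY's source directly
    }
    if (!I.def.empty()) {
      // A new definition clobbers any tracked COPY whose dst or src it overwrites.
      for (auto It = copySrc.begin(); It != copySrc.end();) {
        if (It->first == I.def || It->second == I.def)
          It = copySrc.erase(It);
        else
          ++It;
      }
      if (I.op == "COPY")
        copySrc[I.def] = I.uses[0];  // track the new copy for later forwarding
    }
  }

  // Print the rewritten block: the store now reads s1, so the COPY is dead.
  for (const Inst &I : block) {
    std::printf("%-18s %-3s <-", I.op.c_str(), I.def.c_str());
    for (const std::string &U : I.uses)
      std::printf(" %s", U.c_str());
    std::printf("\n");
  }
}
```

Compiled and run, the sketch prints the block with the store reading s1 directly, which mirrors the s2 -> s1 updates to the FileCheck lines in the diff below.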
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll | 34 |
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
index 742c4f8af85..17f8c1a55a1 100644
--- a/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -5,49 +5,49 @@
 ; Test addressing modes when the scratch base is not a frame index.
 
 ; GCN-LABEL: {{^}}store_private_offset_i8:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @store_private_offset_i8() #0 {
   store volatile i8 5, i8* inttoptr (i32 8 to i8*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i16:
-; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @store_private_offset_i16() #0 {
   store volatile i16 5, i16* inttoptr (i32 8 to i16*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i32:
-; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @store_private_offset_i32() #0 {
   store volatile i32 5, i32* inttoptr (i32 8 to i32*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v2i32:
-; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @store_private_offset_v2i32() #0 {
   store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_v4i32:
-; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @store_private_offset_v4i32() #0 {
   store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 0>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @load_private_offset_i8() #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i8:
-; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
 define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %sextload = sext i8 %load to i32
@@ -56,7 +56,7 @@ define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }
 
 ; GCN-LABEL: {{^}}zextload_private_offset_i8:
-; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s3 offset:8
 define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
   %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
   %zextload = zext i8 %load to i32
@@ -65,14 +65,14 @@ define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @load_private_offset_i16() #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}sextload_private_offset_i16:
-; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s3 offset:8
 define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   %sextload = sext i16 %load to i32
@@ -81,7 +81,7 @@ define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #
 }
 
 ; GCN-LABEL: {{^}}zextload_private_offset_i16:
-; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s3 offset:8
 define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
   %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
   %zextload = zext i16 %load to i32
@@ -90,28 +90,28 @@ define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_i32:
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @load_private_offset_i32() #0 {
   %load = load volatile i32, i32* inttoptr (i32 8 to i32*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v2i32:
-; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @load_private_offset_v2i32() #0 {
   %load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_private_offset_v4i32:
-; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s2 offset:8
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s1 offset:8
 define amdgpu_kernel void @load_private_offset_v4i32() #0 {
   %load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
   ret void
 }
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
-; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s2 offset:4095
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s1 offset:4095
 define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
   store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
   ret void
@@ -119,7 +119,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
   store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
   ret void
@@ -127,7 +127,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
 
 ; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
 ; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
-; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s2 offen offset:1{{$}}
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s1 offen offset:1{{$}}
 define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
   store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
   ret void
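Note the uniform pattern across the hunks: only the expected offset SGPR in each MUBUF instruction changes (s2 becomes s1 in the kernels without arguments, s8 becomes s3 in the kernels that take an %out pointer), consistent with the loads and stores now being expected to read the COPY's source register directly rather than the copied-to register.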