From 2b957b5a6f228797a871426d48098fee479883ef Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 2 May 2016 20:07:26 +0000
Subject: AMDGPU: Make i64 loads/stores promote to v2i32

Now that unaligned access expansion should not attempt to produce i64
accesses, we can remove the hack in PreprocessISelDAG where this is done.

This allows splitting i64 private accesses while allowing the new add
nodes indexing the vector components to be folded with the base pointer
arithmetic.

llvm-svn: 268293
---
 llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll b/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
index 0d72adc3a8f..150e3430a5e 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
@@ -14,7 +14,7 @@ define void @materialize_0_i32(i32 addrspace(1)* %out) {
 
 ; GCN-LABEL: {{^}}materialize_0_i64:
 ; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
-; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], 0{{$}}
+; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
 define void @materialize_0_i64(i64 addrspace(1)* %out) {
   store i64 0, i64 addrspace(1)* %out
@@ -31,7 +31,7 @@ define void @materialize_neg1_i32(i32 addrspace(1)* %out) {
 
 ; GCN-LABEL: {{^}}materialize_neg1_i64:
 ; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
-; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], -1{{$}}
+; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LOK]]:[[HIK]]{{\]}}
 define void @materialize_neg1_i64(i64 addrspace(1)* %out) {
   store i64 -1, i64 addrspace(1)* %out
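
Background note (not part of the patch above): the promotion named in the
subject line is normally expressed through the generic TargetLowering hooks.
A minimal sketch, assuming it sits in the AMDGPU target's lowering
constructor (the exact placement is an assumption here, not taken from this
commit's diff), would look like:

    // Sketch: legalize 64-bit scalar loads/stores by promoting them to the
    // corresponding v2i32 operations, i.e. two 32-bit lanes per i64 access.
    setOperationAction(ISD::LOAD,  MVT::i64, Promote);
    AddPromotedToType(ISD::LOAD,  MVT::i64, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::i64, Promote);
    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

With the store handled as v2i32, a 64-bit splat constant such as 0 or -1
becomes a two-lane vector with identical elements, which matches the updated
checks above: the high dword v[[HIK]] is now expected to be a copy of the low
dword v[[LOK]] rather than a second immediate move.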