diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-04-22 20:21:36 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-04-22 20:21:36 +0000 |
| commit | 629d12de70959f49f0b8f78eb9e6e217103a24c7 (patch) | |
| tree | b4b1bfc0306ed7cbc5a790a87418dc1e473f3d7e /llvm/test/CodeGen/AMDGPU | |
| parent | 66ac1d61526268d1f81db8ed1d5caccfed2452ec (diff) | |
| download | bcm5719-llvm-629d12de70959f49f0b8f78eb9e6e217103a24c7.tar.gz bcm5719-llvm-629d12de70959f49f0b8f78eb9e6e217103a24c7.zip | |
DAGCombiner: Relax alignment restriction when changing load type
If the target allows the alignment, this should still be OK.
llvm-svn: 267209
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll | 38 |
1 file changed, 38 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll new file mode 100644 index 00000000000..c255b05324b --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll @@ -0,0 +1,38 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s + +; GCN-LABEL: {{^}}reduce_i64_load_align_4_width_to_i32: +; GCN: buffer_load_dword [[VAL:v[0-9]+]] +; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, [[VAL]] +; GCN: buffer_store_dwordx2 +define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 { + %a = load i64, i64 addrspace(1)* %in, align 4 + %and = and i64 %a, 1234567 + store i64 %and, i64 addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt0: +; GCN: buffer_load_dword [[VAL:v[0-9]+]] +; GCN: buffer_store_dword [[VAL]] +define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 { + %a = load i64, i64 addrspace(1)* %in, align 4 + %vec = bitcast i64 %a to <2 x i32> + %elt0 = extractelement <2 x i32> %vec, i32 0 + store i32 %elt0, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt1: +; GCN: buffer_load_dword [[VAL:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4 +; GCN: buffer_store_dword [[VAL]] +define void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 { + %a = load i64, i64 addrspace(1)* %in, align 4 + %vec = bitcast i64 %a to <2 x i32> + %elt0 = extractelement <2 x i32> %vec, i32 1 + store i32 %elt0, i32 addrspace(1)* %out + ret void +} + +attributes #0 = { nounwind } |

