diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-03-27 18:39:45 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2018-03-27 18:39:45 +0000 |
| commit | 0a0c871f601aae4a26694eea06f37becba78d1e4 (patch) | |
| tree | d86517493e06fa8a5349df43c1bb03fcf45ea37f | |
| parent | 126a874952d76a19bfc4bbd1b0a69cac65c8b35c (diff) | |
| download | bcm5719-llvm-0a0c871f601aae4a26694eea06f37becba78d1e4.tar.gz bcm5719-llvm-0a0c871f601aae4a26694eea06f37becba78d1e4.zip | |
AMDGPU: Fix crash when MachinePointerInfo invalid
The combine on a select of a load only triggers for
addrspace 0, and discards the MachinePointerInfo. The
conservative default needs to be used for this.
llvm-svn: 328652
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 2 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/load-select-ptr.ll | 82 |
2 files changed, 83 insertions, 1 deletion
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 58afd167e0c..51c27b0b1f7 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -1087,7 +1087,7 @@ bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { const MemSDNode *MemNode = cast<MemSDNode>(N); const Value *Ptr = MemNode->getMemOperand()->getValue(); - const Instruction *I = dyn_cast<Instruction>(Ptr); + const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); return I && I->getMetadata("amdgpu.noclobber"); } diff --git a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll new file mode 100644 index 00000000000..5d584b63c6c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll @@ -0,0 +1,82 @@ +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +; Combine on select c, (load x), (load y) -> load (select c, x, y) +; drops MachinePointerInfo, so it can't be relied on for correctness. + +; GCN-LABEL: {{^}}select_ptr_crash_i64_flat: +; GCN: s_load_dwordx2 +; GCN: s_load_dwordx2 +; GCN: s_load_dwordx2 + +; GCN: v_cmp_eq_u32 +; GCN: v_cndmask_b32 +; GCN: v_cndmask_b32 + +; GCN-NOT: load_dword +; GCN: flat_load_dwordx2 +; GCN-NOT: load_dword + +; GCN: flat_store_dwordx2 +define amdgpu_kernel void @select_ptr_crash_i64_flat(i32 %tmp, i64* %ptr0, i64* %ptr1, i64 addrspace(1)* %ptr2) { + %tmp2 = icmp eq i32 %tmp, 0 + %tmp3 = load i64, i64* %ptr0, align 8 + %tmp4 = load i64, i64* %ptr1, align 8 + %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4 + store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8 + ret void +} + +; The transform currently doesn't happen for non-addrspace 0, but it +; should. 
+ +; GCN-LABEL: {{^}}select_ptr_crash_i64_global: +; GCN: s_load_dwordx2 +; GCN: s_load_dwordx2 +; GCN: s_load_dwordx2 +; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}} +; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}} +; GCN: v_cndmask_b32 +; GCN: v_cndmask_b32 +; GCN: flat_store_dwordx2 +define amdgpu_kernel void @select_ptr_crash_i64_global(i32 %tmp, i64 addrspace(1)* %ptr0, i64 addrspace(1)* %ptr1, i64 addrspace(1)* %ptr2) { + %tmp2 = icmp eq i32 %tmp, 0 + %tmp3 = load i64, i64 addrspace(1)* %ptr0, align 8 + %tmp4 = load i64, i64 addrspace(1)* %ptr1, align 8 + %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4 + store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8 + ret void +} + +; GCN-LABEL: {{^}}select_ptr_crash_i64_local: +; GCN: ds_read_b64 +; GCN: ds_read_b64 +; GCN: v_cndmask_b32 +; GCN: v_cndmask_b32 +; GCN: flat_store_dwordx2 +define amdgpu_kernel void @select_ptr_crash_i64_local(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) { + %tmp2 = icmp eq i32 %tmp, 0 + %tmp3 = load i64, i64 addrspace(3)* %ptr0, align 8 + %tmp4 = load i64, i64 addrspace(3)* %ptr1, align 8 + %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4 + store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8 + ret void +} + +; The transform will break addressing mode matching, so unclear it +; would be good to do + +; GCN-LABEL: {{^}}select_ptr_crash_i64_local_offsets: +; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:128 +; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}} offset:512 +; GCN: v_cndmask_b32 +; GCN: v_cndmask_b32 +define amdgpu_kernel void @select_ptr_crash_i64_local_offsets(i32 %tmp, i64 addrspace(3)* %ptr0, i64 addrspace(3)* %ptr1, i64 addrspace(1)* %ptr2) { + %tmp2 = icmp eq i32 %tmp, 0 + %gep0 = getelementptr inbounds i64, i64 addrspace(3)* %ptr0, i64 16 + %gep1 = getelementptr inbounds i64, i64 addrspace(3)* %ptr1, i64 64 + %tmp3 = load i64, i64 addrspace(3)* %gep0, align 8 + 
 %tmp4 = load i64, i64 addrspace(3)* %gep1, align 8 + %tmp5 = select i1 %tmp2, i64 %tmp3, i64 %tmp4 + store i64 %tmp5, i64 addrspace(1)* %ptr2, align 8 + ret void +}

