author:    Matt Arsenault <Matthew.Arsenault@amd.com>  2016-04-22 22:48:38 +0000
committer: Matt Arsenault <Matthew.Arsenault@amd.com>  2016-04-22 22:48:38 +0000
commit:    efa3fe14d14e427e56a19a11983f8f68246a8c9d
tree:      d421ad3e70f463c5d485eea5078ed7cc46b79fd7
parent:    784ec12a3c3d1de370c35ee527f7d5d57b13cd98
AMDGPU: Re-visit nodes in performAndCombine
This fixes test regressions that show up when i64 load/store legalization is switched to Promote.
llvm-svn: 267240
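
For context, the hunk below lands in the middle of a combine that rewrites an i64 AND as two i32 ANDs on the split halves of the operands. The following is a minimal sketch of that shape, assuming current LLVM headers; the function name `splitI64And` and the BITCAST/EXTRACT_VECTOR_ELT half-splitting are illustrative reconstructions of the surrounding (unshown) code, and only the two `DCI.AddToWorklist` calls are what this commit actually adds.

```cpp
// Illustrative sketch only: splitI64And is a made-up name, and the
// BITCAST/EXTRACT_VECTOR_ELT splitting is an assumption about the code
// around the hunk; the two DCI.AddToWorklist calls are the actual fix.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

static SDValue splitI64And(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc SL(N);

  // View each i64 operand as <2 x i32> and pull out the 32-bit halves.
  SDValue VecLHS = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
  SDValue VecRHS = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(1));
  SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  SDValue One  = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo    = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecLHS, Zero);
  SDValue Hi    = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecLHS, One);
  SDValue LoRHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecRHS, Zero);
  SDValue HiRHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecRHS, One);

  // The i64 AND becomes two independent i32 ANDs.
  SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
  SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);

  // The fix: push the extracted halves back onto the combiner worklist.
  // If one of the new ANDs folded away, revisiting Lo/Hi lets the
  // combiner simplify the rebuilt vector instead of stopping here.
  DCI.AddToWorklist(Lo.getNode());
  DCI.AddToWorklist(Hi.getNode());

  // Reassemble the two halves into an i64 result.
  SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
```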
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp   |  5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/and.ll                 | 15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll  |  6

3 files changed, 17 insertions, 9 deletions
```diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 92ed678ba8f..4878e5b7a22 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2229,6 +2229,11 @@ SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
     SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
     SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
 
+    // Re-visit the ands. It's possible we eliminated one of them and it could
+    // simplify the vector.
+    DCI.AddToWorklist(Lo.getNode());
+    DCI.AddToWorklist(Hi.getNode());
+
     SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
   }
diff --git a/llvm/test/CodeGen/AMDGPU/and.ll b/llvm/test/CodeGen/AMDGPU/and.ll
index 530b7f2d9d2..338747c413b 100644
--- a/llvm/test/CodeGen/AMDGPU/and.ll
+++ b/llvm/test/CodeGen/AMDGPU/and.ll
@@ -213,12 +213,14 @@ define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
 ; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
 ; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_load_dword [[B:s[0-9]+]]
 ; SI: s_load_dwordx2
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
+; SI: s_lshl_b32 [[A]], [[A]], 1
+; SI: s_lshl_b32 [[B]], [[B]], 1
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
+; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
 ; SI-NOT: and
 ; SI: buffer_store_dwordx2
 define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
@@ -336,9 +338,10 @@ define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 }
 
 ; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
-; SI: s_lshl_b64 s{{\[}}[[VALLO:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s[[VALLO]], 64
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
 ; SI-NOT: and
 ; SI: s_add_u32
 ; SI-NEXT: s_addc_u32
diff --git a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index 29dae1b40d2..f5ab732710a 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -299,9 +299,9 @@ define void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addr
 }
 
 ; GCN-LABEL: {{^}}and_not_mask_i64:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 20
-; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, v[[SHRLO]]
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 20, v[[VALLO]]
+; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, [[SHR]]
 ; GCN-DAG: v_mov_b32_e32 v[[SHRHI]], 0{{$}}
 ; GCN-NOT: v[[SHRLO]]
 ; GCN-NOT: v[[SHRHI]]
```
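The test updates show the payoff of revisiting the split halves: the shifts feeding the ANDs are narrowed along with the ANDs themselves, so `and.ll` now expects per-half `s_lshl_b32` + `s_and_b32` instead of a 64-bit `s_lshl_b64`, and `and_not_mask_i64` expects a `v_lshrrev_b32_e32` on the low dword rather than a full `v_lshr_b64`.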