diff options
| author | Nirav Dave <niravd@google.com> | 2017-07-20 13:48:17 +0000 |
|---|---|---|
| committer | Nirav Dave <niravd@google.com> | 2017-07-20 13:48:17 +0000 |
| commit | 77cc6f23b99f7693f1552470ecc0081a712c7987 (patch) | |
| tree | 345660e6542a34247110d15eb6b2fdbefe4fb09a /llvm | |
| parent | d9e9e44baa859d2335ca180cb879f86c2ea886d6 (diff) | |
| download | bcm5719-llvm-77cc6f23b99f7693f1552470ecc0081a712c7987.tar.gz bcm5719-llvm-77cc6f23b99f7693f1552470ecc0081a712c7987.zip | |
[DAG] Optimize away degenerate INSERT_VECTOR_ELT nodes.
Summary:
Add a missing fold that removes a redundant reinsertion of an element just extracted from the same vector, i.e.:
(insert_vector_elt x (extract_vector_elt x idx) idx) -> x
Reviewers: spatel, RKSimon, efriedma
Reviewed By: RKSimon
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D35563
llvm-svn: 308617
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-ashr-256.ll | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-lshr-256.ll | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shift-shl-256.ll | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll | 2 |
5 files changed, 6 insertions, 14 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 535b272f793..50cad23a00a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -13572,6 +13572,12 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { EVT VT = InVec.getValueType(); + // Remove redundant insertions: + // (insert_vector_elt x (extract_vector_elt x idx) idx) -> x + if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT && + InVec == InVal->getOperand(0) && EltNo == InVal->getOperand(1)) + return InVec; + // Check that we know which element is being inserted if (!isa<ConstantSDNode>(EltNo)) return SDValue(); diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll index 6bb57d8f5f7..d21a29770cf 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll @@ -708,8 +708,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX1-LABEL: splatvar_shift_v4i64: ; X32-AVX1: # BB#0: -; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648] ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 @@ -724,8 +722,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX2-LABEL: splatvar_shift_v4i64: ; X32-AVX2: # BB#0: -; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648] ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2 ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll index 46be36b76e9..86bd6f0bc21 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll 
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll @@ -562,8 +562,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX1-LABEL: splatvar_shift_v4i64: ; X32-AVX1: # BB#0: -; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2 ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 @@ -572,8 +570,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX2-LABEL: splatvar_shift_v4i64: ; X32-AVX2: # BB#0: -; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 ; X32-AVX2-NEXT: retl %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll index 4a134f440a7..d4f1a8b61f0 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll @@ -506,8 +506,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX1-LABEL: splatvar_shift_v4i64: ; X32-AVX1: # BB#0: -; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; X32-AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2 ; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 @@ -516,8 +514,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind { ; ; X32-AVX2-LABEL: splatvar_shift_v4i64: ; X32-AVX2: # BB#0: -; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax -; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; X32-AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0 ; X32-AVX2-NEXT: retl %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll index 
d0b7e4eb205..542f30dc0d2 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v8.ll @@ -2735,8 +2735,6 @@ define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) { ; AVX512F-32-LABEL: test_v8i64_2_5: ; AVX512F-32: # BB#0: ; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1 -; AVX512F-32-NEXT: vpextrd $1, %xmm1, %eax -; AVX512F-32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 ; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0 ; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax ; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1 |

