diff options
author | Roman Lebedev <lebedev.ri@gmail.com> | 2018-06-15 09:56:52 +0000 |
---|---|---|
committer | Roman Lebedev <lebedev.ri@gmail.com> | 2018-06-15 09:56:52 +0000 |
commit | 84c11aed10eb809aa133da350071d82924bbdb23 (patch) | |
tree | 483f4e5a0d5771435787a6637da92505329f45c2 | |
parent | dec562c84956d4816b168b1a7bfe7b2703eaaf86 (diff) | |
download | bcm5719-llvm-84c11aed10eb809aa133da350071d82924bbdb23.tar.gz bcm5719-llvm-84c11aed10eb809aa133da350071d82924bbdb23.zip |
[InstCombine] Recommit: Fold (x << y) >> y -> x & (-1 >> y)
Summary:
We already do it for splat constants, but not just values.
Also, undef cases are mostly non-functional.
The original commit was reverted because
it broke tests for the AMDGPU backend, which I didn't check.
Now, the backend was updated to recognize these new
patterns, so we are good.
https://bugs.llvm.org/show_bug.cgi?id=37603
https://rise4fun.com/Alive/cplX
Reviewers: spatel, craig.topper, mareko, bogner, rampitec, nhaehnle, arsenm
Reviewed By: spatel, rampitec, nhaehnle
Subscribers: wdng, nhaehnle, llvm-commits
Differential Revision: https://reviews.llvm.org/D47980
llvm-svn: 334818
3 files changed, 22 insertions, 14 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp index 9c6bc1a1044..c69b305dfa7 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -811,6 +811,15 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) { return &I; } } + + // Transform (x << y) >> y to x & (-1 >> y) + Value *X; + if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) { + Constant *AllOnes = ConstantInt::getAllOnesValue(Ty); + Value *Mask = Builder.CreateLShr(AllOnes, Op1); + return BinaryOperator::CreateAnd(Mask, X); + } + return nullptr; } diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll index e66e054c780..5ef3f5db2f7 100644 --- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll @@ -895,8 +895,8 @@ define i32 @ubfe_offset_33(i32 %src, i32 %width) { ; CHECK-LABEL: @ubfe_offset_0( ; CHECK-NEXT: %1 = sub i32 32, %width -; CHECK-NEXT: %2 = shl i32 %src, %1 -; CHECK-NEXT: %bfe = lshr i32 %2, %1 +; CHECK-NEXT: %2 = lshr i32 -1, %1 +; CHECK-NEXT: %bfe = and i32 %2, %src ; CHECK-NEXT: ret i32 %bfe define i32 @ubfe_offset_0(i32 %src, i32 %width) { %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 0, i32 %width) @@ -905,8 +905,8 @@ define i32 @ubfe_offset_0(i32 %src, i32 %width) { ; CHECK-LABEL: @ubfe_offset_32( ; CHECK-NEXT: %1 = sub i32 32, %width -; CHECK-NEXT: %2 = shl i32 %src, %1 -; CHECK-NEXT: %bfe = lshr i32 %2, %1 +; CHECK-NEXT: %2 = lshr i32 -1, %1 +; CHECK-NEXT: %bfe = and i32 %2, %src ; CHECK-NEXT: ret i32 %bfe define i32 @ubfe_offset_32(i32 %src, i32 %width) { %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 32, i32 %width) @@ -915,8 +915,8 @@ define i32 @ubfe_offset_32(i32 %src, i32 %width) { ; CHECK-LABEL: @ubfe_offset_31( ; CHECK-NEXT: %1 = sub i32 32, %width -; 
CHECK-NEXT: %2 = shl i32 %src, %1 -; CHECK-NEXT: %bfe = lshr i32 %2, %1 +; CHECK-NEXT: %2 = lshr i32 -1, %1 +; CHECK-NEXT: %bfe = and i32 %2, %src ; CHECK-NEXT: ret i32 %bfe define i32 @ubfe_offset_31(i32 %src, i32 %width) { %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 32, i32 %width) @@ -1002,8 +1002,8 @@ define i64 @ubfe_offset_33_width_4_i64(i64 %src) { ; CHECK-LABEL: @ubfe_offset_0_i64( ; CHECK-NEXT: %1 = sub i32 64, %width ; CHECK-NEXT: %2 = zext i32 %1 to i64 -; CHECK-NEXT: %3 = shl i64 %src, %2 -; CHECK-NEXT: %bfe = lshr i64 %3, %2 +; CHECK-NEXT: %3 = lshr i64 -1, %2 +; CHECK-NEXT: %bfe = and i64 %3, %src ; CHECK-NEXT: ret i64 %bfe define i64 @ubfe_offset_0_i64(i64 %src, i32 %width) { %bfe = call i64 @llvm.amdgcn.ubfe.i64(i64 %src, i32 0, i32 %width) diff --git a/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll b/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll index 81af5a7979e..9de0b337de2 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll @@ -14,8 +14,8 @@ define i32 @positive_samevar(i32 %x, i32 %y) { ; CHECK-LABEL: @positive_samevar( -; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = lshr i32 [[TMP0]], [[Y]] +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 -1, [[Y:%.*]] +; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[RET]] ; %tmp0 = shl i32 %x, %y @@ -124,8 +124,8 @@ define i32 @positive_biggerLshr_shlnuw_lshrexact(i32 %x) { define <2 x i32> @positive_samevar_vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @positive_samevar_vec( -; CHECK-NEXT: [[TMP0:%.*]] = shl <2 x i32> [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = lshr <2 x i32> [[TMP0]], [[Y]] +; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> <i32 -1, i32 -1>, [[Y:%.*]] +; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; %tmp0 = shl <2 x i32> 
%x, %y @@ -171,8 +171,7 @@ define <3 x i32> @positive_sameconst_vec_undef1(<3 x i32> %x) { define <3 x i32> @positive_sameconst_vec_undef2(<3 x i32> %x) { ; CHECK-LABEL: @positive_sameconst_vec_undef2( -; CHECK-NEXT: [[TMP0:%.*]] = shl <3 x i32> [[X:%.*]], <i32 5, i32 undef, i32 5> -; CHECK-NEXT: [[RET:%.*]] = lshr <3 x i32> [[TMP0]], <i32 5, i32 undef, i32 5> +; CHECK-NEXT: [[RET:%.*]] = and <3 x i32> [[X:%.*]], <i32 134217727, i32 undef, i32 134217727> ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %tmp0 = shl <3 x i32> %x, <i32 5, i32 undef, i32 5> |