diff options
author | Sanjay Patel <spatel@rotateright.com> | 2017-05-16 21:51:04 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2017-05-16 21:51:04 +0000 |
commit | 877364ff99ba0aba504e1a2a518ac1efe9523505 (patch) | |
tree | 4156cae8ac3ac72f71dc77e3d8370359efe835ea /llvm/test | |
parent | cc19560253cfba353d118476e344252a1aeb8f7c (diff) | |
download | bcm5719-llvm-877364ff99ba0aba504e1a2a518ac1efe9523505.tar.gz bcm5719-llvm-877364ff99ba0aba504e1a2a518ac1efe9523505.zip |
[InstSimplify] add folds for constant mask of value shifted by constant
We would eventually catch these via demanded bits and computing known bits in InstCombine,
but I think it's better to handle the simple cases as soon as possible as a matter of efficiency.
This fold allows further simplifications based on distributed ops transforms. eg:
%a = lshr i8 %x, 7
%b = or i8 %a, 2
%c = and i8 %b, 1
InstSimplify can directly fold this now:
%a = lshr i8 %x, 7
Differential Revision: https://reviews.llvm.org/D33221
llvm-svn: 303213
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/Transforms/InstSimplify/AndOrXor.ll | 12 |
1 file changed, 4 insertions, 8 deletions
diff --git a/llvm/test/Transforms/InstSimplify/AndOrXor.ll b/llvm/test/Transforms/InstSimplify/AndOrXor.ll
index 427ea655fcb..a9b4e4e5cfc 100644
--- a/llvm/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/llvm/test/Transforms/InstSimplify/AndOrXor.ll
@@ -738,8 +738,7 @@ define i32 @test54(i32 %a, i32 %b) {
 define i8 @lshr_perfect_mask(i8 %x) {
 ; CHECK-LABEL: @lshr_perfect_mask(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i8 %x, 5
-; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SH]], 7
-; CHECK-NEXT:    ret i8 [[MASK]]
+; CHECK-NEXT:    ret i8 [[SH]]
 ;
   %sh = lshr i8 %x, 5
   %mask = and i8 %sh, 7 ; 0x07
@@ -749,8 +748,7 @@ define i8 @lshr_perfect_mask(i8 %x) {
 define <2 x i8> @lshr_oversized_mask_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_oversized_mask_splat(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> %x, <i8 5, i8 5>
-; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -121, i8 -121>
-; CHECK-NEXT:    ret <2 x i8> [[MASK]]
+; CHECK-NEXT:    ret <2 x i8> [[SH]]
 ;
   %sh = lshr <2 x i8> %x, <i8 5, i8 5>
   %mask = and <2 x i8> %sh, <i8 135, i8 135> ; 0x87
@@ -771,8 +769,7 @@ define i8 @lshr_undersized_mask(i8 %x) {
 define <2 x i8> @shl_perfect_mask_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @shl_perfect_mask_splat(
 ; CHECK-NEXT:    [[SH:%.*]] = shl <2 x i8> %x, <i8 6, i8 6>
-; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -64, i8 -64>
-; CHECK-NEXT:    ret <2 x i8> [[MASK]]
+; CHECK-NEXT:    ret <2 x i8> [[SH]]
 ;
   %sh = shl <2 x i8> %x, <i8 6, i8 6>
   %mask = and <2 x i8> %sh, <i8 192, i8 192> ; 0xC0
@@ -782,8 +779,7 @@ define <2 x i8> @shl_perfect_mask_splat(<2 x i8> %x) {
 define i8 @shl_oversized_mask(i8 %x) {
 ; CHECK-LABEL: @shl_oversized_mask(
 ; CHECK-NEXT:    [[SH:%.*]] = shl i8 %x, 6
-; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SH]], -61
-; CHECK-NEXT:    ret i8 [[MASK]]
+; CHECK-NEXT:    ret i8 [[SH]]
 ;
   %sh = shl i8 %x, 6
   %mask = and i8 %sh, 195 ; 0xC3