diff options
author | Sanjay Patel <spatel@rotateright.com> | 2017-05-10 13:56:52 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2017-05-10 13:56:52 +0000 |
commit | 2e069f250a156cfaed3b334ade37a39814bd6bef (patch) | |
tree | b5e0bb8bfd6a8d0265b5d1a33eee74e2c035a563 /llvm/test | |
parent | b7fb267ed3033fff977b40553714defa1a0b57d1 (diff) | |
download | bcm5719-llvm-2e069f250a156cfaed3b334ade37a39814bd6bef.tar.gz bcm5719-llvm-2e069f250a156cfaed3b334ade37a39814bd6bef.zip |
[InstCombine] add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
This is another step towards favoring 'not' ops over random 'xor' in IR:
https://bugs.llvm.org/show_bug.cgi?id=32706
This transformation may have occurred in longer IR sequences using computeKnownBits,
but that could be much more expensive to calculate.
As the scalar result shows, we do not currently favor 'not' in all cases. The 'not'
created by the transform is transformed again (unnecessarily). Vectors don't have
this problem because vectors are (wrongly) excluded from several other combines.
llvm-svn: 302659
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/Transforms/InstCombine/add.ll | 10 |
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 955ffaaeecd..5f7101e8fec 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -31,9 +31,8 @@ define <2 x i32> @select_0_or_1_from_bool_vec(<2 x i1> %x) {
 define i32 @flip_and_mask(i32 %x) {
 ; CHECK-LABEL: @flip_and_mask(
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 %x, 31
-; CHECK-NEXT: [[SHR:%.*]] = ashr exact i32 [[SHL]], 31
-; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[SHR]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1
+; CHECK-NEXT: [[INC:%.*]] = xor i32 [[TMP1]], 1
 ; CHECK-NEXT: ret i32 [[INC]]
 ;
 %shl = shl i32 %x, 31
@@ -44,9 +43,8 @@ define i32 @flip_and_mask(i32 %x) {
 define <2 x i8> @flip_and_mask_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @flip_and_mask_splat(
-; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> %x, <i8 7, i8 7>
-; CHECK-NEXT: [[SHR:%.*]] = ashr exact <2 x i8> [[SHL]], <i8 7, i8 7>
-; CHECK-NEXT: [[INC:%.*]] = add nsw <2 x i8> [[SHR]], <i8 1, i8 1>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> %x, <i8 1, i8 1>
+; CHECK-NEXT: [[INC:%.*]] = and <2 x i8> [[TMP1]], <i8 1, i8 1>
 ; CHECK-NEXT: ret <2 x i8> [[INC]]
 ;
 %shl = shl <2 x i8> %x, <i8 7, i8 7>