| author | David Majnemer <david.majnemer@gmail.com> | 2014-08-24 09:10:57 +0000 |
|---|---|---|
| committer | David Majnemer <david.majnemer@gmail.com> | 2014-08-24 09:10:57 +0000 |
| commit | 0ffccf7fb525a1534eb69fe32574c418dcab9746 (patch) | |
| tree | 91ad63dca6bda86a82b1dfe01a689412de4b254d /llvm/test/Transforms/InstCombine/icmp.ll | |
| parent | 3a1f4c77df96565ecdf47a07b63ca53e46848cf6 (diff) | |
InstCombine: Properly optimize or'ing bittests together
CFE, with -O3, would turn:
bool f(unsigned x) {
  bool a = x & 1;
  bool b = x & 2;
  return a | b;
}
into:
%1 = lshr i32 %x, 1
%2 = or i32 %1, %x
%3 = and i32 %2, 1
%4 = icmp ne i32 %3, 0
This sort of thing exposes a nasty pathology in GCC, ICC and LLVM.
Instead, what we would rather have is:
%1 = and i32 %x, 3
%2 = icmp ne i32 %1, 0
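The two forms agree because ((x >> 1) | x) & 1 collects bit 1 and bit 0 of x, which is exactly what x & 3 tests. A minimal standalone C sketch of that equivalence (illustrative only, not part of the patch; the helper names are made up):

#include <assert.h>

/* Illustrative only: the unoptimized form ((x >> 1) | x) & 1 and the
   preferred form (x & 3) != 0 both test whether bit 0 or bit 1 of x is set. */
static int before_cst(unsigned x) { return (((x >> 1) | x) & 1) != 0; }
static int after_cst(unsigned x)  { return (x & 3) != 0; }

int main(void) {
  for (unsigned x = 0; x < (1u << 16); ++x)
    assert(before_cst(x) == after_cst(x));
  return 0;
}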
Things get a bit more interesting in the following case:
%1 = lshr i32 %x, %y
%2 = or i32 %1, %x
%3 = and i32 %2, 1
%4 = icmp ne i32 %3, 0
Replacing it with the following sequence is better:
%1 = shl nuw i32 1, %y
%2 = or i32 %1, 1
%3 = and i32 %2, %x
%4 = icmp ne i32 %3, 0
This sequence is preferable because %1 no longer involves %x, so it can
potentially be hoisted out of a loop if it is invariant. In the
non-constant case we only perform the transform when we know it won't
increase register pressure; a sketch of the equivalence follows.
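The same kind of spot check works for the variable-shift form (again an illustrative C sketch with made-up names, assuming y < 32 so the shifts are well defined). Note that the mask (1u << y) | 1 does not mention x, which is what makes the hoisting argument above possible:

#include <assert.h>

/* Illustrative only: ((x >> y) | x) & 1 tests "bit y of x or bit 0 of x",
   and so does ((1u << y) | 1) & x; the mask depends only on y. */
static int before_var(unsigned x, unsigned y) {
  return (((x >> y) | x) & 1) != 0;
}
static int after_var(unsigned x, unsigned y) {
  return (((1u << y) | 1) & x) != 0;
}

int main(void) {
  for (unsigned y = 0; y < 32; ++y)
    for (unsigned x = 0; x < (1u << 12); ++x)
      assert(before_var(x, y) == after_var(x, y));
  return 0;
}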
llvm-svn: 216343
Diffstat (limited to 'llvm/test/Transforms/InstCombine/icmp.ll')
| -rw-r--r-- | llvm/test/Transforms/InstCombine/icmp.ll | 26 |
1 file changed, 26 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
index 26e144f93a5..39438e94832 100644
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -1424,3 +1424,29 @@ define i1 @icmp_neg_cst_slt(i32 %a) {
   %2 = icmp slt i32 %1, -10
   ret i1 %2
 }
+
+; CHECK-LABEL: @icmp_and_or_lshr
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl nuw i32 1, %y
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[SHL]], 1
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[OR]], %x
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_and_or_lshr(i32 %x, i32 %y) {
+  %shf = lshr i32 %x, %y
+  %or = or i32 %shf, %x
+  %and = and i32 %or, 1
+  %ret = icmp ne i32 %and, 0
+  ret i1 %ret
+}
+
+; CHECK-LABEL: @icmp_and_or_lshr_cst
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 3
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_and_or_lshr_cst(i32 %x) {
+  %shf = lshr i32 %x, 1
+  %or = or i32 %shf, %x
+  %and = and i32 %or, 1
+  %ret = icmp ne i32 %and, 0
+  ret i1 %ret
+}

