| author | Sanjay Patel <spatel@rotateright.com> | 2018-05-01 20:53:44 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2018-05-01 20:53:44 +0000 |
| commit | 2b36e95d451368a3839953135d44af4eca0e3ff4 (patch) | |
| tree | c11216ea4b0ec0df1caa709f231dbe8cf66ac40a /llvm/test/Transforms/PhaseOrdering | |
| parent | 78536664386887a26157fa90fceb003edbe80290 (diff) | |
| download | bcm5719-llvm-2b36e95d451368a3839953135d44af4eca0e3ff4.tar.gz bcm5719-llvm-2b36e95d451368a3839953135d44af4eca0e3ff4.zip | |
[PhaseOrdering] add tests for bittest patterns from bitfields; NFC
As mentioned in D45986, there's a potential ordering dependency
between instcombine and aggressive-instcombine for detecting these,
so I'm adding a few tests to confirm that the expected folds occur
using -O3 (because aggressive-instcombine only runs at -O3 currently).
llvm-svn: 331308
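
For context, here is a minimal C sketch of the kind of bitfield pattern these tests exercise. The struct and function names below are illustrative assumptions, not the actual reproducer from PR37098 or code from this commit:

```c
/* Illustrative sketch only: four single-bit bitfields queried one bit at
 * a time. Clang lowers each field access to trunc/lshr/and/zext ops, and
 * after SROA the IR looks like the tests in this patch. The desired fold
 * is a single mask-and-compare, e.g. (bits & 15) == 0 for "all clear". */
struct Flags {
  unsigned a : 1;
  unsigned b : 1;
  unsigned c : 1;
  unsigned d : 1;
  unsigned rest : 28;
};

int allclear(struct Flags f) {
  return (f.a | f.b | f.c | f.d) == 0; /* ideally (bits & 15) == 0 */
}

int anyset(struct Flags f) {
  return (f.a | f.b | f.c | f.d) != 0; /* ideally (bits & 15) != 0 */
}
```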
Diffstat (limited to 'llvm/test/Transforms/PhaseOrdering')
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll | 152 |
1 file changed, 152 insertions, 0 deletions
diff --git a/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll b/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll
new file mode 100644
index 00000000000..59bb9457734
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll
@@ -0,0 +1,152 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -O3 -S < %s | FileCheck %s
+; RUN: opt -passes='default<O3>' -S < %s | FileCheck %s
+
+; These are tests that check for set/clear bits in a bitfield based on PR37098:
+; https://bugs.llvm.org/show_bug.cgi?id=37098
+;
+; The initial IR from clang has been transformed by SROA, but no other passes
+; have run yet. In all cases, we should reduce these to a mask and compare
+; instead of shift/cast/logic ops.
+;
+; Currently, this happens mostly through a combination of instcombine and
+; aggressive-instcombine. If pass ordering changes, we may have to adjust
+; the pattern matching in 1 or both of those passes.
+
+; Legal i32 is required to allow casting transforms that eliminate the zexts.
+target datalayout = "n32"
+
+define i32 @allclear(i32 %a) {
+; CHECK-LABEL: @allclear(
+; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
+; CHECK-NEXT:    [[BF_CLEAR1:%.*]] = or i32 [[BF_LSHR]], [[A]]
+; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
+; CHECK-NEXT:    [[OR2:%.*]] = or i32 [[BF_CLEAR1]], [[BF_LSHR5]]
+; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
+; CHECK-NEXT:    [[OR83:%.*]] = or i32 [[OR2]], [[BF_LSHR10]]
+; CHECK-NEXT:    [[OR13:%.*]] = and i32 [[OR83]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[OR13]], 1
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %a.sroa.0.0.trunc = trunc i32 %a to i8
+  %a.sroa.5.0.shift = lshr i32 %a, 8
+  %bf.clear = and i8 %a.sroa.0.0.trunc, 1
+  %bf.cast = zext i8 %bf.clear to i32
+  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
+  %bf.clear2 = and i8 %bf.lshr, 1
+  %bf.cast3 = zext i8 %bf.clear2 to i32
+  %or = or i32 %bf.cast, %bf.cast3
+  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
+  %bf.clear6 = and i8 %bf.lshr5, 1
+  %bf.cast7 = zext i8 %bf.clear6 to i32
+  %or8 = or i32 %or, %bf.cast7
+  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
+  %bf.clear11 = and i8 %bf.lshr10, 1
+  %bf.cast12 = zext i8 %bf.clear11 to i32
+  %or13 = or i32 %or8, %bf.cast12
+  %cmp = icmp eq i32 %or13, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @anyset(i32 %a) {
+; CHECK-LABEL: @anyset(
+; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
+; CHECK-NEXT:    [[BF_CLEAR1:%.*]] = or i32 [[BF_LSHR]], [[A]]
+; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
+; CHECK-NEXT:    [[OR2:%.*]] = or i32 [[BF_CLEAR1]], [[BF_LSHR5]]
+; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
+; CHECK-NEXT:    [[OR83:%.*]] = or i32 [[OR2]], [[BF_LSHR10]]
+; CHECK-NEXT:    [[OR13:%.*]] = and i32 [[OR83]], 1
+; CHECK-NEXT:    ret i32 [[OR13]]
+;
+  %a.sroa.0.0.trunc = trunc i32 %a to i8
+  %a.sroa.5.0.shift = lshr i32 %a, 8
+  %bf.clear = and i8 %a.sroa.0.0.trunc, 1
+  %bf.cast = zext i8 %bf.clear to i32
+  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
+  %bf.clear2 = and i8 %bf.lshr, 1
+  %bf.cast3 = zext i8 %bf.clear2 to i32
+  %or = or i32 %bf.cast, %bf.cast3
+  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
+  %bf.clear6 = and i8 %bf.lshr5, 1
+  %bf.cast7 = zext i8 %bf.clear6 to i32
+  %or8 = or i32 %or, %bf.cast7
+  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
+  %bf.clear11 = and i8 %bf.lshr10, 1
+  %bf.cast12 = zext i8 %bf.clear11 to i32
+  %or13 = or i32 %or8, %bf.cast12
+  %cmp = icmp ne i32 %or13, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; FIXME: aggressive-instcombine does not match this yet.
+
+define i32 @allset(i32 %a) {
+; CHECK-LABEL: @allset(
+; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
+; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
+; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
+; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
+; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
+; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
+; CHECK-NEXT:    ret i32 [[AND13]]
+;
+  %a.sroa.0.0.trunc = trunc i32 %a to i8
+  %a.sroa.5.0.shift = lshr i32 %a, 8
+  %bf.clear = and i8 %a.sroa.0.0.trunc, 1
+  %bf.cast = zext i8 %bf.clear to i32
+  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
+  %bf.clear2 = and i8 %bf.lshr, 1
+  %bf.cast3 = zext i8 %bf.clear2 to i32
+  %and = and i32 %bf.cast, %bf.cast3
+  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
+  %bf.clear6 = and i8 %bf.lshr5, 1
+  %bf.cast7 = zext i8 %bf.clear6 to i32
+  %and8 = and i32 %and, %bf.cast7
+  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
+  %bf.clear11 = and i8 %bf.lshr10, 1
+  %bf.cast12 = zext i8 %bf.clear11 to i32
+  %and13 = and i32 %and8, %bf.cast12
+  %cmp = icmp ne i32 %and13, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; FIXME: aggressive-instcombine does not match this yet.
+
+define i32 @anyclear(i32 %a) {
+; CHECK-LABEL: @anyclear(
+; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
+; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
+; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
+; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
+; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
+; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[AND13]], 1
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %a.sroa.0.0.trunc = trunc i32 %a to i8
+  %a.sroa.5.0.shift = lshr i32 %a, 8
+  %bf.clear = and i8 %a.sroa.0.0.trunc, 1
+  %bf.cast = zext i8 %bf.clear to i32
+  %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1
+  %bf.clear2 = and i8 %bf.lshr, 1
+  %bf.cast3 = zext i8 %bf.clear2 to i32
+  %and = and i32 %bf.cast, %bf.cast3
+  %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2
+  %bf.clear6 = and i8 %bf.lshr5, 1
+  %bf.cast7 = zext i8 %bf.clear6 to i32
+  %and8 = and i32 %and, %bf.cast7
+  %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3
+  %bf.clear11 = and i8 %bf.lshr10, 1
+  %bf.cast12 = zext i8 %bf.clear11 to i32
+  %and13 = and i32 %and8, %bf.cast12
+  %cmp = icmp eq i32 %and13, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
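
The two FIXME functions cover the AND-of-bits variants that aggressive-instcombine does not yet match. A hypothetical C analogue (same illustrative bitfield layout as the sketch above, not code from this commit):

```c
/* Hypothetical analogues of the two FIXME cases. The ideal reductions
 * are noted in comments; the CHECK lines above show that the shift/AND
 * chain currently survives -O3 for these. */
struct Flags {
  unsigned a : 1, b : 1, c : 1, d : 1, rest : 28;
};

int allset(struct Flags f) {
  return (f.a & f.b & f.c & f.d) != 0; /* ideally (bits & 15) == 15 */
}

int anyclear(struct Flags f) {
  return (f.a & f.b & f.c & f.d) == 0; /* ideally (bits & 15) != 15 */
}
```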

