diff options
| author | Roman Lebedev <lebedev.ri@gmail.com> | 2019-08-29 14:46:49 +0000 |
|---|---|---|
| committer | Roman Lebedev <lebedev.ri@gmail.com> | 2019-08-29 14:46:49 +0000 |
| commit | 05ef49515e87b3be28d1df535b8e3f10fbb25117 (patch) | |
| tree | 937aa5527829627f1224b651db35e533cc2e09df /llvm/test/Transforms/SimplifyCFG | |
| parent | 6220ce60de873c00bdf6162625e9c23d1ebecae6 (diff) | |
| download | bcm5719-llvm-05ef49515e87b3be28d1df535b8e3f10fbb25117.tar.gz bcm5719-llvm-05ef49515e87b3be28d1df535b8e3f10fbb25117.zip | |
[NFC][SimplifyCFG] 'Safely extract low bits' pattern will also benefit from -phi-node-folding-threshold=3
This is the naive implementation of x86 BZHI/BEXTR instruction:
it takes input and bit count, and extracts low nbits up to bit width.
I.e. unlike shift it does not have any UB when nbits >= bitwidth.
Which means we don't need a whole PHI here; a simple select will do.
And if it's a select, it should then be trivial to fix codegen
to select it to BEXTR/BZHI.
See https://bugs.llvm.org/show_bug.cgi?id=34704
llvm-svn: 370369
Diffstat (limited to 'llvm/test/Transforms/SimplifyCFG')
| -rw-r--r-- | llvm/test/Transforms/SimplifyCFG/safe-low-bit-extract.ll | 35 |
1 files changed, 35 insertions, 0 deletions
diff --git a/llvm/test/Transforms/SimplifyCFG/safe-low-bit-extract.ll b/llvm/test/Transforms/SimplifyCFG/safe-low-bit-extract.ll
new file mode 100644
index 00000000000..354cfff7ba7
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/safe-low-bit-extract.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; This is the naive implementation of x86 BZHI/BEXTR instruction:
+; it takes input and bit count, and extracts low nbits up to bit width.
+; I.e. unlike shift it does not have any UB when nbits >= bitwidth.
+; Which means we don't need a while PHI here, simple select will do.
+define i32 @extract_low_bits(i32 %input, i32 %nbits) {
+; CHECK-LABEL: @extract_low_bits(
+; CHECK-NEXT:  begin:
+; CHECK-NEXT:    [[SHOULD_MASK:%.*]] = icmp ult i32 [[NBITS:%.*]], 32
+; CHECK-NEXT:    br i1 [[SHOULD_MASK]], label [[PERFORM_MASKING:%.*]], label [[END:%.*]]
+; CHECK:       perform_masking:
+; CHECK-NEXT:    [[MASK_NOT:%.*]] = shl nsw i32 -1, [[NBITS]]
+; CHECK-NEXT:    [[MASK:%.*]] = xor i32 [[MASK_NOT]], -1
+; CHECK-NEXT:    [[MASKED:%.*]] = and i32 [[MASK]], [[INPUT:%.*]]
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       end:
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[MASKED]], [[PERFORM_MASKING]] ], [ [[INPUT]], [[BEGIN:%.*]] ]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+begin:
+  %should_mask = icmp ult i32 %nbits, 32
+  br i1 %should_mask, label %perform_masking, label %end
+
+perform_masking: ; preds = %begin
+  %mask.not = shl nsw i32 -1, %nbits
+  %mask = xor i32 %mask.not, -1
+  %masked = and i32 %mask, %input
+  br label %end
+
+end: ; preds = %perform_masking, %begin
+  %res = phi i32 [ %masked, %perform_masking ], [ %input, %begin ]
+  ret i32 %res
+}

