author    | Roman Lebedev <lebedev.ri@gmail.com> | 2019-09-17 19:32:11 +0000
committer | Roman Lebedev <lebedev.ri@gmail.com> | 2019-09-17 19:32:11 +0000
commit    | bed6e08e23b36dc5ac38d675c2a705c2f41a4a94 (patch)
tree      | c114a6161b0319809e4fc281d73efae0314be1a0
parent    | 6476d7cf0b2bc509e88a00c541f475b7676c4141 (diff)
[NFC][InstCombine] More tests for "Dropping pointless masking before left shift" (PR42563)
While we already fold that pattern if the sum of shift amounts is not
smaller than the bitwidth, there's a painfully obvious generalization:
https://rise4fun.com/Alive/F5R
I.e. the "sub of shift amounts" tells us how many bits will be left
in the output. If it's less than bitwidth, we simply need to
apply a mask, which is constant.
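
As a quick sanity check of that claim, the small standalone C program below
(illustration only, not part of this patch; the helper names mask_then_shift
and shift_then_mask are made up) brute-forces variant (a) of the pattern and
confirms that, whenever maskNbits + shiftNbits is smaller than the bitwidth,
masking first and then shifting gives the same value as shifting first and
applying the constant mask ~(-1 << (maskNbits+shiftNbits)).

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* (x & ((1 << maskNbits) - 1)) << shiftNbits -- the pattern in the tests. */
  static uint32_t mask_then_shift(uint32_t x, unsigned maskNbits, unsigned shiftNbits) {
    uint32_t lowmask = (UINT32_C(1) << maskNbits) - 1; /* keep the low maskNbits bits */
    return (x & lowmask) << shiftNbits;
  }

  /* (x << shiftNbits) & ~(-1 << (maskNbits + shiftNbits)) -- the generalized fold;
   * the mask is a constant once maskNbits + shiftNbits is fixed. */
  static uint32_t shift_then_mask(uint32_t x, unsigned maskNbits, unsigned shiftNbits) {
    uint32_t himask = ~(UINT32_MAX << (maskNbits + shiftNbits));
    return (x << shiftNbits) & himask;
  }

  int main(void) {
    /* Only the interesting case: sum of shift amounts strictly below the bitwidth. */
    for (unsigned maskNbits = 1; maskNbits < 32; ++maskNbits)
      for (unsigned shiftNbits = 0; maskNbits + shiftNbits < 32; ++shiftNbits)
        for (uint32_t x = 0; x < 4096; ++x)
          assert(mask_then_shift(x, maskNbits, shiftNbits) ==
                 shift_then_mask(x, maskNbits, shiftNbits));
    puts("identity holds on all sampled inputs");
    return 0;
  }

The new tests below are intentionally NFC: they only record the IR that
-instcombine currently emits for this pattern, so a follow-up change that
implements the generalized fold shows up as a plain test-expectation update.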
llvm-svn: 372170
4 files changed, 260 insertions, 50 deletions
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
new file mode 100644
index 00000000000..6a944cf9509
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-a.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; left-shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   a) (x & ((1 << maskNbits) - 1)) << shiftNbits
+; simplify to:
+;   (x << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 1, %t0 ; shifting by nbits-1
+  %t2 = add i32 %t1, -1
+  %t3 = and i32 %t2, %x
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %t0
+  %t2 = add <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = shl <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %t0
+  %t2 = add <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = add i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 1, %t0 ; shifting by nbits-1
+  %t2 = add i32 %t1, -1
+  %t3 = and i32 %t2, %x ; this mask must be one-use.
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t3) ; BAD
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
new file mode 100644
index 00000000000..784de8bc718
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; If we have some pattern that leaves only some low bits set, and then performs
+; left-shift of those bits, we can combine those two shifts into a shift+mask.
+
+; There are many variants to this pattern:
+;   b) (x & (~(-1 << maskNbits))) << shiftNbits
+; simplify to:
+;   (x << shiftNbits) & (~(-1 << (maskNbits+shiftNbits)))
+
+; Simple tests.
+
+declare void @use32(i32)
+
+define i32 @t0_basic(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @t0_basic(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 -1, %t0 ; shifting by nbits-1
+  %t2 = xor i32 %t1, -1
+  %t3 = and i32 %t2, %x
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
+
+; Vectors
+
+declare void @use8xi32(<8 x i32>)
+
+define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t1_vec_splat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t1 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %t0
+  %t2 = xor <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
+; CHECK-LABEL: @t2_vec_nonsplat(
+; CHECK-NEXT:    [[T0:%.*]] = add <8 x i32> [[NBITS:%.*]], <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor <8 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
+; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl <8 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    ret <8 x i32> [[T5]]
+;
+  %t0 = add <8 x i32> %nbits, <i32 -33, i32 -32, i32 -31, i32 -1, i32 0, i32 1, i32 31, i32 32>
+  %t1 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %t0
+  %t2 = xor <8 x i32> %t1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %t3 = and <8 x i32> %t2, %x
+  %t4 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %nbits
+  call void @use8xi32(<8 x i32> %t0)
+  call void @use8xi32(<8 x i32> %t1)
+  call void @use8xi32(<8 x i32> %t2)
+  call void @use8xi32(<8 x i32> %t4)
+  %t5 = shl <8 x i32> %t3, %t4
+  ret <8 x i32> %t5
+}
+
+; Extra uses.
+
+define i32 @n3_extrause(i32 %x, i32 %nbits) {
+; CHECK-LABEL: @n3_extrause(
+; CHECK-NEXT:    [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
+; CHECK-NEXT:    [[T1:%.*]] = shl i32 -1, [[T0]]
+; CHECK-NEXT:    [[T2:%.*]] = xor i32 [[T1]], -1
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT:    [[T4:%.*]] = sub i32 32, [[NBITS]]
+; CHECK-NEXT:    call void @use32(i32 [[T0]])
+; CHECK-NEXT:    call void @use32(i32 [[T1]])
+; CHECK-NEXT:    call void @use32(i32 [[T2]])
+; CHECK-NEXT:    call void @use32(i32 [[T3]])
+; CHECK-NEXT:    call void @use32(i32 [[T4]])
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    ret i32 [[T5]]
+;
+  %t0 = add i32 %nbits, -1
+  %t1 = shl i32 -1, %t0 ; shifting by nbits-1
+  %t2 = xor i32 %t1, -1
+  %t3 = and i32 %t2, %x ; this mask must be one-use.
+  %t4 = sub i32 32, %nbits
+  call void @use32(i32 %t0)
+  call void @use32(i32 %t1)
+  call void @use32(i32 %t2)
+  call void @use32(i32 %t3) ; BAD
+  call void @use32(i32 %t4)
+  %t5 = shl i32 %t3, %t4
+  ret i32 %t5
+}
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll
index 95527286a38..e7145fcccd7 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-a.ll
@@ -407,28 +407,3 @@ define i32 @n13_not_minus_one(i32 %x, i32 %nbits) {
   %t4 = shl i32 %t2, %t3
   ret i32 %t4
 }
-
-define i32 @n14_insifficient_sum(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n14_insifficient_sum(
-; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = add nsw i32 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T3:%.*]] = sub i32 31, [[NBITS]]
-; CHECK-NEXT:    call void @use32(i32 [[T0]])
-; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
-; CHECK-NEXT:    ret i32 [[T4]]
-;
-  %t0 = shl i32 1, %nbits
-  %t1 = add nsw i32 %t0, -1
-  %t2 = and i32 %t1, %x
-  %t3 = sub i32 31, %nbits ; summary shift amount is less than 32
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  call void @use32(i32 %t3)
-  %t4 = shl i32 %t2, %t3
-  ret i32 %t4
-}
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
index 347010dd106..94e786d38b0 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
@@ -382,28 +382,3 @@ define i32 @n12_not_minus_one(i32 %x, i32 %nbits) {
   %t4 = shl i32 %t2, %t3
   ret i32 %t4
 }
-
-define i32 @n13_insufficient_sum(i32 %x, i32 %nbits) {
-; CHECK-LABEL: @n13_insufficient_sum(
-; CHECK-NEXT:    [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[T3:%.*]] = sub i32 31, [[NBITS]]
-; CHECK-NEXT:    call void @use32(i32 [[T0]])
-; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
-; CHECK-NEXT:    ret i32 [[T4]]
-;
-  %t0 = shl i32 -1, %nbits
-  %t1 = xor i32 %t0, -1
-  %t2 = and i32 %t1, %x
-  %t3 = sub i32 31, %nbits ; summary shift amount is less than 32
-  call void @use32(i32 %t0)
-  call void @use32(i32 %t1)
-  call void @use32(i32 %t2)
-  call void @use32(i32 %t3)
-  %t4 = shl i32 %t2, %t3
-  ret i32 %t4
-}