| field | value | date |
|---|---|---|
| author | Simon Tatham <simon.tatham@arm.com> | 2019-12-02 16:18:34 +0000 |
| committer | Simon Tatham <simon.tatham@arm.com> | 2019-12-02 16:20:30 +0000 |
| commit | 01aefae4a173c32a0235feb9600beffbcd0308b4 (patch) | |
| tree | 5c598fdc8af5b499f056a8e20a70a3febfeb3538 | |
| parent | effcdc3a82f2a32829170e7f7a2ff3d7853b612d (diff) | |
[ARM,MVE] Add an InstCombine rule permitting VPNOT.
Summary:
If a user writing C code using the ACLE MVE intrinsics generates a
predicate and then complements it, then the resulting IR will use the
`pred_v2i` IR intrinsic to turn some `<n x i1>` vector into a 16-bit
integer; complement that integer; and convert back. This will generate
machine code that moves the predicate out of the `P0` register,
complements it in an integer GPR, and moves it back in again.
This InstCombine rule replaces `i2v(~v2i(x))` with a direct complement of
the original predicate vector, which we can already instruction-select as
VPNOT, the instruction that complements P0 in place.
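
As an illustration of the pattern being targeted, here is a minimal ACLE MVE
sketch. It is not part of the patch and the function name is invented, though
`vctp16q`, `vaddq_m_s16` and `mve_pred16_t` are the standard `arm_mve.h` names.
It mirrors the `test_vpnot` test added below: the predicate is complemented as
a 16-bit integer, which is exactly the `v2i`/`xor`/`i2v` round trip the new
rule folds away.

```c
/* Hypothetical example, not from the patch: user code in roughly this shape
 * produces the pred_v2i / xor / pred_i2v sequence described above.
 * Requires an MVE-enabled target, e.g. -mcpu=cortex-m55. */
#include <stdint.h>
#include <arm_mve.h>

int16x8_t add_under_inverted_pred(int16x8_t fallback, int16x8_t a,
                                  int16x8_t b, uint32_t n) {
  mve_pred16_t p = vctp16q(n);     /* predicate: first n lanes active */
  mve_pred16_t not_p = ~p;         /* complemented as a 16-bit integer */
  /* Predicated add: lanes where not_p is clear come from fallback. */
  return vaddq_m_s16(fallback, a, b, not_p);
}
```

After `-instcombine`, the integer round trip collapses to an `xor` of the
predicate vector with an all-true splat, which the backend then selects as a
single VPNOT.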
Reviewers: ostannard, MarkMurrayARM, dmgreen
Reviewed By: dmgreen
Subscribers: kristof.beyls, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D70484
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 13 |
| -rw-r--r-- | llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll | 22 |
| -rw-r--r-- | llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll | 94 |
3 files changed, 129 insertions, 0 deletions
```diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 536e84b4a35..157885e0310 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3329,6 +3329,19 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
         II->getType() == ArgArg->getType())
       return replaceInstUsesWith(*II, ArgArg);
+    Constant *XorMask;
+    if (match(Arg,
+              m_Xor(m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg)),
+                    m_Constant(XorMask))) &&
+        II->getType() == ArgArg->getType()) {
+      if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
+        if (CI->getValue().trunc(16).isAllOnesValue()) {
+          auto TrueVector = Builder.CreateVectorSplat(
+              II->getType()->getVectorNumElements(), Builder.getTrue());
+          return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
+        }
+      }
+    }
     KnownBits ScalarKnown(32);
     if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
                              ScalarKnown, 0))
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
index c7533503fa7..f46eb77e755 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
@@ -1,5 +1,7 @@
 ; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s
 
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
 define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
 ; CHECK-LABEL: test_vpt_block:
 ; CHECK:       @ %bb.0: @ %entry
@@ -16,7 +18,27 @@ entry:
   ret <8 x i16> %5
 }
 
+define arm_aapcs_vfpcc <8 x i16> @test_vpnot(<8 x i16> %v, <8 x i16> %w, <8 x i16> %x, i32 %n) {
+; CHECK-LABEL: test_vpnot:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vctp.16 r0
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vaddt.i16 q0, q1, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i1> @llvm.arm.vctp16(i32 %n)
+  %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
+  %2 = trunc i32 %1 to i16
+  %3 = xor i16 %2, -1
+  %4 = zext i16 %3 to i32
+  %5 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %4)
+  %6 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %w, <8 x i16> %x, <8 x i1> %5, <8 x i16> %v)
+  ret <8 x i16> %6
+}
+
 declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
 declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
 declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
+declare <8 x i1> @llvm.arm.vctp16(i32)
diff --git a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
index 4594102a468..7c511135ba1 100644
--- a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
+++ b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S -o - %s | FileCheck %s
 
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
 declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
 declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
 declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)
@@ -234,3 +236,95 @@ entry:
   %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
   ret <4 x i1> %vout
 }
+
+; If a predicate vector is round-tripped to an integer and back, and
+; complemented while it's in integer form, we should collapse that to
+; a complement of the vector itself. (Rationale: this is likely to
+; allow it to be code-generated as MVE VPNOT.)
+
+define <4 x i1> @vpnot_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+  ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %flipped = xor i32 %int, 65535
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+  ret <16 x i1> %vout
+}
+
+; And this still works even if the i32 is narrowed to i16 and back on
+; opposite sides of the xor.
+
+define <4 x i1> @vpnot_narrow_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+  ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_narrow_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_8(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+  ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_narrow_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_16(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
+;
+entry:
+  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+  %narrow = trunc i32 %int to i16
+  %flipped_narrow = xor i16 %narrow, -1
+  %flipped = zext i16 %flipped_narrow to i32
+  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+  ret <16 x i1> %vout
+}
```

