-rw-r--r--   llvm/include/llvm/IR/IntrinsicsAArch64.td        |  14
-rw-r--r--   llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp  |  41
-rw-r--r--   llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td   |  14
-rw-r--r--   llvm/lib/Target/AArch64/SVEInstrFormats.td       |  19
-rw-r--r--   llvm/test/CodeGen/AArch64/sve-int-imm.ll         | 471
5 files changed, 550 insertions, 9 deletions
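
For orientation, each new intrinsic takes the vector operand plus an i32 immediate and is expected to select directly to the corresponding unpredicated immediate instruction. A minimal usage sketch in LLVM IR (mirroring the tests added in llvm/test/CodeGen/AArch64/sve-int-imm.ll below; the function name @example_add_imm is illustrative only, and the +sve target feature is assumed):

; Sketch: calling the new add-immediate intrinsic on an SVE i32 vector.
define <vscale x 4 x i32> @example_add_imm(<vscale x 4 x i32> %a) {
  ; Expected to compile to: add z0.s, z0.s, #30
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a, i32 30)
  ret <vscale x 4 x i32> %res
}
declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)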
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index fa5d107edd6..fff7fc899d8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1044,6 +1044,12 @@ class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
                 ],
                 [IntrReadMem, IntrArgMemOnly]>;
 
+class AdvSIMD_1VectorArg_Imm_Intrinsic
+  : Intrinsic<[llvm_anyvector_ty],
+              [LLVMMatchType<0>,
+               llvm_i32_ty],
+              [IntrNoMem, ImmArg<1>]>;
+
 //
 // Integer arithmetic
 //
@@ -1052,6 +1058,14 @@ def int_aarch64_sve_add : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_sub : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_subr : AdvSIMD_Pred2VectorArg_Intrinsic;
 
+def int_aarch64_sve_add_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_subr_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_uqadd_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_sqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+def int_aarch64_sve_uqsub_imm : AdvSIMD_1VectorArg_Imm_Intrinsic;
+
 def int_aarch64_sve_mul : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_smulh : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_sve_umulh : AdvSIMD_Pred2VectorArg_Intrinsic;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 637fc791c50..4640dcf7361 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -159,6 +159,11 @@ public:
     return false;
   }
 
+  template<MVT::SimpleValueType VT>
+  bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
+    return SelectSVEAddSubImm(N, VT, Imm, Shift);
+  }
+
   /// Form sequences of consecutive 64/128-bit registers for use in NEON
   /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
 /// between 1 and 4 elements. If it contains a single element that is returned
@@ -235,6 +240,7 @@ private:
   bool SelectCMP_SWAP(SDNode *N);
 
+  bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
 };
 } // end anonymous namespace
@@ -2811,6 +2817,41 @@ bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
   return true;
 }
 
+bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift) {
+  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
+    const int64_t ImmVal = CNode->getZExtValue();
+    SDLoc DL(N);
+
+    switch (VT.SimpleTy) {
+    case MVT::i8:
+      if ((ImmVal & 0xFF) == ImmVal) {
+        Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
+        Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
+        return true;
+      }
+      break;
+    case MVT::i16:
+    case MVT::i32:
+    case MVT::i64:
+      if ((ImmVal & 0xFF) == ImmVal) {
+        Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
+        Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
+        return true;
+      } else if ((ImmVal & 0xFF00) == ImmVal) {
+        Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
+        Imm = CurDAG->getTargetConstant(ImmVal >> 8, DL, MVT::i32);
+        return true;
+      }
+      break;
+    default:
+      break;
+    }
+  }
+
+  return false;
+}
+
+
 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
   // tagp(FrameIndex, IRGstack, tag_offset):
   // since the offset between FrameIndex and IRGstack is a compile-time
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 350aa63a263..cdd1b035849 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -75,13 +75,13 @@ let Predicates = [HasSVE] in {
   defm AND_ZPmZ : sve_int_bin_pred_log<0b010, "and", int_aarch64_sve_and>;
   defm BIC_ZPmZ : sve_int_bin_pred_log<0b011, "bic", int_aarch64_sve_bic>;
 
-  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add">;
-  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub">;
-  defm SUBR_ZI  : sve_int_arith_imm0<0b011, "subr">;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd">;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd">;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub">;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub">;
+  defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", int_aarch64_sve_add_imm>;
+  defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", int_aarch64_sve_sub_imm>;
+  defm SUBR_ZI  : sve_int_arith_imm0<0b011, "subr", int_aarch64_sve_subr_imm>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_imm>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_imm>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_imm>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_imm>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 4be57550df3..27e0d0f611d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -202,6 +202,11 @@ def addsub_imm8_opt_lsl_i64 : imm8_opt_lsl<64, "uint64_t", SVEAddSubImmOperand64
   return AArch64_AM::isSVEAddSubImm<int64_t>(Imm);
 }]>;
 
+def SVEAddSubImm8Pat  : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i8>", []>;
+def SVEAddSubImm16Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i16>", []>;
+def SVEAddSubImm32Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i32>", []>;
+def SVEAddSubImm64Pat : ComplexPattern<i32, 2, "SelectSVEAddSubImm<MVT::i64>", []>;
+
 class SVEExactFPImm<string Suffix, string ValA, string ValB> : AsmOperandClass {
   let Name = "SVEExactFPImmOperand" # Suffix;
   let DiagnosticType = "Invalid" # Name;
@@ -288,6 +293,11 @@ class SVE_1_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
   : Pat<(vtd (op vt1:$Op1)),
         (inst $Op1)>;
 
+class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
+                              ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (i32 (cpx i32:$imm, i32:$shift)))),
+        (inst $Op1, i32:$imm, i32:$shift)>;
+
 class SVE_2_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                    ValueType vt2, Instruction inst>
   : Pat<(vtd (op vt1:$Op1, vt2:$Op2)),
@@ -3278,11 +3288,16 @@ class sve_int_arith_imm0<bits<2> sz8_64, bits<3> opc, string asm,
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve_int_arith_imm0<bits<3> opc, string asm> {
-  def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8, addsub_imm8_opt_lsl_i8>;
+multiclass sve_int_arith_imm0<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_arith_imm0<0b00, opc, asm, ZPR8,  addsub_imm8_opt_lsl_i8>;
   def _H : sve_int_arith_imm0<0b01, opc, asm, ZPR16, addsub_imm8_opt_lsl_i16>;
   def _S : sve_int_arith_imm0<0b10, opc, asm, ZPR32, addsub_imm8_opt_lsl_i32>;
   def _D : sve_int_arith_imm0<0b11, opc, asm, ZPR64, addsub_imm8_opt_lsl_i64>;
+
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv16i8, op, ZPR8,  SVEAddSubImm8Pat,  !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv8i16, op, ZPR16, SVEAddSubImm16Pat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv4i32, op, ZPR32, SVEAddSubImm32Pat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_OptLsl_Pat<nxv2i64, op, ZPR64, SVEAddSubImm64Pat, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_arith_imm<bits<2> sz8_64, bits<6> opc, string asm,
diff --git a/llvm/test/CodeGen/AArch64/sve-int-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
new file mode 100644
index 00000000000..30002771733
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -0,0 +1,471 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @add_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_imm_i8_low
+; CHECK: add z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @add_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_imm_i16_low
+; CHECK: add z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @add_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: add_imm_i16_high
+; CHECK: add z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @add_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_imm_i32_low
+; CHECK: add z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @add_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: add_imm_i32_high
+; CHECK: add z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @add_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_imm_i64_low
+; CHECK: add z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @add_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: add_imm_i64_high
+; CHECK: add z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_imm_i8_low
+; CHECK: sub z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sub_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_imm_i16_low
+; CHECK: sub z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @sub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sub_imm_i16_high
+; CHECK: sub z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_imm_i32_low
+; CHECK: sub z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sub_imm_i32_high
+; CHECK: sub z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_imm_i64_low
+; CHECK: sub z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sub_imm_i64_high
+; CHECK: sub z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @subr_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: subr_imm_i8_low
+; CHECK: subr z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                    i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @subr_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_imm_i16_low
+; CHECK: subr z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                    i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @subr_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: subr_imm_i16_high
+; CHECK: subr z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                    i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @subr_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_imm_i32_low
+; CHECK: subr z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                    i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @subr_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: subr_imm_i32_high
+; CHECK: subr z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                    i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @subr_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_imm_i64_low
+; CHECK: subr z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                    i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @subr_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: subr_imm_i64_high
+; CHECK: subr z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                    i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sqadd_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_imm_i8_low
+; CHECK: sqadd z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sqadd_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_imm_i16_low
+; CHECK: sqadd z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @sqadd_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_imm_i16_high
+; CHECK: sqadd z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqadd_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_imm_i32_low
+; CHECK: sqadd z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqadd_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_imm_i32_high
+; CHECK: sqadd z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqadd_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_imm_i64_low
+; CHECK: sqadd z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqadd_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_imm_i64_high
+; CHECK: sqadd z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @uqadd_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_imm_i8_low
+; CHECK: uqadd z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @uqadd_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_imm_i16_low
+; CHECK: uqadd z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @uqadd_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_imm_i16_high
+; CHECK: uqadd z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @uqadd_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_imm_i32_low
+; CHECK: uqadd z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @uqadd_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_imm_i32_high
+; CHECK: uqadd z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uqadd_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_imm_i64_low
+; CHECK: uqadd z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @uqadd_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_imm_i64_high
+; CHECK: uqadd z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sqsub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_imm_i8_low
+; CHECK: sqsub z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sqsub_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_imm_i16_low
+; CHECK: sqsub z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @sqsub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_imm_i16_high
+; CHECK: sqsub z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sqsub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_imm_i32_low
+; CHECK: sqsub z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @sqsub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_imm_i32_high
+; CHECK: sqsub z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sqsub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_imm_i64_low
+; CHECK: sqsub z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sqsub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_imm_i64_high
+; CHECK: sqsub z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @uqsub_imm_i8_low(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_imm_i8_low
+; CHECK: uqsub z0.b, z0.b, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8> %a,
+                                                                     i32 30)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @uqsub_imm_i16_low(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_imm_i16_low
+; CHECK: uqsub z0.h, z0.h, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 30)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @uqsub_imm_i16_high(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_imm_i16_high
+; CHECK: uqsub z0.h, z0.h, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16> %a,
+                                                                     i32 1024)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @uqsub_imm_i32_low(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_imm_i32_low
+; CHECK: uqsub z0.s, z0.s, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 30)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @uqsub_imm_i32_high(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_imm_i32_high
+; CHECK: uqsub z0.s, z0.s, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32> %a,
+                                                                     i32 1024)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uqsub_imm_i64_low(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_imm_i64_low
+; CHECK: uqsub z0.d, z0.d, #30
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 30)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @uqsub_imm_i64_high(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_imm_i64_high
+; CHECK: uqsub z0.d, z0.d, #1024
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64> %a,
+                                                                     i32 1024)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.add.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.add.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.add.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.add.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.imm.nxv16i8(<vscale x 16 x i8>, i32)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.imm.nxv8i16(<vscale x 8 x i16>, i32)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.imm.nxv4i32(<vscale x 4 x i32>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.imm.nxv2i64(<vscale x 2 x i64>, i32)
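
A note on the immediates these patterns accept: SelectSVEAddSubImm only folds a constant that fits the "8-bit immediate, optionally shifted left by 8" encoding of the instructions. For the .b element size that is 0-255; for .h, .s and .d it additionally allows multiples of 256 up to 65280, encoded as an 8-bit value with an lsl #8 shift. This is why the tests above use #30 and #1024. A small sketch of the shifted case (the expected-assembly comment is illustrative, taken from the test pattern above, and assumes +sve):

  ; expected: uqadd z0.h, z0.h, #1024   (encoded as imm8 = 4 with shift = lsl #8)
  %r = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.imm.nxv8i16(<vscale x 8 x i16> %a, i32 1024)

Constants outside these ranges do not match the ComplexPattern and so are not folded into the immediate form.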