author     Kerry McLaughlin <kerry.mclaughlin@arm.com>    2019-09-30 17:10:21 +0000
committer  Kerry McLaughlin <kerry.mclaughlin@arm.com>    2019-09-30 17:10:21 +0000
commit     01b84e175c500dd85c522920de992c0b2c5b1060
tree       ba0a997f957797ed02077bdc259dc0083d50c538
parent     8299fd9dee7df7c5f92ab2572aad04ce2fbbf83e
[AArch64][SVE] Implement punpk[hi|lo] intrinsics
Summary:
Adds the following two intrinsics:
- int_aarch64_sve_punpkhi
- int_aarch64_sve_punpklo
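
As a usage sketch (the function @third_quarter and its name are illustrative only;
the intrinsic declarations follow the mangling used by the new tests below), the two
unpacks can be chained to pull a quarter-width predicate out of a full-width one:

  ; Hypothetical example: extract the third quarter of a full-width predicate
  ; by taking the high half, then the low half of the result.
  declare <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1>)
  declare <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1>)

  define <vscale x 4 x i1> @third_quarter(<vscale x 16 x i1> %p) {
    ; High half of the 16-element predicate -> 8-element predicate (elements 8..15).
    %hi = call <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1> %p)
    ; Low half of that -> 4-element predicate (elements 8..11 of %p).
    %q = call <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1> %hi)
    ret <vscale x 4 x i1> %q
  }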
This patch also contains a fix that allows LLVMHalfElementsVectorType
to forward-reference overloadable arguments.
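
To illustrate the forward reference (a sketch based on the declarations in the new
test file): the intrinsic's return type is LLVMHalfElementsVectorType<0>, i.e. it is
derived from overloaded type 0, the any-vector argument, which is only recorded once
the argument list has been matched. Because the return type is visited first,
matchIntrinsicType must defer the check rather than reject it outright:

  ; The result type must have half as many elements as the overloaded argument:
  declare <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1>)
  ; The same relationship holds for the narrower overloads:
  declare <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1>)
  declare <vscale x 2 x i1> @llvm.aarch64.sve.punpkhi.nxv2i1(<vscale x 4 x i1>)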
Reviewers: sdesmalen, rovka, rengolin
Reviewed By: sdesmalen
Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, greened, cfe-commits, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D67830
llvm-svn: 373232
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td                    | 12
-rw-r--r--  llvm/lib/IR/Function.cpp                                     |  5
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td               |  4
-rw-r--r--  llvm/lib/Target/AArch64/SVEInstrFormats.td                   | 13
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-intrinsics-pred-operations.ll  | 65
5 files changed, 95 insertions(+), 4 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a81127e75e2..2a69a51603f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -768,6 +768,11 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  LLVMMatchType<0>],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE_PUNPKHI_Intrinsic
+    : Intrinsic<[LLVMHalfElementsVectorType<0>],
+                [llvm_anyvector_ty],
+                [IntrNoMem]>;
+
   // This class of intrinsics are not intended to be useful within LLVM IR but
   // are instead here to support some of the more regid parts of the ACLE.
   class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
@@ -792,4 +797,11 @@ def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
 //
 
 def int_aarch64_sve_fcvtzs_i32f16 : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
+
+//
+// Predicate operations
+//
+
+def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
+def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;
 }
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index a4632762c20..5c22109ffd5 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1211,8 +1211,9 @@ static bool matchIntrinsicType(
   }
   case IITDescriptor::HalfVecArgument:
     // If this is a forward reference, defer the check for later.
-    return D.getArgumentNumber() >= ArgTys.size() ||
-           !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+    if (D.getArgumentNumber() >= ArgTys.size())
+      return IsDeferredCheck || DeferCheck(Ty);
+    return !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
           VectorType::getHalfElementsVectorType(
               cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
   case IITDescriptor::SameVecWidthArgument: {
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 0813c41dc66..cdf313db1b9 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -216,8 +216,8 @@ let Predicates = [HasSVE] in {
   defm UUNPKLO_ZZ : sve_int_perm_unpk<0b10, "uunpklo">;
   defm UUNPKHI_ZZ : sve_int_perm_unpk<0b11, "uunpkhi">;
 
-  def PUNPKLO_PP : sve_int_perm_punpk<0b0, "punpklo">;
-  def PUNPKHI_PP : sve_int_perm_punpk<0b1, "punpkhi">;
+  defm PUNPKLO_PP : sve_int_perm_punpk<0b0, "punpklo", int_aarch64_sve_punpklo>;
+  defm PUNPKHI_PP : sve_int_perm_punpk<0b1, "punpkhi", int_aarch64_sve_punpkhi>;
 
   defm MOVPRFX_ZPzZ : sve_int_movprfx_pred_zero<0b000, "movprfx">;
   defm MOVPRFX_ZPmZ : sve_int_movprfx_pred_merge<0b001, "movprfx">;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 8c8713b464e..f57e111b7e1 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -283,6 +283,11 @@ let Predicates = [HasSVE] in {
 // SVE pattern match helpers.
 //===----------------------------------------------------------------------===//
 
+class SVE_1_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                   Instruction inst>
+: Pat<(vtd (op vt1:$Op1)),
+      (inst $Op1)>;
+
 class SVE_3_Op_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
                    ValueType vt2, ValueType vt3, Instruction inst>
 : Pat<(vtd (op vt1:$Op1, vt2:$Op2, vt3:$Op3)),
@@ -4280,6 +4285,14 @@ class sve_int_perm_punpk<bit opc, string asm>
   let Inst{3-0} = Pd;
 }
 
+multiclass sve_int_perm_punpk<bit opc, string asm, SDPatternOperator op> {
+  def NAME : sve_int_perm_punpk<opc, asm>;
+
+  def : SVE_1_Op_Pat<nxv8i1, op, nxv16i1, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Pat<nxv4i1, op, nxv8i1, !cast<Instruction>(NAME)>;
+  def : SVE_1_Op_Pat<nxv2i1, op, nxv4i1, !cast<Instruction>(NAME)>;
+}
+
 class sve_int_rdffr_pred<bit s, string asm>
 : I<(outs PPR8:$Pd), (ins PPRAny:$Pg),
   asm, "\t$Pd, $Pg/z",
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-pred-operations.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-pred-operations.ll
new file mode 100644
index 00000000000..d918d432ae6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-pred-operations.ll
@@ -0,0 +1,65 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; PUNPKHI
+;
+
+define <vscale x 8 x i1> @punpkhi_b16(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: punpkhi_b16
+; CHECK: punpkhi p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1> %a)
+  ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @punpkhi_b8(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: punpkhi_b8
+; CHECK: punpkhi p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1> %a)
+  ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @punpkhi_b4(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: punpkhi_b4
+; CHECK: punpkhi p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.punpkhi.nxv2i1(<vscale x 4 x i1> %a)
+  ret <vscale x 2 x i1> %res
+}
+
+;
+; PUNPKLO
+;
+
+define <vscale x 8 x i1> @punpklo_b16(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: punpklo_b16
+; CHECK: punpklo p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.punpklo.nxv8i1(<vscale x 16 x i1> %a)
+  ret <vscale x 8 x i1> %res
+}
+
+define <vscale x 4 x i1> @punpklo_b8(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: punpklo_b8
+; CHECK: punpklo p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1> %a)
+  ret <vscale x 4 x i1> %res
+}
+
+define <vscale x 2 x i1> @punpklo_b4(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: punpklo_b4
+; CHECK: punpklo p0.h, p0.b
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.punpklo.nxv2i1(<vscale x 4 x i1> %a)
+  ret <vscale x 2 x i1> %res
+}
+
+declare <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1>)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.punpkhi.nxv2i1(<vscale x 4 x i1>)
+
+declare <vscale x 8 x i1> @llvm.aarch64.sve.punpklo.nxv8i1(<vscale x 16 x i1>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1>)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.punpklo.nxv2i1(<vscale x 4 x i1>)