Diffstat (limited to 'drivers/net/ethernet/netronome')
 drivers/net/ethernet/netronome/Kconfig | 3
 drivers/net/ethernet/netronome/Makefile | 1
 drivers/net/ethernet/netronome/nfp/Makefile | 11
 drivers/net/ethernet/netronome/nfp/abm/cls.c | 40
 drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 8
 drivers/net/ethernet/netronome/nfp/abm/main.c | 5
 drivers/net/ethernet/netronome/nfp/abm/main.h | 2
 drivers/net/ethernet/netronome/nfp/bpf/Makefile | 2
 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 419
 drivers/net/ethernet/netronome/nfp/bpf/fw.h | 34
 drivers/net/ethernet/netronome/nfp/bpf/jit.c | 147
 drivers/net/ethernet/netronome/nfp/bpf/main.c | 75
 drivers/net/ethernet/netronome/nfp/bpf/main.h | 43
 drivers/net/ethernet/netronome/nfp/bpf/offload.c | 14
 drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 12
 drivers/net/ethernet/netronome/nfp/ccm.c | 217
 drivers/net/ethernet/netronome/nfp/ccm.h | 131
 drivers/net/ethernet/netronome/nfp/ccm_mbox.c | 743
 drivers/net/ethernet/netronome/nfp/crypto/crypto.h | 27
 drivers/net/ethernet/netronome/nfp/crypto/fw.h | 84
 drivers/net/ethernet/netronome/nfp/crypto/tls.c | 522
 drivers/net/ethernet/netronome/nfp/devlink_param.c | 255
 drivers/net/ethernet/netronome/nfp/flower/Makefile | 2
 drivers/net/ethernet/netronome/nfp/flower/action.c | 585
 drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 68
 drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 104
 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 4
 drivers/net/ethernet/netronome/nfp/flower/main.c | 163
 drivers/net/ethernet/netronome/nfp/flower/main.h | 140
 drivers/net/ethernet/netronome/nfp/flower/match.c | 154
 drivers/net/ethernet/netronome/nfp/flower/metadata.c | 137
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 1151
 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 366
 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 239
 drivers/net/ethernet/netronome/nfp/nfp_app.h | 15
 drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 29
 drivers/net/ethernet/netronome/nfp/nfp_main.c | 170
 drivers/net/ethernet/netronome/nfp/nfp_main.h | 5
 drivers/net/ethernet/netronome/nfp/nfp_net.h | 90
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 344
 drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 15
 drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 31
 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 157
 drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 40
 drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 21
 drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 27
 drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 6
 drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 11
 drivers/net/ethernet/netronome/nfp/nfp_port.h | 2
 drivers/net/ethernet/netronome/nfp/nfpcore/Makefile | 2
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile | 2
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 16
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 146
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 37
 drivers/net/ethernet/netronome/nfp/nic/Makefile | 2
 56 files changed, 6127 insertions(+), 966 deletions(-)
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 549898d5d450..bac5be4d4f43 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Netronome device configuration
#
@@ -19,6 +20,8 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
+ select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/Makefile b/drivers/net/ethernet/netronome/Makefile
index 7fb3b84b5556..d9a3948e8bde 100644
--- a/drivers/net/ethernet/netronome/Makefile
+++ b/drivers/net/ethernet/netronome/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the Netronome network device drivers
#
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 47c708f08ade..d31772ae511d 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -15,6 +15,9 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ ccm.o \
+ ccm_mbox.o \
+ devlink_param.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -33,6 +36,11 @@ nfp-objs := \
nfp_shared_buf.o \
nic/main.o
+ifeq ($(CONFIG_TLS_DEVICE),y)
+nfp-objs += \
+ crypto/tls.o
+endif
+
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
@@ -42,7 +50,8 @@ nfp-objs += \
flower/match.o \
flower/metadata.o \
flower/offload.o \
- flower/tunnel_conf.o
+ flower/tunnel_conf.o \
+ flower/qos_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9852080cf454..9f8a1f69c0c4 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
}
if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
knode->sel->offoff || knode->fshift) {
- NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
return false;
}
if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
k = &knode->sel->keys[0];
if (k->offmask) {
- NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
return false;
}
if (k->off) {
@@ -176,8 +176,10 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
u8 mask, val;
int err;
- if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
+ err = -EOPNOTSUPP;
goto err_delete;
+ }
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
@@ -198,14 +200,18 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
if ((iter->val & cmask) == (val & cmask) &&
iter->band != knode->res->classid) {
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+ err = -EOPNOTSUPP;
goto err_delete;
}
}
if (!match) {
match = kzalloc(sizeof(*match), GFP_KERNEL);
- if (!match)
- return -ENOMEM;
+ if (!match) {
+ err = -ENOMEM;
+ goto err_delete;
+ }
+
list_add(&match->list, &alink->dscp_map);
}
match->handle = knode->handle;
@@ -221,7 +227,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
err_delete:
nfp_abm_u32_knode_delete(alink, knode);
- return -EOPNOTSUPP;
+ return err;
}
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
@@ -262,22 +268,12 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
}
}
+static LIST_HEAD(nfp_abm_block_cb_list);
+
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_abm_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
- repr);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, true);
}
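
Both the hunk above and the BPF equivalent later in this series replace the open-coded TC_BLOCK_BIND/TC_BLOCK_UNBIND switch with the core flow_block_cb_setup_simple() helper. A minimal caller-side sketch, with hypothetical drv_* names standing in for the driver's own: the driver keeps one static list so the core can detect duplicate binds, and the final argument tells the core to accept only clsact ingress blocks.

#include <linux/list.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

static LIST_HEAD(drv_block_cb_list);	/* for duplicate-bind detection */

static int drv_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	/* classify and offload the individual filter here */
	return -EOPNOTSUPP;
}

static int drv_setup_tc_block(struct net_device *netdev,
			      struct flow_block_offload *f)
{
	void *priv = netdev_priv(netdev);

	/* BIND/UNBIND handling, busy checks and list management now
	 * all live in the core helper.
	 */
	return flow_block_cb_setup_simple(f, &drv_block_cb_list,
					  drv_setup_tc_block_cb,
					  priv, priv, true);
}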
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 9584f03f3efa..69e84ff7f2e5 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
struct nfp_net *nn = alink->vnic;
unsigned int i;
int err;
+ err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
+ if (err)
+ return err;
+
/* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len);
@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]);
- err = nfp_net_reconfig_mbox(nn,
- NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
if (err)
nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 4d4ff5844c47..9183b3e85d21 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
}
}
-static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+static struct net_device *
+nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type rtype;
struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
.eswitch_mode_get = nfp_abm_eswitch_mode_get,
.eswitch_mode_set = nfp_abm_eswitch_mode_set,
- .repr_get = nfp_abm_repr_get,
+ .dev_get = nfp_abm_repr_get,
};
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 49749c60885e..48746c9c6224 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -247,7 +247,7 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt);
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *opt);
+ struct flow_block_offload *opt);
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/Makefile b/drivers/net/ethernet/netronome/nfp/bpf/Makefile
deleted file mode 100644
index 805fa28f391a..000000000000
--- a/drivers/net/ethernet/netronome/nfp/bpf/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 9b6cfa697879..0e2db6ea79e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -6,48 +6,14 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
-#include <linux/wait.h>
+#include <linux/timekeeping.h>
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
-#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
- u16 used_tags;
-
- used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
- return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
- /* All FW communication for BPF is request-reply. To make sure we
- * don't reuse the message ID too early after timeout - limit the
- * number of requests in flight.
- */
- if (nfp_bpf_all_tags_busy(bpf)) {
- cmsg_warn(bpf, "all FW request contexts busy!\n");
- return -EAGAIN;
- }
-
- WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
- return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
- while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
- bpf->tag_alloc_last != bpf->tag_alloc_next)
- bpf->tag_alloc_last++;
-}
-
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
@@ -87,149 +53,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size;
}
-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- unsigned int msg_tag;
- struct sk_buff *skb;
-
- skb_queue_walk(&bpf->cmsg_replies, skb) {
- msg_tag = nfp_bpf_cmsg_get_tag(skb);
- if (msg_tag == tag) {
- nfp_bpf_free_tag(bpf, tag);
- __skb_unlink(skb, &bpf->cmsg_replies);
- return skb;
- }
- }
-
- return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- if (!skb)
- nfp_bpf_free_tag(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
- int tag)
-{
- struct sk_buff *skb;
- int i, err;
-
- for (i = 0; i < 50; i++) {
- udelay(4);
- skb = nfp_bpf_reply(bpf, tag);
- if (skb)
- return skb;
- }
-
- err = wait_event_interruptible_timeout(bpf->cmsg_wq,
- skb = nfp_bpf_reply(bpf, tag),
- msecs_to_jiffies(5000));
- /* We didn't get a response - try last time and atomically drop
- * the tag even if no response is matched.
- */
- if (!skb)
- skb = nfp_bpf_reply_drop_tag(bpf, tag);
- if (err < 0) {
- cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
- err == ERESTARTSYS ? "interrupted" : "error",
- type, err);
- return ERR_PTR(err);
- }
- if (!skb) {
- cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
- type);
- return ERR_PTR(-ETIMEDOUT);
- }
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
- enum nfp_bpf_cmsg_type type, unsigned int reply_size)
-{
- struct cmsg_hdr *hdr;
- int tag;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- tag = nfp_bpf_alloc_tag(bpf);
- if (tag < 0) {
- nfp_ctrl_unlock(bpf->app->ctrl);
- dev_kfree_skb_any(skb);
- return ERR_PTR(tag);
- }
-
- hdr = (void *)skb->data;
- hdr->ver = CMSG_MAP_ABI_VERSION;
- hdr->type = type;
- hdr->tag = cpu_to_be16(tag);
-
- __nfp_app_ctrl_tx(bpf->app, skb);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
- if (IS_ERR(skb))
- return skb;
-
- hdr = (struct cmsg_hdr *)skb->data;
- if (hdr->type != __CMSG_REPLY(type)) {
- cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
- hdr->type, __CMSG_REPLY(type));
- goto err_free;
- }
- /* 0 reply_size means caller will do the validation */
- if (reply_size && skb->len != reply_size) {
- cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
- type, skb->len, reply_size);
- goto err_free;
- }
-
- return skb;
-err_free:
- dev_kfree_skb_any(skb);
- return ERR_PTR(-EIO);
-}
-
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply)
@@ -275,8 +98,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0;
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+ sizeof(*reply));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -310,8 +133,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+ sizeof(*reply));
if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n");
return;
@@ -353,30 +176,151 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}
+static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
+ op == NFP_CCM_TYPE_BPF_MAP_DELETE;
+}
+
+static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
+ op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
+ op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static unsigned int
+nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+ const u8 *key, u8 *out_key, u8 *out_value,
+ u32 *cache_gen)
+{
+ struct bpf_map *map = &nfp_map->offmap->map;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ unsigned int i, count, n_entries;
+ struct cmsg_reply_map_op *reply;
+
+ n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;
+
+ spin_lock(&nfp_map->cache_lock);
+ *cache_gen = nfp_map->cache_gen;
+ if (nfp_map->cache_blockers)
+ n_entries = 1;
+
+ if (nfp_bpf_ctrl_op_cache_invalidate(op))
+ goto exit_block;
+ if (!nfp_bpf_ctrl_op_cache_capable(op))
+ goto exit_unlock;
+
+ if (!nfp_map->cache)
+ goto exit_unlock;
+ if (nfp_map->cache_to < ktime_get_ns())
+ goto exit_invalidate;
+
+ reply = (void *)nfp_map->cache->data;
+ count = be32_to_cpu(reply->count);
+
+ for (i = 0; i < count; i++) {
+ void *cached_key;
+
+ cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
+ if (memcmp(cached_key, key, map->key_size))
+ continue;
+
+ if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
+ memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
+ map->value_size);
+ if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
+ if (i + 1 == count)
+ break;
+
+ memcpy(out_key,
+ nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
+ map->key_size);
+ }
+
+ n_entries = 0;
+ goto exit_unlock;
+ }
+ goto exit_unlock;
+
+exit_block:
+ nfp_map->cache_blockers++;
+exit_invalidate:
+ dev_consume_skb_any(nfp_map->cache);
+ nfp_map->cache = NULL;
+exit_unlock:
+ spin_unlock(&nfp_map->cache_lock);
+ return n_entries;
+}
+
+static void
+nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+ struct sk_buff *skb, u32 cache_gen)
+{
+ bool blocker, filler;
+
+ blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
+ filler = nfp_bpf_ctrl_op_cache_fill(op);
+ if (blocker || filler) {
+ u64 to = 0;
+
+ if (filler)
+ to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;
+
+ spin_lock(&nfp_map->cache_lock);
+ if (blocker) {
+ nfp_map->cache_blockers--;
+ nfp_map->cache_gen++;
+ }
+ if (filler && !nfp_map->cache_blockers &&
+ nfp_map->cache_gen == cache_gen) {
+ nfp_map->cache_to = to;
+ swap(nfp_map->cache, skb);
+ }
+ spin_unlock(&nfp_map->cache_lock);
+ }
+
+ dev_consume_skb_any(skb);
+}
+
static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
- enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ unsigned int n_entries, reply_entries, count;
struct nfp_app_bpf *bpf = nfp_map->bpf;
struct bpf_map *map = &offmap->map;
struct cmsg_reply_map_op *reply;
struct cmsg_req_map_op *req;
struct sk_buff *skb;
+ u32 cache_gen;
int err;
/* FW messages have no space for more than 32 bits of flags */
if (flags >> 32)
return -EOPNOTSUPP;
+ /* Handle op cache */
+ n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
+ out_value, &cache_gen);
+ if (!n_entries)
+ return 0;
+
skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_cache_put;
+ }
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
- req->count = cpu_to_be32(1);
+ req->count = cpu_to_be32(n_entries);
req->flags = cpu_to_be32(flags);
/* Copy inputs */
@@ -386,16 +330,38 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto err_cache_put;
+ }
+
+ if (skb->len < sizeof(*reply)) {
+ cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
+ op, skb->len);
+ err = -EIO;
+ goto err_free;
+ }
reply = (void *)skb->data;
+ count = be32_to_cpu(reply->count);
err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ /* FW responds with a message sized to hold the good entries,
+ * plus one extra entry if there was an error.
+ */
+ reply_entries = count + !!err;
+ if (n_entries > 1 && count)
+ err = 0;
if (err)
goto err_free;
+ if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
+ cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
+ op, skb->len, reply_entries);
+ err = -EIO;
+ goto err_free;
+ }
+
/* Copy outputs */
if (out_key)
memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
@@ -404,106 +370,107 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
map->value_size);
- dev_consume_skb_any(skb);
+ nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);
return 0;
err_free:
dev_kfree_skb_any(skb);
+err_cache_put:
+ nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
return err;
}
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL);
}
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL);
}
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value);
}
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL);
}
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL);
}
+unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
+{
+ return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
- return max3((unsigned int)NFP_NET_DEFAULT_MTU,
- nfp_bpf_cmsg_map_req_size(bpf, 1),
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
+ return max3(NFP_NET_DEFAULT_MTU,
+ nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
+ nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
+}
+
+unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
+{
+ unsigned int mtu, req_max, reply_max, entry_sz;
+
+ mtu = bpf->app->ctrl->dp.mtu;
+ entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
+ req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
+ reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;
+
+ return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
- unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
- goto err_free;
+ dev_kfree_skb_any(skb);
+ return;
}
- if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+ if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
- return;
- }
-
- nfp_ctrl_lock(bpf->app->ctrl);
-
- tag = nfp_bpf_cmsg_get_tag(skb);
- if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
- cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
- tag);
- goto err_unlock;
}
- __skb_queue_tail(&bpf->cmsg_replies, skb);
- wake_up_interruptible_all(&bpf->cmsg_wq);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return;
-err_unlock:
- nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
- dev_kfree_skb_any(skb);
+ nfp_ccm_rx(&bpf->ccm, skb);
}
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
+ const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv;
- const struct cmsg_hdr *hdr = data;
if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return;
}
- if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len);
else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
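
The map-op cache added above is coordinated by two counters: cache_blockers counts in-flight update/delete operations (which must not be served stale data), and cache_gen is bumped whenever a blocker finishes so that a racing fill knows its reply is already stale. A reduced model of the put path, with a hypothetical struct map_cache standing in for the cache fields of struct nfp_bpf_map:

struct map_cache {
	spinlock_t lock;
	u32 blockers;		/* in-flight invalidating ops */
	u32 gen;		/* bumped when a blocker exits */
	u64 valid_to;		/* ktime_get_ns() expiry */
	struct sk_buff *skb;	/* cached reply, or NULL */
};

static void map_cache_put(struct map_cache *c, struct sk_buff *skb,
			  bool blocker, bool filler, u32 gen_at_get)
{
	spin_lock(&c->lock);
	if (blocker) {
		c->blockers--;
		c->gen++;			/* invalidate racing fills */
	}
	/* Install the fresh reply only if nothing invalidated it. */
	if (filler && !c->blockers && c->gen == gen_at_get) {
		c->valid_to = ktime_get_ns() + 250 * 1000;	/* 250 us */
		swap(c->skb, skb);		/* skb now holds old entry */
	}
	spin_unlock(&c->lock);

	dev_consume_skb_any(skb);		/* old entry or unused reply */
}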
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 721921bcf120..a83a0ad5e27d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
+#include "../ccm.h"
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver.
@@ -23,6 +24,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
NFP_BPF_CAP_TYPE_ABI_VERSION = 7,
+ NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT = 8,
};
struct nfp_bpf_cap_tlv_func {
@@ -52,22 +54,6 @@ struct nfp_bpf_cap_tlv_maps {
/*
* Types defined for map related control messages
*/
-#define CMSG_MAP_ABI_VERSION 1
-
-enum nfp_bpf_cmsg_type {
- CMSG_TYPE_MAP_ALLOC = 1,
- CMSG_TYPE_MAP_FREE = 2,
- CMSG_TYPE_MAP_LOOKUP = 3,
- CMSG_TYPE_MAP_UPDATE = 4,
- CMSG_TYPE_MAP_DELETE = 5,
- CMSG_TYPE_MAP_GETNEXT = 6,
- CMSG_TYPE_MAP_GETFIRST = 7,
- CMSG_TYPE_BPF_EVENT = 8,
- __CMSG_TYPE_MAP_MAX,
-};
-
-#define CMSG_TYPE_MAP_REPLY_BIT 7
-#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
/* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16
@@ -84,19 +70,13 @@ enum nfp_bpf_cmsg_status {
CMSG_RC_ERR_MAP_E2BIG = 7,
};
-struct cmsg_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
-};
-
struct cmsg_reply_map_simple {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 rc;
};
struct cmsg_req_map_alloc_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */
__be32 max_entries;
@@ -110,7 +90,7 @@ struct cmsg_reply_map_alloc_tbl {
};
struct cmsg_req_map_free_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
};
@@ -120,7 +100,7 @@ struct cmsg_reply_map_free_tbl {
};
struct cmsg_req_map_op {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
@@ -135,7 +115,7 @@ struct cmsg_reply_map_op {
};
struct cmsg_bpf_event {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 cpu_id;
__be64 map_ptr;
__be32 data_size;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index f272247d1708..c80bb83c8ac9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -328,7 +328,18 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
return;
}
- if (sc == SHF_SC_L_SHF)
+ /* The NFP shift instruction encodes left shifts specially: a shift
+ * amount of 1 to 31 is specified as 32 minus the amount to shift.
+ *
+ * No adjustment is needed for an indirect shift, which has a shift
+ * amount of 0. After the subtraction an amount of 0 would become 32,
+ * which would still be encoded the same as 0 because only the low
+ * 5 bits are encoded, but 32 would fail the FIELD_PREP check done
+ * later on the shift mask (0x1f), since it is out of range.
+ */
+ if (sc == SHF_SC_L_SHF && shift)
shift = 32 - shift;
insn = OP_SHF_BASE |
@@ -612,6 +623,13 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
static void
+wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst)
+{
+ if (meta->flags & FLAG_INSN_DO_ZEXT)
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+}
+
+static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
enum nfp_relo_type relo)
{
@@ -847,7 +865,8 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
static int
-data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
+data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset,
+ u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
@@ -870,14 +889,15 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
- swreg lreg, swreg rreg, int size, enum cmd_mode mode)
+data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 dst_gpr, swreg lreg, swreg rreg, int size,
+ enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
@@ -900,33 +920,34 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
- return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
- size, CMD_MODE_32b);
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, reg_a(src_gpr),
+ offset, size, CMD_MODE_32b);
}
static int
-data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
swreg rega, regb;
addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
- return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, rega, regb,
size, CMD_MODE_40b_BA);
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -942,10 +963,12 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
- return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+ return data_ld(nfp_prog, meta, imm_b(nfp_prog), 0, size);
}
-static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+static int
+construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u8 size)
{
swreg tmp_reg;
@@ -956,7 +979,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- return data_ld(nfp_prog, tmp_reg, 0, size);
+ return data_ld(nfp_prog, meta, tmp_reg, 0, size);
}
static int
@@ -1140,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
bool clr_gpr, lmem_step step)
{
s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
- bool first = true, last;
+ bool first = true, narrow_ld, last;
bool needs_inc = false;
swreg stack_off_reg;
u8 prev_gpr = 255;
@@ -1186,14 +1209,23 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
needs_inc = true;
}
+
+ narrow_ld = clr_gpr && size < 8;
+
if (lm3) {
+ unsigned int nop_cnt;
+
emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
- /* For size < 4 one slot will be filled by zeroing of upper. */
- wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+ /* For size < 4 one slot will be filled by zeroing of the upper
+ * half, but be careful: that zeroing may be eliminated by the
+ * zext optimization.
+ */
+ nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+ wrp_nops(nfp_prog, nop_cnt);
}
- if (clr_gpr && size < 8)
- wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ if (narrow_ld)
+ wrp_zext(nfp_prog, meta, gpr);
while (size) {
u32 slice_end;
@@ -1294,9 +1326,10 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
- wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ wrp_alu_imm(nfp_prog, dst, alu_op, insn->imm);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -1308,7 +1341,7 @@ wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2385,12 +2418,14 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst = meta->insn.dst_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt) {
/* Set signedness bit (MSB of result). */
@@ -2399,7 +2434,7 @@ static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
}
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2414,7 +2449,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __ashr_imm(nfp_prog, dst, umin);
+ return __ashr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
/* NOTE: the first insn will set both indirect shift amount (source A)
@@ -2423,7 +2458,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2433,15 +2468,17 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __ashr_imm(nfp_prog, dst, insn->imm);
+ return __ashr_imm(nfp_prog, meta, dst, insn->imm);
}
-static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2450,7 +2487,7 @@ static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shr_imm(nfp_prog, dst, insn->imm);
+ return __shr_imm(nfp_prog, meta, dst, insn->imm);
}
static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2463,22 +2500,24 @@ static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shr_imm(nfp_prog, dst, umin);
+ return __shr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_L_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2487,7 +2526,7 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shl_imm(nfp_prog, dst, insn->imm);
+ return __shl_imm(nfp_prog, meta, dst, insn->imm);
}
static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2500,11 +2539,11 @@ static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shl_imm(nfp_prog, dst, umin);
+ return __shl_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
shl_reg64_lt32_low(nfp_prog, dst, src);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2566,34 +2605,34 @@ static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 1);
}
static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 2);
}
static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 4);
}
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 4);
}
@@ -2671,7 +2710,7 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr32(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2683,7 +2722,7 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr40(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2744,7 +2783,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
if (!len_mid) {
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
@@ -2752,7 +2791,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
if (size <= REG_WIDTH) {
wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 2);
@@ -2783,10 +2822,10 @@ mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
if (size < REG_WIDTH) {
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else if (size == REG_WIDTH) {
wrp_mov(nfp_prog, dst_lo, src_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 1);
@@ -3913,7 +3952,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta1, *meta2;
- const s32 exp_mask[] = {
+ static const s32 exp_mask[] = {
[BPF_B] = 0x000000ffU,
[BPF_H] = 0x0000ffffU,
[BPF_W] = 0xffffffffU,
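
A tiny illustration of the left-shift encoding rule documented in __emit_shf() above (a hypothetical helper, not driver code): amounts 1 to 31 are encoded as 32 minus the amount, while 0 - used for indirect shifts - passes through unchanged and therefore stays within the 5-bit shift mask.

static u8 nfp_lshift_encode(u8 shift)		/* caller ensures shift < 32 */
{
	return shift ? 32 - shift : 0;		/* 0 stays 0 (indirect shift) */
}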
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 275de9f4c61c..8f732771d3fa 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -160,35 +160,19 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return 0;
}
-static int nfp_bpf_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn, nn, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(nfp_bpf_block_cb_list);
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
+ struct nfp_net *nn = netdev_priv(netdev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return nfp_bpf_setup_tc_block(netdev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &nfp_bpf_block_cb_list,
+ nfp_bpf_setup_tc_block_cb,
+ nn, nn, true);
default:
return -EOPNOTSUPP;
}
@@ -316,6 +300,14 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
}
static int
+nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ bpf->cmsg_multi_ent = true;
+ return 0;
+}
+
+static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
u32 length)
{
@@ -391,6 +383,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
+ if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -431,6 +428,25 @@ static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}
+static int nfp_bpf_start(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
+ nfp_err(bpf->app->cpp,
+ "ctrl channel MTU below min required %u < %u\n",
+ app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
+ return -EINVAL;
+ }
+
+ if (bpf->cmsg_multi_ent)
+ bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
+ else
+ bpf->cmsg_cache_cnt = 1;
+
+ return 0;
+}
+
static int nfp_bpf_init(struct nfp_app *app)
{
struct nfp_app_bpf *bpf;
@@ -442,14 +458,16 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app;
app->priv = bpf;
- skb_queue_head_init(&bpf->cmsg_replies);
- init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
- err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ err = nfp_ccm_init(&bpf->ccm, app);
if (err)
goto err_free_bpf;
+ err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ if (err)
+ goto err_clean_ccm;
+
nfp_bpf_init_capabilities(bpf);
err = nfp_bpf_parse_capabilities(app);
@@ -474,6 +492,8 @@ static int nfp_bpf_init(struct nfp_app *app)
err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral);
+err_clean_ccm:
+ nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
kfree(bpf);
return err;
@@ -484,7 +504,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev);
- WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ nfp_ccm_clean(&bpf->ccm);
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral,
@@ -500,6 +520,7 @@ const struct nfp_app_type app_bpf = {
.init = nfp_bpf_init,
.clean = nfp_bpf_clean,
+ .start = nfp_bpf_start,
.check_mtu = nfp_bpf_check_mtu,
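
The new start callback ties the pieces together: it refuses to bring the app up if the control-channel MTU cannot carry even a single-entry map request or reply, and it sizes the cache batch from the MTU when the FW advertised CMSG_MULTI_ENT. A worked sketch of the sizing, assuming the fixed ABIv2 sizes (64 B key, 64 B value, 16 B request and reply headers); the numbers are illustrative, the driver uses the sizes read from FW capabilities:

static unsigned int example_cache_cnt(unsigned int mtu)	/* e.g. mtu = 9216 */
{
	unsigned int entry_sz = 64 + 64;		/* key + value */
	unsigned int req_max = (mtu - 16) / entry_sz;	/* 71 entries */
	unsigned int reply_max = (mtu - 16) / entry_sz;	/* 71 entries */

	return min3(req_max, reply_max, 4U);	/* NFP_BPF_MAP_CACHE_CNT */
}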
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index b25a48218bcf..fac9c6f9e197 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"
@@ -84,16 +85,10 @@ enum pkt_vec {
/**
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
+ * @ccm: common control message handler data
*
* @bpf_dev: BPF offload device handle
*
- * @tag_allocator: bitmap of control message tags in use
- * @tag_alloc_next: next tag bit to allocate
- * @tag_alloc_last: next tag bit to be freed
- *
- * @cmsg_replies: received cmsg replies waiting to be consumed
- * @cmsg_wq: work queue for waiting for cmsg replies
- *
* @cmsg_key_sz: size of key in cmsg element array
* @cmsg_val_sz: size of value in cmsg element array
*
@@ -104,6 +99,7 @@ enum pkt_vec {
* @maps_neutral: hash table of offload-neutral maps (on pointer)
*
* @abi_version: global BPF ABI version
+ * @cmsg_cache_cnt: number of entries to read for caching
*
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
@@ -129,22 +125,19 @@ enum pkt_vec {
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
* @queue_select: BPF can set the RX queue ID in packet vector
* @adjust_tail: BPF can simply trunc packet size for adjust tail
+ * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct nfp_ccm ccm;
struct bpf_offload_dev *bpf_dev;
- DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
- u16 tag_alloc_next;
- u16 tag_alloc_last;
-
- struct sk_buff_head cmsg_replies;
- struct wait_queue_head cmsg_wq;
-
unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz;
+ unsigned int cmsg_cache_cnt;
+
struct list_head map_list;
unsigned int maps_in_use;
unsigned int map_elems_in_use;
@@ -180,6 +173,7 @@ struct nfp_app_bpf {
bool pseudo_random;
bool queue_select;
bool adjust_tail;
+ bool cmsg_multi_ent;
};
enum nfp_bpf_map_use {
@@ -194,11 +188,21 @@ struct nfp_bpf_map_word {
unsigned char non_zero_update :1;
};
+#define NFP_BPF_MAP_CACHE_CNT 4U
+#define NFP_BPF_MAP_CACHE_TIME_NS (250 * 1000)
+
/**
* struct nfp_bpf_map - private per-map data attached to BPF maps for offload
* @offmap: pointer to the offloaded BPF map
* @bpf: back pointer to bpf app private structure
* @tid: table id identifying map on datapath
+ *
+ * @cache_lock: protects @cache_blockers, @cache_to, @cache, @cache_gen
+ * @cache_blockers: number of ops in flight which block caching
+ * @cache_gen: counter incremented by every blocker on exit
+ * @cache_to: time when cache will no longer be valid (ns)
+ * @cache: skb with cached response
+ *
* @l: link on the nfp_app_bpf->map_list list
* @use_map: map of how the value is used (in 4B chunks)
*/
@@ -206,6 +210,13 @@ struct nfp_bpf_map {
struct bpf_offloaded_map *offmap;
struct nfp_app_bpf *bpf;
u32 tid;
+
+ spinlock_t cache_lock;
+ u32 cache_blockers;
+ u32 cache_gen;
+ u64 cache_to;
+ struct sk_buff *cache;
+
struct list_head l;
struct nfp_bpf_map_word use_map[];
};
@@ -249,6 +260,8 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+/* Instruction needs to zero-extend into the high 32 bits */
+#define FLAG_INSN_DO_ZEXT BIT(6)
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
FLAG_INSN_SKIP_PREC_DEPENDENT | \
@@ -573,7 +586,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
+unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 15dce97650a5..95a0d3910e31 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -22,6 +22,7 @@
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -45,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
/* Grab a single ref to the map for our record. The prog destroy ndo
* happens after free_used_maps().
*/
- map = bpf_map_inc(map, false);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ bpf_map_inc(map);
record = kmalloc(sizeof(*record), GFP_KERNEL);
if (!record) {
@@ -384,6 +383,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
offmap->dev_priv = nfp_map;
nfp_map->offmap = offmap;
nfp_map->bpf = bpf;
+ spin_lock_init(&nfp_map->cache_lock);
res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
if (res < 0) {
@@ -406,6 +406,8 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ dev_consume_skb_any(nfp_map->cache);
+ WARN_ON_ONCE(nfp_map->cache_blockers);
list_del_init(&nfp_map->l);
bpf->map_elems_in_use -= offmap->map.max_entries;
bpf->maps_in_use--;
@@ -452,12 +454,12 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
- if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+ if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
rcu_read_lock();
- record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
- nfp_bpf_maps_neutral_params);
+ record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
+ nfp_bpf_maps_neutral_params);
if (!record || map_id_full > U32_MAX) {
rcu_read_unlock();
cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
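
Two small API updates ride along in this file: bpf_map_inc() can no longer fail, so the reference is simply taken, and the event path switches from rhashtable_lookup_fast() to plain rhashtable_lookup(). The difference is only who supplies the RCU protection; a sketch, assuming the surrounding driver types:

static struct nfp_bpf_neutral_map *
lookup_record(struct nfp_app_bpf *bpf, u64 *map_id)
{
	/* rhashtable_lookup_fast() takes the RCU read lock itself;
	 * plain rhashtable_lookup() relies on the caller's critical
	 * section, which nfp_bpf_event_output() already holds.
	 */
	return rhashtable_lookup(&bpf->maps_neutral, map_id,
				 nfp_bpf_maps_neutral_params);
}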
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 36f56eb4cbe2..e92ee510fd52 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -744,6 +744,17 @@ continue_subprog:
goto continue_subprog;
}
+static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
+ struct bpf_insn_aux_data *aux)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (aux[meta->n].zext_dst)
+ meta->flags |= FLAG_INSN_DO_ZEXT;
+ }
+}
+
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
@@ -784,6 +795,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return -EOPNOTSUPP;
}
+ nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
return 0;
}
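
For context: the verifier's zext_dst analysis marks exactly those instructions whose 32-bit destination must still be explicitly zero-extended (BPF defines 32-bit ALU results as zeroing the upper half). nfp_bpf_insn_flag_zext() copies that verdict into each instruction's meta flags, and the JIT's new wrp_zext() helper turns what used to be an unconditional high-half clear into a conditional one:

	/* before: emitted for every 32-bit result */
	wrp_immed(nfp_prog, reg_both(dst + 1), 0);

	/* after: emitted only when the verifier says the upper half
	 * may actually be observed
	 */
	if (meta->flags & FLAG_INSN_DO_ZEXT)
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);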
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
new file mode 100644
index 000000000000..71afd111bae3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#include <linux/bitops.h>
+
+#include "ccm.h"
+#include "nfp_app.h"
+#include "nfp_net.h"
+
+#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
+
+#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
+{
+ u16 used_tags;
+
+ used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
+
+ return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
+}
+
+static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
+{
+ /* CCM is used for request-reply FW communication. To make sure
+ * we don't reuse a message ID too early after a timeout, limit
+ * the number of requests in flight.
+ */
+ if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
+ ccm_warn(ccm->app, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
+ return ccm->tag_alloc_next++;
+}
+
+static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));
+
+ while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
+ ccm->tag_alloc_last != ccm->tag_alloc_next)
+ ccm->tag_alloc_last++;
+}
+
+static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&ccm->replies, skb) {
+ msg_tag = nfp_ccm_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_ccm_free_tag(ccm, tag);
+ __skb_unlink(skb, &ccm->replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *
+nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ if (!skb)
+ nfp_ccm_free_tag(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
+ enum nfp_ccm_type type, int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_ccm_reply(ccm, app, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(ccm->wq,
+ skb = nfp_ccm_reply(ccm, app,
+ tag),
+ msecs_to_jiffies(5000));
+ /* We didn't get a response - try one last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
+ if (err < 0) {
+ ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
+ err == ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
+
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size)
+{
+ struct nfp_app *app = ccm->app;
+ struct nfp_ccm_hdr *hdr;
+ int reply_type, tag;
+
+ nfp_ctrl_lock(app->ctrl);
+ tag = nfp_ccm_alloc_tag(ccm);
+ if (tag < 0) {
+ nfp_ctrl_unlock(app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(app, skb);
+
+ nfp_ctrl_unlock(app->ctrl);
+
+ skb = nfp_ccm_wait_reply(ccm, app, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ reply_type = nfp_ccm_get_type(skb);
+ if (reply_type != __NFP_CCM_REPLY(type)) {
+ ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ reply_type, __NFP_CCM_REPLY(type));
+ goto err_free;
+ }
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
+ type, skb->len, reply_size);
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
+{
+ struct nfp_app *app = ccm->app;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
+ ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(app->ctrl);
+
+ tag = nfp_ccm_get_tag(skb);
+ if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
+ ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&ccm->replies, skb);
+ wake_up_interruptible_all(&ccm->wq);
+
+ nfp_ctrl_unlock(app->ctrl);
+ return;
+
+err_unlock:
+ nfp_ctrl_unlock(app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
+{
+ ccm->app = app;
+ skb_queue_head_init(&ccm->replies);
+ init_waitqueue_head(&ccm->wq);
+ return 0;
+}
+
+void nfp_ccm_clean(struct nfp_ccm *ccm)
+{
+ WARN_ON(!skb_queue_empty(&ccm->replies));
+}
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
new file mode 100644
index 000000000000..a460c75522be
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CCM_H
+#define NFP_CCM_H 1
+
+#include <linux/bitmap.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+struct nfp_app;
+struct nfp_net;
+
+/* Firmware ABI */
+
+enum nfp_ccm_type {
+ NFP_CCM_TYPE_BPF_MAP_ALLOC = 1,
+ NFP_CCM_TYPE_BPF_MAP_FREE = 2,
+ NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3,
+ NFP_CCM_TYPE_BPF_MAP_UPDATE = 4,
+ NFP_CCM_TYPE_BPF_MAP_DELETE = 5,
+ NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
+ NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
+ NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ NFP_CCM_TYPE_CRYPTO_RESET = 9,
+ NFP_CCM_TYPE_CRYPTO_ADD = 10,
+ NFP_CCM_TYPE_CRYPTO_DEL = 11,
+ NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
+ __NFP_CCM_TYPE_MAX,
+};
+
+#define NFP_CCM_ABI_VERSION 1
+
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
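+
+/* Replies echo the request type with the reply bit set, e.g. the
+ * reply to NFP_CCM_TYPE_BPF_MAP_ALLOC (1) carries type 0x81.
+ */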
+
+struct nfp_ccm_hdr {
+ union {
+ struct {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+ };
+ __be32 raw;
+ };
+};
+
+static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return hdr->type;
+}
+
+static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return hdr->tag;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ return be16_to_cpu(__nfp_ccm_get_tag(skb));
+}
+
+#define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16)
+#define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0)
+
+enum nfp_ccm_mbox_tlv_type {
+ NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0,
+ NFP_NET_MBOX_TLV_TYPE_END = 1,
+ NFP_NET_MBOX_TLV_TYPE_MSG = 2,
+ NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3,
+ NFP_NET_MBOX_TLV_TYPE_RESV = 4,
+};
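+
+/* A TLV header packs the type into bits 31:16 and the value length
+ * into bits 15:0, e.g. a MSG TLV carrying a 12 byte cmsg has the
+ * header word 0x0002000c.
+ */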
+
+/* Implementation */
+
+/**
+ * struct nfp_ccm - common control message handling
+ * @app: APP handle
+ *
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @replies: received cmsg replies waiting to be consumed
+ * @wq: work queue for waiting for cmsg replies
+ */
+struct nfp_ccm {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head replies;
+ wait_queue_head_t wq;
+};
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
+void nfp_ccm_clean(struct nfp_ccm *ccm);
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size);
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn);
+void nfp_ccm_mbox_free(struct nfp_net *nn);
+int nfp_ccm_mbox_init(struct nfp_net *nn);
+void nfp_ccm_mbox_clean(struct nfp_net *nn);
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags);
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical);
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size);
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
new file mode 100644
index 000000000000..f0783aa9e66e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+
+#include "ccm.h"
+#include "nfp_net.h"
+
+/* CCM messages via the mailbox. CMSGs get wrapped into simple TLVs
+ * and copied into the mailbox. Multiple messages can be copied to
+ * form a batch. Threads come in with a CMSG formed in an skb, then
+ * enqueue that skb onto the request queue. If a thread's skb is first
+ * in the queue this thread will handle the mailbox operation. It
+ * copies up to 64 messages into the mailbox (making sure that both
+ * requests and replies will fit). After FW is done processing the
+ * batch it copies the data out and wakes the waiting threads.
+ * A waiting thread either gets its message completed (the response is
+ * copied into the same skb as the request, overwriting it), or
+ * becomes the first in the queue.
+ * Completions and next-to-run are signaled via the control buffer
+ * to limit potential cache line bounces.
+ */
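+
+/* Illustrative layout of a two-message batch in the mailbox (TLV
+ * headers are 4 bytes, values are rounded up to whole words):
+ *
+ * MSG TLV | request 0 | RESV TLV | room for reply 0 | MSG TLV |
+ * request 1 | END TLV
+ *
+ * The RESV TLV is only written when the expected reply is larger than
+ * the request (see nfp_ccm_mbox_copy_in()).
+ */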
+
+#define NFP_CCM_MBOX_BATCH_LIMIT 64
+#define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000)
+#define NFP_CCM_MAX_QLEN 1024
+
+enum nfp_net_mbox_cmsg_state {
+ NFP_NET_MBOX_CMSG_STATE_QUEUED,
+ NFP_NET_MBOX_CMSG_STATE_NEXT,
+ NFP_NET_MBOX_CMSG_STATE_BUSY,
+ NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
+ NFP_NET_MBOX_CMSG_STATE_DONE,
+};
+
+/**
+ * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
+ * @state: processing state (/stage) of the message
+ * @err: error encountered during processing if any
+ * @max_len: max(request_len, reply_len)
+ * @exp_reply: expected reply length (0 means don't validate)
+ * @posted: the message was posted and nobody waits for the reply
+ */
+struct nfp_ccm_mbox_cmsg_cb {
+ enum nfp_net_mbox_cmsg_state state;
+ int err;
+ unsigned int max_len;
+ unsigned int exp_reply;
+ bool posted;
+};
+
+static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
+{
+ return round_down(nn->tlv_caps.mbox_len, 4) -
+ NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
+ 4 * 2; /* Msg TLV plus End TLV headers */
+}
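+
+/* E.g. a 128 byte mailbox (size illustrative only) leaves
+ * 128 - 4 (command header) - 8 (Msg and End TLV headers) == 116 bytes
+ * for the largest single message.
+ */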
+
+static void
+nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
+ cb->err = 0;
+ cb->max_len = max_len;
+ cb->exp_reply = exp_reply;
+ cb->posted = false;
+}
+
+static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->max_len;
+}
+
+static bool nfp_ccm_mbox_done(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
+}
+
+static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
+ cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
+}
+
+static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->posted;
+}
+
+static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->posted = true;
+}
+
+static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
+{
+ return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
+}
+
+static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (!skb)
+ return;
+
+ cb = (void *)skb->cb;
+ cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
+ if (cb->posted)
+ queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
+}
+
+static void
+nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
+{
+ nn_writel(nn, off,
+ FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
+ FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
+}
+
+static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+ int reserve, i, cnt;
+ __be32 *data;
+ u32 off, len;
+
+ off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
+ skb->len);
+ off += 4;
+
+ /* Write data word by word; skb->data should be aligned */
+ data = (__be32 *)skb->data;
+ cnt = skb->len / 4;
+ for (i = 0; i < cnt; i++) {
+ nn_writel(nn, off, be32_to_cpu(data[i]));
+ off += 4;
+ }
+ if (skb->len & 3) {
+ __be32 tmp = 0;
+
+ memcpy(&tmp, &data[i], skb->len & 3);
+ nn_writel(nn, off, be32_to_cpu(tmp));
+ off += 4;
+ }
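+ /* E.g. a 10 byte message is written as two full words plus
+ * one word built from the remaining 2 bytes; the MSG TLV above
+ * carries the exact skb->len, so the zero padding is not part
+ * of the message.
+ */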
+
+ /* Reserve space if reply is bigger */
+ len = round_up(skb->len, 4);
+ reserve = nfp_ccm_mbox_maxlen(skb) - len;
+ if (reserve > 0) {
+ nfp_ccm_mbox_write_tlv(nn, off,
+ NFP_NET_MBOX_TLV_TYPE_RESV,
+ reserve);
+ off += 4 + reserve;
+ }
+
+ if (skb == last)
+ break;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
+}
+
+static struct sk_buff *
+nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ if (__nfp_ccm_get_tag(skb) == tag)
+ return skb;
+
+ if (skb == last)
+ return NULL;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+}
+
+static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ u8 __iomem *data, *end;
+ struct sk_buff *skb;
+
+ data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ end = data + nn->tlv_caps.mbox_len;
+
+ while (true) {
+ unsigned int length, offset, type;
+ struct nfp_ccm_hdr hdr;
+ u32 tlv_hdr;
+
+ tlv_hdr = readl(data);
+ type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
+ length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
+ offset = data - nn->dp.ctrl_bar;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (data + length > end) {
+ nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_END)
+ break;
+ if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
+ goto next_tlv;
+ if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
+ type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (length < 4) {
+ nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ hdr.raw = cpu_to_be32(readl(data));
+
+ skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
+ if (!skb) {
+ nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
+ be16_to_cpu(hdr.tag));
+ break;
+ }
+ cb = (void *)skb->cb;
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp,
+ "mailbox msg not supported type:%d\n",
+ nfp_ccm_get_type(skb));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
+ hdr.type,
+ __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (cb->exp_reply && length != cb->exp_reply) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
+ hdr.type, cb->exp_reply, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (length > cb->max_len) {
+ nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
+ hdr.type, cb->max_len, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (!cb->posted) {
+ __be32 *skb_data;
+ int i, cnt;
+
+ if (length <= skb->len)
+ __skb_trim(skb, length);
+ else
+ skb_put(skb, length - skb->len);
+
+ /* We overcopy here slightly, but that's okay,
+ * the skb is large enough, and the garbage will
+ * be ignored (beyond skb->len).
+ */
+ skb_data = (__be32 *)skb->data;
+ memcpy(skb_data, &hdr, 4);
+
+ cnt = DIV_ROUND_UP(length, 4);
+ for (i = 1; i < cnt; i++)
+ skb_data[i] = cpu_to_be32(readl(data + i * 4));
+ }
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
+next_tlv:
+ data += round_up(length, 4);
+ if (data + 4 > end) {
+ nn_dp_warn(&nn->dp,
+ "reached end of MBOX without END TLV\n");
+ break;
+ }
+ }
+
+ smp_wmb(); /* order the skb->data vs. cb->state */
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
+ cb->err = -ENOENT;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ }
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+
+ if (cb->posted) {
+ if (cb->err)
+ nn_dp_warn(&nn->dp,
+ "mailbox posted msg failed type:%u err:%d\n",
+ nfp_ccm_get_type(skb), cb->err);
+ dev_consume_skb_any(skb);
+ }
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void
+nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ cb->err = err;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct sk_buff *skb, *last;
+ int cnt, err;
+
+ space -= 4; /* for End TLV */
+
+ /* First skb must fit, because it's ours and we checked it fits */
+ cnt = 1;
+ last = skb = __skb_peek(&nn->mbox_cmsg.queue);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+
+ while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+ if (space < 0)
+ break;
+ last = skb;
+ nfp_ccm_mbox_set_busy(skb);
+ cnt++;
+ if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
+ break;
+ }
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ /* Now we own all skbs marked in progress; new requests may arrive
+ * at the end of the queue.
+ */
+
+ nn_ctrl_bar_lock(nn);
+
+ nfp_ccm_mbox_copy_in(nn, last);
+
+ err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, last);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, last, -EIO);
+
+ nn_ctrl_bar_unlock(nn);
+
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ if (cb->err)
+ dev_kfree_skb_any(skb);
+ return cb->err;
+}
+
+/* If the wait timed out but the command is already in progress we have
+ * to wait until it finishes. The runner has ownership of the skbs
+ * marked as busy.
+ */
+static int
+nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ bool was_first;
+
+ if (nfp_ccm_mbox_in_progress(skb)) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
+ smp_rmb(); /* pairs with smp_wmb() after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ was_first = nfp_ccm_mbox_should_run(nn, skb);
+ __skb_unlink(skb, &nn->mbox_cmsg.queue);
+ if (was_first)
+ nfp_ccm_mbox_mark_next_runner(nn);
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (was_first)
+ wake_up_all(&nn->mbox_cmsg.wq);
+
+ nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
+ type);
+ return -ETIMEDOUT;
+}
+
+static int
+nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size, unsigned int max_reply_size,
+ gfp_t flags)
+{
+ const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
+ unsigned int max_len;
+ ssize_t undersize;
+ int err;
+
+ if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
+ nn_dp_warn(&nn->dp,
+ "message type %d not supported by mailbox\n", type);
+ return -EINVAL;
+ }
+
+ /* If the reply size is unknown assume it will take the entire
+ * mailbox; callers should do their best to ensure this never
+ * happens.
+ */
+ if (!max_reply_size)
+ max_reply_size = mbox_max;
+ max_reply_size = round_up(max_reply_size, 4);
+
+ /* Make sure we can fit the entire reply into the skb,
+ * and that we don't have to slow down the mbox handler
+ * with allocations.
+ */
+ undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
+ if (undersize > 0) {
+ err = pskb_expand_head(skb, 0, undersize, flags);
+ if (err) {
+ nn_dp_warn(&nn->dp,
+ "can't allocate reply buffer for mailbox\n");
+ return err;
+ }
+ }
+
+ /* Make sure that request and response both fit into the mailbox */
+ max_len = max(max_reply_size, round_up(skb->len, 4));
+ if (max_len > mbox_max) {
+ nn_dp_warn(&nn->dp,
+ "message too big for tha mailbox: %u/%u vs %u\n",
+ skb->len, max_reply_size, mbox_max);
+ return -EMSGSIZE;
+ }
+
+ nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
+
+ return 0;
+}
+
+static int
+nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, bool critical)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ assert_spin_locked(&nn->mbox_cmsg.queue.lock);
+
+ if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+ nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
+ return -EBUSY;
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);
+
+ __skb_queue_tail(&nn->mbox_cmsg.queue, skb);
+
+ return 0;
+}
+
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
+ max_reply_size, GFP_KERNEL);
+ if (err)
+ goto err_free_skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
+ if (err)
+ goto err_unlock;
+
+ /* First in queue takes the mailbox lock and processes the batch */
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ bool to;
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ to = !wait_event_timeout(nn->mbox_cmsg.wq,
+ nfp_ccm_mbox_done(skb) ||
+ nfp_ccm_mbox_should_run(nn, skb),
+ msecs_to_jiffies(NFP_CCM_TIMEOUT));
+
+ /* fast path for those completed by another thread */
+ if (nfp_ccm_mbox_done(skb)) {
+ smp_rmb(); /* pairs with wmb after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ WARN_ON(!to);
+
+ err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
+ if (err)
+ goto err_free_skb;
+ return 0;
+ }
+ }
+
+ /* run queue expects the lock held */
+ nfp_ccm_mbox_run_queue_unlock(nn);
+ return nfp_ccm_mbox_skb_return(skb);
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size)
+{
+ return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
+ max_reply_size, false);
+}
+
+static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
+ !nfp_ccm_mbox_should_run(nn, skb))) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+ return;
+ }
+
+ nfp_ccm_mbox_run_queue_unlock(nn);
+}
+
+static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+ int err;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
+ /* Should never happen; it's unclear what to do here. */
+ goto exit_unlock_wake;
+
+ err = nfp_net_mbox_reconfig_wait_posted(nn);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, skb);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
+exit_unlock_wake:
+ nn_ctrl_bar_unlock(nn);
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
+ GFP_ATOMIC);
+ if (err)
+ goto err_free_skb;
+
+ nfp_ccm_mbox_mark_posted(skb);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
+ if (err)
+ goto err_unlock;
+
+ if (nfp_ccm_mbox_is_first(nn, skb)) {
+ if (nn_ctrl_bar_trylock(nn)) {
+ nfp_ccm_mbox_copy_in(nn, skb);
+ nfp_net_mbox_reconfig_post(nn,
+ NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ queue_work(nn->mbox_cmsg.workq,
+ &nn->mbox_cmsg.wait_work);
+ } else {
+ nfp_ccm_mbox_mark_next_runner(nn);
+ }
+ }
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags)
+{
+ unsigned int max_size;
+ struct sk_buff *skb;
+
+ if (!reply_size)
+ max_size = nfp_ccm_mbox_max_msg(nn);
+ else
+ max_size = max(req_size, reply_size);
+ max_size = round_up(max_size, 4);
+
+ skb = alloc_skb(max_size, flags);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, req_size);
+
+ return skb;
+}
+
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
+{
+ return nfp_ccm_mbox_max_msg(nn) >= size;
+}
+
+int nfp_ccm_mbox_init(struct nfp_net *nn)
+{
+ return 0;
+}
+
+void nfp_ccm_mbox_clean(struct nfp_net *nn)
+{
+ drain_workqueue(nn->mbox_cmsg.workq);
+}
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn)
+{
+ skb_queue_head_init(&nn->mbox_cmsg.queue);
+ init_waitqueue_head(&nn->mbox_cmsg.wq);
+ INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
+ INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);
+
+ nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
+ if (!nn->mbox_cmsg.workq)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfp_ccm_mbox_free(struct nfp_net *nn)
+{
+ destroy_workqueue(nn->mbox_cmsg.workq);
+ WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
+}
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
new file mode 100644
index 000000000000..60372ddf69f0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_H
+#define NFP_CRYPTO_H 1
+
+struct nfp_net_tls_offload_ctx {
+ __be32 fw_handle[2];
+
+ u8 rx_end[0];
+ /* Tx only fields follow - Rx side does not have enough driver state
+ * to fit these
+ */
+
+ u32 next_seq;
+};
+
+#ifdef CONFIG_TLS_DEVICE
+int nfp_net_tls_init(struct nfp_net *nn);
+#else
+static inline int nfp_net_tls_init(struct nfp_net *nn)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
new file mode 100644
index 000000000000..67413d946c4a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_FW_H
+#define NFP_CRYPTO_FW_H 1
+
+#include "../ccm.h"
+
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+
+struct nfp_crypto_reply_simple {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+};
+
+struct nfp_crypto_req_reset {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+};
+
+#define NFP_NET_TLS_IPVER GENMASK(15, 12)
+#define NFP_NET_TLS_VLAN GENMASK(11, 0)
+#define NFP_NET_TLS_VLAN_UNUSED 4095
+
+struct nfp_crypto_req_add_front {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ u8 key_len;
+ __be16 ipver_vlan __packed;
+ u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
+ u8 l3_addrs[0];
+};
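+
+/* key_len starts at NFP_NET_TLS_NON_ADDR_KEY_LEN and grows by the two
+ * L3 addresses, e.g. 8 + 2 * 4 == 16 for IPv4 and 8 + 2 * 16 == 40
+ * for IPv6 (see nfp_net_tls_set_ipv4()/nfp_net_tls_set_ipv6()).
+ */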
+
+struct nfp_crypto_req_add_back {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 key[8];
+ __be32 salt;
+ __be32 iv[2];
+ __be32 counter;
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+
+struct nfp_crypto_req_add_v4 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip;
+ __be32 dst_ip;
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_req_add_v6 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_reply_add {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_del {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_update {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ __be32 handle[2];
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
new file mode 100644
index 000000000000..96a96b35c0ca
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <net/tls.h>
+
+#include "../ccm.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+#include "fw.h"
+
+#define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
+ (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
+
+#define NFP_NET_TLS_OPCODE_MASK_RX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
+
+#define NFP_NET_TLS_OPCODE_MASK_TX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
+
+#define NFP_NET_TLS_OPCODE_MASK \
+ (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
+
+static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
+{
+ u32 off, val;
+
+ off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
+
+ val = nn_readl(nn, off);
+ if (on)
+ val |= BIT(opcode & 31);
+ else
+ val &= ~BIT(opcode & 31);
+ nn_writel(nn, off, val);
+}
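+
+/* The enable flags form a bitmap of 32-bit words, e.g. opcode 1 (the
+ * RX decrypt op) selects word 0, bit 1, while a hypothetical opcode 32
+ * would select bit 0 of the next word.
+ */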
+
+static bool
+__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 opcode;
+ int cnt;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ nn->ktls_tx_conn_cnt += add;
+ cnt = nn->ktls_tx_conn_cnt;
+ nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
+ } else {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ nn->ktls_rx_conn_cnt += add;
+ cnt = nn->ktls_rx_conn_cnt;
+ }
+
+ /* Care only about 0 -> 1 and 1 -> 0 transitions */
+ if (cnt > 1)
+ return false;
+
+ nfp_net_crypto_set_op(nn, opcode, cnt);
+ return true;
+}
+
+static int
+nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ int ret = 0;
+
+ /* Use the BAR lock to protect the connection counts */
+ nn_ctrl_bar_lock(nn);
+ if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ /* Undo the cnt adjustment if failed */
+ if (ret)
+ __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
+ }
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+static int
+nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
+}
+
+static int
+nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
+}
+
+static struct sk_buff *
+nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
+{
+ return nfp_ccm_mbox_msg_alloc(nn, req_sz,
+ sizeof(struct nfp_crypto_reply_simple),
+ flags);
+}
+
+static int
+nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
+ const char *name, enum nfp_ccm_type type)
+{
+ struct nfp_crypto_reply_simple *reply;
+ int err;
+
+ err = __nfp_ccm_mbox_communicate(nn, skb, type,
+ sizeof(*reply), sizeof(*reply),
+ type == NFP_CCM_TYPE_CRYPTO_DEL);
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
+ return err;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err)
+ nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
+ name, err);
+ dev_consume_skb_any(skb);
+
+ return err;
+}
+
+static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
+{
+ struct nfp_crypto_req_del *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ memcpy(req->handle, fw_handle, sizeof(req->handle));
+
+ nfp_net_tls_communicate_simple(nn, skb, "delete",
+ NFP_CCM_TYPE_CRYPTO_DEL);
+}
+
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+ front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+ struct nfp_crypto_req_add_front *front)
+{
+ u32 len;
+ u64 id;
+
+ id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+ len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
+ memcpy(front->l3_addrs, &id, sizeof(id));
+ memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
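+
+/* On TX the L3 address bytes carry a driver-generated connection ID
+ * instead of addresses, e.g. the 8 byte ID exactly fills the 2 * 4
+ * address bytes of an IPv4 request, while an IPv6 request has its
+ * remaining 24 bytes zeroed (len - sizeof(id) == 24).
+ */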
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+ struct sock *sk, int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ req->front.key_len += sizeof(__be32) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ req->src_ip = inet->inet_daddr;
+ req->dst_ip = inet->inet_saddr;
+ }
+
+ return &req->back;
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+ struct sock *sk, int direction)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ req->front.key_len += sizeof(struct in6_addr) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
+ }
+
+#endif
+ return &req->back;
+}
+
+static void
+nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
+ struct nfp_crypto_req_add_back *back, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ front->l4_proto = IPPROTO_TCP;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ back->src_port = 0;
+ back->dst_port = 0;
+ } else {
+ back->src_port = inet->inet_dport;
+ back->dst_port = inet->inet_sport;
+ }
+}
+
+static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
+{
+ switch (direction) {
+ case TLS_OFFLOAD_CTX_DIR_TX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ case TLS_OFFLOAD_CTX_DIR_RX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static bool
+nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 bit;
+
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ else
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ break;
+ default:
+ return false;
+ }
+
+ return nn->tlv_caps.crypto_ops & BIT(bit);
+}
+
+static int
+nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *tls_ci;
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_crypto_req_add_front *front;
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_add_back *back;
+ struct nfp_crypto_reply_add *reply;
+ struct sk_buff *skb;
+ size_t req_sz;
+ void *req;
+ bool ipv6;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
+ TLS_DRIVER_STATE_SIZE_TX);
+ BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
+ TLS_DRIVER_STATE_SIZE_RX);
+
+ if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
+ return -EOPNOTSUPP;
+
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ req_sz = sizeof(struct nfp_crypto_req_add_v6);
+ ipv6 = true;
+ break;
+ }
+#endif
+ /* fall through */
+ case AF_INET:
+ req_sz = sizeof(struct nfp_crypto_req_add_v4);
+ ipv6 = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_net_tls_conn_add(nn, direction);
+ if (err)
+ return err;
+
+ skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_conn_remove;
+ }
+
+ front = (void *)skb->data;
+ front->ep_id = 0;
+ front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
+ front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(front->resv, 0, sizeof(front->resv));
+
+ nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+ req = (void *)skb->data;
+ if (ipv6)
+ back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
+ else
+ back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
+
+ nfp_net_tls_set_l4(front, back, sk, direction);
+
+ back->counter = 0;
+ back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
+
+ tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
+ sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
+
+ /* Get an extra ref on the skb so we can wipe the key after */
+ skb_get(skb);
+
+ err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
+ sizeof(*reply), sizeof(*reply));
+ reply = (void *)skb->data;
+
+ /* We depend on the CCM MBOX code not reallocating the skb we sent,
+ * so we can clear the key material out of memory.
+ */
+ if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+ (u8 *)back > skb_end_pointer(skb)) &&
+ !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+ memzero_explicit(back, sizeof(*back));
+ dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+ err, direction == TLS_OFFLOAD_CTX_DIR_TX);
+ /* communicate frees skb on error */
+ goto err_conn_remove;
+ }
+
+ err = -be32_to_cpu(reply->error);
+ if (err) {
+ if (err == -ENOSPC) {
+ if (!atomic_fetch_inc(&nn->ktls_no_space))
+ nn_info(nn, "HW TLS table full\n");
+ } else {
+ nn_dp_warn(&nn->dp,
+ "failed to add TLS, FW replied: %d\n", err);
+ }
+ goto err_free_skb;
+ }
+
+ if (!reply->handle[0] && !reply->handle[1]) {
+ nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
+ err = -EINVAL;
+ goto err_fw_remove;
+ }
+
+ ntls = tls_driver_ctx(sk, direction);
+ memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ ntls->next_seq = start_offload_tcp_sn;
+ dev_consume_skb_any(skb);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return 0;
+
+ tls_offload_rx_resync_set_type(sk,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ return 0;
+
+err_fw_remove:
+ nfp_net_tls_del_fw(nn, reply->handle);
+err_free_skb:
+ dev_consume_skb_any(skb);
+err_conn_remove:
+ nfp_net_tls_conn_remove(nn, direction);
+ return err;
+}
+
+static void
+nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+
+ nfp_net_tls_conn_remove(nn, direction);
+
+ ntls = __tls_driver_ctx(tls_ctx, direction);
+ nfp_net_tls_del_fw(nn, ntls->fw_handle);
+}
+
+static int
+nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_update *req;
+ struct sk_buff *skb;
+ gfp_t flags;
+ int err;
+
+ flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
+ if (!skb)
+ return -ENOMEM;
+
+ ntls = tls_driver_ctx(sk, direction);
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(req->resv, 0, sizeof(req->resv));
+ memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ req->tcp_seq = cpu_to_be32(seq);
+ memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+ NFP_CCM_TYPE_CRYPTO_UPDATE);
+ if (err)
+ return err;
+ ntls->next_seq = seq;
+ } else {
+ nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ sizeof(struct nfp_crypto_reply_simple));
+ }
+
+ return 0;
+}
+
+static const struct tlsdev_ops nfp_net_tls_ops = {
+ .tls_dev_add = nfp_net_tls_add,
+ .tls_dev_del = nfp_net_tls_del,
+ .tls_dev_resync = nfp_net_tls_resync,
+};
+
+static int nfp_net_tls_reset(struct nfp_net *nn)
+{
+ struct nfp_crypto_req_reset *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+
+ return nfp_net_tls_communicate_simple(nn, skb, "reset",
+ NFP_CCM_TYPE_CRYPTO_RESET);
+}
+
+int nfp_net_tls_init(struct nfp_net *nn)
+{
+ struct net_device *netdev = nn->dp.netdev;
+ int err;
+
+ if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
+ return 0;
+
+ if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
+ NFP_NET_TLS_CCM_MBOX_OPS_MASK)
+ return 0;
+
+ if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
+ nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
+ nn->tlv_caps.mbox_len);
+ return 0;
+ }
+
+ err = nfp_net_tls_reset(nn);
+ if (err)
+ return err;
+
+ nn_ctrl_bar_lock(nn);
+ nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
+ err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ nn_ctrl_bar_unlock(nn);
+ if (err)
+ return err;
+
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ }
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ }
+
+ netdev->tlsdev_ops = &nfp_net_tls_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
new file mode 100644
index 000000000000..36491835ac65
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <net/devlink.h>
+
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_main.h"
+
+/**
+ * struct nfp_devlink_param_u8_arg - Devlink u8 parameter get/set arguments
+ * @hwinfo_name: HWinfo key name
+ * @default_hi_val: Default HWinfo value if HWinfo doesn't exist
+ * @invalid_dl_val: Devlink value to use if HWinfo is unknown/invalid.
+ * -errno if there is no unknown/invalid value available
+ * @hi_to_dl: HWinfo to devlink value mapping
+ * @dl_to_hi: Devlink to hwinfo value mapping
+ * @max_dl_val: Maximum devlink value supported, for validation only
+ * @max_hi_val: Maximum HWinfo value supported, for validation only
+ */
+struct nfp_devlink_param_u8_arg {
+ const char *hwinfo_name;
+ const char *default_hi_val;
+ int invalid_dl_val;
+ u8 hi_to_dl[4];
+ u8 dl_to_hi[4];
+ u8 max_dl_val;
+ u8 max_hi_val;
+};
+
+static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = {
+ [DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY] = {
+ .hwinfo_name = "app_fw_from_flash",
+ .default_hi_val = NFP_NSP_APP_FW_LOAD_DEFAULT,
+ .invalid_dl_val =
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN,
+ .hi_to_dl = {
+ [NFP_NSP_APP_FW_LOAD_DISK] =
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK,
+ [NFP_NSP_APP_FW_LOAD_FLASH] =
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH,
+ [NFP_NSP_APP_FW_LOAD_PREF] =
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER,
+ },
+ .dl_to_hi = {
+ [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER] =
+ NFP_NSP_APP_FW_LOAD_PREF,
+ [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH] =
+ NFP_NSP_APP_FW_LOAD_FLASH,
+ [DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK] =
+ NFP_NSP_APP_FW_LOAD_DISK,
+ },
+ .max_dl_val = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK,
+ .max_hi_val = NFP_NSP_APP_FW_LOAD_PREF,
+ },
+ [DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE] = {
+ .hwinfo_name = "abi_drv_reset",
+ .default_hi_val = NFP_NSP_DRV_RESET_DEFAULT,
+ .invalid_dl_val =
+ DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN,
+ .hi_to_dl = {
+ [NFP_NSP_DRV_RESET_ALWAYS] =
+ DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS,
+ [NFP_NSP_DRV_RESET_NEVER] =
+ DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER,
+ [NFP_NSP_DRV_RESET_DISK] =
+ DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK,
+ },
+ .dl_to_hi = {
+ [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS] =
+ NFP_NSP_DRV_RESET_ALWAYS,
+ [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER] =
+ NFP_NSP_DRV_RESET_NEVER,
+ [DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK] =
+ NFP_NSP_DRV_RESET_DISK,
+ },
+ .max_dl_val = DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK,
+ .max_hi_val = NFP_NSP_DRV_RESET_NEVER,
+ }
+};
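+
+/* These back the generic devlink parameters, e.g. (illustrative
+ * device address):
+ *
+ * devlink dev param set pci/0000:04:00.0 \
+ * name fw_load_policy value flash cmode permanent
+ */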
+
+static int
+nfp_devlink_param_u8_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ const struct nfp_devlink_param_u8_arg *arg;
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ long value;
+ int err;
+
+ if (id >= ARRAY_SIZE(nfp_devlink_u8_args))
+ return -EOPNOTSUPP;
+
+ arg = &nfp_devlink_u8_args[id];
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ nfp_warn(pf->cpp, "can't access NSP: %d\n", err);
+ return err;
+ }
+
+ snprintf(hwinfo, sizeof(hwinfo), "%s", arg->hwinfo_name);
+ err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
+ arg->default_hi_val);
+ if (err) {
+ nfp_warn(pf->cpp, "HWinfo lookup failed: %d\n", err);
+ goto exit_close_nsp;
+ }
+
+ err = kstrtol(hwinfo, 0, &value);
+ if (err || value < 0 || value > arg->max_hi_val) {
+ nfp_warn(pf->cpp, "HWinfo '%s' value %li invalid\n",
+ arg->hwinfo_name, value);
+
+ if (arg->invalid_dl_val >= 0)
+ ctx->val.vu8 = arg->invalid_dl_val;
+ else
+ err = arg->invalid_dl_val;
+
+ goto exit_close_nsp;
+ }
+
+ ctx->val.vu8 = arg->hi_to_dl[value];
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_devlink_param_u8_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ const struct nfp_devlink_param_u8_arg *arg;
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ int err;
+
+ if (id >= ARRAY_SIZE(nfp_devlink_u8_args))
+ return -EOPNOTSUPP;
+
+ arg = &nfp_devlink_u8_args[id];
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ nfp_warn(pf->cpp, "can't access NSP: %d\n", err);
+ return err;
+ }
+
+ /* Note the value has already been validated. */
+ snprintf(hwinfo, sizeof(hwinfo), "%s=%u",
+ arg->hwinfo_name, arg->dl_to_hi[ctx->val.vu8]);
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ if (err) {
+ nfp_warn(pf->cpp, "HWinfo set failed: %d\n", err);
+ goto exit_close_nsp;
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_devlink_param_u8_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct nfp_devlink_param_u8_arg *arg;
+
+ if (id >= ARRAY_SIZE(nfp_devlink_u8_args))
+ return -EOPNOTSUPP;
+
+ arg = &nfp_devlink_u8_args[id];
+
+ if (val.vu8 > arg->max_dl_val) {
+ NL_SET_ERR_MSG_MOD(extack, "parameter out of range");
+ return -EINVAL;
+ }
+
+ if (val.vu8 == arg->invalid_dl_val) {
+ NL_SET_ERR_MSG_MOD(extack, "unknown/invalid value specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param nfp_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ nfp_devlink_param_u8_get,
+ nfp_devlink_param_u8_set,
+ nfp_devlink_param_u8_validate),
+ DEVLINK_PARAM_GENERIC(RESET_DEV_ON_DRV_PROBE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ nfp_devlink_param_u8_get,
+ nfp_devlink_param_u8_set,
+ nfp_devlink_param_u8_validate),
+};
+
+static int nfp_devlink_supports_params(struct nfp_pf *pf)
+{
+ struct nfp_nsp *nsp;
+ bool supported;
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ dev_err(&pf->pdev->dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ supported = nfp_nsp_has_hwinfo_lookup(nsp) &&
+ nfp_nsp_has_hwinfo_set(nsp);
+
+ nfp_nsp_close(nsp);
+ return supported;
+}
+
+int nfp_devlink_params_register(struct nfp_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ int err;
+
+ err = nfp_devlink_supports_params(pf);
+ if (err <= 0)
+ return err;
+
+ err = devlink_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
+ if (err)
+ return err;
+
+ devlink_params_publish(devlink);
+ return 0;
+}
+
+void nfp_devlink_params_unregister(struct nfp_pf *pf)
+{
+ int err;
+
+ err = nfp_devlink_supports_params(pf);
+ if (err <= 0)
+ return;
+
+ devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/Makefile b/drivers/net/ethernet/netronome/nfp/flower/Makefile
deleted file mode 100644
index 805fa28f391a..000000000000
--- a/drivers/net/ethernet/netronome/nfp/flower/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index e336f6ee94f5..1b019fdfcd97 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,10 +2,12 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -25,6 +27,80 @@
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_mpls);
+ u32 mpls_lse = 0;
+
+ push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+ push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* BOS is optional in the TC action but required for offload. */
+ if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+ return -EOPNOTSUPP;
+ }
+
+ /* Leave the MPLS TC field at its default value of 0 if not explicitly set. */
+ if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+ mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+ /* Proto, label and TTL are enforced and verified for MPLS push. */
+ mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+ mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+ push_mpls->ethtype = act->mpls_push.proto;
+ push_mpls->lse = cpu_to_be32(mpls_lse);
+
+ return 0;
+}
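+
+/* The LSE built above follows the RFC 3032 layout
+ * (label:20 | TC:3 | BOS:1 | TTL:8), e.g. label 100, TC 0, BOS 1,
+ * TTL 64 yields 0x00064140.
+ */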
+
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+ pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+ pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_set_mpls);
+ u32 mpls_lse = 0, mpls_mask = 0;
+
+ set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+ set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+ mpls_mask |= MPLS_LS_LABEL_MASK;
+ }
+ if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+ mpls_mask |= MPLS_LS_TC_MASK;
+ }
+ if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+ mpls_mask |= MPLS_LS_S_MASK;
+ }
+ if (act->mpls_mangle.ttl) {
+ mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+ mpls_mask |= MPLS_LS_TTL_MASK;
+ }
+
+ set_mpls->lse = cpu_to_be32(mpls_lse);
+ set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -54,7 +130,8 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
- struct nfp_fl_payload *nfp_flow, int act_len)
+ struct nfp_fl_payload *nfp_flow, int act_len,
+ struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
struct nfp_fl_pre_lag *pre_lag;
@@ -65,8 +142,10 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
- if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+ if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
return -EOPNOTSUPP;
+ }
/* Pre_lag action must be first on action list.
* If other actions already exist they need pushed forward.
@@ -76,7 +155,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
nfp_flow->action_data, act_len);
pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
- err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+ err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
if (err)
return err;
@@ -93,7 +172,8 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
- enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
+ enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
+ bool pkt_host, struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -104,18 +184,24 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
out_dev = act->dev;
- if (!out_dev)
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
return -EOPNOTSUPP;
+ }
tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
if (tun_type) {
/* Verify the egress netdev matches the tunnel type. */
- if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
return -EOPNOTSUPP;
+ }
- if (*tun_out_cnt)
+ if (*tun_out_cnt) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
return -EOPNOTSUPP;
+ }
(*tun_out_cnt)++;
output->flags = cpu_to_be16(tmp_flags |
@@ -127,42 +213,87 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->flags = cpu_to_be16(tmp_flags);
gid = nfp_flower_lag_get_output_id(app, out_dev);
- if (gid < 0)
+ if (gid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
return gid;
+ }
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+ } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+ return -EOPNOTSUPP;
+ }
+
+ if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_flow->pre_tun_rule.dev = out_dev;
+
+ return 0;
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
- if (!netdev_port_same_parent_id(in_dev, out_dev))
+ if (!netdev_port_same_parent_id(in_dev, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
return -EOPNOTSUPP;
+ }
}
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ if (!nfp_netdev_is_nfp_repr(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
return -EOPNOTSUPP;
+ }
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ if (!output->port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
return -EOPNOTSUPP;
+ }
}
nfp_flow->meta.shortcut = output->port;
return 0;
}
+static bool
+nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+{
+ struct flow_action_entry *act = flow->rule->action.entries;
+ int num_act = flow->rule->action.num_entries;
+ int act_idx;
+
+ /* Preparse action list for next mirred or redirect action */
+ for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
+ if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
+ act[act_idx].id == FLOW_ACTION_MIRRED)
+ return netif_is_gretap(act[act_idx].dev);
+
+ return false;
+}
+
static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
- const struct flow_action_entry *act)
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+ struct flow_cls_offload *flow,
+ const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
+ /* Determine the tunnel type based on the egress netdev
+ * in the mirred action for tunnels without l4.
+ */
+ if (nfp_flower_tun_is_gre(flow, act_idx))
+ return NFP_FL_TUNNEL_GRE;
+
switch (tun->key.tp_dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
/* FALLTHROUGH */
@@ -194,7 +325,8 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
int opt_len, opt_cnt, act_start, tot_push_len;
@@ -212,20 +344,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
struct geneve_opt *opt = (struct geneve_opt *)src;
opt_cnt++;
- if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
return -EOPNOTSUPP;
+ }
tot_push_len += sizeof(struct nfp_fl_push_geneve) +
opt->length * 4;
- if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
}
- if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
act_start = *list_len;
*list_len += tot_push_len;
@@ -256,14 +394,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
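The option walk above is plain TLV accounting: each geneve option header's length field counts 4-byte words of option data, so every option adds sizeof(push-action header) + length * 4 bytes, bounded by a per-flow option count and a total action size. A self-contained sketch of that accounting (header layout simplified, limits illustrative):

#include <stdint.h>
#include <stddef.h>

struct geneve_opt_hdr {		/* simplified: the real length field is 5 bits */
	uint16_t opt_class;
	uint8_t type;
	uint8_t length;		/* option data length in 4-byte words */
};

#define MAX_OPT_CNT	6	/* stand-in for NFP_FL_MAX_GENEVE_OPT_CNT */
#define MAX_ACT_BYTES	196	/* stand-in for NFP_FL_MAX_GENEVE_OPT_ACT */
#define PUSH_HDR_BYTES	8	/* stand-in for sizeof(struct nfp_fl_push_geneve) */

/* Return the total bytes of push actions needed, or -1 if over limits. */
static int tot_geneve_push_len(const uint8_t *src, int opt_len)
{
	int opt_cnt = 0, tot = 0;

	while (opt_len > 0) {
		const struct geneve_opt_hdr *opt =
			(const struct geneve_opt_hdr *)src;
		int step = sizeof(*opt) + opt->length * 4;

		if (++opt_cnt > MAX_OPT_CNT)
			return -1;

		tot += PUSH_HDR_BYTES + opt->length * 4;
		if (tot > MAX_ACT_BYTES)
			return -1;

		opt_len -= step;
		src += step;
	}

	return tot;
}
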
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
- struct nfp_fl_set_ipv4_udp_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev)
+nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
@@ -275,8 +412,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
- !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
return -EOPNOTSUPP;
+ }
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -316,8 +455,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
+ }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -345,18 +486,22 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_eth *set_eth)
+ struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off + 4 > ETH_ALEN * 2)
+ if (off + 4 > ETH_ALEN * 2) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
&set_eth->eth_addr_mask[off]);
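All of the pedit setters above share the same two steps: invert the TC-supplied mask, reject any exact bit that falls outside it, then fold the accepted 32-bit word into accumulated value/mask bytes so repeated pedits over the same field compose. A standalone sketch of that merge (the helper is re-derived from its call sites here, not the exact driver body):

#include <stdint.h>
#include <string.h>

/* Fold one 32-bit set-field into accumulated exact/mask bytes: masked
 * bits take the new value, unmasked bits keep whatever was there.
 */
static void set_helper32(uint32_t value, uint32_t mask,
			 uint8_t *p_exact, uint8_t *p_mask)
{
	uint32_t oldval, oldmask;

	memcpy(&oldval, p_exact, sizeof(oldval));
	memcpy(&oldmask, p_mask, sizeof(oldmask));

	value = (value & mask) | (oldval & ~mask);
	oldmask |= mask;

	memcpy(p_exact, &value, sizeof(value));
	memcpy(p_mask, &oldmask, sizeof(oldmask));
}

static int set_field_word(uint32_t tc_val, uint32_t tc_mask,
			  uint8_t *exact, uint8_t *mask)
{
	uint32_t m = ~tc_mask;	/* TC hands us an inverted mask */

	if (tc_val & ~m)	/* exact bits outside the mask: reject */
		return -1;

	set_helper32(tc_val, m, exact, mask);
	return 0;
}
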
@@ -377,7 +522,8 @@ struct ipv4_ttl_word {
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ip4_addrs *set_ip_addr,
- struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
+ struct netlink_ext_ack *extack)
{
struct ipv4_ttl_word *ttl_word_mask;
struct ipv4_ttl_word *ttl_word;
@@ -389,8 +535,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
return -EOPNOTSUPP;
+ }
switch (off) {
case offsetof(struct iphdr, daddr):
@@ -413,8 +561,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
ttl_word_mask = (struct ipv4_ttl_word *)&mask;
ttl_word = (struct ipv4_ttl_word *)&exact;
- if (ttl_word_mask->protocol || ttl_word_mask->check)
+ if (ttl_word_mask->protocol || ttl_word_mask->check) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
@@ -429,8 +579,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
tos_word = (struct iphdr *)&exact;
if (tos_word_mask->version || tos_word_mask->ihl ||
- tos_word_mask->tot_len)
+ tos_word_mask->tot_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
@@ -441,6 +593,7 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
NFP_FL_LW_SIZ;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
return -EOPNOTSUPP;
}
@@ -468,7 +621,8 @@ struct ipv6_hop_limit_word {
static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
struct ipv6_hop_limit_word *fl_hl_mask;
struct ipv6_hop_limit_word *fl_hl;
@@ -478,8 +632,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
fl_hl = (struct ipv6_hop_limit_word *)&exact;
- if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
@@ -488,8 +644,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK)
+ exact & ~IPV6_FLOW_LABEL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_label_mask |= mask;
ip_hl_fl->ipv6_label &= ~mask;
@@ -507,7 +665,8 @@ static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
struct nfp_fl_set_ipv6_addr *ip_src,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
__be32 exact, mask;
int err = 0;
@@ -517,12 +676,14 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
return -EOPNOTSUPP;
+ }
if (off < offsetof(struct ipv6hdr, saddr)) {
err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
- ip_hl_fl);
+ ip_hl_fl, extack);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -533,6 +694,7 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
return -EOPNOTSUPP;
}
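The IPv6 dispatch above is driven purely by the pedit word's byte offset into struct ipv6hdr: anything before saddr is tc/hop-limit/flow-label, then four 32-bit saddr words, then four daddr words. A small program showing the offset arithmetic against a simplified header layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ipv6hdr_s {	/* simplified stand-in, 40 bytes like the real one */
	uint32_t vtc_flow;
	uint16_t payload_len;
	uint8_t nexthdr;
	uint8_t hop_limit;
	uint32_t saddr[4];
	uint32_t daddr[4];
};

int main(void)
{
	unsigned int off;

	for (off = 0; off < sizeof(struct ipv6hdr_s); off += 4) {
		if (off < offsetof(struct ipv6hdr_s, saddr))
			printf("off %2u -> tc/hop limit/flow label\n", off);
		else if (off < offsetof(struct ipv6hdr_s, daddr))
			printf("off %2u -> saddr word %zu\n", off,
			       (off - offsetof(struct ipv6hdr_s, saddr)) / 4);
		else
			printf("off %2u -> daddr word %zu\n", off,
			       (off - offsetof(struct ipv6hdr_s, daddr)) / 4);
	}

	return 0;
}
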
@@ -541,18 +703,23 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_tport *set_tport, int opcode)
+ struct nfp_fl_set_tport *set_tport, int opcode,
+ struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off)
+ if (off) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
set_tport->tp_port_mask);
@@ -582,60 +749,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
}
-static int
-nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
- char *nfp_action, int *a_len, u32 *csum_updated)
-{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct nfp_flower_pedit_acts {
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
- enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
+};
+
+static int
+nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
+ int *a_len, struct nfp_flower_pedit_acts *set_act,
+ u32 *csum_updated)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
- u32 offset;
- int err;
-
- memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
- memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
- memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
- memset(&set_ip6_src, 0, sizeof(set_ip6_src));
- memset(&set_ip_addr, 0, sizeof(set_ip_addr));
- memset(&set_tport, 0, sizeof(set_tport));
- memset(&set_eth, 0, sizeof(set_eth));
-
- htype = act->mangle.htype;
- offset = act->mangle.offset;
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(act, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -644,77 +774,82 @@ nfp_fl_pedit(const struct flow_action_entry *act,
ip_proto = match.key->ip_proto;
}
- if (set_eth.head.len_lw) {
- act_size = sizeof(set_eth);
- memcpy(nfp_action, &set_eth, act_size);
+ if (set_act->set_eth.head.len_lw) {
+ act_size = sizeof(set_act->set_eth);
+ memcpy(nfp_action, &set_act->set_eth, act_size);
*a_len += act_size;
}
- if (set_ip_ttl_tos.head.len_lw) {
+
+ if (set_act->set_ip_ttl_tos.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_ttl_tos);
- memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ act_size = sizeof(set_act->set_ip_ttl_tos);
+ memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip_addr.head.len_lw) {
+
+ if (set_act->set_ip_addr.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_addr);
- memcpy(nfp_action, &set_ip_addr, act_size);
+ act_size = sizeof(set_act->set_ip_addr);
+ memcpy(nfp_action, &set_act->set_ip_addr, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_tc_hl_fl.head.len_lw) {
+
+ if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_tc_hl_fl);
- memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+
+ if (set_act->set_ip6_dst.head.len_lw &&
+ set_act->set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action;
* the hardware requires this to be 2 separate actions.
*/
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
- act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+ &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw) {
+ } else if (set_act->set_ip6_dst.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(nfp_action, &set_ip6_dst, act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_src.head.len_lw) {
+ } else if (set_act->set_ip6_src.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_tport.head.len_lw) {
+ if (set_act->set_tport.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_tport);
- memcpy(nfp_action, &set_tport, act_size);
+ act_size = sizeof(set_act->set_tport);
+ memcpy(nfp_action, &set_act->set_tport, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
@@ -725,11 +860,47 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
+ struct netlink_ext_ack *extack)
+{
+ enum flow_action_mangle_base htype;
+ u32 offset;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+ &set_act->set_ip_ttl_tos, extack);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+ &set_act->set_ip6_src,
+ &set_act->set_ip6_tc_hl_fl, extack);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP, extack);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP, extack);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_output_action(struct nfp_app *app,
+ const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated, bool pkt_host,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
@@ -738,15 +909,19 @@ nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *ac
/* If csum_updated has not been reset by now, it means HW will
* incorrectly update csums when they are not requested.
*/
- if (*csum_updated)
+ if (*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
return -EOPNOTSUPP;
+ }
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
return -EOPNOTSUPP;
+ }
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt);
+ tun_out_cnt, pkt_host, extack);
if (err)
return err;
@@ -756,11 +931,13 @@ nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *ac
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
- prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
- if (prelag_size < 0)
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
+ if (prelag_size < 0) {
return prelag_size;
- else if (prelag_size > 0 && (!last || *out_cnt))
+ } else if (prelag_size > 0 && (!last || *out_cnt)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
return -EOPNOTSUPP;
+ }
*a_len += prelag_size;
}
@@ -771,39 +948,51 @@ nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *ac
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
+ struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_udp_tun *set_tun;
+ struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_pop_mpls *pop_m;
+ struct nfp_fl_set_mpls *set_m;
int err;
switch (act->id) {
case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
break;
+ case FLOW_ACTION_REDIRECT_INGRESS:
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
case FLOW_ACTION_VLAN_POP:
- if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
return -EOPNOTSUPP;
+ }
pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
@@ -812,8 +1001,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*a_len += sizeof(struct nfp_fl_pop_vlan);
break;
case FLOW_ACTION_VLAN_PUSH:
- if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
return -EOPNOTSUPP;
+ }
psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
@@ -824,35 +1016,41 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
- if (*tun_type == NFP_FL_TUNNEL_NONE)
+ *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+ if (*tun_type == NFP_FL_TUNNEL_NONE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
+ }
- if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
return -EOPNOTSUPP;
+ }
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
+ }
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
if (err)
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev);
+ err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
+ *tun_type, netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
@@ -860,34 +1058,124 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated))
+ a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
/* csum action requests recalc of something we have not fixed */
- if (act->csum_flags & ~*csum_updated)
+ if (act->csum_flags & ~*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
return -EOPNOTSUPP;
+ }
/* If we will correctly fix the csum we can remove it from the
* csum update list, which is later used to check support.
*/
*csum_updated &= ~act->csum_flags;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (*a_len +
+ sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ err = nfp_fl_push_mpls(psh_m, act, extack);
+ if (err)
+ return err;
+ *a_len += sizeof(struct nfp_fl_push_mpls);
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_pop_mpls(pop_m, act);
+ *a_len += sizeof(struct nfp_fl_pop_mpls);
+ break;
+ case FLOW_ACTION_MPLS_MANGLE:
+ if (*a_len +
+ sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_set_mpls(set_m, act);
+ *a_len += sizeof(struct nfp_fl_set_mpls);
+ break;
+ case FLOW_ACTION_PTYPE:
+ /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+ if (act->ptype != PACKET_HOST)
+ return -EOPNOTSUPP;
+
+ *pkt_host = true;
+ break;
default:
/* Currently we do not handle any other actions. */
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
return -EOPNOTSUPP;
}
return 0;
}
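One invariant threads through the whole loop above: csum_updated records which checksums the hardware will already rewrite. Set actions OR bits in, FLOW_ACTION_CSUM may only request (and then clears) bits that are present, and a later output action must see the map empty or the offload is refused. A compressed sketch of that bookkeeping, with illustrative flag values:

#include <stdint.h>

#define CSUM_IPV4HDR	(1u << 0)	/* illustrative flag values */
#define CSUM_TCP	(1u << 1)
#define CSUM_UDP	(1u << 2)

/* A set-IPv4 action: hardware fixes the IPv4 and L4 checksums anyway. */
static void on_set_ip4(uint32_t *csum_updated, uint8_t ip_proto)
{
	*csum_updated |= CSUM_IPV4HDR;
	if (ip_proto == 6)		/* TCP */
		*csum_updated |= CSUM_TCP;
	else if (ip_proto == 17)	/* UDP */
		*csum_updated |= CSUM_UDP;
}

/* A csum action may only ask for recalcs we have already fixed. */
static int on_csum(uint32_t *csum_updated, uint32_t requested)
{
	if (requested & ~*csum_updated)
		return -1;		/* recalc of something untouched */
	*csum_updated &= ~requested;	/* now explicitly requested */
	return 0;
}

/* At output, leftover bits mean HW would update unrequested csums. */
static int on_output(uint32_t csum_updated)
{
	return csum_updated ? -1 : 0;
}
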
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry prev_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == 0)
+ return true;
+
+ prev_act = flow_act->entries[current_act_idx - 1];
+
+ return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry next_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == flow_act->num_entries - 1)
+ return true;
+
+ next_act = flow_act->entries[current_act_idx + 1];
+
+ return next_act.id != FLOW_ACTION_MANGLE;
+}
+
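Together the two predicates bracket every run of consecutive mangle actions: the scratch pedit struct is zeroed at a run's first entry and flushed to the action list after its last. A minimal driver loop over a hypothetical action-id array showing where the boundaries fall (note the end test compares against the last valid index, hence n - 1):

#include <stdio.h>
#include <string.h>

enum { ACT_MANGLE, ACT_OUTPUT };

static int is_mangle_start(const int *ids, int i)
{
	return ids[i] == ACT_MANGLE && (i == 0 || ids[i - 1] != ACT_MANGLE);
}

static int is_mangle_end(const int *ids, int n, int i)
{
	return ids[i] == ACT_MANGLE && (i == n - 1 || ids[i + 1] != ACT_MANGLE);
}

int main(void)
{
	int ids[] = { ACT_MANGLE, ACT_MANGLE, ACT_OUTPUT, ACT_MANGLE };
	int n = sizeof(ids) / sizeof(ids[0]);
	char scratch[64];
	int i;

	for (i = 0; i < n; i++) {
		if (is_mangle_start(ids, i))
			memset(scratch, 0, sizeof(scratch));	/* new run */
		/* ... accumulate action i into scratch here ... */
		if (is_mangle_end(ids, n, i))
			printf("commit mangle run ending at index %d\n", i);
	}

	return 0;
}
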
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+ struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
+ bool pkt_host = false;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -899,12 +1187,19 @@ int nfp_flower_compile_action(struct nfp_app *app,
out_cnt = 0;
flow_action_for_each(i, act, &flow->rule->action) {
+ if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated);
+ &out_cnt, &csum_updated,
+ &set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
+ if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+ nfp_fl_commit_mangle(flow,
+ &nfp_flow->action_data[act_len],
+ &act_len, &set_act, &csum_updated);
}
/* We optimise when the action list is small, this can unfortunately
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cf9e1118ee8f..05981b54eaab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
rtnl_lock();
rcu_read_lock();
- netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!netdev) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
msg = nfp_flower_cmsg_get_data(skb);
rcu_read_lock();
- exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!exists) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -205,6 +205,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+ struct nfp_flower_cmsg_merge_hint *msg;
+ struct nfp_fl_payload *sub_flows[2];
+ int err, i, flow_cnt;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ /* msg->count is zero-based, so the message always carries count + 1 entries. */
+ flow_cnt = msg->count + 1;
+
+ if (msg_len < struct_size(msg, flow, flow_cnt)) {
+ nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
+ msg_len, struct_size(msg, flow, flow_cnt));
+ return;
+ }
+
+ if (flow_cnt != 2) {
+ nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
+ flow_cnt);
+ return;
+ }
+
+ rtnl_lock();
+ for (i = 0; i < flow_cnt; i++) {
+ u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+
+ sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+ if (!sub_flows[i]) {
+ nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+ goto err_rtnl_unlock;
+ }
+ }
+
+ err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
+ /* Only warn on memory fail. Hint veto will not break functionality. */
+ if (err == -ENOMEM)
+ nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+
+err_rtnl_unlock:
+ rtnl_unlock();
+}
+
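The length check above guards the trailing flexible array: the message must be at least header plus count records long before any flow[i] is dereferenced. A userspace sketch of the same guard over a raw buffer (record layout assumed from the cmsg struct later in this diff):

#include <stdint.h>
#include <stddef.h>

struct merge_hint {
	uint8_t reserved[3];
	uint8_t count;		/* zero-based: count + 1 records follow */
	struct {
		uint32_t host_ctx;
		uint64_t host_cookie;
	} __attribute__((packed)) flow[];
};

/* Return the number of flow records, or -1 if the buffer is short. */
static int merge_hint_flows(const void *buf, size_t len)
{
	const struct merge_hint *msg = buf;
	size_t need;
	int cnt;

	if (len < sizeof(*msg))
		return -1;

	cnt = msg->count + 1;
	need = sizeof(*msg) + (size_t)cnt * sizeof(msg->flow[0]);
	if (len < need)
		return -1;	/* would read past the message */

	return cnt;
}
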
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *app_priv = app->priv;
@@ -216,18 +260,24 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
type = cmsg_hdr->type;
switch (type) {
- case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
- nfp_flower_cmsg_portreify_rx(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ nfp_flower_cmsg_merge_hint_rx(app, skb);
+ break;
+ }
+ goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+ nfp_flower_stats_rlim_reply(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
@@ -235,6 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
/* fall through */
default:
+err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
@@ -274,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
struct nfp_flower_priv *priv = app->priv;
struct sk_buff_head *skb_head;
- if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
- type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+ if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
skb_head = &priv->cmsg_skbs_high;
else
skb_head = &priv->cmsg_skbs_low;
@@ -314,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+ /* Handle REIFY acks outside wq to prevent RTNL conflict. */
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ dev_consume_skb_any(skb);
} else {
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 0ed51e79db00..7eb2ec8969c3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/gre.h>
#include <net/vxlan.h>
#include "../nfp_app.h"
@@ -22,6 +23,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
+#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
@@ -37,6 +39,9 @@
#define NFP_FL_IP_FRAG_FIRST BIT(7)
#define NFP_FL_IP_FRAGMENTED BIT(6)
+/* GRE Tunnel flags */
+#define NFP_FL_GRE_FLAG_KEY BIT(2)
+
/* Compressed HW representation of TCP Flags */
#define NFP_FL_TCP_FLAG_URG BIT(4)
#define NFP_FL_TCP_FLAG_PSH BIT(3)
@@ -63,8 +68,11 @@
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
+#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
@@ -107,6 +115,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_GRE = 1,
NFP_FL_TUNNEL_VXLAN = 2,
NFP_FL_TUNNEL_GENEVE = 4,
};
@@ -203,7 +212,7 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_ipv4_udp_tun {
+struct nfp_fl_set_ipv4_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -211,7 +220,8 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 tun_flags;
u8 ttl;
u8 tos;
- __be32 extra;
+ __be16 outer_vlan_tpid;
+ __be16 outer_vlan_tci;
u8 tun_len;
u8 res2;
__be16 tun_proto;
@@ -226,6 +236,24 @@ struct nfp_fl_push_geneve {
u8 opt_data[];
};
+struct nfp_fl_push_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+ __be32 lse;
+};
+
+struct nfp_fl_pop_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+};
+
+struct nfp_fl_set_mpls {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 lse_mask;
+ __be32 lse;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -354,6 +382,16 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+struct nfp_flower_tun_ipv4 {
+ __be32 src;
+ __be32 dst;
+};
+
+struct nfp_flower_tun_ip_ext {
+ u8 tos;
+ u8 ttl;
+};
+
/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
@@ -371,15 +409,42 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4_udp_tun {
- __be32 ip_src;
- __be32 ip_dst;
+ struct nfp_flower_tun_ipv4 ipv4;
__be16 reserved1;
- u8 tos;
- u8 ttl;
+ struct nfp_flower_tun_ip_ext ip_ext;
__be32 reserved2;
__be32 tun_id;
};
+/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct nfp_flower_ipv4_gre_tun {
+ struct nfp_flower_tun_ipv4 ipv4;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
+
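The diagram pins the GRE tunnel key at six 32-bit words (24 bytes); a compile-time size check against an equivalent plain-C layout makes that invariant explicit (a sketch, using fixed-width stand-ins for the __be types):

#include <stdint.h>
#include <assert.h>

struct ipv4_gre_tun_s {
	uint32_t ip_src;	/* nfp_flower_tun_ipv4 */
	uint32_t ip_dst;
	uint16_t tun_flags;
	uint8_t tos;		/* nfp_flower_tun_ip_ext */
	uint8_t ttl;
	uint16_t reserved1;
	uint16_t ethertype;
	uint32_t tun_key;
	uint32_t reserved2;
};

/* 6 words / 24 bytes, matching the cmsg diagram above. */
static_assert(sizeof(struct ipv4_gre_tun_s) == 24, "GRE tun key must be 24B");
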
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -402,11 +467,13 @@ struct nfp_flower_cmsg_hdr {
/* Types defined for port related control messages */
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
@@ -414,6 +481,10 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
@@ -451,6 +522,16 @@ struct nfp_flower_cmsg_portreify {
#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_MERGE_HINT */
+struct nfp_flower_cmsg_merge_hint {
+ u8 reserved[3];
+ u8 count;
+ struct {
+ __be32 host_ctx;
+ __be64 host_cookie;
+ } __packed flow[0];
+};
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -473,6 +554,13 @@ enum nfp_flower_cmsg_port_vnic_type {
#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
+static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+}
+
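Internal ports reuse the physical-port-number field of the cmsg port id with port type OTHER_PORT, so one 32-bit id both encodes and later identifies such a port. A plain-C sketch of the pack/unpack pair; the exact bit placement of the type field is not shown in this hunk, so the masks here are illustrative:

#include <stdint.h>

#define PORT_TYPE_SHIFT		28		/* illustrative placement */
#define PORT_TYPE_MASK		(0xfu << PORT_TYPE_SHIFT)
#define PHYS_PORT_NUM_MASK	0xffu		/* GENMASK(7, 0) above */
#define PORT_TYPE_OTHER_PORT	4u

static uint32_t internal_port_id(uint8_t port)
{
	return (PORT_TYPE_OTHER_PORT << PORT_TYPE_SHIFT) |
	       (port & PHYS_PORT_NUM_MASK);
}

static int decode_internal_port(uint32_t port_id, uint8_t *port)
{
	if (((port_id & PORT_TYPE_MASK) >> PORT_TYPE_SHIFT) !=
	    PORT_TYPE_OTHER_PORT)
		return 0;	/* not an internal port id */

	*port = port_id & PHYS_PORT_NUM_MASK;
	return 1;
}
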
static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
{
return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
@@ -508,6 +596,8 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_gretap(netdev))
+ return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
@@ -524,6 +614,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_geneve(netdev))
return true;
+ if (netif_is_gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 5db838f45694..63907aeb3884 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -156,7 +156,8 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act)
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
@@ -167,6 +168,7 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
master);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 408089133599..d8ad9346a26a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -22,6 +22,9 @@
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
+#define NFP_MIN_INT_PORT_ID 1
+#define NFP_MAX_INT_PORT_ID 256
+
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+ struct net_device *netdev)
+{
+ struct net_device *entry;
+ int i, id = 0;
+
+ rcu_read_lock();
+ idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+ if (entry == netdev) {
+ id = i;
+ break;
+ }
+ rcu_read_unlock();
+
+ return id;
+}
+
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (id > 0)
+ return id;
+
+ idr_preload(GFP_ATOMIC);
+ spin_lock_bh(&priv->internal_ports.lock);
+ id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+ NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+ spin_unlock_bh(&priv->internal_ports.lock);
+ idr_preload_end();
+
+ return id;
+}
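
Id management above is a lookup-then-allocate pattern: reuse the id already mapped to this netdev, otherwise grab the lowest free id in [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID) under the lock. A sketch with a fixed pointer table standing in for the IDR (no locking, single-threaded illustration only):

#include <stddef.h>

#define MIN_ID	1
#define MAX_ID	256

static const void *port_map[MAX_ID];	/* id -> netdev, NULL when free */

static int lookup_port_id(const void *netdev)
{
	int id;

	for (id = MIN_ID; id < MAX_ID; id++)
		if (port_map[id] == netdev)
			return id;

	return 0;	/* not mapped */
}

static int get_port_id(const void *netdev)
{
	int id = lookup_port_id(netdev);

	if (id > 0)
		return id;

	for (id = MIN_ID; id < MAX_ID; id++)
		if (!port_map[id]) {	/* lowest free slot, like idr_alloc */
			port_map[id] = netdev;
			return id;
		}

	return -1;	/* table full */
}
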
+
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ int ext_port;
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ return nfp_repr_get_port_id(netdev);
+ } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+ ext_port = nfp_flower_get_internal_port_id(app, netdev);
+ if (ext_port < 0)
+ return 0;
+
+ return nfp_flower_internal_port_get_port_id(ext_port);
+ }
+
+ return 0;
+}
+
+static struct net_device *
+nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+
+ rcu_read_lock();
+ netdev = idr_find(&priv->internal_ports.port_ids, port_id);
+ rcu_read_unlock();
+
+ return netdev;
+}
+
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (!id)
+ return;
+
+ spin_lock_bh(&priv->internal_ports.lock);
+ idr_remove(&priv->internal_ports.port_ids, id);
+ spin_unlock_bh(&priv->internal_ports.lock);
+}
+
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ if (event == NETDEV_UNREGISTER &&
+ nfp_flower_internal_port_can_offload(app, netdev))
+ nfp_flower_free_internal_port_id(app, netdev);
+
+ return NOTIFY_OK;
+}
+
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+ spin_lock_init(&priv->internal_ports.lock);
+ idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+ idr_destroy(&priv->internal_ports.port_ids);
+}
+
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
}
static struct net_device *
-nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type repr_type;
struct nfp_reprs *reprs;
u8 port = 0;
+ /* Check if the port is internal. */
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+ if (redir_egress)
+ *redir_egress = true;
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+ return nfp_flower_get_netdev_from_internal_port_id(app, port);
+ }
+
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
if (repr_type > NFP_REPR_TYPE_MAX)
return NULL;
@@ -281,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
if (!repr_priv) {
err = -ENOMEM;
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -294,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
port = nfp_port_alloc(app, port_type, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
+ kfree(repr_priv);
nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -314,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
+ kfree(repr_priv);
nfp_port_free(port);
nfp_repr_free(repr);
goto err_reprs_clean;
@@ -396,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
if (!repr_priv) {
err = -ENOMEM;
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -406,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
+ kfree(repr_priv);
nfp_repr_free(repr);
goto err_reprs_clean;
}
err = nfp_port_init_phy_port(app->pf, app, port, i);
if (err) {
+ kfree(repr_priv);
nfp_port_free(port);
nfp_repr_free(repr);
goto err_reprs_clean;
@@ -423,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
+ kfree(repr_priv);
nfp_port_free(port);
nfp_repr_free(repr);
goto err_reprs_clean;
@@ -641,11 +767,34 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+ /* Tell the firmware that the driver supports flow merging. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_merge_hint_enable", 1);
+ if (!err) {
+ app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
+ nfp_flower_internal_port_init(app_priv);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
+ } else {
+ goto err_lag_clean;
+ }
+ } else {
+ nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+ }
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_init(app);
+
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
+ app_priv->pre_tun_rule_cnt = 0;
return 0;
+err_lag_clean:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
@@ -661,9 +810,15 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_cleanup(app);
+
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ nfp_flower_internal_port_cleanup(app_priv);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -762,6 +917,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
if (ret & NOTIFY_STOP_MASK)
return ret;
+ ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
@@ -800,7 +959,7 @@ const struct nfp_app_type app_flower = {
.sriov_disable = nfp_flower_sriov_disable,
.eswitch_mode_get = eswitch_mode_get,
- .repr_get = nfp_flower_repr_get,
+ .dev_get = nfp_flower_dev_get,
.setup_tc = nfp_flower_setup_tc,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c0945a5fd1a4..31d94592a7c0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -5,6 +5,7 @@
#define __NFP_FLOWER_H__ 1
#include "cmsg.h"
+#include "../nfp_net.h"
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
@@ -34,14 +35,15 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
-#define NFP_FL_VXLAN_PORT 4789
-#define NFP_FL_GENEVE_PORT 6081
-
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
+#define NFP_FL_FEATS_VF_RLIM BIT(4)
+#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
+#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
@@ -118,6 +120,16 @@ struct nfp_fl_lag {
};
/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids: Assignment of ids to any additional ports
+ * @lock: Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+ struct idr port_ids;
+ spinlock_t lock;
+};
+
+/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
* @nn: Pointer to vNIC
@@ -131,6 +143,7 @@ struct nfp_fl_lag {
* @flow_table: Hash table used to store flower rules
* @stats: Stored stats updates for flower rules
* @stats_lock: Lock for flower rule stats updates
+ * @stats_ctx_table: Hash table to map stats contexts to their flow rule
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -146,6 +159,11 @@ struct nfp_fl_lag {
* @non_repr_priv: List of offloaded non-repr ports and their priv data
* @active_mem_unit: Current active memory unit for flower rules
* @total_mem_units: Total number of available memory units for flower rules
+ * @internal_ports: Internal port ids used in offloaded rules
+ * @qos_stats_work: Workqueue for qos stats processing
+ * @qos_rate_limiters: Current active qos rate limiters
+ * @qos_stats_lock: Lock on qos stats updates
+ * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,6 +178,7 @@ struct nfp_flower_priv {
struct rhashtable flow_table;
struct nfp_fl_stats *stats;
spinlock_t stats_lock; /* lock stats */
+ struct rhashtable stats_ctx_table;
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -172,6 +191,25 @@ struct nfp_flower_priv {
struct list_head non_repr_priv;
unsigned int active_mem_unit;
unsigned int total_mem_units;
+ struct nfp_fl_internal_ports internal_ports;
+ struct delayed_work qos_stats_work;
+ unsigned int qos_rate_limiters;
+ spinlock_t qos_stats_lock; /* Protect the qos stats */
+ int pre_tun_rule_cnt;
+};
+
+/**
+ * struct nfp_fl_qos - Flower APP priv data for quality of service
+ * @netdev_port_id: NFP port number of repr with qos info
+ * @curr_stats: Currently stored stats updates for qos info
+ * @prev_stats: Previously stored updates for qos info
+ * @last_update: Stored time when last stats were updated
+ */
+struct nfp_fl_qos {
+ u32 netdev_port_id;
+ struct nfp_stat_pair curr_stats;
+ struct nfp_stat_pair prev_stats;
+ u64 last_update;
};
/**
@@ -180,14 +218,20 @@ struct nfp_flower_priv {
* @lag_port_flags: Extended port flags to record lag state of repr
* @mac_offloaded: Flag indicating a MAC address is offloaded for repr
* @offloaded_mac_addr: MAC address that has been offloaded for repr
+ * @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
+ * @qos_table: Stored info on filters implementing qos
+ * @on_bridge: Indicates if the repr is attached to a bridge
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
unsigned long lag_port_flags;
bool mac_offloaded;
u8 offloaded_mac_addr[ETH_ALEN];
+ bool block_shared;
struct list_head mac_list;
+ struct nfp_fl_qos qos_table;
+ bool on_bridge;
};
/**
@@ -239,6 +283,30 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
+ struct list_head linked_flows;
+ bool in_hw;
+ struct {
+ struct net_device *dev;
+ __be16 vlan_tci;
+ __be16 port_idx;
+ } pre_tun_rule;
+};
+
+struct nfp_fl_payload_link {
+ /* A link contains a pointer to a merge flow and an associated sub_flow.
+ * Each merge flow will feature in 2 links to its underlying sub_flows.
+ * A sub_flow will have at least 1 link to a merge flow or more if it
+ * has been used to create multiple merge flows.
+ *
+ * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
+ * all links to sub_flows (sub_flow.flow) via merge_flow.list.
+ * For a sub_flow, 'linked_flows' gives all links to merge flows it has
+ * formed (merge_flow.flow) via sub_flow.list.
+ */
+ struct {
+ struct list_head list;
+ struct nfp_fl_payload *flow;
+ } merge_flow, sub_flow;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -250,26 +318,64 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+
+ if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+ return false;
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+
+ return false;
+}
+
+/* The address of the merged flow acts as its cookie.
+ * Cookies supplied to us by TC flower are also addresses of allocated
+ * memory, so this scheme should not generate any collisions.
+ */
+static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
+{
+ return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
+}
+
+static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
+{
+ return netif_is_ovs_master(netdev);
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type);
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev);
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack);
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
@@ -277,6 +383,8 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev);
struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -299,9 +407,15 @@ int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act);
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
@@ -314,4 +428,10 @@ void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev);
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9b8b843d0340..9cc3ba17ff69 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,9 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct tc_cls_flower_offload *flow, u8 key_type)
+ struct flow_cls_offload *flow, u8 key_type)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -54,7 +54,8 @@ nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version, enum nfp_flower_tun_type tun_type)
+ bool mask_version, enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
@@ -64,8 +65,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else {
- if (!cmsg_port)
+ if (!cmsg_port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
return -EOPNOTSUPP;
+ }
frame->in_port = cpu_to_be32(cmsg_port);
}
@@ -75,9 +78,9 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -127,9 +130,9 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -148,9 +151,9 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct nfp_flower_ip_ext *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -222,9 +225,9 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -244,9 +247,9 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -266,7 +269,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct flow_match_enc_opts match;
@@ -278,11 +281,76 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
}
static void
+nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
+ struct nfp_flower_tun_ipv4 *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
+nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
+ struct nfp_flower_tun_ip_ext *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ ext->tun_key = match.key->keyid;
+ msk->tun_key = match.mask->keyid;
+
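+ /* Flag that the GRE key field is present in the match. */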
+ ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ }
+
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+}
+
+static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -298,41 +366,24 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
msk->tun_id = cpu_to_be32(temp_vni);
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- struct flow_match_ipv4_addrs match;
-
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- ext->ip_src = match.key->src;
- ext->ip_dst = match.key->dst;
- msk->ip_src = match.mask->src;
- msk->ip_dst = match.mask->dst;
- }
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_enc_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
- }
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type)
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
- u32 cmsg_port = 0;
+ u32 port_id;
int err;
u8 *ext;
u8 *msk;
- if (nfp_netdev_is_nfp_repr(netdev))
- cmsg_port = nfp_repr_get_port_id(netdev);
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -358,13 +409,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- cmsg_port, false, tun_type);
+ port_id, false, tun_type, extack);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- cmsg_port, true, tun_type);
+ port_id, true, tun_type, extack);
if (err)
return err;
@@ -403,12 +454,27 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ __be32 tun_dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
+ tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 492837b852b6..7c4a15e967df 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
unsigned long cookie;
};
+struct nfp_fl_stats_ctx_to_flow {
+ struct rhash_head ht_node;
+ u32 stats_cxt;
+ struct nfp_fl_payload *flow;
+};
+
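+/* Hash table keyed on the 32-bit stats context ID, mapping a context back
+ * to the offloaded flow that owns it.
+ */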
+static const struct rhashtable_params stats_ctx_table_params = {
+ .key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+ .head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+ .key_len = sizeof(u32),
+};
+
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
- if (meta_flags)
- *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -281,29 +290,55 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
}
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
u8 new_mask_id;
u32 stats_cxt;
+ int err;
- if (nfp_get_stats_entry(app, &stats_cxt))
- return -ENOENT;
+ err = nfp_get_stats_entry(app, &stats_cxt);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
+ return err;
+ }
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->ingress_dev = netdev;
+ ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ err = -ENOMEM;
+ goto err_release_stats;
+ }
+
+ ctx_entry->stats_cxt = stats_cxt;
+ ctx_entry->flow = nfp_flow;
+
+ if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+ stats_ctx_table_params)) {
+ err = -ENOMEM;
+ goto err_free_ctx_entry;
+ }
+
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
- return -ENOENT;
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_rhash;
+ }
+ err = -ENOENT;
+ goto err_remove_rhash;
}
nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +352,97 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
- NULL, &new_mask_id))
- return -EINVAL;
+ NULL, &new_mask_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
- return -EEXIST;
+ err = -EEXIST;
+ goto err_remove_mask;
}
return 0;
+
+err_remove_mask:
+ nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+ NULL, &new_mask_id);
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+err_free_ctx_entry:
+ kfree(ctx_entry);
+err_release_stats:
+ nfp_release_stats_entry(app, stats_cxt);
+
+ return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow)
+{
+ nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
}
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
u8 new_mask_id = 0;
u32 temp_ctx_id;
+ __nfp_modify_flow_metadata(priv, nfp_flow);
+
nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
&new_mask_id);
- nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
- priv->flower_version++;
-
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- /* Release the stats ctx id. */
+ /* Release the stats ctx id and the ctx-to-flow table entry. */
temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return -ENOENT;
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+ kfree(ctx_entry);
+
return nfp_release_stats_entry(app, temp_ctx_id);
}
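+/* Look up the flow payload that owns a given firmware stats context ID. */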
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+ struct nfp_flower_priv *priv = app->priv;
+
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return NULL;
+
+ return ctx_entry->flow;
+}
+
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -403,6 +492,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
return err;
+ err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+ if (err)
+ goto err_free_flow_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +503,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_flow_table;
+ goto err_free_stats_ctx_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -447,6 +540,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+ rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
@@ -461,6 +556,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->stats_ctx_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 450d7296fd57..987ae221f6be 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,34 @@
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+
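+/* Key layers a flow may match on if it is to take part in a flow merge. */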
+#define NFP_FLOWER_MERGE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_TP | \
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
+
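+/* Key layers permitted in the match of a pre-tunnel rule. */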
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_IPV4)
+
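+/* Overlay of all match fields relevant to merging; vals[] aliases the
+ * struct so bitmap helpers can operate on the whole key at once.
+ */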
+struct nfp_flower_merge_check {
+ union {
+ struct {
+ __be16 tci;
+ struct nfp_flower_mac_mpls l2;
+ struct nfp_flower_tp_ports l4;
+ union {
+ struct nfp_flower_ipv4 ipv4;
+ struct nfp_flower_ipv6 ipv6;
+ };
+ };
+ unsigned long vals[8];
+ };
+};
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
@@ -100,9 +126,9 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
-static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
@@ -110,14 +136,25 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
+static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
+}
+
static int
-nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size)
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size,
+ struct netlink_ext_ack *extack)
{
- if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
+ }
- if (enc_opts->key->len > 0) {
+ if (enc_opts->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -126,13 +163,65 @@ nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
}
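+/* Classify a UDP tunnel by destination port and accumulate the key layers
+ * and key size it requires.
+ */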
static int
+nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
+ struct flow_dissector_key_enc_opts *enc_op,
+ u32 *key_layer_two, u8 *key_layer, int *key_size,
+ struct nfp_flower_priv *priv,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ switch (enc_ports->dst) {
+ case htons(IANA_VXLAN_UDP_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ *key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case htons(GENEVE_UDP_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+ return -EOPNOTSUPP;
+ }
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
+ return -EOPNOTSUPP;
+ }
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
+ key_size, extack);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow,
- enum nfp_flower_tun_type *tun_type)
+ struct flow_cls_offload *flow,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
@@ -141,14 +230,18 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
int key_size;
int err;
- if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+ if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
return -EOPNOTSUPP;
+ }
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
- != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
return -EOPNOTSUPP;
+ }
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT;
@@ -166,8 +259,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_vlan(rule, &vlan);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
- vlan.key->vlan_priority)
+ vlan.key->vlan_priority) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -178,56 +273,68 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
- if (enc_ctl.mask->addr_type != 0xffff ||
- enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ if (enc_ctl.mask->addr_type != 0xffff) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
+ }
+ if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
+ return -EOPNOTSUPP;
+ }
/* These fields are already verified as used. */
flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
- return -EOPNOTSUPP;
-
- flow_rule_match_enc_ports(rule, &enc_ports);
- if (enc_ports.mask->dst != cpu_to_be16(~0))
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
return -EOPNOTSUPP;
+ }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
- switch (enc_ports.key->dst) {
- case htons(NFP_FL_VXLAN_PORT):
- *tun_type = NFP_FL_TUNNEL_VXLAN;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (enc_op.key)
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ /* check if GRE, which has no enc_ports */
+ if (netif_is_gretap(netdev)) {
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
- break;
- case htons(NFP_FL_GENEVE_PORT):
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ }
+ } else {
+ flow_rule_match_enc_ports(rule, &enc_ports);
+ if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_GENEVE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
- if (!enc_op.key)
- break;
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
- return -EOPNOTSUPP;
- err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
- &key_size);
+ err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
+ enc_op.key,
+ &key_layer_two,
+ &key_layer,
+ &key_size, priv,
+ tun_type, extack);
if (err)
return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
- /* Ensure the ingress netdev matches the expected tun type. */
- if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
- return -EOPNOTSUPP;
+ /* Ensure the ingress netdev matches the expected
+ * tun type.
+ */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+ return -EOPNOTSUPP;
+ }
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
@@ -250,6 +357,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
* because we rely on it to get to the host.
*/
case cpu_to_be16(ETH_P_ARP):
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
return -EOPNOTSUPP;
case cpu_to_be16(ETH_P_MPLS_UC):
@@ -265,17 +373,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
default:
- /* Other ethtype - we need check the masks for the
- * remainder of the key to ensure we can offload.
- */
- if (nfp_flower_check_higher_than_mac(flow))
- return -EOPNOTSUPP;
- break;
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
+ return -EOPNOTSUPP;
}
+ } else if (nfp_flower_check_higher_than_mac(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
+ return -EOPNOTSUPP;
}
if (basic.mask && basic.mask->ip_proto) {
- /* Ethernet type is present in the key. */
switch (basic.key->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
@@ -285,14 +391,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_layer |= NFP_FLOWER_LAYER_TP;
key_size += sizeof(struct nfp_flower_tp_ports);
break;
- default:
- /* Other ip proto - we need check the masks for the
- * remainder of the key to ensure we can offload.
- */
- return -EOPNOTSUPP;
}
}
+ if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
+ nfp_flower_check_higher_than_l3(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
+ return -EOPNOTSUPP;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
struct flow_match_tcp tcp;
u32 tcp_flags;
@@ -300,22 +407,28 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_tcp(rule, &tcp);
tcp_flags = be16_to_cpu(tcp.key->flags);
- if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+ if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
return -EOPNOTSUPP;
+ }
/* We only support PSH and URG flags when either
* FIN, SYN or RST is present as well.
*/
if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
- !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+ !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
return -EOPNOTSUPP;
+ }
/* We need to store TCP flags in the either the IPv4 or IPv6 key
* space, thus we need to ensure we include a IPv4/IPv6 key
* layer if we have not done so already.
*/
- if (!basic.key)
+ if (!basic.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
return -EOPNOTSUPP;
+ }
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
@@ -326,11 +439,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
case cpu_to_be16(ETH_P_IPV6):
- key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
return -EOPNOTSUPP;
}
}
@@ -340,8 +454,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
return -EOPNOTSUPP;
+ }
}
ret_key_ls->key_layer = key_layer;
@@ -376,6 +492,9 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
+ INIT_LIST_HEAD(&flow_pay->linked_flows);
+ flow_pay->in_hw = false;
+ flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
@@ -388,6 +507,601 @@ err_free_flow:
return NULL;
}
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ u8 *last_act_id, int *act_out)
+{
+ struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+ struct nfp_fl_set_ip4_addrs *ipv4_add;
+ struct nfp_fl_set_ipv6_addr *ipv6_add;
+ struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_set_tport *tport;
+ struct nfp_fl_set_eth *eth;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+ u8 act_id = 0;
+ u8 *ports;
+ int i;
+
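+ /* Walk the action list, counting output actions and OR-ing the masks
+ * of field-set actions into the merge check so those fields count as
+ * settable by this flow.
+ */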
+ while (act_off < flow->meta.act_len) {
+ a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_OUTPUT:
+ if (act_out)
+ (*act_out)++;
+ break;
+ case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+ push_vlan = (struct nfp_fl_push_vlan *)a;
+ if (push_vlan->vlan_tci)
+ merge->tci = cpu_to_be16(0xffff);
+ break;
+ case NFP_FL_ACTION_OPCODE_POP_VLAN:
+ merge->tci = cpu_to_be16(0);
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ /* New tunnel header means l2 to l4 can be matched. */
+ eth_broadcast_addr(&merge->l2.mac_dst[0]);
+ eth_broadcast_addr(&merge->l2.mac_src[0]);
+ memset(&merge->l4, 0xff,
+ sizeof(struct nfp_flower_tp_ports));
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+ eth = (struct nfp_fl_set_eth *)a;
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_src[i] |=
+ eth->eth_addr_mask[ETH_ALEN + i];
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+ ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+ merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+ merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+ ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+ merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+ merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+ ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+ merge->ipv6.ip_ext.ttl |=
+ ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+ merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+ merge->ipv6.ipv6_flow_label_exthdr |=
+ ipv6_tc_hl_fl->ipv6_label_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_UDP:
+ case NFP_FL_ACTION_OPCODE_SET_TCP:
+ tport = (struct nfp_fl_set_tport *)a;
+ ports = (u8 *)&merge->l4.port_src;
+ for (i = 0; i < 4; i++)
+ ports[i] |= tport->tp_port_mask[i];
+ break;
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ if (last_act_id)
+ *last_act_id = act_id;
+
+ return 0;
+}
+
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ bool extra_fields)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ u8 *mask = flow->mask_data;
+ u8 key_layer, match_size;
+
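+ /* Walk the packed mask data in key layer order, copying each present
+ * layer's mask into the merge overlay.
+ */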
+ memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+ meta_tci = (struct nfp_flower_meta_tci *)mask;
+ key_layer = meta_tci->nfp_flow_key_layer;
+
+ if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+ return -EOPNOTSUPP;
+
+ merge->tci = meta_tci->tci;
+ mask += sizeof(struct nfp_flower_meta_tci);
+
+ if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+ mask += sizeof(struct nfp_flower_ext_meta);
+
+ mask += sizeof(struct nfp_flower_in_port);
+
+ if (key_layer & NFP_FLOWER_LAYER_MAC) {
+ match_size = sizeof(struct nfp_flower_mac_mpls);
+ memcpy(&merge->l2, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_TP) {
+ match_size = sizeof(struct nfp_flower_tp_ports);
+ memcpy(&merge->l4, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ match_size = sizeof(struct nfp_flower_ipv4);
+ memcpy(&merge->ipv4, mask, match_size);
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+ match_size = sizeof(struct nfp_flower_ipv6);
+ memcpy(&merge->ipv6, mask, match_size);
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ /* Two flows can be merged if sub_flow2 only matches on bits that are
+ * either matched by sub_flow1 or set by a sub_flow1 action. This
+ * ensures that every packet that hits sub_flow1 and recirculates is
+ * guaranteed to hit sub_flow2.
+ */
+ struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+ int err, act_out = 0;
+ u8 last_act_id = 0;
+
+ err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+ true);
+ if (err)
+ return err;
+
+ err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+ false);
+ if (err)
+ return err;
+
+ err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+ &last_act_id, &act_out);
+ if (err)
+ return err;
+
+ /* There must be exactly one output action and it must be last in the sequence. */
+ if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ /* Reject merge if sub_flow2 matches on something that is not matched
+ * on or set in an action by sub_flow1.
+ */
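+ /* bitmap_andnot() returns true if any bit remains set, i.e. sub_flow2
+ * matches a field that sub_flow1 neither matches nor sets.
+ */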
+ err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+ sub_flow1_merge.vals,
+ sizeof(struct nfp_flower_merge_check) * 8);
+ if (err)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int
+nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
+ bool *tunnel_act)
+{
+ unsigned int act_off = 0, act_len;
+ struct nfp_fl_act_head *a;
+ u8 act_id = 0;
+
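+ /* Pre-actions must sit at the front of an action list; copy them and
+ * report the offset of the first non-pre action.
+ */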
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&act_src[act_off];
+ act_len = a->len_lw << NFP_FL_LW_SIZ;
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ if (tunnel_act)
+ *tunnel_act = true;
+ /* fall through */
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ memcpy(act_dst + act_off, act_src + act_off, act_len);
+ break;
+ default:
+ return act_off;
+ }
+
+ act_off += act_len;
+ }
+
+ return act_off;
+}
+
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
+{
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
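+ /* After a tunnel push, only an optional leading VLAN push followed by
+ * output actions make up a valid merge.
+ */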
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+ *vlan = (struct nfp_fl_push_vlan *)a;
+ else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ /* Ensure any VLAN push also has an egress action. */
+ if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+ struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
+ tun = (struct nfp_fl_set_ipv4_tun *)a;
+ tun->outer_vlan_tpid = vlan->vlan_tpid;
+ tun->outer_vlan_tci = vlan->vlan_tci;
+
+ return 0;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ /* Return error if no tunnel action is found. */
+ return -EOPNOTSUPP;
+}
+
+static int
+nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2,
+ struct nfp_fl_payload *merge_flow)
+{
+ unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
+ bool tunnel_act = false;
+ char *merge_act;
+ int err;
+
+ /* The last action of sub_flow1 must be output - do not merge this. */
+ sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
+ sub2_act_len = sub_flow2->meta.act_len;
+
+ if (!sub2_act_len)
+ return -EINVAL;
+
+ if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
+ return -EINVAL;
+
+ /* A shortcut can only be applied if there is a single action. */
+ if (sub1_act_len)
+ merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ else
+ merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
+
+ merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
+ merge_act = merge_flow->action_data;
+
+ /* Copy any pre-actions to the start of merge flow action list. */
+ pre_off1 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow1->action_data,
+ sub1_act_len, &tunnel_act);
+ merge_act += pre_off1;
+ sub1_act_len -= pre_off1;
+ pre_off2 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow2->action_data,
+ sub2_act_len, NULL);
+ merge_act += pre_off2;
+ sub2_act_len -= pre_off2;
+
+ /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
+ * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
+ * valid merge.
+ */
+ if (tunnel_act) {
+ char *post_tun_acts = &sub_flow2->action_data[pre_off2];
+
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+ &post_tun_push_vlan);
+ if (err)
+ return err;
+
+ if (post_tun_push_vlan) {
+ pre_off2 += sizeof(*post_tun_push_vlan);
+ sub2_act_len -= sizeof(*post_tun_push_vlan);
+ }
+ }
+
+ /* Copy remaining actions from sub_flows 1 and 2. */
+ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+
+ if (post_tun_push_vlan) {
+ /* Update tunnel action in merge to include VLAN push. */
+ err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+ post_tun_push_vlan);
+ if (err)
+ return err;
+
+ merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+ }
+
+ merge_act += sub1_act_len;
+ memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
+
+ return 0;
+}
+
+/* Flow link code should only be accessed under RTNL. */
+static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
+{
+ list_del(&link->merge_flow.list);
+ list_del(&link->sub_flow.list);
+ kfree(link);
+}
+
+static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
+ if (link->sub_flow.flow == sub_flow) {
+ nfp_flower_unlink_flow(link);
+ return;
+ }
+}
+
+static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ link->merge_flow.flow = merge_flow;
+ list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
+ link->sub_flow.flow = sub_flow;
+ list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
+
+ return 0;
+}
+
+/**
+ * nfp_flower_merge_offloaded_flows() - Merge two existing flows into one.
+ * @app: Pointer to the APP handle
+ * @sub_flow1: Initial flow matched to produce merge hint
+ * @sub_flow2: Post recirculation flow matched in merge hint
+ *
+ * Combines two flows (if valid) into a single flow, removing the initial
+ * flow from hw and offloading the new, merged flow.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ struct flow_cls_offload merge_tc_off;
+ struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
+ struct nfp_fl_payload *merge_flow;
+ struct nfp_fl_key_ls merge_key_ls;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* Merges are initiated by firmware hints rather than a netlink
+ * request, so there is no extack to report errors to.
+ */
+ if (sub_flow1 == sub_flow2 ||
+ nfp_flower_is_merge_flow(sub_flow1) ||
+ nfp_flower_is_merge_flow(sub_flow2))
+ return -EINVAL;
+
+ err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+ if (err)
+ return err;
+
+ merge_key_ls.key_size = sub_flow1->meta.key_len;
+
+ merge_flow = nfp_flower_allocate_new(&merge_key_ls);
+ if (!merge_flow)
+ return -ENOMEM;
+
+ merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
+ merge_flow->ingress_dev = sub_flow1->ingress_dev;
+
+ memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
+ sub_flow1->meta.key_len);
+ memcpy(merge_flow->mask_data, sub_flow1->mask_data,
+ sub_flow1->meta.mask_len);
+
+ err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow1);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow2);
+ if (err)
+ goto err_unlink_sub_flow1;
+
+ merge_tc_off.cookie = merge_flow->tc_flower_cookie;
+ err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
+ merge_flow->ingress_dev, extack);
+ if (err)
+ goto err_unlink_sub_flow2;
+
+ err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_release_metadata;
+
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ goto err_remove_rhash;
+
+ merge_flow->in_hw = true;
+ sub_flow1->in_hw = false;
+
+ return 0;
+
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+err_release_metadata:
+ nfp_modify_flow_metadata(app, merge_flow);
+err_unlink_sub_flow2:
+ nfp_flower_unlink_flows(merge_flow, sub_flow2);
+err_unlink_sub_flow1:
+ nfp_flower_unlink_flows(merge_flow, sub_flow1);
+err_destroy_merge_flow:
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ kfree(merge_flow);
+ return err;
+}
+
+/**
+ * nfp_flower_validate_pre_tun_rule() - Verify a flow as a valid pre-tunnel rule.
+ * @app: Pointer to the APP handle
+ * @flow: Pointer to NFP flow representation of rule
+ * @extack: Netlink extended ACK report
+ *
+ * Verifies the flow as a pre-tunnel rule.
+ *
+ * Return: negative value on error, 0 if verified.
+ */
+static int
+nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ struct nfp_fl_payload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_mac_mpls *mac;
+ struct nfp_fl_act_head *act;
+ u8 *mask = flow->mask_data;
+ bool vlan = false;
+ int act_offset;
+ u8 key_layer;
+
+ meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
+
+ key_layer = meta_tci->nfp_flow_key_layer;
+ if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
+ return -EOPNOTSUPP;
+ }
+
+ /* Skip fields known to exist. */
+ mask += sizeof(struct nfp_flower_meta_tci);
+ mask += sizeof(struct nfp_flower_in_port);
+
+ /* Ensure destination MAC address is fully matched. */
+ mac = (struct nfp_flower_mac_mpls *)mask;
+ if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
+ return -EOPNOTSUPP;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
+ int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int i;
+
+ mask += sizeof(struct nfp_flower_mac_mpls);
+
+ /* Ensure proto and flags are the only IP layer fields. */
+ for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ if (mask[i] && i != ip_flags && i != ip_proto) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Action must be a single egress, or a pop_vlan followed by an egress. */
+ act_offset = 0;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ if (vlan) {
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ }
+
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+
+ /* Ensure there are no more actions after egress. */
+ if (act_offset != flow->meta.act_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -400,15 +1114,17 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
@@ -417,7 +1133,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
return -ENOMEM;
err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
- &tun_type);
+ &tun_type, extack);
if (err)
goto err_free_key_ls;
@@ -428,32 +1144,45 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
}
err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
- flow_pay, tun_type);
+ flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
+ err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
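+ /* Flows flagged as pre-tunnel rules take a dedicated validation and
+ * transmit path.
+ */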
+ if (flow_pay->pre_tun_rule.dev) {
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ if (err)
+ goto err_destroy_flow;
+ }
+
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
flow_pay->tc_flower_cookie = flow->cookie;
err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
nfp_flower_table_params);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
goto err_release_metadata;
+ }
- err = nfp_flower_xmit_flow(app, flow_pay,
- NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (flow_pay->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+ else
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
if (port)
port->tc_offload_cnt++;
+ flow_pay->in_hw = true;
+
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -475,6 +1204,75 @@ err_free_key_ls:
return err;
}
+static void
+nfp_flower_remove_merge_flow(struct nfp_app *app,
+ struct nfp_fl_payload *del_sub_flow,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link, *temp;
+ struct nfp_fl_payload *origin;
+ bool mod = false;
+ int err;
+
+ link = list_first_entry(&merge_flow->linked_flows,
+ struct nfp_fl_payload_link, merge_flow.list);
+ origin = link->sub_flow.flow;
+
+ /* Re-add the rule the merge had overwritten, if it has not been deleted. */
+ if (origin != del_sub_flow)
+ mod = true;
+
+ err = nfp_modify_flow_metadata(app, merge_flow);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
+ goto err_free_links;
+ }
+
+ if (!mod) {
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
+ goto err_free_links;
+ }
+ } else {
+ __nfp_modify_flow_metadata(priv, origin);
+ err = nfp_flower_xmit_flow(app, origin,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
+ origin->in_hw = true;
+ }
+
+err_free_links:
+ /* Clean any links connected with the merged flow. */
+ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
+ merge_flow.list)
+ nfp_flower_unlink_flow(link);
+
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+ kfree_rcu(merge_flow, rcu);
+}
+
+static void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link, *temp;
+
+ /* Remove any merge flow formed from the deleted sub_flow. */
+ list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
+ sub_flow.list)
+ nfp_flower_remove_merge_flow(app, sub_flow,
+ link->merge_flow.flow);
+}
+
/**
* nfp_flower_del_offload() - Removes a flow from hardware.
* @app: Pointer to the APP handle
@@ -482,39 +1280,51 @@ err_free_key_ls:
* @flow: TC flower classifier offload structure
*
* Removes a flow from the repeated hash structure and clears the
- * action payload.
+ * action payload. Any flows merged from this are also deleted.
*
* Return: negative value on error, 0 if removed successfully.
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
return -ENOENT;
+ }
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
- goto err_free_flow;
+ goto err_free_merge_flow;
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
- err = nfp_flower_xmit_flow(app, nfp_flow,
- NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
- if (err)
- goto err_free_flow;
+ if (!nfp_flow->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
-err_free_flow:
+ if (nfp_flow->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+ else
+ err = nfp_flower_xmit_flow(app, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ /* Fall through on error. */
+
+err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, nfp_flow);
if (port)
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
@@ -527,6 +1337,52 @@ err_free_flow:
return err;
}
+static void
+__nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link;
+ struct nfp_fl_payload *sub_flow;
+ u64 pkts, bytes, used;
+ u32 ctx_id;
+
+ ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
+ pkts = priv->stats[ctx_id].pkts;
+ /* Do not cycle through the subflows if there are no stats to distribute. */
+ if (!pkts)
+ return;
+ bytes = priv->stats[ctx_id].bytes;
+ used = priv->stats[ctx_id].used;
+
+ /* Reset stats for the merge flow. */
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+
+ /* The merge flow has received stats updates from firmware.
+ * Distribute these stats to all subflows that form the merge.
+ * The stats will then be collected from TC via the subflows.
+ */
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
+ sub_flow = link->sub_flow.flow;
+ ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
+ priv->stats[ctx_id].pkts += pkts;
+ priv->stats[ctx_id].bytes += bytes;
+ priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used);
+ }
+}
+
+static void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ /* Get merge flows that the subflow forms to distribute their stats. */
+ list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
+ __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
+}
+
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
@@ -540,19 +1396,27 @@ err_free_flow:
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
+ extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
return -EINVAL;
+ }
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
+ /* If request is for a sub_flow, update stats from merged flows. */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(app, nfp_flow);
+
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
@@ -565,17 +1429,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct flow_cls_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
@@ -594,28 +1458,53 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
+ type_data);
default:
return -EOPNOTSUPP;
}
}
+static LIST_HEAD(nfp_block_cb_list);
+
static int nfp_flower_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ repr_priv = repr->app_priv;
+ repr_priv->block_shared = f->block_shared;
+ f->driver_block_list = &nfp_block_cb_list;
+
switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_flower_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_tc_block_cb,
- repr);
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
+ repr, repr, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
@@ -660,7 +1549,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct tc_cls_flower_offload *flower = type_data;
+ struct flow_cls_offload *flower = type_data;
if (flower->common.chain_index)
return -EOPNOTSUPP;
@@ -674,19 +1563,37 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
}
}
+static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- int err;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !nfp_flower_internal_port_can_offload(app, netdev)) ||
+ (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
- case TC_BLOCK_BIND:
+ case FLOW_BLOCK_BIND:
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (cb_priv &&
+ flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+ cb_priv,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
if (!cb_priv)
return -ENOMEM;
@@ -695,26 +1602,31 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- err = tcf_block_cb_register(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv, cb_priv, f->extack);
- if (err) {
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ nfp_flower_setup_indr_tc_release);
+ if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
+ return PTR_ERR(block_cb);
}
- return err;
- case TC_BLOCK_UNBIND:
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
if (!cb_priv)
return -ENOENT;
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv);
- list_del(&cb_priv->list);
- kfree(cb_priv);
+ block_cb = flow_block_cb_lookup(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
@@ -745,16 +1657,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
return NOTIFY_OK;
if (event == NETDEV_REGISTER) {
- err = __tc_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
+ err = __flow_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
if (err)
nfp_flower_cmsg_warn(app,
"Indirect block reg failed - %s\n",
netdev->name);
} else if (event == NETDEV_UNREGISTER) {
- __tc_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb, app);
+ __flow_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ app);
}
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
new file mode 100644
index 000000000000..124a43dc136a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/math64.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_port.h"
+
+#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
+
+struct nfp_police_cfg_head {
+ __be32 flags_opts;
+ __be32 port;
+};
+
+/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
+ * See RFC 2698 for more details.
+ * ----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                          Flag options                         |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                          Port Ingress                         |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                       Token Bucket Peak                       |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     Token Bucket Committed                    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                        Peak Burst Size                        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      Committed Burst Size                     |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     Peak Information Rate                     |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                   Committed Information Rate                  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_police_config {
+ struct nfp_police_cfg_head head;
+ __be32 bkt_tkn_p;
+ __be32 bkt_tkn_c;
+ __be32 pbs;
+ __be32 cbs;
+ __be32 pir;
+ __be32 cir;
+};
+
+struct nfp_police_stats_reply {
+ struct nfp_police_cfg_head head;
+ __be64 pass_bytes;
+ __be64 pass_pkts;
+ __be64 drop_bytes;
+ __be64 drop_pkts;
+};
+
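Note that the firmware reports pass and drop counters separately; the reply handler below (nfp_flower_stats_rlim_reply()) sums the two pairs so that the cached totals reflect all traffic that hit the policer, whether passed or dropped.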
+static int
+nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *action = &flow->rule->action.entries[0];
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+ u64 burst, rate;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+
+ if (repr_priv->block_shared) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (repr->port->type != NFP_PORT_VF_PORT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_offload_has_one_action(&flow->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->common.prio != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
+ return -EOPNOTSUPP;
+ }
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ rate = action->police.rate_bytes_ps;
+ burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
+ PSCHED_TICKS_PER_SEC);
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ repr_priv->qos_table.netdev_port_id = netdev_port_id;
+ fl_priv->qos_rate_limiters++;
+ if (fl_priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&fl_priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return 0;
+}
+
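Since PSCHED_NS2TICKS() and PSCHED_TICKS_PER_SEC apply the same tick conversion, the burst computation above effectively reduces to rate * burst_time_in_seconds, treating the action's burst as a time in nanoseconds. A rough worked example (illustrative values, ignoring tick rounding):

/* rate = 125000000 B/s (1 Gbit/s), burst time = 1 ms:
 *
 *	burst = div_u64(rate * PSCHED_NS2TICKS(1000000), PSCHED_TICKS_PER_SEC)
 *	      ~ 125000000 * 1000000 / NSEC_PER_SEC
 *	      = 125000 bytes
 *
 * The same value seeds both token buckets and both burst sizes, and
 * pir == cir, so the trTCM behaves as a single-rate policer here.
 */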
+static int
+nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+ repr_priv = repr->app_priv;
+
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
+ return -EOPNOTSUPP;
+ }
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Clear all QoS-associated data for this interface. */
+ memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
+ fl_priv->qos_rate_limiters--;
+ if (!fl_priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_stats_reply *msg;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ struct net_device *netdev;
+ struct nfp_repr *repr;
+ u32 netdev_port_id;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ netdev_port_id = be32_to_cpu(msg->head.port);
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
+ if (!netdev)
+ goto exit_unlock_rcu;
+
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+
+ if (!repr_priv->qos_table.last_update) {
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ }
+
+ repr_priv->qos_table.last_update = jiffies;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void
+nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
+ u32 netdev_port_id)
+{
+ struct nfp_police_cfg_head *head;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(fl_priv->app,
+ sizeof(struct nfp_police_cfg_head),
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS,
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ head = nfp_flower_cmsg_get_data(skb);
+ memset(head, 0, sizeof(struct nfp_police_cfg_head));
+ head->port = cpu_to_be32(netdev_port_id);
+
+ nfp_ctrl_tx(fl_priv->app->ctrl, skb);
+}
+
+static void
+nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_reprs *repr_set;
+ int i;
+
+ rcu_read_lock();
+ repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
+ if (!repr_set)
+ goto exit_unlock_rcu;
+
+ for (i = 0; i < repr_set->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = rcu_dereference(repr_set->reprs[i]);
+ if (netdev) {
+ struct nfp_repr *priv = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
+ u32 netdev_port_id;
+
+ repr_priv = priv->app_priv;
+ netdev_port_id = repr_priv->qos_table.netdev_port_id;
+ if (!netdev_port_id)
+ continue;
+
+ nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ }
+ }
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct nfp_flower_priv *fl_priv;
+
+ delayed_work = to_delayed_work(work);
+ fl_priv = container_of(delayed_work, struct nfp_flower_priv,
+ qos_stats_work);
+
+ nfp_flower_stats_rlim_request_all(fl_priv);
+ schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
+}
+
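Because update_stats_cache() requeues itself, stats requests go out every NFP_FL_QOS_UPDATE (one second) for as long as at least one rate limiter exists: the install path above schedules the work when the first limiter is added, and the remove path cancels it when the last one goes away.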
+static int
+nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ u64 diff_bytes, diff_pkts;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ repr_priv = repr->app_priv;
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+ diff_pkts = curr_stats->pkts - prev_stats->pkts;
+ diff_bytes = curr_stats->bytes - prev_stats->bytes;
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+ flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
+ repr_priv->qos_table.last_update);
+ return 0;
+}
+
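The firmware reports cumulative totals while TC expects increments, so the stats callback above hands back the delta since the previous query and then advances the snapshot. A small worked example (illustrative numbers):

/* curr_stats->pkts = 1000, prev_stats->pkts = 900
 *	=> diff_pkts = 100 is passed to flow_stats_update(), and
 *	   prev_stats->pkts becomes 1000 for the next query.
 */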
+void nfp_flower_qos_init(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ spin_lock_init(&fl_priv->qos_stats_lock);
+ INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
+}
+
+void nfp_flower_qos_cleanup(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+}
+
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow)
+{
+ struct netlink_ext_ack *extack = flow->common.extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return nfp_flower_install_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_DESTROY:
+ return nfp_flower_remove_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_STATS:
+ return nfp_flower_stats_rate_limiter(app, netdev, flow,
+ extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
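For reference, the matchall hook above is typically exercised by an ingress police filter on a VF representor, along the lines of the following illustrative command (pref 1 matters because the install path rejects any other priority):

  tc filter add dev $VF_REPR ingress pref 1 protocol all \
	matchall action police rate 1gbit burst 128k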
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 4d78be4ec4e9..2600ce476d6b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -15,6 +15,24 @@
#define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
+#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags: options for the rule offload
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+ __be32 flags;
+ __be16 port_idx;
+ __be16 vlan_tci;
+ __be32 host_ctx_id;
+};
+
/**
* struct nfp_tun_active_tuns - periodic message of active tunnels
* @seq: sequence number of the message
@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
/**
* struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
- * @ht_node: Hashtable entry
- * @addr: Offloaded MAC address
- * @index: Offloaded index for given MAC address
- * @ref_count: Number of devs using this MAC address
- * @repr_list: List of reprs sharing this MAC address
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
*/
struct nfp_tun_offloaded_mac {
struct rhash_head ht_node;
@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
u16 index;
int ref_count;
struct list_head repr_list;
+ int bridge_count;
};
static const struct rhashtable_params offloaded_macs_params = {
@@ -162,16 +182,16 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
pay_len = nfp_flower_cmsg_get_data_len(skb);
- if (pay_len != sizeof(struct nfp_tun_active_tuns) +
- sizeof(struct route_ip_info) * count) {
+ if (pay_len != struct_size(payload, tun_info, count)) {
nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
return;
}
+ rcu_read_lock();
for (i = 0; i < count; i++) {
ipv4_addr = payload->tun_info[i].ipv4;
port = be32_to_cpu(payload->tun_info[i].egress_port);
- netdev = nfp_app_repr_get(app, port);
+ netdev = nfp_app_dev_get(app, port, NULL);
if (!netdev)
continue;
@@ -183,6 +203,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
neigh_event_send(n, NULL);
neigh_release(n);
}
+ rcu_read_unlock();
}
static int
@@ -270,9 +291,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
+ u32 port_id;
- /* Only offload representor IPv4s for now. */
- if (!nfp_netdev_is_nfp_repr(netdev))
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
return;
memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +312,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.src_ipv4 = flow->saddr;
ether_addr_copy(payload.src_addr, netdev->dev_addr);
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
@@ -326,13 +348,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
flow.daddr = *(__be32 *)n->primary_key;
- /* Only concerned with route changes for representors. */
- if (!nfp_netdev_is_nfp_repr(n->dev))
- return NOTIFY_DONE;
-
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
+ if (!nfp_netdev_is_nfp_repr(n->dev) &&
+ !nfp_flower_internal_port_can_offload(app, n->dev))
+ return NOTIFY_DONE;
+
/* Only concerned with changes to routes already added to NFP. */
if (!nfp_tun_has_route(app, flow.daddr))
return NOTIFY_DONE;
@@ -366,9 +388,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
payload = nfp_flower_cmsg_get_data(skb);
- netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
- goto route_fail_warning;
+ goto fail_rcu_unlock;
flow.daddr = payload->ipv4_addr;
flow.flowi4_proto = IPPROTO_UDP;
@@ -378,21 +401,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
rt = ip_route_output_key(dev_net(netdev), &flow);
err = PTR_ERR_OR_ZERO(rt);
if (err)
- goto route_fail_warning;
+ goto fail_rcu_unlock;
#else
- goto route_fail_warning;
+ goto fail_rcu_unlock;
#endif
/* Get the neighbour entry for the lookup */
n = dst_neigh_lookup(&rt->dst, &flow.daddr);
ip_rt_put(rt);
if (!n)
- goto route_fail_warning;
- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
+ goto fail_rcu_unlock;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
neigh_release(n);
+ rcu_read_unlock();
return;
-route_fail_warning:
+fail_rcu_unlock:
+ rcu_read_unlock();
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
@@ -551,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+ } else if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count++;
}
entry->ref_count++;
@@ -567,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
- nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
- return 0;
+ if (entry->bridge_count ||
+ !nfp_flower_is_supported_bridge(netdev)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+ netdev, mod);
+ return 0;
+ }
+
+ /* MAC is global but matches need to go to pre_tun table. */
+ nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
}
- /* Assign a global index if non-repr or MAC address is now shared. */
- if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (ida_idx < 0)
- return ida_idx;
+ if (!nfp_mac_idx) {
+ /* Assign a global index if non-repr or MAC is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
- nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
- } else {
- nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ nfp_mac_idx =
+ nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+
+ } else {
+ nfp_mac_idx =
+ nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ }
}
if (!entry) {
@@ -649,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
list_del(&repr_priv->mac_list);
}
+ if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count--;
+
+ if (!entry->bridge_count && entry->ref_count) {
+ u16 nfp_mac_idx;
+
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+ false)) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
+ }
+
+ entry->index = nfp_mac_idx;
+ return 0;
+ }
+ }
+
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
@@ -708,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
repr_priv = repr->app_priv;
+ if (repr_priv->on_bridge)
+ return 0;
+
mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
@@ -823,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEUPPER) {
+ /* If a repr is attached to a bridge then tunnel packets
+ * entering the physical port are directed through the bridge
+ * datapath and cannot be directly detunneled. Therefore,
+ * associated offloaded MACs and indexes should not be used
+ * by fw for detunneling.
+ */
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev) ||
+ !nfp_flower_is_supported_bridge(upper))
+ return NOTIFY_OK;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return NOTIFY_OK;
+
+ repr_priv = repr->app_priv;
+
+ if (info->linking) {
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL))
+ nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
+ netdev_name(netdev));
+ repr_priv->on_bridge = true;
+ } else {
+ repr_priv->on_bridge = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD))
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ }
}
return NOTIFY_OK;
}
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_offloaded_mac *mac_entry;
+ struct nfp_tun_pre_tun_rule payload;
+ struct net_device *internal_dev;
+ int err;
+
+ if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+ return -ENOSPC;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ internal_dev = flow->pre_tun_rule.dev;
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.host_ctx_id = flow->meta.host_ctx_id;
+
+ /* Look up the MAC index for the pre-tunnel rule egress device.
+ * Note that because the device is always an internal port, it will
+ * have a constant global index so does not need to be tracked.
+ */
+ mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+ internal_dev->dev_addr);
+ if (!mac_entry)
+ return -ENOENT;
+
+ payload.port_idx = cpu_to_be16(mac_entry->index);
+
+ /* Copy MAC index and VLAN info to flow - dev may not exist at delete time. */
+ flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+ flow->pre_tun_rule.port_idx = payload.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt++;
+
+ return 0;
+}
+
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_pre_tun_rule payload;
+ u32 tmp_flags = 0;
+ int err;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+ payload.flags = cpu_to_be32(tmp_flags);
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.port_idx = flow->pre_tun_rule.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt--;
+
+ return 0;
+}
+
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index f8d422713705..76d13af46a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
* @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
- * @repr_get: get representor netdev
+ * @dev_get: get representor or internal port representing netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
- struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+ struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+ bool *redir_egress);
};
/**
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
app->type->sriov_disable(app);
}
-static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+static inline
+struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+ bool *redir_egress)
{
- if (unlikely(!app || !app->type->repr_get))
+ if (unlikely(!app || !app->type->dev_get))
return NULL;
- return app->type->repr_get(app, id);
+ return app->type->dev_get(app, id, redir_egress);
}
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
@@ -433,6 +436,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev);
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index e9eca99cf493..c50fce42f473 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -144,7 +144,8 @@ nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
static int
nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
- u32 size, enum devlink_sb_threshold_type threshold_type)
+ u32 size, enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -354,6 +355,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
{
struct nfp_eth_table_port eth_port;
struct devlink *devlink;
+ const u8 *serial;
+ int serial_len;
int ret;
rtnl_lock();
@@ -362,10 +365,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
if (ret)
return ret;
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ serial_len = nfp_cpp_serial(port->app->cpp, &serial);
devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
eth_port.label_port, eth_port.is_split,
- eth_port.label_subport);
+ eth_port.label_subport, serial, serial_len);
devlink = priv_to_devlink(app->pf);
@@ -377,13 +380,23 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
devlink_port_unregister(&port->dl_port);
}
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev)
+void nfp_devlink_port_type_eth_set(struct nfp_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+}
+
+void nfp_devlink_port_type_clear(struct nfp_port *port)
{
- struct nfp_app *app;
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
+{
+ struct nfp_port *port;
- app = nfp_app_from_netdev(netdev);
- if (!app)
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
return NULL;
- return priv_to_devlink(app->pf);
+ return &port->dl_port;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index f4c8776e42b6..4d282fc56009 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -294,6 +294,9 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
+ if (!pci_get_drvdata(pdev))
+ return -ENOENT;
+
if (num_vfs == 0)
return nfp_pcie_sriov_disable(pdev);
else
@@ -349,7 +352,7 @@ nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name)
err = request_firmware_direct(&fw, name, &pdev->dev);
nfp_info(pf->cpp, " %s: %s\n",
- name, err ? "not found" : "found, loading...");
+ name, err ? "not found" : "found");
if (err)
return NULL;
@@ -427,8 +430,35 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
return nfp_net_fw_request(pdev, pf, fw_name);
}
+static int
+nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp,
+ const char *key, const char *default_val, int max_val,
+ int *value)
+{
+ char hwinfo[64];
+ long hi_val;
+ int err;
+
+ snprintf(hwinfo, sizeof(hwinfo), "%s", key);
+ err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
+ default_val);
+ if (err)
+ return err;
+
+ err = kstrtol(hwinfo, 0, &hi_val);
+ if (err || hi_val < 0 || hi_val > max_val) {
+ dev_warn(&pdev->dev,
+ "Invalid value '%s' from '%s', ignoring\n",
+ hwinfo, key);
+ err = kstrtol(default_val, 0, &hi_val);
+ }
+
+ *value = hi_val;
+ return err;
+}
+
/**
- * nfp_net_fw_load() - Load the firmware image
+ * nfp_fw_load() - Load the firmware image
* @pdev: PCI Device structure
* @pf: NFP PF Device structure
* @nsp: NFP SP handle
@@ -438,44 +468,107 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
static int
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
{
- const struct firmware *fw;
+ bool do_reset, fw_loaded = false;
+ const struct firmware *fw = NULL;
+ int err, reset, policy, ifcs = 0;
+ char *token, *ptr;
+ char hwinfo[64];
u16 interface;
- int err;
+
+ snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc");
+ err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
+ NFP_NSP_DRV_LOAD_IFC_DEFAULT);
+ if (err)
+ return err;
interface = nfp_cpp_interface(pf->cpp);
- if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) {
- /* Only Unit 0 should reset or load firmware */
+ ptr = hwinfo;
+ while ((token = strsep(&ptr, ","))) {
+ unsigned long interface_hi;
+
+ err = kstrtoul(token, 0, &interface_hi);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to parse interface '%s': %d\n",
+ token, err);
+ return err;
+ }
+
+ ifcs++;
+ if (interface == interface_hi)
+ break;
+ }
+
+ if (!token) {
dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
return 0;
}
+ err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset",
+ NFP_NSP_DRV_RESET_DEFAULT,
+ NFP_NSP_DRV_RESET_NEVER, &reset);
+ if (err)
+ return err;
+
+ err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash",
+ NFP_NSP_APP_FW_LOAD_DEFAULT,
+ NFP_NSP_APP_FW_LOAD_PREF, &policy);
+ if (err)
+ return err;
+
fw = nfp_net_fw_find(pdev, pf);
- if (!fw) {
- if (nfp_nsp_has_stored_fw_load(nsp))
- nfp_nsp_load_stored_fw(nsp);
- return 0;
+ do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS ||
+ (fw && reset == NFP_NSP_DRV_RESET_DISK);
+
+ if (do_reset) {
+ dev_info(&pdev->dev, "Soft-resetting the NFP\n");
+ err = nfp_nsp_device_soft_reset(nsp);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to soft reset the NFP: %d\n", err);
+ goto exit_release_fw;
+ }
}
- dev_info(&pdev->dev, "Soft-reset, loading FW image\n");
- err = nfp_nsp_device_soft_reset(nsp);
- if (err < 0) {
- dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n",
- err);
- goto exit_release_fw;
- }
+ if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) {
+ if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp))
+ goto exit_release_fw;
- err = nfp_nsp_load_fw(nsp, fw);
- if (err < 0) {
- dev_err(&pdev->dev, "FW loading failed: %d\n", err);
- goto exit_release_fw;
+ err = nfp_nsp_load_fw(nsp, fw);
+ if (err < 0) {
+ dev_err(&pdev->dev, "FW loading failed: %d\n",
+ err);
+ goto exit_release_fw;
+ }
+ dev_info(&pdev->dev, "Finished loading FW image\n");
+ fw_loaded = true;
+ } else if (policy != NFP_NSP_APP_FW_LOAD_DISK &&
+ nfp_nsp_has_stored_fw_load(nsp)) {
+
+ /* Don't propagate this error, to stick with legacy driver
+ * behavior; failure will be detected later during init.
+ */
+ if (!nfp_nsp_load_stored_fw(nsp))
+ dev_info(&pdev->dev, "Finished loading stored FW image\n");
+
+ /* Don't set fw_loaded in this case, since other devices
+ * may reuse the firmware when configured this way.
+ */
+ } else {
+ dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n");
}
- dev_info(&pdev->dev, "Finished loading FW image\n");
-
exit_release_fw:
release_firmware(fw);
- return err < 0 ? err : 1;
+ /* We don't want to unload firmware when other devices may still be
+ * dependent on it, which could be the case if there are multiple
+ * devices that could load firmware.
+ */
+ if (fw_loaded && ifcs == 1)
+ pf->unload_fw_on_remove = true;
+
+ return err < 0 ? err : fw_loaded;
}
static void
@@ -593,6 +686,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
struct nfp_pf *pf;
int err;
+ if (pdev->vendor == PCI_VENDOR_ID_NETRONOME &&
+ pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
+ dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
@@ -697,7 +794,7 @@ err_net_remove:
err_fw_unload:
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
- if (pf->fw_loaded)
+ if (pf->unload_fw_on_remove)
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
kfree(pf->nspi);
@@ -720,9 +817,13 @@ err_pci_disable:
return err;
}
-static void nfp_pci_remove(struct pci_dev *pdev)
+static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
{
- struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct nfp_pf *pf;
+
+ pf = pci_get_drvdata(pdev);
+ if (!pf)
+ return;
nfp_hwmon_unregister(pf);
@@ -733,7 +834,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
- if (pf->fw_loaded)
+ if (unload_fw && pf->unload_fw_on_remove)
nfp_fw_unload(pf);
destroy_workqueue(pf->wq);
@@ -749,11 +850,22 @@ static void nfp_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void nfp_pci_remove(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, true);
+}
+
+static void nfp_pci_shutdown(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, false);
+}
+
static struct pci_driver nfp_pci_driver = {
.name = nfp_driver_name,
.id_table = nfp_pci_device_ids,
.probe = nfp_pci_probe,
.remove = nfp_pci_remove,
+ .shutdown = nfp_pci_shutdown,
.sriov_configure = nfp_pcie_sriov_configure,
};
@@ -793,6 +905,8 @@ static void __exit nfp_main_exit(void)
module_init(nfp_main_init);
module_exit(nfp_main_exit);
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index b7211f200d22..5d5812fd9317 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -64,6 +64,7 @@ struct nfp_dumpspec {
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
* @fw_loaded: Is the firmware loaded?
* @unload_fw_on_remove: Do we need to unload firmware on driver removal?
* @ctrl_vnic: Pointer to the control vNIC if available
* @mip: MIP handle
* @rtbl: RTsym table
@@ -110,6 +111,7 @@ struct nfp_pf {
unsigned int num_vfs;
bool fw_loaded;
+ bool unload_fw_on_remove;
struct nfp_net *ctrl_vnic;
@@ -185,4 +187,7 @@ int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index,
int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type);
+
+int nfp_devlink_params_register(struct nfp_pf *pf);
+void nfp_devlink_params_unregister(struct nfp_pf *pf);
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index be37c2d6151c..250f510b1d21 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -12,11 +12,14 @@
#ifndef _NFP_NET_H_
#define _NFP_NET_H_
+#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -63,7 +66,7 @@
#define NFP_NET_MAX_DMA_BITS 40
/* Default size for MTU and freelist buffer sizes */
-#define NFP_NET_DEFAULT_MTU 1500
+#define NFP_NET_DEFAULT_MTU 1500U
/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND 64
@@ -238,7 +241,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_DECRYPTED cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -365,6 +368,7 @@ struct nfp_net_rx_ring {
* @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
* @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
* @hw_csum_rx_error: Counter of packets with bad checksums
+ * @hw_tls_rx: Number of packets with TLS decrypted by hardware
* @tx_sync: Seqlock for atomic updates of TX stats
* @tx_pkts: Number of Transmitted packets
* @tx_bytes: Number of Transmitted bytes
@@ -372,6 +376,11 @@ struct nfp_net_rx_ring {
* @hw_csum_tx_inner: Counter of inner TX checksum offload requests
* @tx_gather: Counter of packets with Gather DMA
* @tx_lso: Counter of LSO packets sent
+ * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
+ * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
+ * by the fallback path because packets came out of order
+ * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
+ * path could not encrypt them
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
@@ -392,7 +401,7 @@ struct nfp_net_r_vector {
struct {
struct tasklet_struct tasklet;
struct sk_buff_head queue;
- struct spinlock lock;
+ spinlock_t lock;
};
};
@@ -408,22 +417,30 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_ok;
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_complete;
+ u64 hw_tls_rx;
+
+ u64 hw_csum_rx_error;
+ u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
- u64 hw_csum_tx;
+
+ u64 ____cacheline_aligned_in_smp hw_csum_tx;
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+ u64 hw_tls_tx;
- u64 hw_csum_rx_error;
- u64 rx_replace_buf_alloc_fail;
+ u64 tls_tx_fallback;
+ u64 tls_tx_no_fallback;
u64 tx_errors;
u64 tx_busy;
+ /* Cold data follows */
+
u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
@@ -458,6 +475,7 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @chained_metadata_format: Firmware will use new metadata format
+ * @ktls_tx: Is kTLS TX enabled?
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -482,6 +500,7 @@ struct nfp_net_dp {
u8 is_vf:1;
u8 chained_metadata_format:1;
+ u8 ktls_tx:1;
u8 rx_dma_dir;
u8 rx_offset;
@@ -539,12 +558,17 @@ struct nfp_net_dp {
* @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
- * @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
+ * @reconfig_sync_present and HW reconfiguration request
+ * regs/machinery from async requests (sync must take
+ * @bar_lock)
* @reconfig_posted: Pending reconfig bits coming from async sources
* @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
+ * @bar_lock: vNIC config BAR access lock, protects: update,
+ * mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -557,6 +581,18 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @tlv_caps: Parsed TLV capabilities
+ * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
+ * @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
+ * @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
+ * @ktls_no_space: Counter of firmware rejecting kTLS connection due to
+ * lack of space
+ * @mbox_cmsg: Common Control Message via vNIC mailbox state
+ * @mbox_cmsg.queue: CCM mbox queue of pending messages
+ * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
+ * @mbox_cmsg.workq: CCM mbox work queue for @wait_work and @runq_work
+ * @mbox_cmsg.wait_work: CCM mbox posted msg reconfig wait work
+ * @mbox_cmsg.runq_work: CCM mbox posted msg queue runner work
+ * @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -615,6 +651,8 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
+ struct semaphore bar_lock;
+
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs;
@@ -630,6 +668,22 @@ struct nfp_net {
struct nfp_net_tlv_caps tlv_caps;
+ unsigned int ktls_tx_conn_cnt;
+ unsigned int ktls_rx_conn_cnt;
+
+ atomic64_t ktls_conn_id_gen;
+
+ atomic_t ktls_no_space;
+
+ struct {
+ struct sk_buff_head queue;
+ wait_queue_head_t wq;
+ struct workqueue_struct *workq;
+ struct work_struct wait_work;
+ struct work_struct runq_work;
+ u16 tag;
+ } mbox_cmsg;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -839,6 +893,21 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
spin_unlock_bh(&nn->r_vecs[0].lock);
}
+static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
+{
+ down(&nn->bar_lock);
+}
+
+static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
+{
+ return !down_trylock(&nn->bar_lock);
+}
+
+static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
+{
+ up(&nn->bar_lock);
+}
+
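bar_lock is a semaphore rather than a spinlock because holders may sleep while the firmware processes a reconfiguration. A minimal sketch of the expected mailbox calling pattern, using the wrappers added below (it mirrors the VLAN filter path later in this patch):

	err = nfp_net_mbox_lock(nn, data_size);	/* validates size, takes bar_lock */
	if (err)
		return err;
	/* ... write command parameters into the mailbox area ... */
	return nfp_net_mbox_reconfig_and_unlock(nn, mbox_cmd); /* drops bar_lock */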
/* Globals */
extern const char nfp_driver_version[];
@@ -866,12 +935,17 @@ void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6d1b8816552e..bcdcd6de7dea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -36,14 +36,17 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
+#include <net/tls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
+#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+#include "crypto/crypto.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -137,20 +140,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
return false;
}
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
bool timed_out = false;
+ int i;
+
+ /* Poll update field, waiting for NFP to ack the config.
+ * Do an opportunistic wait-busy loop, afterward sleep.
+ */
+ for (i = 0; i < 50; i++) {
+ if (nfp_net_reconfig_check_done(nn, false))
+ return false;
+ udelay(4);
+ }
- /* Poll update field, waiting for NFP to ack the config */
while (!nfp_net_reconfig_check_done(nn, timed_out)) {
- msleep(1);
+ usleep_range(250, 500);
timed_out = time_is_before_eq_jiffies(deadline);
}
+ return timed_out;
+}
+
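The rewritten wait first busy-polls for up to 50 * 4 us (roughly 200 us), since most reconfigurations complete quickly, and only then falls back to 250-500 us sleeps until the deadline; a timeout is turned into -EIO by the nfp_net_reconfig_wait() wrapper below.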
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ if (__nfp_net_reconfig_wait(nn, deadline))
+ return -EIO;
+
if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
return -EIO;
- return timed_out ? -EIO : 0;
+ return 0;
}
static void nfp_net_reconfig_timer(struct timer_list *t)
@@ -210,6 +230,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_lock_bh(&nn->reconfig_lock);
+ WARN_ON(nn->reconfig_sync_present);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
@@ -243,7 +264,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
}
/**
- * nfp_net_reconfig() - Reconfigure the firmware
+ * __nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config
*
@@ -253,7 +274,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
@@ -274,8 +295,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret;
}
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+ int ret;
+
+ nn_ctrl_bar_lock(nn);
+ ret = __nfp_net_reconfig(nn, update);
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
+{
+ if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
+ nn_err(nn, "mailbox too small for %u of data (%u)\n",
+ data_size, nn->tlv_caps.mbox_len);
+ return -EIO;
+ }
+
+ nn_ctrl_bar_lock(nn);
+ return 0;
+}
+
/**
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
* @nn: NFP Net device to reconfigure
* @mbox_cmd: The value for the mailbox command
*
@@ -283,19 +327,14 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- if (!nfp_net_has_mbox(&nn->tlv_caps)) {
- nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
- return -EIO;
- }
-
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
- ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
nn_err(nn, "Mailbox update error\n");
return ret;
@@ -304,6 +343,33 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
+}
+
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nfp_net_reconfig_wait_posted(nn);
+
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+}
+
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
+ nn_ctrl_bar_unlock(nn);
+ return ret;
+}
+
/* Interrupt configuration and handling
*/
@@ -756,6 +822,100 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
u64_stats_update_end(&r_vec->tx_sync);
}
+static struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct sk_buff *nskb;
+ bool resync_pending;
+ u32 datalen, seq;
+
+ if (likely(!dp->ktls_tx))
+ return skb;
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ return skb;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ resync_pending = tls_offload_tx_resync_pending(skb->sk);
+ if (unlikely(resync_pending || ntls->next_seq != seq)) {
+ /* Pure ACK out of order already */
+ if (!datalen)
+ return skb;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ nskb = tls_encrypt_skb(skb);
+ if (!nskb) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_no_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NULL;
+ }
+ /* encryption wasn't necessary */
+ if (nskb == skb)
+ return skb;
+ /* we don't re-check ring space */
+ if (unlikely(skb_is_nonlinear(nskb))) {
+ nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(nskb);
+ return NULL;
+ }
+
+ /* jump forward, a TX may have gotten lost, need to sync TX */
+ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
+ tls_offload_tx_resync_request(nskb->sk, seq,
+ ntls->next_seq);
+
+ *nr_frags = 0;
+ return nskb;
+ }
+
+ if (datalen) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb_is_gso(skb))
+ r_vec->hw_tls_tx++;
+ else
+ r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ ntls->next_seq += datalen;
+#endif
+ return skb;
+}
+
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ u32 datalen, seq;
+
+ if (!tls_handle)
+ return;
+ if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+ return;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ if (ntls->next_seq == seq + datalen)
+ ntls->next_seq = seq;
+ else
+ WARN_ON_ONCE(1);
+#endif
+}
+
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
@@ -763,24 +923,47 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
-static int nfp_net_prep_port_id(struct sk_buff *skb)
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ if (likely(!md_dst && !tls_handle))
return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
+
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
- if (unlikely(skb_cow_head(skb, 8)))
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- data = skb_push(skb, 8);
- put_unaligned_be32(NFP_NET_META_PORTID, data);
- put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
- return 8;
+ return md_bytes;
}
/**
@@ -793,7 +976,7 @@ static int nfp_net_prep_port_id(struct sk_buff *skb)
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
@@ -803,6 +986,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
+ u64 tls_handle = 0;
u16 qidx;
dp = &nn->dp;
@@ -824,18 +1008,21 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- md_bytes = nfp_net_prep_port_id(skb);
- if (unlikely(md_bytes < 0)) {
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
nfp_net_tx_xmit_more_flush(tx_ring);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
- goto err_free;
+ goto err_dma_err;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
@@ -909,7 +1096,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
@@ -931,12 +1118,14 @@ err_unmap:
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
-err_free:
+err_dma_err:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -967,7 +1156,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct nfp_net_tx_buf *tx_buf;
struct sk_buff *skb;
int fidx, nr_frags;
@@ -1082,7 +1271,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct netdev_queue *nd_q;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
@@ -1635,6 +1824,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ bool redir_egress = false;
struct net_device *netdev;
dma_addr_t new_dma_addr;
u32 meta_len_xdp = 0;
@@ -1770,13 +1960,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net *nn;
nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
continue;
}
- nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
}
skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1805,13 +1998,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
}
if (xdp_prog) {
@@ -3111,7 +3320,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3119,17 +3330,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3137,11 +3354,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
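The add and kill handlers above are now identical apart from the mailbox command, which suggests the shared sequence could be folded into one place. A hedged sketch of such a helper (the function name is hypothetical; every call it makes appears in this patch):

/* Hypothetical helper for the shared steps of the two VLAN handlers:
 * lock the mailbox (verifying it can hold the VLAN filter arguments),
 * write the arguments, then issue the reconfig and drop the lock.
 */
static int nfp_net_mbox_vlan_filter(struct nfp_net *nn, u32 cmd, u16 vid)
{
	int err;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}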
static void nfp_net_stat64(struct net_device *netdev,
@@ -3324,8 +3545,11 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
struct nfp_net *nn = netdev_priv(netdev);
int n;
+ /* If the port is defined, a devlink_port is registered and the
+ * devlink core takes care of name formatting.
+ */
if (nn->port)
- return nfp_port_get_phys_port_name(netdev, name, len);
+ return -EOPNOTSUPP;
if (nn->dp.is_vf || nn->vnic_no_name)
return -EOPNOTSUPP;
@@ -3517,6 +3741,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_setup_tc = nfp_port_setup_tc,
@@ -3530,8 +3755,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
/**
@@ -3548,7 +3772,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3564,7 +3788,6 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
- nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
@@ -3632,6 +3855,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ sema_init(&nn->bar_lock, 1);
+
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
@@ -3642,6 +3867,10 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
if (err)
goto err_free_nn;
+ err = nfp_ccm_mbox_alloc(nn);
+ if (err)
+ goto err_free_nn;
+
return nn;
err_free_nn:
@@ -3659,6 +3888,8 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+ nfp_ccm_mbox_free(nn);
+
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3886,14 +4117,7 @@ int nfp_net_init(struct nfp_net *nn)
/* Set default MTU and Freelist buffer size */
if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
- if (nn->app->ctrl_mtu <= nn->max_mtu) {
- nn->dp.mtu = nn->app->ctrl_mtu;
- } else {
- if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
- nn_warn(nn, "app requested MTU above max supported %u > %u\n",
- nn->app->ctrl_mtu, nn->max_mtu);
- nn->dp.mtu = nn->max_mtu;
- }
+ nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
nn->dp.mtu = nn->max_mtu;
} else {
@@ -3920,9 +4144,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- if (nn->dp.netdev)
- nfp_net_netdev_init(nn);
-
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -3935,11 +4156,27 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
+ if (nn->dp.netdev) {
+ nfp_net_netdev_init(nn);
+
+ err = nfp_ccm_mbox_init(nn);
+ if (err)
+ return err;
+
+ err = nfp_net_tls_init(nn);
+ if (err)
+ goto err_clean_mbox;
+ }
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
return 0;
return register_netdev(nn->dp.netdev);
+
+err_clean_mbox:
+ nfp_ccm_mbox_clean(nn);
+ return err;
}
/**
@@ -3952,5 +4189,6 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_ccm_mbox_clean(nn);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 6d5213b5bcb0..d835c14b7257 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -99,6 +99,21 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->repr_cap = readl(data);
break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ if (length >= 4)
+ caps->mbox_cmsg_types = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 372adea10e14..ee6b24e4eacd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -44,6 +44,7 @@
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
+#define NFP_NET_META_CONN_HANDLE 7
#define NFP_META_PORT_ID_CTRL ~0U
@@ -104,8 +105,6 @@
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
-#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
-#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -130,7 +129,6 @@
#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
-#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
@@ -138,6 +136,7 @@
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
+#define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -392,12 +391,12 @@
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
-#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
/**
* VLAN filtering using general use mailbox
@@ -470,6 +469,16 @@
* %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
* Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
* can be used on representors.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ * Variable, bitmap of control message types supported by the mailbox handler.
+ * Bit 0 corresponds to message type 0, bit 1 to type 1, etc. Control
+ * messages are encapsulated into simple TLVs, terminated by an end TLV,
+ * and written to the Mailbox.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ * 8 words, bitmaps of supported and enabled crypto operations.
+ * The first 16B (4 words) contain a bitmap of the supported crypto
+ * operations, and the next 16B contain the enabled operations.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -479,6 +488,8 @@
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
+#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
struct device;
@@ -488,20 +499,20 @@ struct device;
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
* @repr_cap: capabilities for representors
+ * @mbox_cmsg_types: cmsgs which can be passed through the mailbox
+ * @crypto_ops: supported crypto operations
+ * @crypto_enable_off: offset of crypto ops enable region
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
u32 repr_cap;
+ u32 mbox_cmsg_types;
+ u32 crypto_ops;
+ unsigned int crypto_enable_off;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps);
-
-static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
-{
- return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
-}
-
#endif /* _NFP_NET_CTRL_H_ */
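To make the CRYPTO_OPS layout concrete: the nfp_net_tlv_caps_parse() hunk earlier keeps the first supported-ops word and records where the enable bitmaps begin, 16B into the TLV value. A minimal userspace sketch of the same bookkeeping (buffer contents hypothetical; a little-endian host stands in for readl()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_caps {
	uint32_t crypto_ops;		/* first supported-ops word */
	unsigned int crypto_enable_off;	/* offset of the enable region */
};

static int parse_crypto_ops(const uint8_t *ctrl_mem, const uint8_t *value,
			    unsigned int length, struct tlv_caps *caps)
{
	uint32_t ops;

	if (length < 32)		/* 8 words minimum, as enforced above */
		return -1;

	memcpy(&ops, value, sizeof(ops));	/* LE host assumed */
	caps->crypto_ops = ops;
	/* enabled bitmaps start 16B into the 32B value */
	caps->crypto_enable_off = (unsigned int)(value - ctrl_mem) + 16;
	return 0;
}

int main(void)
{
	uint8_t bar[64] = { [8] = 0x03 };	/* hypothetical ctrl memory */
	struct tlv_caps caps;

	if (!parse_crypto_ops(bar, bar + 8, 32, &caps))
		printf("ops=%#x enable_off=%u\n",
		       caps.crypto_ops, caps.crypto_enable_off);
	return 0;
}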
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index ab7f2498e1c4..553c708694e8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
- return;
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(queues))
- return;
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
xdp = debugfs_create_dir("xdp", queues);
- if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
- return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
@@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
- struct dentry *dev_dir;
-
- if (IS_ERR_OR_NULL(nfp_dir))
- return NULL;
-
- dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
- if (IS_ERR_OR_NULL(dev_dir))
- return NULL;
-
- return dev_dir;
+ return debugfs_create_dir(pci_name(pdev), nfp_dir);
}
void nfp_net_debugfs_dir_clean(struct dentry **dir)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 690b62718dbb..1b840ee47339 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
+#include <linux/sfp.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
@@ -149,8 +150,11 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 9
+#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
+#define NN_CTRL_PATH_STATS 1
+
+#define SFP_SFF_REV_COMPLIANCE 1
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
@@ -420,7 +424,8 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
+ NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -439,10 +444,16 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
+ data = nfp_pr_et(data, "rx_tls_decrypted_packets");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
+ data = nfp_pr_et(data, "tx_tls_encrypted_packets");
+ data = nfp_pr_et(data, "tx_tls_ooo");
+ data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
+
+ data = nfp_pr_et(data, "hw_tls_no_space");
return data;
}
@@ -465,16 +476,20 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+ tmp[5] = nn->r_vecs[i].hw_tls_rx;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[5] = nn->r_vecs[i].hw_csum_tx;
- tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[7] = nn->r_vecs[i].tx_gather;
- tmp[8] = nn->r_vecs[i].tx_lso;
+ tmp[6] = nn->r_vecs[i].hw_csum_tx;
+ tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[8] = nn->r_vecs[i].tx_gather;
+ tmp[9] = nn->r_vecs[i].tx_lso;
+ tmp[10] = nn->r_vecs[i].hw_tls_tx;
+ tmp[11] = nn->r_vecs[i].tls_tx_fallback;
+ tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -486,6 +501,8 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
+ *data++ = atomic_read(&nn->ktls_no_space);
+
return data;
}
@@ -1096,6 +1113,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
buffer);
}
+static int
+nfp_port_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ unsigned int read_len;
+ struct nfp_nsp *nsp;
+ int err = 0;
+ u8 data;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ switch (eth_port->interface) {
+ case NFP_INTERFACE_SFP:
+ case NFP_INTERFACE_SFP28:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF8472_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (!data) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF_REV_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (data < 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Unsupported module 0x%x detected\n",
+ eth_port->interface);
+ err = -EINVAL;
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_port_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ eeprom->offset, data, eeprom->len,
+ &eeprom->len);
+ if (err < 0) {
+ if (eeprom->len) {
+ netdev_warn(netdev,
+ "Incomplete read from module EEPROM: %d\n",
+ err);
+ err = 0;
+ } else {
+ netdev_err(netdev,
+ "Reading from module EEPROM failed: %d\n",
+ err);
+ }
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
static int nfp_net_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -1253,6 +1394,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
@@ -1272,6 +1415,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 08f5fdbd8e41..921db40047d7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -150,34 +150,39 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nn->id = id;
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ return err;
+ }
+
err = nfp_net_init(nn);
if (err)
- return err;
+ goto err_devlink_port_clean;
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port) {
- err = nfp_devlink_port_register(pf->app, nn->port);
- if (err)
- goto err_dfs_clean;
- }
+ if (nn->port)
+ nfp_devlink_port_type_eth_set(nn->port);
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_clean;
+ goto err_devlink_port_type_clean;
}
return 0;
-err_devlink_port_clean:
+err_devlink_port_type_clean:
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
-err_dfs_clean:
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+err_devlink_port_clean:
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
return err;
}
@@ -200,10 +205,8 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
/* Kill the vNIC if app init marked it as invalid */
- if (nn->port && nn->port->type == NFP_PORT_INVALID) {
+ if (nn->port && nn->port->type == NFP_PORT_INVALID)
nfp_net_pf_free_vnic(pf, nn);
- continue;
- }
}
if (list_empty(&pf->vnics))
@@ -221,9 +224,11 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
}
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
@@ -704,6 +709,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_devlink_unreg;
+ err = nfp_devlink_params_register(pf);
+ if (err)
+ goto err_shared_buf_unreg;
+
mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
@@ -737,6 +746,8 @@ err_free_vnics:
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
mutex_unlock(&pf->lock);
+ nfp_devlink_params_unregister(pf);
+err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
@@ -766,6 +777,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
mutex_unlock(&pf->lock);
+ nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
devlink_unregister(priv_to_devlink(pf));
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 94d228c04496..79d72c88bbef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -267,13 +267,14 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
@@ -298,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -323,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
- nfp_repr_set_lockdep_class(netdev);
-
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index b6ec46ed0540..3fdaaf8ed2ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
@@ -146,6 +146,30 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
"spoofchk");
}
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ unsigned int vf_offset;
+ u8 vf_ctrl;
+ int err;
+
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
+ "trust");
+ if (err)
+ return err;
+
+ /* Write trust control bit to VF entry in VF config symbol */
+ vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ +
+ NFP_NET_VF_CFG_CTRL;
+ vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset);
+ vf_ctrl &= ~NFP_NET_VF_CFG_CTRL_TRUST;
+ vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_TRUST, enable);
+ writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset);
+
+ return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_TRUST,
+ "trust");
+}
+
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state)
{
@@ -213,6 +237,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
+ ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index c9f09c5bb5ee..a3db0cbf6425 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
@@ -19,12 +19,14 @@
#define NFP_NET_VF_CFG_MB_CAP_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
#define NFP_NET_VF_CFG_MB_UPD_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -35,6 +37,7 @@
#define NFP_NET_VF_CFG_MAC_HI 0x0
#define NFP_NET_VF_CFG_MAC_LO 0x6
#define NFP_NET_VF_CFG_CTRL 0x4
+#define NFP_NET_VF_CFG_CTRL_TRUST 0x8
#define NFP_NET_VF_CFG_CTRL_SPOOF 0x4
#define NFP_NET_VF_CFG_CTRL_LINK_STATE 0x3
#define NFP_NET_VF_CFG_LS_MODE_AUTO 0
@@ -48,6 +51,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state);
int nfp_app_get_vf_config(struct net_device *netdev, int vf,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 1145849ca7ba..e4977cdf7678 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -282,8 +282,14 @@ err_free_vf:
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
- struct nfp_net_vf *vf = pci_get_drvdata(pdev);
- struct nfp_net *nn = vf->nn;
+ struct nfp_net_vf *vf;
+ struct nfp_net *nn;
+
+ vf = pci_get_drvdata(pdev);
+ if (!vf)
+ return;
+
+ nn = vf->nn;
/* Note that the order is slightly different from above, as we need
 * to keep the nn pointer around until we have freed everything.
@@ -317,4 +323,5 @@ struct pci_driver nfp_netvf_pci_driver = {
.id_table = nfp_netvf_pci_device_ids,
.probe = nfp_netvf_pci_probe,
.remove = nfp_netvf_pci_remove,
+ .shutdown = nfp_netvf_pci_remove,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 90ae053f5c07..d7fd203bb180 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -131,6 +131,8 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
+void nfp_devlink_port_type_eth_set(struct nfp_port *port);
+void nfp_devlink_port_type_clear(struct nfp_port *port);
/**
* Mac stats (0x0000 - 0x0200)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile
deleted file mode 100644
index 805fa28f391a..000000000000
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile
deleted file mode 100644
index 805fa28f391a..000000000000
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 3cfecf105bde..85734c6badf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -24,8 +24,9 @@
/* NFP6000 PL */
#define NFP_PL_DEVICE_ID 0x00000004
#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
-
-#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
+#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16)
+#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \
+ NFP_PL_DEVICE_ID_MASK)
/**
* nfp_cpp_readl() - Read a u32 word from a CPP location
@@ -120,22 +121,17 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
*/
int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
{
- const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
u32 reg;
int err;
- err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model);
- if (err < 0)
- return err;
-
- /* The PL's PluDeviceID revision code is authoratative */
- *model &= ~0xff;
err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID,
&reg);
if (err < 0)
return err;
- *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10;
+ *model = reg & NFP_PL_DEVICE_MODEL_MASK;
+ if (*model & NFP_PL_DEVICE_ID_MASK)
+ *model -= 0x10;
return 0;
}
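The reworked autodetect derives the model straight from the PL device ID word: keep the part number (bits 31:16) and revision (bits 7:0), and because revision codes are 0x10-based, shift a non-zero revision down by 0x10. A worked example with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define PL_DEVICE_ID_MASK	0x000000ffu	/* GENMASK(7, 0) */
#define PL_DEVICE_PART_MASK	0xffff0000u	/* GENMASK(31, 16) */
#define PL_DEVICE_MODEL_MASK	(PL_DEVICE_PART_MASK | PL_DEVICE_ID_MASK)

int main(void)
{
	uint32_t reg = 0x60000011;	/* hypothetical PL device ID word */
	uint32_t model = reg & PL_DEVICE_MODEL_MASK;

	if (model & PL_DEVICE_ID_MASK)	/* 0x10-based revision -> 0-based */
		model -= 0x10;

	printf("model=%#x\n", model);	/* prints model=0x60000001 */
	return 0;
}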
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 3a4e224a64b7..f18e787fa9ad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -79,6 +79,8 @@
#define NFP_VERSIONS_NCSI_OFF 22
#define NFP_VERSIONS_CFGR_OFF 26
+#define NSP_SFF_EEPROM_BLOCK_LEN 8
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -94,7 +96,10 @@ enum nfp_nsp_cmd {
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
+ SPCODE_HWINFO_SET = 18, /* Set HWinfo entry */
+ SPCODE_FW_LOADED = 19, /* Is application firmware loaded */
SPCODE_VERSIONS = 21, /* Report FW versions */
+ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
};
struct nfp_nsp_dma_buf {
@@ -140,6 +145,8 @@ struct nfp_nsp {
* @option: NFP SP Command Argument
* @buf: NFP SP Buffer Address
* @error_cb: Callback for interpreting option if error occurred
+ * @error_quiet: Don't print command error/warning. Protocol errors are still
+ * logged.
*/
struct nfp_nsp_command_arg {
u16 code;
@@ -148,6 +155,7 @@ struct nfp_nsp_command_arg {
u32 option;
u64 buf;
void (*error_cb)(struct nfp_nsp *state, u32 ret_val);
+ bool error_quiet;
};
/**
@@ -238,11 +246,16 @@ static int nfp_nsp_check(struct nfp_nsp *state)
state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
- if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ if (state->ver.major != NSP_MAJOR) {
nfp_err(cpp, "Unsupported ABI %hu.%hu\n",
state->ver.major, state->ver.minor);
return -EINVAL;
}
+ if (state->ver.minor < NSP_MINOR) {
+ nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n",
+ NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR);
+ return -EINVAL;
+ }
if (reg & NSP_STATUS_BUSY) {
nfp_err(cpp, "Service processor busy!\n");
@@ -397,8 +410,10 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg)
err = FIELD_GET(NSP_STATUS_RESULT, reg);
if (err) {
- nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
- -err, (int)ret_val, arg->code);
+ if (!arg->error_quiet)
+ nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, arg->code);
+
if (arg->error_cb)
arg->error_cb(state, ret_val);
else
@@ -883,12 +898,14 @@ int nfp_nsp_load_stored_fw(struct nfp_nsp *state)
}
static int
-__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
+__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size,
+ bool optional)
{
struct nfp_nsp_command_buf_arg hwinfo_lookup = {
{
.code = SPCODE_HWINFO_LOOKUP,
.option = size,
+ .error_quiet = optional,
},
.in_buf = buf,
.in_size = size,
@@ -905,7 +922,7 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE);
- err = __nfp_nsp_hwinfo_lookup(state, buf, size);
+ err = __nfp_nsp_hwinfo_lookup(state, buf, size, false);
if (err)
return err;
@@ -917,6 +934,66 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
return 0;
}
+int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf,
+ unsigned int size, const char *default_val)
+{
+ int err;
+
+ /* Ensure that the default value is usable irrespective of whether
+ * it is actually going to be used.
+ */
+ if (strnlen(default_val, size) == size)
+ return -EINVAL;
+
+ if (!nfp_nsp_has_hwinfo_lookup(state)) {
+ strcpy(buf, default_val);
+ return 0;
+ }
+
+ size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE);
+
+ err = __nfp_nsp_hwinfo_lookup(state, buf, size, true);
+ if (err) {
+ if (err == -ENOENT) {
+ strcpy(buf, default_val);
+ return 0;
+ }
+
+ nfp_err(state->cpp, "NSP HWinfo lookup failed: %d\n", err);
+ return err;
+ }
+
+ if (strnlen(buf, size) == size) {
+ nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
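Note the calling convention: buf is both input (the HWinfo key) and output (the looked-up value, or default_val when the NSP lacks the lookup command or returns -ENOENT). A hedged usage sketch, modeled on the 'app_fw_from_flash' key and the default defined in nfp_nsp.h later in this patch; the caller name and conversion are illustrative:

/* Sketch only: read the app FW load preference, falling back to the
 * default when the key is absent or the NSP ABI is too old.
 */
static int nfp_get_app_fw_load_pref(struct nfp_nsp *nsp, long *pref)
{
	char hwinfo[32] = "app_fw_from_flash";
	int err;

	err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
					     NFP_NSP_APP_FW_LOAD_DEFAULT);
	if (err)
		return err;

	return kstrtol(hwinfo, 0, pref);
}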
+
+int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg hwinfo_set = {
+ {
+ .code = SPCODE_HWINFO_SET,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &hwinfo_set);
+}
+
+int nfp_nsp_fw_loaded(struct nfp_nsp *state)
+{
+ const struct nfp_nsp_command_arg arg = {
+ .code = SPCODE_FW_LOADED,
+ };
+
+ return __nfp_nsp_command(state, &arg);
+}
+
int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size)
{
struct nfp_nsp_command_buf_arg versions = {
@@ -965,3 +1042,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
return (const char *)&buf[buf_off];
}
+
+static int
+__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg module_eeprom = {
+ {
+ .code = SPCODE_READ_SFF_EEPROM,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &module_eeprom);
+}
+
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len)
+{
+ struct eeprom_buf {
+ u8 metalen;
+ __le16 length;
+ __le16 offset;
+ __le16 readlen;
+ u8 eth_index;
+ u8 data[0];
+ } __packed *buf;
+ int bufsz, ret;
+
+ BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8);
+
+ /* Buffer must be large enough and rounded to the next block size. */
+ bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN));
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->metalen =
+ offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN;
+ buf->length = cpu_to_le16(len);
+ buf->offset = cpu_to_le16(offset);
+ buf->eth_index = eth_index;
+
+ ret = __nfp_nsp_module_eeprom(state, buf, bufsz);
+
+ *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen));
+ if (*read_len)
+ memcpy(data, buf->data, *read_len);
+
+ if (!ret && *read_len < len)
+ ret = -EIO;
+
+ kfree(buf);
+
+ return ret;
+}
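On the buffer arithmetic above: the 8B header is exactly one NSP_SFF_EEPROM_BLOCK_LEN block (hence a metalen of 1), and the data area is rounded up so the NSP always transfers whole blocks. A standalone worked example with a hypothetical request length:

#include <stdio.h>

#define BLOCK_LEN	8	/* NSP_SFF_EEPROM_BLOCK_LEN */
#define HDR_LEN		8	/* offsetof(struct eeprom_buf, data) */

int main(void)
{
	unsigned int len = 10;	/* hypothetical ethtool request length */
	unsigned int data_len = (len + BLOCK_LEN - 1) / BLOCK_LEN * BLOCK_LEN;

	/* header counts as one block; data rounded up to whole blocks */
	printf("metalen=%u bufsz=%u\n",
	       HDR_LEN / BLOCK_LEN, HDR_LEN + data_len);	/* 1 and 24 */
	return 0;
}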
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd9c358c646f..1531c1870020 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -22,6 +22,13 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf,
+ unsigned int size, const char *default_val);
+int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_fw_loaded(struct nfp_nsp *state);
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
@@ -38,11 +45,26 @@ static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 24;
}
+static inline bool nfp_nsp_has_hwinfo_set(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 25;
+}
+
+static inline bool nfp_nsp_has_fw_loaded(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 25;
+}
+
static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
{
return nfp_nsp_get_abi_ver_minor(state) > 27;
}
+static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 28;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
@@ -80,6 +102,21 @@ enum nfp_eth_fec {
#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+/* Defines the valid values of the 'abi_drv_reset' hwinfo key */
+#define NFP_NSP_DRV_RESET_DISK 0
+#define NFP_NSP_DRV_RESET_ALWAYS 1
+#define NFP_NSP_DRV_RESET_NEVER 2
+#define NFP_NSP_DRV_RESET_DEFAULT "0"
+
+/* Defines the valid values of the 'app_fw_from_flash' hwinfo key */
+#define NFP_NSP_APP_FW_LOAD_DISK 0
+#define NFP_NSP_APP_FW_LOAD_FLASH 1
+#define NFP_NSP_APP_FW_LOAD_PREF 2
+#define NFP_NSP_APP_FW_LOAD_DEFAULT "2"
+
+/* Define the default value for the 'abi_drv_load_ifc' key */
+#define NFP_NSP_DRV_LOAD_IFC_DEFAULT "0x10ff"
+
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
diff --git a/drivers/net/ethernet/netronome/nfp/nic/Makefile b/drivers/net/ethernet/netronome/nfp/nic/Makefile
deleted file mode 100644
index 805fa28f391a..000000000000
--- a/drivers/net/ethernet/netronome/nfp/nic/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# kbuild requires Makefile in a directory to build individual objects