Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/flower')
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c        79
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c           75
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h           71
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c          156
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h           16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c         127
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c        57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c    10
8 files changed, 397 insertions, 194 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f8bb87..b3567a596fc1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
return false;
}
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
u16 tmp_flags;
- int ifindex;
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ifindex = tcf_mirred_ifindex(action);
- out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ out_dev = tcf_mirred_dev(action);
if (!out_dev)
return -EOPNOTSUPP;
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
return 0;
}
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+ const struct tc_action *action)
{
struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
- return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+ struct nfp_flower_priv *priv = app->priv;
+
+ switch (tun->key.tp_dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ return NFP_FL_TUNNEL_VXLAN;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+ return NFP_FL_TUNNEL_GENEVE;
+ /* FALLTHROUGH */
+ default:
+ return NFP_FL_TUNNEL_NONE;
+ }
}
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
- const struct tc_action *action,
- struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type)
{
- struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
- size_t act_size = sizeof(struct nfp_fl_set_vxlan);
- u32 tmp_set_vxlan_type_index = 0;
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- if (vxlan->options_len) {
- /* Do not support options e.g. vxlan gpe. */
+ if (ip_tun->options_len)
return -EOPNOTSUPP;
- }
- set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
- set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
- tmp_set_vxlan_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ tmp_set_ip_tun_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
- set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
- set_vxlan->tun_id = vxlan->key.tun_id;
- set_vxlan->tun_flags = vxlan->key.tun_flags;
- set_vxlan->ipv4_ttl = vxlan->key.ttl;
- set_vxlan->ipv4_tos = vxlan->key.tos;
+ set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+ set_tun->tun_id = ip_tun->key.tun_id;
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
return 0;
}
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
- struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ } else if (is_tcf_tunnel_set(a)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ if (*tun_type == NFP_FL_TUNNEL_NONE)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_VXLAN;
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ set_tun = (void *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
if (err)
return err;
-
- *a_len += sizeof(struct nfp_fl_set_vxlan);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9cdb6a3..baaea6f1a9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
return 0;
}
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+ struct nfp_flower_cmsg_portreify *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = cpu_to_be16(exists);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_portreify *msg;
+ bool exists;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ if (!exists) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ return;
+ }
+
+ atomic_inc(&priv->reify_replies);
+ wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
- nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
- cmsg_hdr->version);
- goto out;
- }
-
type = cmsg_hdr->type;
switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
- nfp_flower_rx_flow_stats(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
@@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work)
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+ /* We need to deal with stats updates from HW asap */
+ nfp_flower_rx_flow_stats(app, skb);
+ dev_consume_skb_any(skb);
+ } else {
+ skb_queue_tail(&priv->cmsg_skbs, skb);
+ schedule_work(&priv->cmsg_work);
+ }
}
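The PORT_REIFY message added above is an 8-byte control payload: a 32-bit port id, 16 reserved bits and a 16-bit info word whose lowest bit carries the "exists" flag. The sketch below packs such a payload into a byte buffer; the field layout and the INFO_EXIST bit are taken from the cmsg.h hunk in this patch, while the packing helper and main() are only an illustration (the driver builds the message inside an skb obtained from nfp_flower_cmsg_alloc()).

#include <arpa/inet.h>   /* htonl(), htons() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST 0x1   /* BIT(0) in cmsg.h */

/* Wire layout of NFP_FLOWER_CMSG_TYPE_PORT_REIFY, as added in cmsg.h. */
struct portreify_msg {
	uint32_t portnum;   /* big-endian */
	uint16_t reserved;
	uint16_t info;      /* big-endian, bit 0 = port exists */
};

/* Serialize a reify notification into buf (must hold 8 bytes). */
static void pack_portreify(uint8_t *buf, uint32_t port_id, int exists)
{
	struct portreify_msg msg = {
		.portnum  = htonl(port_id),
		.reserved = 0,
		.info     = htons(exists ? NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST : 0),
	};

	memcpy(buf, &msg, sizeof(msg));
}

int main(void)
{
	uint8_t buf[sizeof(struct portreify_msg)];

	pack_portreify(buf, 0x40000001, 1);   /* hypothetical port id */
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}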
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 66070741d55f..adfe474c2cf0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
-#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
#define NFP_FLOWER_LAYER_MAC BIT(2)
#define NFP_FLOWER_LAYER_TP BIT(3)
@@ -50,8 +50,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
-#define NFP_FLOWER_LAYER_ETHER BIT(3)
-#define NFP_FLOWER_LAYER_ARP BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -108,6 +107,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_VXLAN = 2,
+ NFP_FL_TUNNEL_GENEVE = 4,
};
struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- * 3 2 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | key_layers | mask_id | reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
- u8 nfp_flow_key_layer;
- u8 mask_id;
- u16 reserved;
-};
-
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
struct nfp_fl_act_head head;
__be16 reserved;
- __be64 tun_id;
+ __be64 tun_id __packed;
__be32 tun_type_index;
- __be16 tun_flags;
- u8 ipv4_ttl;
- u8 ipv4_tos;
- __be32 extra[2];
-} __packed;
+ __be32 extra[3];
+};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
* NOTE: | TCI |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
u8 nfp_flow_key_layer;
u8 mask_id;
__be16 tci;
};
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | nfp_flow_key_layer2 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+ __be32 nfp_flow_key_layer2;
+};
+
/* Port details (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | tun_flags | tos | ttl |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | gpe_flags | Reserved | Next Protocol |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VNI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be16 tun_flags;
- u8 tos;
- u8 ttl;
- u8 gpe_flags;
- u8 reserved[2];
- u8 nxt_proto;
+ __be32 reserved[2];
__be32 tun_id;
};
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+ __be32 portnum;
+ u16 reserved;
+ __be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
unsigned int nbi, unsigned int nbi_port,
unsigned int phys_port);
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
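For orientation, the renamed and newly added match-key building blocks from this header can be checked with a tiny standalone program: nfp_flower_meta_tci and nfp_flower_ext_meta are one 4-byte word each, and nfp_flower_ipv4_udp_tun carries the tunnel source/destination addresses, two reserved words and the VNI. The definitions below mirror the diff, with the kernel's __be* types replaced by plain fixed-width integers for this userspace sketch; the size-printing main() is a convenience only and not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct nfp_flower_meta_tci ("Metadata with L2 (1W/4B)"). */
struct nfp_flower_meta_tci {
	uint8_t  nfp_flow_key_layer;
	uint8_t  mask_id;
	uint16_t tci;                 /* big-endian on the wire */
};

/* Mirrors struct nfp_flower_ext_meta ("Extended metadata ... (1W/4B)"). */
struct nfp_flower_ext_meta {
	uint32_t nfp_flow_key_layer2; /* big-endian on the wire */
};

/* Mirrors struct nfp_flower_ipv4_udp_tun (IPv4 UDP tunnel details). */
struct nfp_flower_ipv4_udp_tun {
	uint32_t ip_src;
	uint32_t ip_dst;
	uint32_t reserved[2];
	uint32_t tun_id;
};

int main(void)
{
	printf("meta_tci:     %zu bytes\n", sizeof(struct nfp_flower_meta_tci));
	printf("ext_meta:     %zu bytes\n", sizeof(struct nfp_flower_ext_meta));
	printf("ipv4_udp_tun: %zu bytes\n", sizeof(struct nfp_flower_ipv4_udp_tun));
	return 0;
}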
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c0d2d3..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
if (port >= reprs->num_reprs)
return NULL;
- return reprs->reprs[port];
+ return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+ bool exists)
+{
+ struct nfp_reprs *reprs;
+ int i, err, count = 0;
+
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (!reprs)
+ return 0;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ err = nfp_flower_cmsg_portreify(repr, exists);
+ if (err)
+ return err;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (!tot_repl)
+ return 0;
+
+ lockdep_assert_held(&app->pf->lock);
+ err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ msecs_to_jiffies(10));
+ if (err <= 0) {
+ nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+ return -EIO;
+ }
+
+ return 0;
}
static int
@@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
if (err)
return err;
- netif_carrier_on(repr->netdev);
netif_tx_wake_all_queues(repr->netdev);
return 0;
@@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
- netif_carrier_off(repr->netdev);
netif_tx_disable(repr->netdev);
return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
netdev_priv(netdev));
}
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
+ int err;
+
+ atomic_set(replies, 0);
+ err = nfp_flower_cmsg_portreify(repr, false);
+ if (err) {
+ nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+ return;
+ }
+
+ nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
enum nfp_port_type port_type;
struct nfp_reprs *reprs;
+ int i, err, reify_cnt;
const u8 queue = 0;
- int i, err;
port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
NFP_PORT_VF_PORT;
@@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
return -ENOMEM;
for (i = 0; i < cnt; i++) {
+ struct net_device *repr;
struct nfp_port *port;
u32 port_id;
- reprs->reprs[i] = nfp_repr_alloc(app);
- if (!reprs->reprs[i]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
- port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ port = nfp_port_alloc(app, port_type, repr);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
- eth_hw_addr_random(reprs->reprs[i]);
+ eth_hw_addr_random(repr);
port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
- err = nfp_repr_init(app, reprs->reprs[i],
+ err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
- reprs->reprs[i]->name);
+ repr->name);
}
nfp_app_reprs_set(app, repr_type, reprs);
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
return err;
}
@@ -233,10 +317,11 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ atomic_t *replies = &priv->reify_replies;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
+ int err, reify_cnt;
unsigned int i;
- int err;
ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
if (!ctrl_skb)
@@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
+ struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
- reprs->reprs[phys_port] = nfp_repr_alloc(app);
- if (!reprs->reprs[phys_port]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
- port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
- reprs->reprs[phys_port]);
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
goto err_reprs_clean;
@@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
- SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
- err = nfp_repr_init(app, reprs->reprs[phys_port],
+ err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
- phys_port, reprs->reprs[phys_port]->name);
+ phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- /* The MAC_REPR control message should be sent after the MAC
+ /* The REIFY/MAC_REPR control messages should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
* because the firmware may respond with control messages for the
* MAC representors, f.e. to provide the driver with information
* about their state, and without registration the driver will drop
* any such messages.
*/
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
nfp_ctrl_tx(app->ctrl, ctrl_skb);
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
@@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
struct nfp_flower_priv *app_priv;
- u64 version;
+ u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app)
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+ init_waitqueue_head(&app_priv->reify_wait_queue);
err = nfp_flower_metadata_init(app);
if (err)
goto err_free_app_priv;
+ /* Extract the extra features supported by the firmware. */
+ features = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_extra_features", &err);
+ if (err)
+ app_priv->flower_ext_feats = 0;
+ else
+ app_priv->flower_ext_feats = features;
+
return 0;
err_free_app_priv:
@@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
+
+ .ctrl_cap_mask = ~0U,
.ctrl_has_meta = true,
.extra_cap = nfp_flower_extra_cap,
@@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = {
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
+ .repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
.repr_open = nfp_flower_repr_netdev_open,
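The reify handshake added to main.c follows a simple pattern: zero a reply counter, send one PORT_REIFY message per representor, then sleep on a wait queue until the counter reaches the number of messages sent or a short timeout expires. A rough userspace analogue of that pattern, using a pthread mutex and condition variable in place of the kernel's atomic_t and wait queue, is sketched below; the names and the 10 ms timeout echo the diff, everything else is an illustrative stand-in rather than driver code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for priv->reify_replies / priv->reify_wait_queue. */
static pthread_mutex_t reify_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  reify_cond = PTHREAD_COND_INITIALIZER;
static int reify_replies;

/* Called when a PORT_REIFY reply arrives (cf. nfp_flower_cmsg_portreify_rx). */
static void reify_reply_received(void)
{
	pthread_mutex_lock(&reify_lock);
	reify_replies++;
	pthread_cond_broadcast(&reify_cond);
	pthread_mutex_unlock(&reify_lock);
}

/* Wait for tot_repl replies or time out (cf. nfp_flower_wait_repr_reify). */
static int wait_repr_reify(int tot_repl, int timeout_ms)
{
	struct timespec deadline;
	int rc = 0;

	if (!tot_repl)
		return 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec  += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&reify_lock);
	while (reify_replies < tot_repl && rc == 0)
		rc = pthread_cond_timedwait(&reify_cond, &reify_lock, &deadline);
	rc = reify_replies >= tot_repl ? 0 : -EIO;   /* -EIO as in the driver */
	pthread_mutex_unlock(&reify_lock);

	return rc;
}

static void *fake_firmware(void *arg)
{
	(void)arg;
	reify_reply_received();   /* pretend one representor answered */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fake_firmware, NULL);
	printf("reify wait: %d\n", wait_repr_reify(1, 10));
	pthread_join(&t, NULL);
	return 0;
}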
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5ae6e0..332ff0fdc038 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
+#include "cmsg.h"
+
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
#define NFP_FL_MASK_ID_LOCATION 1
#define NFP_FL_VXLAN_PORT 4789
+#define NFP_FL_GENEVE_PORT 6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE BIT(0)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
+ * @flower_ext_feats: Bitmap of extra features the HW supports
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
* @nfp_mac_off_count: Number of MACs in address list
* @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @reify_replies: atomically stores the number of replies received
+ * from firmware for repr reify
+ * @reify_wait_queue: wait queue for repr reify response counting
*/
struct nfp_flower_priv {
struct nfp_app *app;
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
+ u64 flower_ext_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
int nfp_mac_off_count;
struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
+ atomic_t reify_replies;
+ wait_queue_head_t reify_wait_queue;
};
struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4f0e22..37c2ecae2a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+ memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
}
static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = 0;
- frame->reserved = 0;
+ frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
}
static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
- /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
- memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- vxlan_ips =
+ tun_ips =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target);
- frame->ip_src = vxlan_ips->src;
- frame->ip_dst = vxlan_ips->dst;
- *tun_dst = vxlan_ips->dst;
+ frame->ip_src = tun_ips->src;
+ frame->ip_dst = tun_ips->dst;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type)
{
- enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- __be32 tun_dst, tun_dst_mask = 0;
struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
- tun_type = NFP_FL_TUNNEL_VXLAN;
-
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
- flow, key_ls->key_layer, true);
- ext += sizeof(struct nfp_flower_meta_two);
- msk += sizeof(struct nfp_flower_meta_two);
-
- /* Populate Exact Port data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
- if (err)
- return err;
-
- /* Populate Mask Port Data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
- if (err)
- return err;
-
- ext += sizeof(struct nfp_flower_in_port);
- msk += sizeof(struct nfp_flower_in_port);
- } else {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
- key_ls->key_layer);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
- key_ls->key_layer);
- ext += sizeof(struct nfp_flower_meta_one);
- msk += sizeof(struct nfp_flower_meta_one);
- }
- if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
- /* Additional Metadata Fields.
- * Currently unsupported.
- */
- return -EOPNOTSUPP;
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ msk += sizeof(struct nfp_flower_meta_tci);
+
+ /* Populate Extended Metadata if Required. */
+ if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+ key_ls->key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_ls->key_layer_two);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ msk += sizeof(struct nfp_flower_ext_meta);
}
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false, tun_type);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true, tun_type);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
/* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ __be32 tun_dst;
+
/* Populate Exact VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
- flow, false, &tun_dst);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
/* Populate Mask VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
- flow, true, &tun_dst_mask);
- ext += sizeof(struct nfp_flower_vxlan);
- msk += sizeof(struct nfp_flower_vxlan);
+ nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f55dce..08c4c6dc5f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress)
+ bool egress,
+ enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
+ struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
return -EOPNOTSUPP;
key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
+ key_layer = NFP_FLOWER_LAYER_PORT;
+ key_size = sizeof(struct nfp_flower_meta_tci) +
+ sizeof(struct nfp_flower_in_port);
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
FLOW_DISSECTOR_KEY_ENC_PORTS,
flow->key);
- if (mask_enc_ports->dst != cpu_to_be16(~0) ||
- enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_vxlan);
+ switch (enc_ports->dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ return -EOPNOTSUPP;
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+ err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ &tun_type);
if (err)
goto err_free_key_ls;
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+ tun_type);
if (err)
goto err_destroy_flow;
@@ -457,8 +483,7 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower, bool egress)
{
- if (!eth_proto_is_802_3(flower->common.protocol) ||
- flower->common.chain_index)
+ if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
@@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
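To summarize the key-layer selection that nfp_flower_calculate_key_layers() now performs for encapsulated matches: VXLAN keeps the single metadata word and sets the LAYER_VXLAN bit, while Geneve requires the extended metadata word plus the second-level LAYER2_GENEVE bit, and is refused when the firmware does not advertise NFP_FL_FEATS_GENEVE. A condensed sketch of that decision follows; the bit definitions come from cmsg.h in this patch, while the helper, its host-byte-order parameter and the omitted key_size accounting are simplifications for illustration.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Layer bits from cmsg.h in this series. */
#define NFP_FLOWER_LAYER_EXT_META  (1U << 0)
#define NFP_FLOWER_LAYER_VXLAN     (1U << 7)
#define NFP_FLOWER_LAYER2_GENEVE   (1U << 5)

#define NFP_FL_VXLAN_PORT   4789
#define NFP_FL_GENEVE_PORT  6081
#define NFP_FL_FEATS_GENEVE (1ULL << 0)

/* Accumulate key-layer bits for a tunnel match keyed on the UDP destination
 * port (host byte order here for simplicity).  Returns 0 or -EOPNOTSUPP. */
static int tun_key_layers(uint16_t enc_dst_port, uint64_t ext_feats,
			  uint8_t *key_layer, uint32_t *key_layer_two)
{
	switch (enc_dst_port) {
	case NFP_FL_VXLAN_PORT:
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
		return 0;
	case NFP_FL_GENEVE_PORT:
		if (!(ext_feats & NFP_FL_FEATS_GENEVE))
			return -EOPNOTSUPP;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	uint8_t layer = 0;
	uint32_t layer2 = 0;

	if (!tun_key_layers(NFP_FL_GENEVE_PORT, NFP_FL_FEATS_GENEVE,
			    &layer, &layer2))
		printf("key_layer=0x%02x key_layer_two=0x%08x\n", layer, layer2);
	return 0;
}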
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index b03f22f29612..ec524d97869d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -50,9 +50,9 @@
* @seq: sequence number of the message
* @count: number of tunnels report in message
* @flags: options part of the request
- * @ipv4: dest IPv4 address of active route
- * @egress_port: port the encapsulated packet egressed
- * @extra: reserved for future use
+ * @tun_info.ipv4: dest IPv4 address of active route
+ * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info.extra: reserved for future use
* @tun_info: tunnels that have sent traffic in reported period
*/
struct nfp_tun_active_tuns {
@@ -132,8 +132,8 @@ struct nfp_ipv4_addr_entry {
* struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
* @reserved: reserved for future use
* @count: number of MAC addresses in the message
- * @index: index of MAC address in the lookup table
- * @addr: interface MAC address
+ * @addresses.index: index of MAC address in the lookup table
+ * @addresses.addr: interface MAC address
* @addresses: series of MACs to offload
*/
struct nfp_tun_mac_addr {