author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-15 11:56:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-15 11:56:19 -0800
commit     5bbcc0f595fadb4cac0eddc4401035ec0bd95b09 (patch)
tree       3b65e490cc36a6c6fecac1fa24d9e0ac9ced4455 /drivers/net/ethernet/mellanox
parent     892204e06cb9e89fbc4b299a678f9ca358e97cac (diff)
parent     50895b9de1d3e0258e015e8e55128d835d9a9f19 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 "Highlights:

  1) Maintain the TCP retransmit queue using an rbtree, with 1GB windows at 100Gb this really has become necessary. From Eric Dumazet.
  2) Multi-program support for cgroup+bpf, from Alexei Starovoitov.
  3) Perform broadcast flooding in hardware in mv88e6xxx, from Andrew Lunn.
  4) Add meter action support to openvswitch, from Andy Zhou.
  5) Add a data meta pointer for BPF accessible packets, from Daniel Borkmann.
  6) Namespace-ify almost all TCP sysctl knobs, from Eric Dumazet.
  7) Turn on Broadcom Tags in b53 driver, from Florian Fainelli.
  8) More work to move the RTNL mutex down, from Florian Westphal.
  9) Add 'bpftool' utility, to help with bpf program introspection. From Jakub Kicinski.
  10) Add new 'cpumap' type for XDP_REDIRECT action, from Jesper Dangaard Brouer.
  11) Support 'blocks' of transformations in the packet scheduler which can span multiple network devices, from Jiri Pirko.
  12) TC flower offload support in cxgb4, from Kumar Sanghvi.
  13) Priority based stream scheduler for SCTP, from Marcelo Ricardo Leitner.
  14) Thunderbolt networking driver, from Amir Levy and Mika Westerberg.
  15) Add RED qdisc offloadability, and use it in mlxsw driver. From Nogah Frankel.
  16) eBPF based device controller for cgroup v2, from Roman Gushchin.
  17) Add some fundamental tracepoints for TCP, from Song Liu.
  18) Remove garbage collection from ipv6 route layer, this is a significant accomplishment. From Wei Wang.
  19) Add multicast route offload support to mlxsw, from Yotam Gigi"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2177 commits)
  tcp: highest_sack fix
  geneve: fix fill_info when link down
  bpf: fix lockdep splat
  net: cdc_ncm: GetNtbFormat endian fix
  openvswitch: meter: fix NULL pointer dereference in ovs_meter_cmd_reply_start
  netem: remove unnecessary 64 bit modulus
  netem: use 64 bit divide by rate
  tcp: Namespace-ify sysctl_tcp_default_congestion_control
  net: Protect iterations over net::fib_notifier_ops in fib_seq_sum()
  ipv6: set all.accept_dad to 0 by default
  uapi: fix linux/tls.h userspace compilation error
  usbnet: ipheth: prevent TX queue timeouts when device not ready
  vhost_net: conditionally enable tx polling
  uapi: fix linux/rxrpc.h userspace compilation errors
  net: stmmac: fix LPI transitioning for dwmac4
  atm: horizon: Fix irq release error
  net-sysfs: trigger netlink notification on ifalias change via sysfs
  openvswitch: Using kfree_rcu() to simplify the code
  openvswitch: Make local function ovs_nsh_key_attr_size() static
  openvswitch: Fix return value check in ovs_meter_cmd_features()
  ...
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/catas.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/srq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 619
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 265
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 315
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 171
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 244
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 74
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 899
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 291
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 998
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 272
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 350
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 525
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 111
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 106
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 817
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 335
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 89
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 106
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 129
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 434
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 183
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 291
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 1012
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 134
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 839
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 276
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1546
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 514
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 6
78 files changed, 10193 insertions, 2503 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 22b1cc012bc9..36054e6fb9d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -38,3 +38,11 @@ config MLX4_DEBUG
mlx4_core driver. The output can be turned on via the
debug_level module parameter (which can also be set after
the driver is loaded through sysfs).
+
+config MLX4_CORE_GEN2
+ bool "Support for old gen2 Mellanox PCI IDs" if (MLX4_CORE)
+ depends on MLX4_CORE
+ default y
+ ---help---
+ Say Y here if you want to use old gen2 Mellanox devices in the
+ driver.
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 53daa6ca5d83..de0f9e5e42ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
- init_timer(&priv->catas_err.timer);
+ setup_timer(&priv->catas_err.timer, poll_catas, (unsigned long)dev);
priv->catas_err.map = NULL;
if (!mlx4_is_slave(dev)) {
@@ -293,8 +293,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
}
}
- priv->catas_err.timer.data = (unsigned long) dev;
- priv->catas_err.timer.function = poll_catas;
priv->catas_err.timer.expires =
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
add_timer(&priv->catas_err.timer);
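
The catas.c hunks collapse the three-step init_timer()/.function/.data setup into a single setup_timer() call. A minimal sketch of the same conversion, using a hypothetical my_dev/my_poll pair:

	struct my_dev {
		struct timer_list timer;
		/* ... */
	};

	static void my_poll(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *)data;

		/* check hardware state, then re-arm */
		mod_timer(&dev->timer, round_jiffies(jiffies + HZ));
	}

	static void my_start_poll(struct my_dev *dev)
	{
		/* one call replaces init_timer() plus the .function/.data stores */
		setup_timer(&dev->timer, my_poll, (unsigned long)dev);
		dev->timer.expires = round_jiffies(jiffies + HZ);
		add_timer(&dev->timer);
	}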
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 72eb50cd5ecd..d8e9a323122e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (atomic_dec_and_test(&mcq->refcount))
+ if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
if (kick)
@@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar = uar;
- atomic_set(&cq->refcount, 1);
+ refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
@@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
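
The cq.c hunks are a mechanical atomic_t-to-refcount_t conversion; refcount_t (linux/refcount.h) is a saturating counter that warns on increment-from-zero and on overflow, catching use-after-free bugs that plain atomics silently allow. The lifecycle pattern the driver follows, sketched with a hypothetical my_obj:

	#include <linux/refcount.h>

	struct my_obj {
		refcount_t refcount;
		struct completion free;
	};

	static void my_obj_init(struct my_obj *obj)
	{
		refcount_set(&obj->refcount, 1);	/* owner holds the initial reference */
		init_completion(&obj->free);
	}

	static void my_obj_get(struct my_obj *obj)
	{
		refcount_inc(&obj->refcount);		/* warns if the count was already zero */
	}

	static void my_obj_put(struct my_obj *obj)
	{
		if (refcount_dec_and_test(&obj->refcount))	/* true when the count hits zero */
			complete(&obj->free);
	}

Teardown drops the initial reference via my_obj_put() and then blocks in wait_for_completion(&obj->free) until every outstanding user has done the same, exactly as mlx4_cq_free() does above.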
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3d4e4a5d00d1..bf1f04164885 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1742,13 +1742,18 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
+{
+ return min_t(int, num_online_cpus(), MAX_RX_RINGS);
+}
+
static void mlx4_en_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- channel->max_rx = MAX_RX_RINGS;
- channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+ channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
+ channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
channel->rx_count = priv->rx_ring_num;
channel->tx_count = priv->tx_ring_num[TX] /
@@ -1777,7 +1782,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
if (channel->tx_count * priv->prof->num_up + xdp_count >
- MAX_TX_RINGS) {
+ priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 686e18de9a97..2c2965497ed3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -153,7 +153,7 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
int i;
params->udp_rss = udp_rss;
- params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+ params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
MLX4_EN_MIN_TX_RING_P_UP :
min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
@@ -170,8 +170,8 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
- params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
- params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
+ params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
+ params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9c218f1cfc6c..99051a294fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -135,7 +135,7 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
{
struct tc_mqprio_qopt *mqprio = type_data;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
@@ -1752,6 +1752,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_arm_cq(priv, cq);
} else {
+ mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
mlx4_en_init_recycle_ring(priv, i);
/* XDP TX CQ should never be armed */
}
@@ -2915,7 +2916,7 @@ static u32 mlx4_xdp_query(struct net_device *dev)
return prog_id;
}
-static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -2957,7 +2958,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
- .ndo_xdp = mlx4_xdp,
+ .ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2994,7 +2995,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
- .ndo_xdp = mlx4_xdp,
+ .ndo_bpf = mlx4_xdp,
};
struct mlx4_en_bond {
@@ -3305,7 +3306,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
- priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
+ priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
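
The en_netdev.c renames track the core change that generalized .ndo_xdp into .ndo_bpf (and struct netdev_xdp into struct netdev_bpf) so one callback can carry XDP attach/query and other BPF offload commands. A hedged sketch of the dispatch shape, with my_xdp_set/my_xdp_query standing in for driver internals:

	static int my_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		switch (bpf->command) {
		case XDP_SETUP_PROG:
			return my_xdp_set(dev, bpf->prog);	/* NULL prog detaches */
		case XDP_QUERY_PROG:
			bpf->prog_id = my_xdp_query(dev);	/* 0 if nothing attached */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_bpf = my_bpf,	/* was .ndo_xdp before the rename */
	};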
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 5a47f9669621..6883ac75d37f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -53,7 +53,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
if (is_tx) {
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
- context->params2 |= MLX4_QP_BIT_FPP;
+ context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
} else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
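
The en_resources.c fix matters because params2 is a __be32: OR-ing in the host-order constant MLX4_QP_BIT_FPP sets the bit in the wrong byte on little-endian machines, and sparse's __bitwise annotations flag exactly this mix. Illustratively:

	__be32 params2 = 0;

	params2 |= MLX4_QP_BIT_FPP;			/* wrong: host-order bit in a big-endian field */
	params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);	/* right: convert the constant once */

The same cpu_to_be32() treatment is applied to the bit-clearing sites in qp.c and resource_tracker.c further down.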
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b97a55c827eb..92aec17f4b4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -254,8 +254,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
- min_t(int, num_of_eqs,
- netif_get_num_default_rss_queues());
+ min_t(int, num_of_eqs, num_online_cpus());
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(num_rx_rings);
}
@@ -762,6 +761,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp.data_hard_start = va - frags[0].page_offset;
xdp.data = va;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + length;
orig_data = xdp.data;
@@ -778,7 +778,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
+ if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
&doorbell_pending))) {
frags[0].page = NULL;
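
The xdp_set_data_meta_invalid() call added in en_rx.c relates to highlight 5 above: struct xdp_buff grew a data_meta pointer, and a driver that reserves no metadata headroom must mark it invalid so bpf_xdp_adjust_meta() fails cleanly instead of handing programs bogus space. Roughly, per packet:

	struct xdp_buff xdp;

	xdp.data_hard_start = va - headroom;	/* start of the usable buffer */
	xdp.data = va;				/* packet start */
	xdp_set_data_meta_invalid(&xdp);	/* this driver carries no data_meta */
	xdp.data_end = xdp.data + length;

A driver that does support metadata would instead set xdp.data_meta = xdp.data and let programs grow the area downward with bpf_xdp_adjust_meta().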
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3541a7f9d12e..6b6853773848 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,7 +718,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- ring->doorbell_qpn,
+ (__force u32)ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
@@ -1085,13 +1085,35 @@ tx_drop:
#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
/ 16) & 0x3f)
+void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf +
+ (i << LOG_TXBB_SIZE);
+
+ tx_info->map0_byte_count = PAGE_SIZE;
+ tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
+ tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
+ tx_info->ts_requested = 0;
+ tx_info->nr_maps = 1;
+ tx_info->linear = 1;
+ tx_info->inl = 0;
+
+ tx_desc->data.lkey = ring->mr_key;
+ tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+ }
+}
+
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
- struct net_device *dev, unsigned int length,
+ struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_en_tx_info *tx_info;
struct mlx4_wqe_data_seg *data;
@@ -1123,25 +1145,16 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_info->page = frame->page;
frame->page = NULL;
tx_info->map0_dma = dma;
- tx_info->map0_byte_count = PAGE_SIZE;
- tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
- tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
- tx_info->ts_requested = 0;
- tx_info->nr_maps = 1;
- tx_info->linear = 1;
- tx_info->inl = 0;
dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
length, PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma + frame->page_offset);
- data->lkey = ring->mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(length);
/* tx completion can avoid cache line miss for common cases */
- tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
@@ -1152,10 +1165,13 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
ring->prod += MLX4_EN_XDP_TX_NRTXBB;
- qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ ring->xmit_more++;
- mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0,
- op_own, false, false);
*doorbell_pending = true;
return NETDEV_TX_OK;
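
The open-coded doorbell path above ends with the canonical descriptor-handoff pattern: every field of the TX descriptor must be visible to the device before the ownership/opcode word flips it to hardware, which is what the dma_wmb() guarantees. In outline:

	/* 1. fill the descriptor body */
	data->addr = cpu_to_be64(dma + offset);
	data->byte_count = cpu_to_be32(length);

	/* 2. order the body before the ownership write */
	dma_wmb();

	/* 3. hand the descriptor to the device */
	tx_desc->ctrl.owner_opcode = op_own;

Hoisting the invariant fields into mlx4_en_init_tx_xdp_ring_descs() means the XDP_TX fast path touches only the per-packet words before steps 2 and 3.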
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 16c09949afd5..634f603f941c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -57,12 +57,12 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
- u64 val; \
- switch (sizeof(dest)) { \
+ __be64 val; \
+ switch (sizeof(dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
- case 8: val = get_unaligned((u64 *)__p); \
+ case 8: val = get_unaligned((__be64 *)__p); \
(dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
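
The fw.c change retypes the temporary so sparse tracks endianness through the 8-byte case as well: get_unaligned() on a __be64 pointer yields a __be64, which be64_to_cpu() then converts. A standalone sketch of that case (my_get_be64 is hypothetical):

	#include <asm/unaligned.h>

	static u64 my_get_be64(const void *base, size_t off)
	{
		const void *p = (const char *)base + off;
		__be64 raw = get_unaligned((const __be64 *)p);	/* field may be unaligned */

		return be64_to_cpu(raw);
	}

Declaring raw as a plain u64, as the macro previously did, silences the very checks the __be64 annotation exists to enable.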
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e61c99ef741d..4d84cab77105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4066,6 +4066,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
static const struct pci_device_id mlx4_pci_table[] = {
+#ifdef CONFIG_MLX4_CORE_GEN2
/* MT25408 "Hermon" */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
@@ -4085,6 +4086,7 @@ static const struct pci_device_id mlx4_pci_table[] = {
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
/* MT25400 Family [ConnectX-2] */
MLX_VF(0x1002), /* Virtual Function */
+#endif /* CONFIG_MLX4_CORE_GEN2 */
/* MT27500 Family [ConnectX-3] */
MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
MLX_VF(0x1004), /* Virtual Function */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fdb3ad0cbe54..1856e279a7e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -399,7 +399,7 @@ struct mlx4_en_profile {
u32 active_ports;
u32 small_pkt_int;
u8 no_reset;
- u8 num_tx_rings_p_up;
+ u8 max_num_tx_rings_p_up;
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
@@ -693,7 +693,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
- struct net_device *dev, unsigned int length,
+ struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
@@ -705,6 +705,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
+void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 728a2fb1f5c0..769598f7b6c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp = __mlx4_qp_lookup(dev, qpn);
if (qp)
- atomic_inc(&qp->refcount);
+ refcount_inc(&qp->refcount);
spin_unlock(&qp_table->lock);
@@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp->event(qp, event_type);
- if (atomic_dec_and_test(&qp->refcount))
+ if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
}
@@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
if (err)
goto err_icm;
- atomic_set(&qp->refcount, 1);
+ refcount_set(&qp->refcount, 1);
init_completion(&qp->free);
return 0;
@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- if (atomic_dec_and_test(&qp->refcount))
+ if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -925,7 +925,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
if (states[i + 1] != MLX4_QP_STATE_RTR)
- context->params2 &= ~MLX4_QP_BIT_FPP;
+ context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index fabb53379727..04304dd894c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3185,7 +3185,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
if (slave != mlx4_master_func_num(dev)) {
- qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+ qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
/* setting QP rate-limit is disallowed for VFs */
if (qp_ctx->rate_limit_params)
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index bedf52126824..cbe4d9746ddf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
rcu_read_unlock();
if (srq)
- atomic_inc(&srq->refcount);
+ refcount_inc(&srq->refcount);
else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
@@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq->event(srq, event_type);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
@@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
if (err)
goto err_radix;
- atomic_set(&srq->refcount, 1);
+ refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return 0;
@@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fdaef00465d7..25deaa5a534c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,6 +6,7 @@ config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on MAY_USE_DEVLINK
depends on PCI
+ imply PTP_1588_CLOCK
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -29,7 +30,6 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
- imply PTP_1588_CLOCK
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 714dd0dc5eef..19b21b40ab07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -5,7 +5,7 @@ subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
@@ -23,7 +23,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 336d4738b807..1016e05c7ec7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (atomic_dec_and_test(&mcq->refcount))
+ if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
if (!cq) {
@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
cq->comp(cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq = radix_tree_lookup(&table->tree, cqn);
if (cq)
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq->event(cq, event_type);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
- atomic_set(&cq->refcount, 1);
+ refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 13b5ef9d8703..c0872b3284cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,6 +57,7 @@
#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
+#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -105,6 +106,7 @@
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
@@ -126,6 +128,16 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
+
+#define mlx5e_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msglevel) \
+ netdev_warn(priv->netdev, format, \
+ ##__VA_ARGS__); \
+} while (0)
+
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -187,12 +199,14 @@ extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
+ "tx_cqe_moder",
"rx_cqe_compress",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
- MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
+ MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
+ MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -212,6 +226,7 @@ enum mlx5e_priv_flag {
struct mlx5e_cq_moder {
u16 usec;
u16 pkts;
+ u8 cq_period_mode;
};
struct mlx5e_params {
@@ -223,7 +238,6 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
- u8 rx_cq_period_mode;
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
@@ -260,34 +274,18 @@ enum {
struct mlx5e_dcbx {
enum mlx5_dcbx_oper_mode mode;
struct mlx5e_cee_config cee_cfg; /* pending configuration */
+ u8 dscp_app_cnt;
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
};
-#endif
-#define MAX_PIN_NUM 8
-struct mlx5e_pps {
- u8 pin_caps[MAX_PIN_NUM];
- struct work_struct out_work;
- u64 start[MAX_PIN_NUM];
- u8 enabled;
-};
-
-struct mlx5e_tstamp {
- rwlock_t lock;
- struct cyclecounter cycles;
- struct timecounter clock;
- struct hwtstamp_config hwtstamp_config;
- u32 nominal_c_mult;
- unsigned long overflow_period;
- struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
- struct mlx5e_pps pps_info;
+struct mlx5e_dcbx_dp {
+ u8 dscp2prio[MLX5E_MAX_DSCP];
+ u8 trust_state;
};
+#endif
enum {
MLX5E_RQ_STATE_ENABLED,
@@ -375,9 +373,10 @@ struct mlx5e_txqsq {
u8 min_inline_mode;
u16 edge;
struct device *pdev;
- struct mlx5e_tstamp *tstamp;
__be32 mkey_be;
unsigned long state;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -543,10 +542,11 @@ struct mlx5e_rq {
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
- struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
struct mlx5e_page_cache page_cache;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
@@ -588,7 +588,7 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct mlx5e_tstamp *tstamp;
+ struct hwtstamp_config *tstamp;
int ix;
};
@@ -655,12 +655,14 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
- bool filter_disabled;
+ bool cvlan_filter_disabled;
};
struct mlx5e_l2_table {
@@ -762,8 +764,12 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ struct mlx5e_dcbx_dp dcbx_dp;
+#endif
/* priv data path fields - end */
+ u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
@@ -789,7 +795,7 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
- struct mlx5e_tstamp tstamp;
+ struct hwtstamp_config tstamp;
u16 q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -820,6 +826,8 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
+ void (*netdev_registered_init)(struct mlx5e_priv *priv);
+ void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
@@ -873,12 +881,6 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
- struct skb_shared_hwtstamps *hwts);
-void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
@@ -887,8 +889,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_timestamp_set(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -928,6 +931,8 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
@@ -993,6 +998,8 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
@@ -1045,6 +1052,9 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1081,6 +1091,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
@@ -1091,5 +1104,5 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
-
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
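
Among the en.h additions is a msglevel-gated debug macro: priv->msglevel follows the standard NETIF_MSG_* bitmask convention, so a line is emitted only when its message class is enabled. A hedged usage sketch (the message text and variables are illustrative):

	priv->msglevel = MLX5E_MSG_LEVEL;	/* default: NETIF_MSG_LINK */

	mlx5e_dbg(LINK, priv, "link up, %u Mbps\n", speed);	/* printed */
	mlx5e_dbg(HW, priv, "EQ %d armed\n", eqn);		/* suppressed until NETIF_MSG_HW is set */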
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 4614ddfa91eb..6a7c8b04447e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -256,7 +256,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mdata = mlx5e_ipsec_add_metadata(skb);
- if (unlikely(IS_ERR(mdata))) {
+ if (IS_ERR(mdata)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
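
Dropping unlikely() around IS_ERR() in ipsec_rxtx.c is not a behavioral change: the branch hint already lives inside the helper. The err.h definitions look roughly like this:

	#define IS_ERR_VALUE(x)	unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(__force const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}

so wrapping the call in a second unlikely() is redundant.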
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 12d3ced61114..610d485c4b03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -92,7 +92,7 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
@@ -126,7 +126,7 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv)
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
int err = 0;
int tt;
int i;
@@ -175,7 +175,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
@@ -466,7 +466,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
@@ -557,7 +557,7 @@ out:
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule, u16 rxq)
{
- struct mlx5_flow_destination dst;
+ struct mlx5_flow_destination dst = {};
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
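
The repeated `= {}` initializers in en_arfs.c close a class of bugs where a stack-allocated mlx5_flow_destination is passed on with some members never written. Compare (tirn is a hypothetical TIR number):

	struct mlx5_flow_destination dest;		/* any field not set below is stack garbage */
	struct mlx5_flow_destination dest2 = {};	/* every member starts zeroed */

	dest2.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest2.tir_num = tirn;

With the empty initializer, firmware never sees uninitialized destination fields, even when new union members are added later.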
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
deleted file mode 100644
index 84dd63e74041..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/clocksource.h>
-#include "en.h"
-
-enum {
- MLX5E_CYCLES_SHIFT = 23
-};
-
-enum {
- MLX5E_PIN_MODE_IN = 0x0,
- MLX5E_PIN_MODE_OUT = 0x1,
-};
-
-enum {
- MLX5E_OUT_PATTERN_PULSE = 0x0,
- MLX5E_OUT_PATTERN_PERIODIC = 0x1,
-};
-
-enum {
- MLX5E_EVENT_MODE_DISABLE = 0x0,
- MLX5E_EVENT_MODE_REPETETIVE = 0x1,
- MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
-};
-
-enum {
- MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
- MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
- MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
- MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
- MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
- MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
-};
-
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
- struct skb_shared_hwtstamps *hwts)
-{
- u64 nsec;
-
- read_lock(&tstamp->lock);
- nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
- read_unlock(&tstamp->lock);
-
- hwts->hwtstamp = ns_to_ktime(nsec);
-}
-
-static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
-{
- struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
- cycles);
-
- return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
-}
-
-static void mlx5e_pps_out(struct work_struct *work)
-{
- struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
- out_work);
- struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
- pps_info);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- unsigned long flags;
- int i;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- u64 tstart;
-
- write_lock_irqsave(&tstamp->lock, flags);
- tstart = tstamp->pps_info.start[i];
- tstamp->pps_info.start[i] = 0;
- write_unlock_irqrestore(&tstamp->lock, flags);
- if (!tstart)
- continue;
-
- MLX5_SET(mtpps_reg, in, pin, i);
- MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
- MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
- mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
- }
-}
-
-static void mlx5e_timestamp_overflow(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
- overflow_work);
- struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
- queue_delayed_work(priv->wq, &tstamp->overflow_work,
- msecs_to_jiffies(tstamp->overflow_period * 1000));
-}
-
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
- int err;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* TX HW timestamp */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&priv->state_lock);
- /* RX HW timestamp */
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- /* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- /* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
- if (err) {
- netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
- mutex_unlock(&priv->state_lock);
- return err;
- }
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
- }
-
- memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
- mutex_unlock(&priv->state_lock);
-
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
-}
-
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
-}
-
-static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns = timespec64_to_ns(ts);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns;
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- ns = timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- *ts = ns_to_timespec64(ns);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_adjtime(&tstamp->clock, delta);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
-{
- u64 adj;
- u32 diff;
- unsigned long flags;
- int neg_adj = 0;
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
-
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
-
- adj = tstamp->nominal_c_mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
- tstamp->nominal_c_mult + diff;
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->extts.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
- pin_mode = MLX5E_PIN_MODE_IN;
- pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE;
- } else {
- pin = rq->extts.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp = 0;
- u64 cycles_now, cycles_delta;
- struct timespec64 ts;
- unsigned long flags;
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
- s64 ns;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->perout.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
-
- pin_mode = MLX5E_PIN_MODE_OUT;
- pattern = MLX5E_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
-
- if ((ns >> 1) != 500000000LL)
- return -EINVAL;
-
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- write_unlock_irqrestore(&tstamp->lock, flags);
- time_stamp = cycles_now + cycles_delta;
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE |
- MLX5E_MTPPS_FS_TIME_STAMP;
- } else {
- pin = rq->perout.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
-
- tstamp->pps_info.enabled = !!on;
- return 0;
-}
-
-static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- switch (rq->type) {
- case PTP_CLK_REQ_EXTTS:
- return mlx5e_extts_configure(ptp, rq, on);
- case PTP_CLK_REQ_PEROUT:
- return mlx5e_perout_configure(ptp, rq, on);
- case PTP_CLK_REQ_PPS:
- return mlx5e_pps_configure(ptp, rq, on);
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
-}
-
-static const struct ptp_clock_info mlx5e_ptp_clock_info = {
- .owner = THIS_MODULE,
- .max_adj = 100000000,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .adjfreq = mlx5e_ptp_adjfreq,
- .adjtime = mlx5e_ptp_adjtime,
- .gettime64 = mlx5e_ptp_gettime,
- .settime64 = mlx5e_ptp_settime,
- .enable = NULL,
- .verify = NULL,
-};
-
-static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
-{
- tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
- tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-}
-
-static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
-{
- int i;
-
- tstamp->ptp_info.pin_config =
- kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
- tstamp->ptp_info.n_pins, GFP_KERNEL);
- if (!tstamp->ptp_info.pin_config)
- return -ENOMEM;
- tstamp->ptp_info.enable = mlx5e_ptp_enable;
- tstamp->ptp_info.verify = mlx5e_ptp_verify;
- tstamp->ptp_info.pps = 1;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- snprintf(tstamp->ptp_info.pin_config[i].name,
- sizeof(tstamp->ptp_info.pin_config[i].name),
- "mlx5_pps%d", i);
- tstamp->ptp_info.pin_config[i].index = i;
- tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
- tstamp->ptp_info.pin_config[i].chan = i;
- }
-
- return 0;
-}
-
-static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
- struct mlx5e_tstamp *tstamp)
-{
- u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
- mlx5_query_mtpps(priv->mdev, out, sizeof(out));
-
- tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
- cap_number_of_pps_pins);
- tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_in_pins);
- tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_out_pins);
-
- tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
- tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
- tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
- tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
- tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
- tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
- tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
- tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
-}
-
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event)
-{
- struct net_device *netdev = priv->netdev;
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- struct timespec64 ts;
- u64 nsec_now, nsec_delta;
- u64 cycles_now, cycles_delta;
- int pin = event->index;
- s64 ns;
- unsigned long flags;
-
- switch (tstamp->ptp_info.pin_config[pin].func) {
- case PTP_PF_EXTTS:
- if (tstamp->pps_info.enabled) {
- event->type = PTP_CLOCK_PPSUSR;
- event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
- } else {
- event->type = PTP_CLOCK_EXTTS;
- }
- ptp_clock_event(tstamp->ptp, event);
- break;
- case PTP_PF_PEROUT:
- mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- ts.tv_sec += 1;
- ts.tv_nsec = 0;
- ns = timespec64_to_ns(&ts);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
- queue_work(priv->wq, &tstamp->pps_info.out_work);
- write_unlock_irqrestore(&tstamp->lock, flags);
- break;
- default:
- netdev_err(netdev, "%s: Unhandled event\n", __func__);
- }
-}
-
-void mlx5e_timestamp_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- u64 ns;
- u64 frac = 0;
- u32 dev_freq;
-
- mlx5e_timestamp_init_config(tstamp);
- dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
- if (!dev_freq) {
- mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
- rwlock_init(&tstamp->lock);
- tstamp->cycles.read = mlx5e_read_internal_timer;
- tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
- tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
- tstamp->cycles.shift);
- tstamp->nominal_c_mult = tstamp->cycles.mult;
- tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
- tstamp->mdev = priv->mdev;
-
- timecounter_init(&tstamp->clock, &tstamp->cycles,
- ktime_to_ns(ktime_get_real()));
-
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
- frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- tstamp->overflow_period = ns;
-
- INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
- INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
- if (tstamp->overflow_period)
- queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
- else
- mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
-
- /* Configure the PHC */
- tstamp->ptp_info = mlx5e_ptp_clock_info;
- snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
-
- /* Initialize 1PPS data structures */
- if (MLX5_PPS_CAP(priv->mdev))
- mlx5e_get_pps_caps(priv, tstamp);
- if (tstamp->ptp_info.n_pins)
- mlx5e_init_pin_config(tstamp);
-
- tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
- &priv->mdev->pdev->dev);
- if (IS_ERR(tstamp->ptp)) {
- mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
- PTR_ERR(tstamp->ptp));
- tstamp->ptp = NULL;
- }
-}
-
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return;
-
- if (priv->tstamp.ptp) {
- ptp_clock_unregister(priv->tstamp.ptp);
- priv->tstamp.ptp = NULL;
- }
-
- cancel_work_sync(&tstamp->pps_info.out_work);
- cancel_delayed_work_sync(&tstamp->overflow_work);
- kfree(tstamp->ptp_info.pin_config);
-}
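
The deleted perout path above turns a nanosecond delta into device cycles by inverting the timecounter relation ns = (cycles * mult) >> shift. A standalone sketch of that arithmetic follows; the shift of 23 plays the role of MLX5E_CYCLES_SHIFT and the 156.25 MHz clock is hypothetical, not read from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 23;              /* stand-in for MLX5E_CYCLES_SHIFT */
	uint64_t dev_freq_khz = 156250;   /* hypothetical device_frequency_khz */

	/* clocksource_khz2mult() chooses mult so that
	 * (ticks * mult) >> shift yields nanoseconds; same idea, no rounding. */
	uint64_t mult = ((uint64_t)1000000 << shift) / dev_freq_khz;

	uint64_t nsec_delta = 500000000;  /* half a second until the next pulse */
	uint64_t cycles_delta = (nsec_delta << shift) / mult; /* the div64_u64() step */

	/* Round-trip through the forward formula to show the two agree. */
	uint64_t ns_back = (cycles_delta * mult) >> shift;

	printf("cycles_delta=%llu ns_back=%llu\n",
	       (unsigned long long)cycles_delta, (unsigned long long)ns_back);
	return 0;
}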
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index ece3fb147e3e..784e282803db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -134,6 +134,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
+ memset(res, 0, sizeof(*res));
}
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
@@ -170,3 +171,15 @@ out:
return err;
}
+
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
+{
+ u8 min_inline_mode;
+
+ mlx5_query_min_inline(mdev, &min_inline_mode);
+ if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ min_inline_mode = MLX5_INLINE_MODE_L2;
+
+ return min_inline_mode;
+}
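
The helper added above encodes a single rule: when firmware reports that no inline headers are required but the device cannot insert a VLAN tag into the WQE itself (wqe_vlan_insert is clear), the driver must inline at least the L2 header so software can splice the tag in. A toy model under invented enum names:

#include <assert.h>

enum inline_mode { MODE_NONE, MODE_L2, MODE_IP }; /* invented stand-ins */

static enum inline_mode calc_min_inline(enum inline_mode fw_mode,
					int hw_can_insert_vlan)
{
	/* Inline nothing, unless software has to build the VLAN header. */
	if (fw_mode == MODE_NONE && !hw_can_insert_vlan)
		return MODE_L2;
	return fw_mode;
}

int main(void)
{
	assert(calc_min_inline(MODE_NONE, 1) == MODE_NONE);
	assert(calc_min_inline(MODE_NONE, 0) == MODE_L2);
	assert(calc_min_inline(MODE_IP, 0) == MODE_IP);
	return 0;
}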
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 51c4cc00a186..c6d90b6dd80e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -46,6 +46,13 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
+#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
+ MLX5_CAP_QCAM_REG(mdev, qpts) && \
+ MLX5_CAP_QCAM_REG(mdev, qpdpm))
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
+
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -234,7 +241,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
- int err;
+ int err, i;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
@@ -253,6 +260,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
+ __func__, i, ets->prio_tc[i]);
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
+ __func__, i, tc_tx_bw[i], tc_group[i]);
+ }
+
return err;
}
@@ -338,6 +353,11 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
mlx5_toggle_port_link(mdev);
+ if (!ret) {
+ mlx5e_dbg(HW, priv,
+ "%s: PFC per priority bit mask: 0x%x\n",
+ __func__, pfc->pfc_en);
+ }
return ret;
}
@@ -381,6 +401,113 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 0;
}
+static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct dcb_app temp;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Save the old entry info */
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ temp.protocol = app->protocol;
+ temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
+
+	/* Check if we need to switch to dscp trust state */
+ if (!priv->dcbx.dscp_app_cnt) {
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+	/* Skip the fw command if the new and old mappings are the same */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
+ err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
+ if (err)
+ goto fw_err;
+ }
+
+	/* Delete the old entry if it exists */
+ is_new = false;
+ err = dcb_ieee_delapp(dev, &temp);
+ if (err)
+ is_new = true;
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ priv->dcbx.dscp_app_cnt++;
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
+static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Skip if no dscp app entry */
+ if (!priv->dcbx.dscp_app_cnt)
+ return -ENOENT;
+
+ /* Check if the entry matches fw setting */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Delete the app entry */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Reset the priority mapping back to zero */
+ err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
+ if (err)
+ goto fw_err;
+
+ priv->dcbx.dscp_app_cnt--;
+
+	/* Check if we need to switch to pcp trust state */
+ if (!priv->dcbx.dscp_app_cnt)
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
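
The setapp/delapp pair above keeps a counter of DSCP app entries: the first entry added switches the port to DSCP trust, and removing the last one switches it back to PCP. A simplified standalone model of that bookkeeping — it deliberately ignores the duplicate-entry (is_new) accounting and the fw_err unwinding the real handlers perform:

#include <stdio.h>

enum trust { TRUST_PCP, TRUST_DSCP };

static enum trust trust_state = TRUST_PCP;
static int dscp_app_cnt;
static unsigned char dscp2prio[64];

static void set_app(unsigned char dscp, unsigned char prio)
{
	if (!dscp_app_cnt)
		trust_state = TRUST_DSCP;   /* first entry: switch trust */
	if (dscp2prio[dscp] != prio)
		dscp2prio[dscp] = prio;     /* stands in for the firmware command */
	dscp_app_cnt++;
}

static void del_app(unsigned char dscp)
{
	dscp2prio[dscp] = 0;                /* reset the mapping to priority 0 */
	if (!--dscp_app_cnt)
		trust_state = TRUST_PCP;    /* last entry gone: switch back */
}

int main(void)
{
	set_app(46, 5); /* e.g. map DSCP 46 (EF) to priority 5 */
	printf("trust=%d cnt=%d prio[46]=%u\n",
	       trust_state, dscp_app_cnt, dscp2prio[46]);
	del_app(46);
	printf("trust=%d cnt=%d\n", trust_state, dscp_app_cnt);
	return 0;
}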
@@ -446,6 +573,11 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
}
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
+ __func__, i, max_bw_value[i]);
+ }
+
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
@@ -471,6 +603,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
+ mlx5e_dbg(HW, priv,
+ "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
+ __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
+ ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets);
@@ -740,6 +876,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
+ .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
@@ -801,10 +939,135 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
mlx5e_dcbnl_ieee_setets_core(priv, &ets);
}
+enum {
+ INIT,
+ DELETE,
+};
+
+static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
+{
+ struct dcb_app temp;
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return;
+
+	/* No SEL_DSCP entries exist in a non-DSCP trust state */
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
+ return;
+
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ for (i = 0; i < MLX5E_MAX_DSCP; i++) {
+ temp.protocol = i;
+ temp.priority = priv->dcbx_dp.dscp2prio[i];
+ if (action == INIT)
+ dcb_ieee_setapp(priv->netdev, &temp);
+ else
+ dcb_ieee_delapp(priv->netdev, &temp);
+ }
+
+ priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
+}
+
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, INIT);
+}
+
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, DELETE);
+}
+
+static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+ params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
+ params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
+}
+
+static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ new_channels.params = priv->channels.params;
+ mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+
+ /* Skip if tx_min_inline is the same */
+ if (new_channels.params.tx_min_inline_mode ==
+ priv->channels.params.tx_min_inline_mode)
+ goto out;
+
+ if (mlx5e_open_channels(priv, &new_channels))
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
+{
+ int err;
+
+ err = mlx5_set_trust_state(priv->mdev, trust_state);
+ if (err)
+ return err;
+ priv->dcbx_dp.trust_state = trust_state;
+ mlx5e_trust_update_sq_inline_mode(priv);
+
+ return err;
+}
+
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
+{
+ int err;
+
+ err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
+ if (err)
+ return err;
+
+ priv->dcbx_dp.dscp2prio[dscp] = prio;
+ return err;
+}
+
+static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!MLX5_DSCP_SUPPORTED(mdev))
+ return 0;
+
+ err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ if (err)
+ return err;
+
+ mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+
+ err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
+ if (err)
+ return err;
+
+ return 0;
+}
+
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ mlx5e_trust_initialize(priv);
+
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d12e9fc0d76b..23425f028405 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,7 +31,6 @@
*/
#include "en.h"
-#include "en_accel/ipsec.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -136,59 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u8 pfc_en_tx;
- u8 pfc_en_rx;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
- err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
-
- return err ? 0 : pfc_en_tx | pfc_en_rx;
-}
-
-static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 rx_pause;
- u32 tx_pause;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return false;
-
- err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
-
- return err ? false : rx_pause | tx_pause;
-}
-
-#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
-#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
-#define MLX5E_NUM_PFC_COUNTERS(priv) \
- ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
- NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
+ int i, num_stats = 0;
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_SW_COUNTERS +
- MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
- NUM_PCIE_COUNTERS(priv) +
- MLX5E_NUM_RQ_STATS(priv) +
- MLX5E_NUM_SQ_STATS(priv) +
- MLX5E_NUM_PFC_COUNTERS(priv) +
- ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc) +
- mlx5e_ipsec_get_count(priv);
-
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
+ return num_stats;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
case ETH_SS_TEST:
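
The long hand-rolled counter list above collapses into a table of stats groups, each exposing its own count/strings/values callbacks, so adding a counter group no longer touches the ethtool code. A self-contained sketch of the pattern — the struct layout and group contents are illustrative, not the driver's exact definitions:

#include <stdio.h>
#include <string.h>

struct stats_grp {
	int (*get_num_stats)(void);
	int (*fill_strings)(char (*names)[32], int idx);
};

static int sw_num(void) { return 2; }

static int sw_strings(char (*names)[32], int idx)
{
	strcpy(names[idx++], "rx_packets");
	strcpy(names[idx++], "tx_packets");
	return idx;
}

static const struct stats_grp grps[] = {
	{ .get_num_stats = sw_num, .fill_strings = sw_strings },
	/* vport, pport, per-channel, ... groups would slot in here */
};

#define NUM_GRPS (int)(sizeof(grps) / sizeof(grps[0]))

int main(void)
{
	char names[16][32];
	int i, idx = 0, total = 0;

	for (i = 0; i < NUM_GRPS; i++)
		total += grps[i].get_num_stats();       /* the get_sset_count loop */
	for (i = 0; i < NUM_GRPS; i++)
		idx = grps[i].fill_strings(names, idx); /* the fill_strings loop */

	for (i = 0; i < total; i++)
		printf("%s\n", names[i]);
	return 0;
}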
@@ -208,104 +163,10 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
{
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
-
- /* SW counters */
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
-
- /* Q counters */
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
-
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].format);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- char pfc_string[ETH_GSTRING_LEN];
-
- snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
- }
- }
-
- /* port module event counters */
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ int i, idx = 0;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
@@ -340,10 +201,7 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- struct mlx5e_channels *channels;
- struct mlx5_priv *mlx5_priv;
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
+ int i, idx = 0;
if (!data)
return;
@@ -351,102 +209,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv, true);
- channels = &priv->channels;
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_stats_desc, i);
-
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
-
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- /* port module event counters */
- mlx5_priv = &priv->mdev->priv;
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_stats(priv, data + idx);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
- rq_stats_desc, j);
-
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
- sq_stats_desc, j);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
@@ -1417,14 +1183,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
- info->phc_index = priv->tstamp.ptp ?
- ptp_clock_index(priv->tstamp.ptp) : -1;
+ info->phc_index = mdev->clock.ptp ?
+ ptp_clock_index(mdev->clock.ptp) : -1;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return 0;
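
With the clock moved into mlx5_core_dev, userspace still discovers the PHC the same way: through ETHTOOL_GET_TS_INFO. A minimal query program — the interface name is a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder ifname */
	ifr.ifr_data = (void *)&info;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_TS_INFO");
		return 1;
	}
	printf("phc_index=%d\n", info.phc_index); /* -1 means no PHC */
	close(fd);
	return 0;
}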
@@ -1573,6 +1340,16 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static u32 mlx5e_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
+}
+
+static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
+}
+
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -1677,29 +1454,36 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
+ bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- bool rx_mode_changed;
- u8 rx_cq_period_mode;
+ bool mode_changed;
+ u8 cq_period_mode, current_cq_period_mode;
int err = 0;
- rx_cq_period_mode = enable ?
+ cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
+ current_cq_period_mode = is_rx_cq ?
+ priv->channels.params.rx_cq_moderation.cq_period_mode :
+ priv->channels.params.tx_cq_moderation.cq_period_mode;
+ mode_changed = cq_period_mode != current_cq_period_mode;
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- if (!rx_mode_changed)
+ if (!mode_changed)
return 0;
new_channels.params = priv->channels.params;
- mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
+ if (is_rx_cq)
+ mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
+ else
+ mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1714,6 +1498,16 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, false);
+}
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, true);
+}
+
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
@@ -1754,7 +1548,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
@@ -1802,6 +1596,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ set_pflag_tx_cqe_based_moder);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
@@ -1905,4 +1705,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
+ .get_msglevel = mlx5e_get_msglevel,
+ .set_msglevel = mlx5e_set_msglevel,
+
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 4837045ffba3..def513484845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -154,7 +154,8 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
- MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
@@ -162,7 +163,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
@@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		/* cvlan_tag enabled in the match criteria and
+		 * disabled in the match value means neither an S-tag
+		 * nor a C-tag exists (the packet is fully untagged)
+		 */
rule_p = &priv->fs.vlan.untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
@@ -190,8 +195,18 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
- default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
+ rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -223,7 +238,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+ if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
mlx5e_vport_context_update_vlans(priv);
err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
@@ -255,11 +270,17 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.any_svlan_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ if (priv->fs.vlan.active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
+ priv->fs.vlan.active_svlans_rule[vid] = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->fs.vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
- priv->fs.vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
+ priv->fs.vlan.active_cvlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -283,46 +304,83 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = false;
+ priv->fs.vlan.cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.filter_disabled)
+ if (priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = true;
+ priv->fs.vlan.cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_cvlans);
- set_bit(vid, priv->fs.vlan.active_vlans);
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ if (err)
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
+
+ return err;
+}
+
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+{
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_svlans);
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ if (err) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ return err;
+ }
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+	/* Re-evaluate netdev features that conflict with S-tag VLANs */
+ netdev_update_features(netdev);
+ return err;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->fs.vlan.active_vlans);
+ if (be16_to_cpu(proto) == ETH_P_8021Q)
+ return mlx5e_vlan_rx_add_cvid(priv, vid);
+ else if (be16_to_cpu(proto) == ETH_P_8021AD)
+ return mlx5e_vlan_rx_add_svid(priv, vid);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ return -EOPNOTSUPP;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (be16_to_cpu(proto) == ETH_P_8021Q) {
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(dev);
+ }
return 0;
}
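
The add/kill handlers now dispatch on the VLAN protocol rather than assuming C-tags; 802.1ad (S-tag) registrations get their own rule type and bitmap. The dispatch itself is just a byte-order-aware switch, sketched standalone below:

#include <stdio.h>
#include <arpa/inet.h>       /* ntohs(), htons() */
#include <linux/if_ether.h>  /* ETH_P_8021Q, ETH_P_8021AD */

/* What the new handlers key on: the tag protocol, not just the VID. */
static const char *vlan_kind(unsigned short proto_be)
{
	switch (ntohs(proto_be)) {
	case ETH_P_8021Q:  return "C-tag (802.1Q)";
	case ETH_P_8021AD: return "S-tag (802.1ad)";
	default:           return "unsupported -> -EOPNOTSUPP";
	}
}

int main(void)
{
	printf("%s\n", vlan_kind(htons(ETH_P_8021Q)));
	printf("%s\n", vlan_kind(htons(ETH_P_8021AD)));
	return 0;
}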
@@ -333,11 +391,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_add_any_vid_rules(priv);
}
@@ -348,11 +409,14 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_del_any_vid_rules(priv);
}
@@ -548,8 +612,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn_once(ndev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
@@ -564,7 +631,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
@@ -741,7 +808,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
@@ -912,7 +979,7 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
@@ -1008,7 +1075,7 @@ err:
return err;
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
struct mlx5_flow_table_attr ft_attr = {};
@@ -1044,7 +1111,7 @@ err:
return err;
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
@@ -1109,7 +1176,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -1268,13 +1335,15 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 3
+#define MLX5E_NUM_VLAN_GROUPS 4
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
-#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
-#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
- MLX5E_VLAN_GROUP2_SIZE)
+ MLX5E_VLAN_GROUP2_SIZE +\
+ MLX5E_VLAN_GROUP3_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
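
The VLAN steering table grows from three groups to four to make room for exact S-tag VID matches alongside the existing C-tag ones. The group sizes above sum as follows; a trivial standalone check of the arithmetic:

#include <assert.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	/* 2^12 exact C-tag VID rules + 2^12 exact S-tag VID rules
	 * + 3 rules for the any-tag / untagged matches */
	unsigned long table_size = BIT(12) + BIT(12) + BIT(1) + BIT(0);

	assert(table_size == 8195); /* 4096 + 4096 + 2 + 1 */
	return 0;
}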
@@ -1297,7 +1366,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1308,7 +1378,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1317,6 +1387,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cc11bbbd0309..d2b057a3e512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
@@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_tso_bytes += sq_stats->tso_bytes;
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -373,8 +375,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
- struct ptp_clock_event ptp_event;
- struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -384,14 +384,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
- case MLX5_DEV_EVENT_PPS:
- eqe = (struct mlx5_eqe *)param;
- ptp_event.index = eqe->data.pps.pin;
- ptp_event.timestamp =
- timecounter_cyc2time(&priv->tstamp.clock,
- be64_to_cpu(eqe->data.pps.time_stamp));
- mlx5e_pps_event_handler(vpriv, &ptp_event);
- break;
default:
break;
}
@@ -585,6 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
@@ -690,7 +683,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_period_mode;
+ rq->am.mode = params->rx_cq_moderation.cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -1123,6 +1116,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->tstamp = c->tstamp;
+ sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->txq_ix = txq_ix;
@@ -1982,7 +1976,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->rx_cq_period_mode;
+ param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1994,8 +1988,7 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -2678,6 +2671,12 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
+void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+{
+ priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
+ priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2693,7 +2692,7 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_init(priv);
+ mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2731,7 +2730,6 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -3086,13 +3084,10 @@ out:
}
#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
+static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
@@ -3106,17 +3101,54 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
return -EOPNOTSUPP;
}
}
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
+ priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
#endif
-static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
- case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_setup_tc_block(dev, type_data);
#endif
- case TC_SETUP_MQPRIO:
+ case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
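
TC_SETUP_CLSFLOWER arriving one rule at a time is replaced by TC_SETUP_BLOCK: the driver registers a single callback on the shared block at bind time, and the core then funnels every classifier on that block through it. A standalone toy of that contract, with all names invented:

#include <stdio.h>

typedef int (*block_cb)(int rule, void *cb_priv);

struct block {
	block_cb cb;
	void *cb_priv;
};

static int drv_cb(int rule, void *cb_priv)
{
	printf("%s: offload rule %d\n", (const char *)cb_priv, rule);
	return 0;
}

/* tcf_block_cb_register() / tcf_block_cb_unregister() analogues */
static void block_bind(struct block *b, block_cb cb, void *priv)
{
	b->cb = cb;
	b->cb_priv = priv;
}

static void block_unbind(struct block *b)
{
	b->cb = NULL;
}

int main(void)
{
	struct block b = { 0 };

	block_bind(&b, drv_cb, "mlx5e"); /* TC_BLOCK_BIND */
	if (b.cb)
		b.cb(1, b.cb_priv);      /* core replays each classifier */
	if (b.cb)
		b.cb(2, b.cb_priv);
	block_unbind(&b);                /* TC_BLOCK_UNBIND */
	return 0;
}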
@@ -3230,14 +3262,14 @@ out:
return err;
}
-static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_vlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv);
else
- mlx5e_disable_vlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv);
return 0;
}
@@ -3348,7 +3380,7 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_lro);
err |= mlx5e_handle_feature(netdev, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
- set_feature_vlan_filter);
+ set_feature_cvlan_filter);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
@@ -3365,6 +3397,25 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}
+static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+		/* HW strips the outer C-tag header, which is a problem
+		 * for S-tag traffic.
+		 */
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
+ }
+ mutex_unlock(&priv->state_lock);
+
+ return features;
+}
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
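
The new ndo_fix_features hook lets the driver veto negotiated feature bits: while any S-tag VLAN is registered, C-tag RX stripping is masked out because, as the comment above notes, the hardware would strip the outer tag. A toy version of the masking — the flag bit is a stand-in, not the real NETIF_F value:

#include <assert.h>

#define F_CTAG_RX (1u << 0) /* stand-in for NETIF_F_HW_VLAN_CTAG_RX */

static unsigned int fix_features(unsigned int wanted, int svlans_active)
{
	if (svlans_active)
		wanted &= ~F_CTAG_RX; /* HW would strip the outer tag */
	return wanted;
}

int main(void)
{
	assert(fix_features(F_CTAG_RX, 0) == F_CTAG_RX);
	assert(fix_features(F_CTAG_RX, 1) == 0);
	return 0;
}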
@@ -3403,6 +3454,80 @@ out:
return err;
}
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ /* RX HW timestamp */
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling cqe compression");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err) {
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ mutex_unlock(&priv->state_lock);
+ return -ERANGE;
+ }
+
+ memcpy(&priv->tstamp, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config *cfg = &priv->tstamp;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+}
+
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
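
These handlers implement the SIOCSHWTSTAMP/SIOCGHWTSTAMP plumbing; a minimal userspace caller looks like this (the interface name is a placeholder, and note the write-back: any PTP filter request comes back as HWTSTAMP_FILTER_ALL):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder ifname */
	ifr.ifr_data = (void *)&cfg;

	if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	/* The driver writes back the filter it actually applied. */
	printf("rx_filter in effect: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}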
@@ -3726,7 +3851,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
return prog_id;
}
-static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -3768,6 +3893,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
+ .ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
@@ -3778,7 +3904,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
- .ndo_xdp = mlx5e_xdp,
+ .ndo_bpf = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -3882,14 +4008,32 @@ static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw <= 16000) && (pci_bw < link_speed));
}
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->tx_cq_moderation.cq_period_mode = cq_period_mode;
+
+ params->tx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
- params->rx_cq_period_mode = cq_period_mode;
+ params->rx_cq_moderation.cq_period_mode = cq_period_mode;
params->rx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->rx_cq_moderation.usec =
@@ -3897,10 +4041,11 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (params->rx_am_enabled)
params->rx_cq_moderation =
- mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+ mlx5e_am_get_def_profile(cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ params->rx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
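
The per-direction moderation defaults set here are the values a user later reads and overrides through the standard coalescing interface. A minimal sketch using the legacy ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE ioctls — the interface name and the 16 usec figure are arbitrary:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder ifname */
	ifr.ifr_data = (void *)&ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		return 1;
	}
	printf("tx-usecs=%u tx-frames=%u\n",
	       ec.tx_coalesce_usecs, ec.tx_max_coalesced_frames);

	ec.cmd = ETHTOOL_SCOALESCE;
	ec.tx_coalesce_usecs = 16; /* arbitrary new TX moderation period */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		return 1;
	}
	close(fd);
	return 0;
}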
@@ -3960,16 +4105,11 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
-
- params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
@@ -3989,6 +4129,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4055,6 +4196,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
@@ -4112,6 +4254,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -4269,7 +4412,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (netdev->reg_state != NETREG_REGISTERED)
return;
-
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
/* Device already registered: sync netdev system state */
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
@@ -4290,6 +4435,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->netdev->reg_state == NETREG_REGISTERED)
+ mlx5e_dcbnl_delete_app(priv);
+#endif
+
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
@@ -4510,6 +4660,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
return priv;
err_detach:
@@ -4526,6 +4679,9 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
void *ppriv = priv->ppriv;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_delete_app(priv);
+#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 45e03c427faf..2c43606c26b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
+#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
@@ -658,23 +659,12 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
}
static int
-mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
+mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
- if (cls_flower->egress_dev) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- dev = mlx5_eswitch_get_uplink_netdev(esw);
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- cls_flower);
- }
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -687,12 +677,48 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
}
}
+static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -986,6 +1012,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
+ struct mlx5e_priv *upriv;
int err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
@@ -1017,15 +1044,25 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+ if (err)
+ goto err_neigh_cleanup;
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_egdev_cleanup;
}
return 0;
+err_egdev_cleanup:
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
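The load-path hunk above slots the new egdev registration between neighbour init and register_netdev(), and extends the goto-unwind chain accordingly: a netdev registration failure now falls through err_egdev_cleanup into err_neigh_cleanup, undoing steps in reverse order. The idiom in its generic shape (names here are illustrative, not driver API):

static int foo_load(struct foo *f)
{
	int err;

	err = foo_neigh_init(f);	/* earlier step */
	if (err)
		return err;

	err = foo_egdev_register(f);	/* newly inserted step */
	if (err)
		goto err_neigh_cleanup;

	err = foo_register_netdev(f);	/* final step */
	if (err)
		goto err_egdev_cleanup;

	return 0;

err_egdev_cleanup:
	foo_egdev_unregister(f);	/* undo only completed steps, */
err_neigh_cleanup:
	foo_neigh_cleanup(f);		/* in reverse order */
	return err;
}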
@@ -1045,9 +1082,12 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
void *ppriv = priv->ppriv;
+ struct mlx5e_priv *upriv;
unregister_netdev(rep->netdev);
-
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 91b1b0938931..5b499c7a698f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,10 +42,11 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
-static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
- return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
@@ -560,7 +561,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
- skb->mac_len = ETH_HLEN;
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -607,10 +607,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
-static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
{
__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+ ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}
@@ -620,6 +621,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
struct sk_buff *skb,
bool lro)
{
+ int network_depth = 0;
+
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
@@ -629,9 +632,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_first_ethertype_ip(skb)) {
+ if (is_last_ethertype_ip(skb, &network_depth)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). Add the VLAN
+ * header bytes to the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
rq->stats.csum_complete++;
return;
}
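The checksum hunk above closes the gap between what the NIC sums and what CHECKSUM_COMPLETE promises: the CQE checksum starts at the IP header, while the stack expects skb->csum to cover every byte after the Ethernet header, including any VLAN tags sitting in between. The added csum_partial() folds in exactly those network_depth - ETH_HLEN bytes. A worked instance, assuming a single 802.1Q tag:

/* One 802.1Q tag between the MAC and IP headers:
 *   network_depth = ETH_HLEN + VLAN_HLEN = 14 + 4 = 18
 * so the driver executes
 *   skb->csum = csum_partial(skb->data + 14, 4, skb->csum);
 * adding the 4 tag bytes (TPID + TCI) the CQE checksum skipped.
 * An untagged frame has network_depth == ETH_HLEN and takes the
 * fast path with no extra work.
 */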
@@ -659,9 +670,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
+ skb->mac_len = ETH_HLEN;
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -674,17 +685,20 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
rq->stats.lro_bytes += cqe_bcnt;
}
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
mlx5e_skb_set_hash(cqe, skb);
- if (cqe_has_vlan(cqe))
+ if (cqe_has_vlan(cqe)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->vlan_info));
+ rq->stats.removed_vlan_packets++;
+ }
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -795,6 +809,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
return false;
xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
@@ -1160,12 +1175,25 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
+ struct net_device *netdev;
char *pseudo_header;
+ u32 qpn;
u8 *dgid;
u8 g;
+ qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
+ netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
+
+ /* No mapping present, cannot process SKB. This might happen if a child
+ * interface is going down while unprocessed CQEs remain on the parent RQ.
+ */
+ if (unlikely(!netdev)) {
+ /* TODO: add drop counters support */
+ skb->dev = NULL;
+ pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
+ return;
+ }
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1186,8 +1214,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1227,6 +1256,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_free_wqe;
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!skb->dev)) {
+ dev_kfree_skb_any(skb);
+ goto wq_free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
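Read together, the two ipoib hunks above form a small producer/consumer protocol: mlx5i_complete_rx_cqe() returns void, so when no child netdev maps to the CQE's QPN it flags the skb with skb->dev = NULL, and mlx5i_handle_rx_cqe() honors that sentinel by freeing the skb instead of handing it to GRO. In outline (a condensed restatement of the hunks, not additional driver code):

/* producer: mark instead of erroring out */
if (unlikely(!netdev)) {
	skb->dev = NULL;		/* sentinel: QPN has no netdev */
	return;
}

/* consumer: check the sentinel before delivery */
if (unlikely(!skb->dev)) {
	dev_kfree_skb_any(skb);		/* drop (counters still TODO) */
	goto wq_free_wqe;
}
napi_gro_receive(rq->cq.napi, skb);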
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index acf32fe952cd..e401d9d245f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -63,7 +63,11 @@ profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
{
- return profile[cq_period_mode][ix];
+ struct mlx5e_cq_moder cq_moder;
+
+ cq_moder = profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
}
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
@@ -75,7 +79,7 @@ struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
- return profile[rx_cq_period_mode][default_profile_ix];
+ return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
}
/* Adaptive moderation logic */
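Both en_rx_am.c hunks serve one fix: the static profile table stores only usec/pkts pairs, so nothing recorded which CQ period mode a row came from. mlx5e_am_get_profile() now copies the entry by value and stamps cq_period_mode, and the default-profile getter is rerouted through it so every caller receives a fully populated struct; the cq_period_mode comparison added to en_main.c earlier in this patch depends on exactly that. The pattern, reduced to its essentials (illustrative types and sizes):

struct moder { u16 usec; u16 pkts; u8 cq_period_mode; };

static const struct moder tbl[2][5] = { /* usec/pkts pairs only */ };

static struct moder get_profile(u8 mode, int ix)
{
	struct moder m = tbl[mode][ix];	/* copy, never alias the table */

	m.cq_period_mode = mode;	/* fill in the implied field */
	return m;
}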
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
new file mode 100644
index 000000000000..b74ddc7984bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -0,0 +1,899 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+
+static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_SW_COUNTERS;
+}
+
+static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
+ return idx;
+}
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+
+static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+{
+ return priv->q_counter ? NUM_Q_COUNTERS : 0;
+}
+
+static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ return idx;
+}
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+
+static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_VPORT_COUNTERS;
+}
+
+static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+
+static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_802_3_COUNTERS;
+}
+
+static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+
+static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2863_COUNTERS;
+}
+
+static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+
+static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2819_COUNTERS;
+}
+
+static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
+#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+
+static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_COUNTERS : 0;
+}
+
+static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_ETH_EXT_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_eth_ext_stats_desc[] = {
+ { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
+};
+
+#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
+
+static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+{
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ return NUM_PPORT_ETH_EXT_COUNTERS;
+
+ return 0;
+}
+
+static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_eth_ext_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i);
+ return idx;
+}
+
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+#define PCIE_PERF_OFF64(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pcie_perf_stats_desc64[] = {
+ { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
+static const struct counter_desc pcie_perf_stall_stats_desc[] = {
+ { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
+ { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
+ { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
+ { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
+};
+
+#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
+#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
+
+static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+{
+ int num_stats = 0;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ num_stats += NUM_PCIE_PERF_COUNTERS;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ num_stats += NUM_PCIE_PERF_COUNTERS64;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
+
+ return num_stats;
+}
+
+static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc64[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stall_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+
+static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
+static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (mlx5e_query_global_pause_combined(priv) +
+ hweight8(mlx5e_query_pfc_combined(priv))) *
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
+ }
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ return idx;
+}
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+ { "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_high_temp", 48 }, /* high temperature */
+ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+};
+
+#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
+#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
+
+static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
+}
+
+static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
+ return idx;
+}
+
+static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ mlx5e_pme_status_desc, i);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ mlx5e_pme_error_desc, i);
+
+ return idx;
+}
+
+static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_ipsec_get_count(priv);
+}
+
+static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_strings(priv,
+ data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_stats(priv, data + idx);
+}
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+}
+
+static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channel_tc2txq[i][tc]);
+
+ return idx;
+}
+
+static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5e_channels *channels = &priv->channels;
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+ rq_stats_desc, j);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+ sq_stats_desc, j);
+
+ return idx;
+}
+
+const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
+ {
+ .get_num_stats = mlx5e_grp_sw_get_num_stats,
+ .fill_strings = mlx5e_grp_sw_fill_strings,
+ .fill_stats = mlx5e_grp_sw_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_q_get_num_stats,
+ .fill_strings = mlx5e_grp_q_fill_strings,
+ .fill_stats = mlx5e_grp_q_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_vport_get_num_stats,
+ .fill_strings = mlx5e_grp_vport_fill_strings,
+ .fill_stats = mlx5e_grp_vport_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_802_3_get_num_stats,
+ .fill_strings = mlx5e_grp_802_3_fill_strings,
+ .fill_stats = mlx5e_grp_802_3_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2863_get_num_stats,
+ .fill_strings = mlx5e_grp_2863_fill_strings,
+ .fill_stats = mlx5e_grp_2863_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2819_get_num_stats,
+ .fill_strings = mlx5e_grp_2819_fill_strings,
+ .fill_stats = mlx5e_grp_2819_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_phy_get_num_stats,
+ .fill_strings = mlx5e_grp_phy_fill_strings,
+ .fill_stats = mlx5e_grp_phy_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
+ .fill_strings = mlx5e_grp_eth_ext_fill_strings,
+ .fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pcie_get_num_stats,
+ .fill_strings = mlx5e_grp_pcie_fill_strings,
+ .fill_stats = mlx5e_grp_pcie_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pme_get_num_stats,
+ .fill_strings = mlx5e_grp_pme_fill_strings,
+ .fill_stats = mlx5e_grp_pme_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
+ .fill_strings = mlx5e_grp_ipsec_fill_strings,
+ .fill_stats = mlx5e_grp_ipsec_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_channels_get_num_stats,
+ .fill_strings = mlx5e_grp_channels_fill_strings,
+ .fill_stats = mlx5e_grp_channels_fill_stats,
+ }
+};
+
+const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
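en_stats.c closes by exporting the group table and its length; the natural consumers are the ethtool hooks, which would sum get_num_stats() across groups for the string-set count and thread a running index through the fill callbacks. A sketch of that driving loop, assuming this is how the ethtool side consumes the table (the real glue lives elsewhere in the driver and may differ):

static int foo_get_sset_count(struct mlx5e_priv *priv)
{
	int i, num_stats = 0;

	for (i = 0; i < mlx5e_num_stats_grps; i++)
		num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
	return num_stats;
}

static void foo_fill_stats(struct mlx5e_priv *priv, u64 *data)
{
	int i, idx = 0;

	/* each fill_stats() returns the next free slot */
	for (i = 0; i < mlx5e_num_stats_grps; i++)
		idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
}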
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f8637213afc0..d679e21f686e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -59,8 +59,10 @@ struct mlx5e_sw_stats {
u64 tx_tso_bytes;
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
+ u64 tx_added_vlan_packets;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
@@ -91,54 +93,10 @@ struct mlx5e_sw_stats {
u64 link_down_events_phy;
};
-static const struct counter_desc sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
-};
-
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
};
-static const struct counter_desc q_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
-};
-
-#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
vstats->query_vport_out, c)
@@ -146,83 +104,22 @@ struct mlx5e_vport_stats {
__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
-static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_unicast_packets",
- VPORT_COUNTER_OFF(received_eth_unicast.packets) },
- { "rx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(received_eth_unicast.octets) },
- { "tx_vport_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
- { "tx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
- { "rx_vport_multicast_packets",
- VPORT_COUNTER_OFF(received_eth_multicast.packets) },
- { "rx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(received_eth_multicast.octets) },
- { "tx_vport_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
- { "tx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
- { "rx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
- { "rx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
- { "tx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
- { "tx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
- { "rx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(received_ib_unicast.packets) },
- { "rx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(received_ib_unicast.octets) },
- { "tx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
- { "tx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
- { "rx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(received_ib_multicast.packets) },
- { "rx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(received_ib_multicast.octets) },
- { "tx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
- { "tx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
-};
-
-#define PPORT_802_3_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_PHY_STATISTICAL_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
-#define PPORT_PER_PRIO_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
-#define PPORT_ETH_EXT_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -237,82 +134,10 @@ struct mlx5e_pport_stats {
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
-static const struct counter_desc pport_802_3_stats_desc[] = {
- { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
- { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
- { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
-};
-
-static const struct counter_desc pport_2863_stats_desc[] = {
- { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
- { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
- { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
-};
-
-static const struct counter_desc pport_2819_stats_desc[] = {
- { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
- { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
- { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
-};
-
-static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
- { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
-};
-
-static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
-};
-
-static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- /* %s is "global" or "prio{i}" */
- { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
-};
-
-static const struct counter_desc pport_eth_ext_stats_desc[] = {
- { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
-};
-
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_OFF64(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
@@ -321,22 +146,6 @@ struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_perf_stats_desc64[] = {
- { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
-};
-
-static const struct counter_desc pcie_perf_stall_stats_desc[] = {
- { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
- { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
- { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
- { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -346,6 +155,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_tx;
u64 xdp_tx_full;
@@ -362,31 +172,6 @@ struct mlx5e_rq_stats {
u64 cache_waive;
};
-static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
-};
-
struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
@@ -398,6 +183,7 @@ struct mlx5e_sq_stats {
u64 tso_inner_bytes;
u64 csum_partial;
u64 csum_partial_inner;
+ u64 added_vlan_packets;
u64 nop;
/* less likely accessed in data path */
u64 csum_none;
@@ -406,61 +192,6 @@ struct mlx5e_sq_stats {
u64 dropped;
};
-static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
-};
-
-#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
-#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
- (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-#define NUM_PCIE_PERF_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
-#define NUM_PCIE_PERF_COUNTERS64(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc64) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
-#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
-#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
- (ARRAY_SIZE(pport_eth_ext_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
-#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
- NUM_PPORT_2863_COUNTERS + \
- NUM_PPORT_2819_COUNTERS + \
- NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
- NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
- NUM_PPORT_PRIO + \
- NUM_PPORT_ETH_EXT_COUNTERS(priv))
-#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \
- NUM_PCIE_PERF_COUNTERS64(priv) +\
- NUM_PCIE_PERF_STALL_COUNTERS(priv))
-#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
-#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
-
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -470,14 +201,14 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
};
-static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
-};
+extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
+extern const int mlx5e_num_stats_grps;
#endif /* __MLX5_EN_STATS_H__ */
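
The hunks above replace the per-file counter_desc arrays and NUM_* macros with a table of mlx5e_stats_grp callbacks, so each stats group reports its own count and fills its own strings and values. Below is a minimal userspace sketch of that callback-table pattern, trimmed to two callbacks; demo_priv, the sample group, and main() are invented for illustration, and only the callback-table shape comes from the header.

#include <stdio.h>

struct demo_priv { int num_channels; };

/* illustrative stand-in for mlx5e_stats_grp */
struct demo_stats_grp {
	int (*get_num_stats)(struct demo_priv *priv);
	int (*fill_strings)(struct demo_priv *priv, char **data, int idx);
};

static int sw_num_stats(struct demo_priv *priv)
{
	return 2 * priv->num_channels;	/* e.g. packets + bytes per channel */
}

static int sw_fill_strings(struct demo_priv *priv, char **data, int idx)
{
	/* a real group writes fixed-size ETH_GSTRING_LEN entries */
	for (int ch = 0; ch < priv->num_channels; ch++) {
		data[idx++] = "rx_packets";
		data[idx++] = "rx_bytes";
	}
	return idx;	/* next free slot, as in the header above */
}

static const struct demo_stats_grp grps[] = {
	{ .get_num_stats = sw_num_stats, .fill_strings = sw_fill_strings },
};

int main(void)
{
	struct demo_priv priv = { .num_channels = 2 };
	char *names[16];
	int total = 0;

	for (unsigned i = 0; i < sizeof(grps) / sizeof(grps[0]); i++) {
		total += grps[i].get_num_stats(&priv);
		grps[i].fill_strings(&priv, names, 0);
	}
	printf("%d stats exposed, first is %s\n", total, names[0]);
	return 0;
}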
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba1f72060aa..55979ec2e88a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -90,8 +90,8 @@ enum {
MLX5_HEADER_TYPE_NVGRE = 0x1,
};
-#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mod_hdr_key {
int num_actions;
@@ -263,10 +263,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_PRIO,
- MLX5E_TC_TABLE_NUM_ENTRIES,
+ tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
0, 0);
if (IS_ERR(priv->fs.tc.t)) {
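
The en_tc.c hunk sizes the TC flow table from device capabilities instead of a fixed 1024 entries: the group size is clamped to both the flow-counter capability and 2^16, and the table size to 2^log_max_ft_size. A small standalone sketch of that arithmetic, with made-up capability values standing in for the MLX5_CAP_* reads:

#include <stdint.h>
#include <stdio.h>

#define TC_TABLE_NUM_GROUPS	4
#define TC_TABLE_MAX_GROUP_SZ	(1 << 16)

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical capability values; the driver reads these from
	 * MLX5_CAP_GEN() and MLX5_CAP_FLOWTABLE_NIC_RX() */
	uint32_t max_flow_counter = (0x0001u << 16) | 0x0000u;	/* 65536 */
	int log_max_ft_size = 17;

	int grp_size = min_int((int)max_flow_counter, TC_TABLE_MAX_GROUP_SZ);
	int tbl_size = min_int(grp_size * TC_TABLE_NUM_GROUPS,
			       1 << log_max_ft_size);

	printf("group=%d entries, table=%d entries\n", grp_size, tbl_size);
	return 0;
}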
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1d6925d4369a..569b42a01026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,9 +32,11 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -85,6 +87,20 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -96,8 +112,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!netdev_get_num_tc(dev))
return channel_ix;
- if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+ up = mlx5e_get_dscp_up(priv, skb);
+ else
+#endif
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
@@ -340,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
ihs += VLAN_HLEN;
+ sq->stats.added_vlan_packets++;
} else {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
@@ -348,7 +370,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
+ eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+ sq->stats.added_vlan_packets++;
}
headlen = skb_len - skb->data_len;
@@ -452,8 +477,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
SKBTX_HW_TSTAMP)) {
struct skb_shared_hwtstamps hwts = {};
- mlx5e_fill_hwstamp(sq->tstamp,
- get_cqe_ts(cqe), &hwts);
+ hwts.hwtstamp =
+ mlx5_timecounter_cyc2time(sq->clock,
+ get_cqe_ts(cqe));
skb_tstamp_tx(skb, &hwts);
}
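
mlx5e_select_queue() now derives the user priority from the IP DSCP field when the port trust state is DSCP, falling back to the VLAN PCP bits otherwise. A compact userspace model of that decision follows; port_state and select_up() are illustrative stand-ins for priv->dcbx_dp and the driver code, and only the >>2 DSCP extraction and >>13 PCP shift mirror the hunk.

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13

enum trust_state { TRUST_PCP, TRUST_DSCP };

/* hypothetical per-port state; dscp2prio mirrors the 64-entry map
 * the driver keeps in priv->dcbx_dp */
struct port_state {
	enum trust_state trust;
	uint8_t dscp2prio[64];
};

static int select_up(const struct port_state *p, uint8_t ip_tos,
		     int has_vlan, uint16_t vlan_tci)
{
	if (p->trust == TRUST_DSCP)
		return p->dscp2prio[ip_tos >> 2];	/* DSCP = DS field >> 2 */
	if (has_vlan)
		return vlan_tci >> VLAN_PRIO_SHIFT;	/* 802.1p PCP bits */
	return 0;
}

int main(void)
{
	struct port_state p = { .trust = TRUST_DSCP };

	p.dscp2prio[46 /* EF */] = 5;
	printf("up=%d\n", select_up(&p, 46 << 2, 0, 0));
	return 0;
}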
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc606bfd1d6e..60771865c99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -491,8 +491,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_PPS_EVENT:
- if (dev->event)
- dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+ mlx5_pps_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_FPGA_ERROR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c77f4c0c7769..bbb140f517c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -157,7 +157,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d9fd8570b07c..1143d80119bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -306,7 +306,7 @@ static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -395,7 +395,7 @@ out_err:
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -670,7 +670,7 @@ struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
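
The dest = {} changes above zero-initialize mlx5_flow_destination on the stack, so union members that a given caller never sets are not read as stack garbage by the steering code. A tiny demo of the difference, using = {0}, the portable spelling of the kernel's GNU = {}; the struct here is a simplified stand-in, not the real mlx5_flow_destination:

#include <stdio.h>
#include <string.h>

struct dest {
	int type;
	union {
		unsigned tir_num;
		unsigned vport_num;
		void *ft;
	};
};

int main(void)
{
	struct dest a;		/* unset members hold stack garbage */
	struct dest b = {0};	/* every member zeroed, like the diff */

	memset(&a, 0xAA, sizeof(a));	/* simulate leftover stack data */
	a.type = 1;
	b.type = 1;
	printf("a.tir_num=%#x b.tir_num=%#x\n", a.tir_num, b.tir_num);
	return 0;
}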
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 36ecc2b2e187..881e2e55840c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,7 +40,8 @@
#include "eswitch.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft, u32 underlay_qpn)
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
@@ -52,7 +53,15 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+
+ if (disconnect) {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ MLX5_SET(set_flow_table_root_in, in, table_id, 0);
+ } else {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ }
+
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index c6d7bdf255b6..71e2d0f37ad9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,8 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- u32 underlay_qpn);
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect);
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a7bea688ec8..c70fd663a633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -145,10 +145,10 @@ static struct init_tree_node {
}
};
-enum fs_i_mutex_lock_class {
- FS_MUTEX_GRANDPARENT,
- FS_MUTEX_PARENT,
- FS_MUTEX_CHILD
+enum fs_i_lock_class {
+ FS_LOCK_GRANDPARENT,
+ FS_LOCK_PARENT,
+ FS_LOCK_CHILD
};
static const struct rhashtable_params rhash_fte = {
@@ -168,10 +168,16 @@ static const struct rhashtable_params rhash_fg = {
};
-static void del_rule(struct fs_node *node);
-static void del_flow_table(struct fs_node *node);
-static void del_flow_group(struct fs_node *node);
-static void del_fte(struct fs_node *node);
+static void del_hw_flow_table(struct fs_node *node);
+static void del_hw_flow_group(struct fs_node *node);
+static void del_hw_fte(struct fs_node *node);
+static void del_sw_flow_table(struct fs_node *node);
+static void del_sw_flow_group(struct fs_node *node);
+static void del_sw_fte(struct fs_node *node);
+/* Deleting a rule (destination) is a special case that
+ * requires locking the FTE for the whole deletion process.
+ */
+static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
@@ -179,20 +185,22 @@ find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
- unsigned int refcount,
- void (*remove_func)(struct fs_node *))
+ void (*del_hw_func)(struct fs_node *),
+ void (*del_sw_func)(struct fs_node *))
{
- atomic_set(&node->refcount, refcount);
+ refcount_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
- mutex_init(&node->lock);
- node->remove_func = remove_func;
+ init_rwsem(&node->lock);
+ node->del_hw_func = del_hw_func;
+ node->del_sw_func = del_sw_func;
+ node->active = false;
}
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
if (parent)
- atomic_inc(&parent->refcount);
+ refcount_inc(&parent->refcount);
node->parent = parent;
/* Parent is the root */
@@ -202,58 +210,78 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent)
node->root = parent->root;
}
-static void tree_get_node(struct fs_node *node)
+static int tree_get_node(struct fs_node *node)
{
- atomic_inc(&node->refcount);
+ return refcount_inc_not_zero(&node->refcount);
}
-static void nested_lock_ref_node(struct fs_node *node,
- enum fs_i_mutex_lock_class class)
+static void nested_down_read_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock_nested(&node->lock, class);
- atomic_inc(&node->refcount);
+ down_read_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void lock_ref_node(struct fs_node *node)
+static void nested_down_write_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock(&node->lock);
- atomic_inc(&node->refcount);
+ down_write_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void unlock_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node)
{
if (node) {
- atomic_dec(&node->refcount);
- mutex_unlock(&node->lock);
+ down_write(&node->lock);
+ refcount_inc(&node->refcount);
}
}
+static void up_read_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_read(&node->lock);
+}
+
+static void up_write_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_write(&node->lock);
+}
+
static void tree_put_node(struct fs_node *node)
{
struct fs_node *parent_node = node->parent;
- lock_ref_node(parent_node);
- if (atomic_dec_and_test(&node->refcount)) {
- if (parent_node)
+ if (refcount_dec_and_test(&node->refcount)) {
+ if (node->del_hw_func)
+ node->del_hw_func(node);
+ if (parent_node) {
+ /* Only the root namespace has no parent; its node just
+ * needs to be freed.
+ */
+ down_write_ref_node(parent_node);
list_del_init(&node->list);
- if (node->remove_func)
- node->remove_func(node);
- kfree(node);
+ if (node->del_sw_func)
+ node->del_sw_func(node);
+ up_write_ref_node(parent_node);
+ } else {
+ kfree(node);
+ }
node = NULL;
}
- unlock_ref_node(parent_node);
if (!node && parent_node)
tree_put_node(parent_node);
}
static int tree_remove_node(struct fs_node *node)
{
- if (atomic_read(&node->refcount) > 1) {
- atomic_dec(&node->refcount);
+ if (refcount_read(&node->refcount) > 1) {
+ refcount_dec(&node->refcount);
return -EEXIST;
}
tree_put_node(node);
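
tree_get_node() switching from atomic_inc() to refcount_inc_not_zero() means lookups can now race with deletion safely: a reference is taken only if the node has not already hit zero. A userspace model of that primitive with C11 atomics, for illustration only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace model of refcount_inc_not_zero(): only take a reference
 * if the object has not already dropped to zero (i.e. is not being
 * freed by a concurrent tree_put_node()). */
static bool get_ref(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* safe to use the node */
	return false;			/* node is dying, skip it */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live: %d, dying: %d\n", get_ref(&live), get_ref(&dying));
	return 0;
}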
@@ -362,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
return container_of(ns, struct mlx5_flow_root_namespace, ns);
}
+static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev->priv.steering;
+ return NULL;
+}
+
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root = find_root(node);
@@ -371,26 +408,36 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
-static void del_flow_table(struct fs_node *node)
+static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- struct fs_prio *prio;
int err;
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
- err = mlx5_cmd_destroy_flow_table(dev, ft);
- if (err)
- mlx5_core_warn(dev, "flow steering can't destroy ft\n");
- ida_destroy(&ft->fte_allocator);
+ if (node->active) {
+ err = mlx5_cmd_destroy_flow_table(dev, ft);
+ if (err)
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+ }
+}
+
+static void del_sw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct fs_prio *prio;
+
+ fs_get_obj(ft, node);
+
rhltable_destroy(&ft->fgs_hash);
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
+ kfree(ft);
}
-static void del_rule(struct fs_node *node)
+static void del_sw_hw_rule(struct fs_node *node)
{
struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
@@ -406,7 +453,6 @@ static void del_rule(struct fs_node *node)
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_rule(rule);
- list_del(&rule->node.list);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
@@ -434,117 +480,203 @@ out:
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
}
+ kfree(rule);
}
-static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg)
+static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_table *ft;
- int ret;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
- ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- WARN_ON(ret);
- fte->status = 0;
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
- ida_simple_remove(&ft->fte_allocator, fte->index);
+
+ trace_mlx5_fs_del_fte(fte);
+ dev = get_dev(&ft->node);
+ if (node->active) {
+ err = mlx5_cmd_delete_fte(dev, ft,
+ fte->index);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ }
}
-static void del_fte(struct fs_node *node)
+static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev;
struct fs_fte *fte;
int err;
fs_get_obj(fte, node);
fs_get_obj(fg, fte->node.parent);
- fs_get_obj(ft, fg->node.parent);
- trace_mlx5_fs_del_fte(fte);
- dev = get_dev(&ft->node);
- err = mlx5_cmd_delete_fte(dev, ft,
- fte->index);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
-
- destroy_fte(fte, fg);
+ err = rhashtable_remove_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ WARN_ON(err);
+ ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ kmem_cache_free(steering->ftes_cache, fte);
}
-static void del_flow_group(struct fs_node *node)
+static void del_hw_flow_group(struct fs_node *node)
{
struct mlx5_flow_group *fg;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- int err;
fs_get_obj(fg, node);
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
trace_mlx5_fs_del_fg(fg);
- if (ft->autogroup.active)
- ft->autogroup.num_groups--;
+ if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static void del_sw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
rhashtable_destroy(&fg->ftes_hash);
+ ida_destroy(&fg->fte_allocator);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
rhash_fg);
WARN_ON(err);
- if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
+{
+ int index;
+ int ret;
+
+ index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ fte->index = index + fg->start_index;
+ ret = rhashtable_insert_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ if (ret)
+ goto err_ida_remove;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add_tail(&fte->node.list, &fg->node.children);
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&fg->fte_allocator, index);
+ return ret;
}
-static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
+static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
u32 *match_value,
- unsigned int index)
+ struct mlx5_flow_act *flow_act)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct fs_fte *fte;
- fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+ fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
if (!fte)
return ERR_PTR(-ENOMEM);
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->flow_tag = flow_act->flow_tag;
- fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
fte->modify_id = flow_act->modify_id;
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+
return fte;
}
-static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+static void dealloc_flow_group(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_group *fg)
+{
+ rhashtable_destroy(&fg->ftes_hash);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index)
{
struct mlx5_flow_group *fg;
- void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- create_fg_in, match_criteria);
- u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
- create_fg_in,
- match_criteria_enable);
int ret;
- fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+ fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
if (!fg)
return ERR_PTR(-ENOMEM);
ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
if (ret) {
- kfree(fg);
+ kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
- }
+}
+ ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
sizeof(fg->mask.match_criteria));
fg->node.type = FS_TYPE_FLOW_GROUP;
- fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
- start_flow_index);
- fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
- end_flow_index) - fg->start_index + 1;
+ fg->start_index = start_index;
+ fg->max_ftes = end_index - start_index + 1;
+
+ return fg;
+}
+
+static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index,
+ struct list_head *prev)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *fg;
+ int ret;
+
+ fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
+ start_index, end_index);
+ if (IS_ERR(fg))
+ return fg;
+
+ /* initialize refcnt, add to parent list */
+ ret = rhltable_insert(&ft->fgs_hash,
+ &fg->hash,
+ rhash_fg);
+ if (ret) {
+ dealloc_flow_group(steering, fg);
+ return ERR_PTR(ret);
+ }
+
+ tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, prev);
+ atomic_inc(&ft->node.version);
+
return fg;
}
@@ -575,7 +707,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
- ida_init(&ft->fte_allocator);
return ft;
}
@@ -693,8 +824,10 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
*prio)
{
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_ft_underlay_qp *uqp;
int min_level = INT_MAX;
int err;
+ u32 qpn;
if (root->root_ft)
min_level = root->root_ft->level;
@@ -702,10 +835,24 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (ft->level >= min_level)
return 0;
- err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (use zero) when the QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
+ false);
+ if (err)
+ break;
+ }
+ }
+
if (err)
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
else
root->root_ft = ft;
@@ -724,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- lock_ref_node(&fte->node);
+ down_write_ref_node(&fte->node);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -733,7 +880,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
ft, fg->id,
modify_mask,
fte);
- unlock_ref_node(&fte->node);
+ up_write_ref_node(&fte->node);
return err;
}
@@ -765,7 +912,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
struct mlx5_flow_table *new_next_ft,
struct mlx5_flow_table *old_next_ft)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_rule *iter;
int err = 0;
@@ -870,7 +1017,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
goto unlock_root;
}
- tree_init_node(&ft->node, 1, del_flow_table);
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
@@ -882,17 +1029,17 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = connect_flow_table(root->dev, ft, fs_prio);
if (err)
goto destroy_ft;
- lock_ref_node(&fs_prio->node);
+ ft->node.active = true;
+ down_write_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- unlock_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
return ft;
destroy_ft:
mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
- ida_destroy(&ft->fte_allocator);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -960,54 +1107,6 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-/* Flow table should be locked */
-static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
- u32 *fg_in,
- struct list_head
- *prev_fg,
- bool is_auto_fg)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev = get_dev(&ft->node);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- fg = alloc_flow_group(fg_in);
- if (IS_ERR(fg))
- return fg;
-
- err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg);
- if (err)
- goto err_free_fg;
-
- err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
- if (err)
- goto err_remove_fg;
-
- if (ft->autogroup.active)
- ft->autogroup.num_groups++;
- /* Add node to tree */
- tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
- tree_add_node(&fg->node, &ft->node);
- /* Add node to group list */
- list_add(&fg->node.list, prev_fg);
-
- trace_mlx5_fs_add_fg(fg);
- return fg;
-
-err_remove_fg:
- WARN_ON(rhltable_remove(&ft->fgs_hash,
- &fg->hash,
- rhash_fg));
-err_free_fg:
- rhashtable_destroy(&fg->ftes_hash);
- kfree(fg);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u32 *fg_in)
{
@@ -1016,7 +1115,13 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
fg_in,
match_criteria_enable);
+ int start_index = MLX5_GET(create_flow_group_in, fg_in,
+ start_flow_index);
+ int end_index = MLX5_GET(create_flow_group_in, fg_in,
+ end_flow_index);
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
+ int err;
if (!check_valid_mask(match_criteria_enable, match_criteria))
return ERR_PTR(-EINVAL);
@@ -1024,9 +1129,21 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
- unlock_ref_node(&ft->node);
+ down_write_ref_node(&ft->node);
+ fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
+ start_index, end_index,
+ ft->node.children.prev);
+ up_write_ref_node(&ft->node);
+ if (IS_ERR(fg))
+ return fg;
+
+ err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ if (err) {
+ tree_put_node(&fg->node);
+ return ERR_PTR(err);
+ }
+ trace_mlx5_fs_add_fg(fg);
+ fg->node.active = true;
return fg;
}
@@ -1067,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
int i)
{
for (; --i >= 0;) {
- if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+ if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
@@ -1098,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte,
if (dest) {
rule = find_flow_rule(fte, dest + i);
if (rule) {
- atomic_inc(&rule->node.refcount);
+ refcount_inc(&rule->node.refcount);
goto rule_found;
}
}
@@ -1111,7 +1228,7 @@ create_flow_handle(struct fs_fte *fte,
/* Add dest to dests list- we need flow tables to be in the
* end of the list for forward to next prio rules.
*/
- tree_init_node(&rule->node, 1, del_rule);
+ tree_init_node(&rule->node, NULL, del_sw_hw_rule);
if (dest &&
dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
list_add(&rule->node.list, &fte->node.children);
@@ -1167,7 +1284,9 @@ add_rule_fte(struct fs_fte *fte,
if (err)
goto free_handle;
+ fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
+ atomic_inc(&fte->node.version);
out:
return handle;
@@ -1177,59 +1296,17 @@ free_handle:
return ERR_PTR(err);
}
-static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
- u32 *match_value,
- struct mlx5_flow_act *flow_act)
-{
- struct mlx5_flow_table *ft;
- struct fs_fte *fte;
- int index;
- int ret;
-
- fs_get_obj(ft, fg->node.parent);
- index = ida_simple_get(&ft->fte_allocator, fg->start_index,
- fg->start_index + fg->max_ftes,
- GFP_KERNEL);
- if (index < 0)
- return ERR_PTR(index);
-
- fte = alloc_fte(flow_act, match_value, index);
- if (IS_ERR(fte)) {
- ret = PTR_ERR(fte);
- goto err_alloc;
- }
- ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- if (ret)
- goto err_hash;
-
- return fte;
-
-err_hash:
- kfree(fte);
-err_alloc:
- ida_simple_remove(&ft->fte_allocator, index);
- return ERR_PTR(ret);
-}
-
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria)
+static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = &ft->node.children;
- unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
- void *match_criteria_addr;
+ unsigned int candidate_index = 0;
unsigned int group_size = 0;
- u32 *in;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return ERR_PTR(-ENOMEM);
-
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
/* We save place for flow groups in addition to max types */
group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
@@ -1247,25 +1324,55 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte) {
- fg = ERR_PTR(-ENOSPC);
+ if (candidate_index + group_size > ft->max_fte)
+ return ERR_PTR(-ENOSPC);
+
+ fg = alloc_insert_flow_group(ft,
+ spec->match_criteria_enable,
+ spec->match_criteria,
+ candidate_index,
+ candidate_index + group_size - 1,
+ prev);
+ if (IS_ERR(fg))
goto out;
- }
+
+ ft->autogroup.num_groups++;
+
+out:
+ return fg;
+}
+
+static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *match_criteria_addr;
+ int err;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable);
- MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
- MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
- group_size - 1);
+ fg->mask.match_criteria_enable);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
+ fg->max_ftes - 1);
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
- memcpy(match_criteria_addr, match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(match_criteria_addr, fg->mask.match_criteria,
+ sizeof(fg->mask.match_criteria));
+
+ err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
+ if (!err) {
+ fg->node.active = true;
+ trace_mlx5_fs_add_fg(fg);
+ }
- fg = create_flow_group_common(ft, in, prev, true);
-out:
kvfree(in);
- return fg;
+ return err;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -1340,60 +1447,30 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
struct mlx5_flow_handle *handle;
- struct mlx5_flow_table *ft;
+ int old_action;
int i;
+ int ret;
- if (fte) {
- int old_action;
- int ret;
-
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- ret = check_conflicting_ftes(fte, flow_act);
- if (ret) {
- handle = ERR_PTR(ret);
- goto unlock_fte;
- }
-
- old_action = fte->action;
- fte->action |= flow_act->action;
- handle = add_rule_fte(fte, fg, dest, dest_num,
- old_action != flow_act->action);
- if (IS_ERR(handle)) {
- fte->action = old_action;
- goto unlock_fte;
- } else {
- trace_mlx5_fs_set_fte(fte, false);
- goto add_rules;
- }
- }
- fs_get_obj(ft, fg->node.parent);
+ ret = check_conflicting_ftes(fte, flow_act);
+ if (ret)
+ return ERR_PTR(ret);
- fte = create_fte(fg, match_value, flow_act);
- if (IS_ERR(fte))
- return (void *)fte;
- tree_init_node(&fte->node, 0, del_fte);
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- handle = add_rule_fte(fte, fg, dest, dest_num, false);
+ old_action = fte->action;
+ fte->action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
if (IS_ERR(handle)) {
- unlock_ref_node(&fte->node);
- destroy_fte(fte, fg);
- kfree(fte);
+ fte->action = old_action;
return handle;
}
+ trace_mlx5_fs_set_fte(fte, false);
- tree_add_node(&fte->node, &fg->node);
- /* fte list isn't sorted */
- list_add_tail(&fte->node.list, &fg->node.children);
- trace_mlx5_fs_set_fte(fte, true);
-add_rules:
for (i = 0; i < handle->num_rules; i++) {
- if (atomic_read(&handle->rule[i]->node.refcount) == 1) {
+ if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
}
-unlock_fte:
- unlock_ref_node(&fte->node);
return handle;
}
@@ -1441,93 +1518,197 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
}
-static struct mlx5_flow_handle *
-try_add_to_existing_fg(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest,
- int dest_num)
-{
+struct match_list {
+ struct list_head list;
struct mlx5_flow_group *g;
- struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT);
+};
+
+struct match_list_head {
+ struct list_head list;
+ struct match_list first;
+};
+
+static void free_match_list(struct match_list_head *head)
+{
+ if (!list_empty(&head->list)) {
+ struct match_list *iter, *match_tmp;
+
+ list_del(&head->first.list);
+ tree_put_node(&head->first.g->node);
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+}
+
+static int build_match_list(struct match_list_head *match_head,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
+{
struct rhlist_head *tmp, *list;
- struct match_list {
- struct list_head list;
- struct mlx5_flow_group *g;
- } match_list, *iter;
- LIST_HEAD(match_head);
+ struct mlx5_flow_group *g;
+ int err = 0;
rcu_read_lock();
+ INIT_LIST_HEAD(&match_head->list);
/* Collect all fgs that have a matching match_criteria */
list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
+ /* RCU read side is atomic context; we can't execute FW commands here */
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head))) {
- match_list.g = g;
- list_add_tail(&match_list.list, &match_head);
+ if (likely(list_empty(&match_head->list))) {
+ if (!tree_get_node(&g->node))
+ continue;
+ match_head->first.g = g;
+ list_add_tail(&match_head->first.list,
+ &match_head->list);
continue;
}
- curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- rcu_read_unlock();
- rule = ERR_PTR(-ENOMEM);
- goto free_list;
+ free_match_list(match_head);
+ err = -ENOMEM;
+ goto out;
+ }
+ if (!tree_get_node(&g->node)) {
+ kfree(curr_match);
+ continue;
}
curr_match->g = g;
- list_add_tail(&curr_match->list, &match_head);
+ list_add_tail(&curr_match->list, &match_head->list);
}
+out:
rcu_read_unlock();
+ return err;
+}
+
+static u64 matched_fgs_get_version(struct list_head *match_head)
+{
+ struct match_list *iter;
+ u64 version = 0;
+
+ list_for_each_entry(iter, match_head, list)
+ version += (u64)atomic_read(&iter->g->node.version);
+ return version;
+}
+static struct mlx5_flow_handle *
+try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ struct list_head *match_head,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int ft_version)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+ struct match_list *iter;
+ bool take_write = false;
+ struct fs_fte *fte;
+ u64 version;
+ int err;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte))
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(iter, match_head, list) {
+ nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
+ ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
+ }
+
+search_again_locked:
+ version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
- list_for_each_entry(iter, &match_head, list) {
- struct fs_fte *fte;
+ list_for_each_entry(iter, match_head, list) {
+ struct fs_fte *fte_tmp;
g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (fte) {
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte);
- unlock_ref_node(&g->node);
- goto free_list;
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ continue;
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ } else {
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
}
- unlock_ref_node(&g->node);
+
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte_tmp);
+ up_write_ref_node(&fte_tmp->node);
+ tree_put_node(&fte_tmp->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
}
/* No group with matching fte found. Try to add a new fte to any
* matching fg.
*/
- list_for_each_entry(iter, &match_head, list) {
- g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, NULL);
- if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) {
- unlock_ref_node(&g->node);
- goto free_list;
- }
- unlock_ref_node(&g->node);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ list_for_each_entry(iter, match_head, list)
+ nested_down_write_ref_node(&iter->g->node,
+ FS_LOCK_PARENT);
+ take_write = true;
}
-free_list:
- if (!list_empty(&match_head)) {
- struct match_list *match_tmp;
+ /* Check the ft version, in case a new flow group
+ * was added while the fgs weren't locked
+ */
+ if (atomic_read(&ft->node.version) != ft_version) {
+ rule = ERR_PTR(-EAGAIN);
+ goto out;
+ }
- /* The most common case is having one FG. Since we want to
- * optimize this case, we save the first on the stack.
- * Therefore, no need to free it.
- */
- list_del(&list_first_entry(&match_head, typeof(*iter), list)->list);
- list_for_each_entry_safe(iter, match_tmp, &match_head, list) {
- list_del(&iter->list);
- kfree(iter);
+ /* Check the fgs version, in case an FTE with the
+ * same values was added while the fgs weren't locked
+ */
+ if (version != matched_fgs_get_version(match_head))
+ goto search_again_locked;
+
+ list_for_each_entry(iter, match_head, list) {
+ g = iter->g;
+
+ if (!g->node.active)
+ continue;
+ err = insert_fte(g, fte);
+ if (err) {
+ if (err == -ENOSPC)
+ continue;
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return ERR_PTR(err);
}
- }
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+out:
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
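
The retry logic above is optimistic concurrency: record the fg versions while holding only read locks, search, and if a version moved by the time the write locks are taken, search again (or bubble -EAGAIN up to _mlx5_add_flow_rules()). A generic sketch of that shape, with an invented table type and the locking elided into comments:

#include <stdatomic.h>

struct table { atomic_int version; /* ... entries ... */ };

static int insert_entry(struct table *t)
{
	int tries = 0;
	int ver;

retry:
	ver = atomic_load(&t->version);

	/* ... search existing entries under the read lock ... */

	/* after upgrading to the write lock another writer may have
	 * added a matching entry; detect that and restart the search */
	if (atomic_load(&t->version) != ver && ++tries < 10)
		goto retry;	/* mirrors the -EAGAIN retry in the diff */

	atomic_fetch_add(&t->version, 1);	/* publish our insertion */
	return 0;
}

int main(void)
{
	struct table t = { 0 };

	return insert_entry(&t);
}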
@@ -1539,8 +1720,14 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
+ struct match_list_head match_head;
+ bool take_write = false;
+ struct fs_fte *fte;
+ int version;
+ int err;
int i;
if (!check_valid_spec(spec))
@@ -1550,33 +1737,73 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
+ nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+search_again_locked:
+ version = atomic_read(&ft->node.version);
+
+ /* Collect all fgs that have a matching match_criteria */
+ err = build_match_list(&match_head, ft, spec);
+ if (err)
+ return ERR_PTR(err);
- nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
- rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
- if (!IS_ERR(rule))
- goto unlock;
+ if (!take_write)
+ up_read_ref_node(&ft->node);
- g = create_autogroup(ft, spec->match_criteria_enable,
- spec->match_criteria);
+ rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
+ dest_num, version);
+ free_match_list(&match_head);
+ if (!IS_ERR(rule) ||
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ return rule;
+
+ if (!take_write) {
+ nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+ take_write = true;
+ }
+
+ if (PTR_ERR(rule) == -EAGAIN ||
+ version != atomic_read(&ft->node.version))
+ goto search_again_locked;
+
+ g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = (void *)g;
- goto unlock;
+ up_write_ref_node(&ft->node);
+ return rule;
}
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
- dest_num, NULL);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- unlock_ref_node(&ft->node);
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- return rule;
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ up_write_ref_node(&ft->node);
+
+ err = create_auto_flow_group(ft, g);
+ if (err)
+ goto err_release_fg;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ goto err_release_fg;
}
-unlock:
- unlock_ref_node(&ft->node);
+
+ err = insert_fte(g, fte);
+ if (err) {
+ kmem_cache_free(steering->ftes_cache, fte);
+ goto err_release_fg;
+ }
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest,
+ dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ tree_put_node(&g->node);
return rule;
+
+err_release_fg:
+ up_write_ref_node(&g->node);
+ tree_put_node(&g->node);
+ return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
@@ -1593,7 +1820,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
- struct mlx5_flow_destination gen_dest;
+ struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
@@ -1661,23 +1888,43 @@ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct mlx5_ft_underlay_qp *uqp;
struct mlx5_flow_table *new_root_ft = NULL;
+ int err = 0;
+ u32 qpn;
if (root->root_ft != ft)
return 0;
new_root_ft = find_next_ft(ft);
- if (new_root_ft) {
- int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
- root->underlay_qpn);
- if (err) {
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
- return err;
+ if (!new_root_ft) {
+ root->root_ft = NULL;
+ return 0;
+ }
+
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (use zero) when the QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
+ false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
+ qpn, false);
+ if (err)
+ break;
}
}
- root->root_ft = new_root_ft;
+
+ if (err)
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
+ else
+ root->root_ft = new_root_ft;
+
return 0;
}
@@ -1817,7 +2064,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, 1, NULL);
+ tree_init_node(&fs_prio->node, NULL, NULL);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -1843,7 +2090,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -1965,10 +2212,12 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
root_ns->dev = steering->dev;
root_ns->table_type = table_type;
+ INIT_LIST_HEAD(&root_ns->underlay_qpns);
+
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2066,8 +2315,10 @@ static void clean_tree(struct fs_node *node)
struct fs_node *iter;
struct fs_node *temp;
+ tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
+ tree_put_node(node);
tree_remove_node(node);
}
}
@@ -2091,6 +2342,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
kfree(steering);
}
@@ -2196,6 +2449,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -2245,17 +2508,76 @@ err:
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *new_uqp;
+ int err = 0;
+
+ new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
+ if (!new_uqp)
+ return -ENOMEM;
+
+ mutex_lock(&root->chain_lock);
+
+ if (!root->root_ft) {
+ err = -EINVAL;
+ goto update_ft_fail;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
+ underlay_qpn, err);
+ goto update_ft_fail;
+ }
+
+ new_uqp->qpn = underlay_qpn;
+ list_add_tail(&new_uqp->list, &root->underlay_qpns);
+
+ mutex_unlock(&root->chain_lock);
- root->underlay_qpn = underlay_qpn;
return 0;
+
+update_ft_fail:
+ mutex_unlock(&root->chain_lock);
+ kfree(new_uqp);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *uqp;
+ bool found = false;
+ int err = 0;
+
+ mutex_lock(&root->chain_lock);
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ if (uqp->qpn == underlay_qpn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
+ underlay_qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
+ if (err)
+ mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
+ underlay_qpn, err);
+
+ list_del(&uqp->list);
+ mutex_unlock(&root->chain_lock);
+ kfree(uqp);
- root->underlay_qpn = 0;
return 0;
+
+out:
+ mutex_unlock(&root->chain_lock);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
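
The two functions above replace the single underlay_qpn field with a mutex-protected list, allocating the list node before taking chain_lock and unlocking before freeing on error. A stripped-down userspace model of the add path; the uqp list, add_qpn(), and the pthread mutex are all illustrative, not driver code:

#include <stdlib.h>
#include <pthread.h>

struct uqp { unsigned qpn; struct uqp *next; };

static struct uqp *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int add_qpn(unsigned qpn)
{
	struct uqp *n = calloc(1, sizeof(*n));	/* allocate outside the lock */

	if (!n)
		return -1;

	pthread_mutex_lock(&lock);
	/* a real implementation programs the root flow table here and
	 * takes an error path, unlocking then freeing, if that fails */
	n->qpn = qpn;
	n->next = head;
	head = n;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return add_qpn(7) ? 1 : 0;
}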
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 48dd78975062..397d24a621a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -33,6 +33,7 @@
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
+#include <linux/refcount.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
@@ -66,6 +67,8 @@ enum fs_fte_status {
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
+ struct kmem_cache *fgs_cache;
+ struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
@@ -81,9 +84,12 @@ struct fs_node {
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
- struct mutex lock;
- atomic_t refcount;
- void (*remove_func)(struct fs_node *);
+ struct rw_semaphore lock;
+ refcount_t refcount;
+ bool active;
+ void (*del_hw_func)(struct fs_node *);
+ void (*del_sw_func)(struct fs_node *);
+ atomic_t version;
};
struct mlx5_flow_rule {
@@ -120,7 +126,6 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
- struct ida fte_allocator;
struct rhltable fgs_hash;
};
@@ -147,6 +152,11 @@ struct mlx5_fc {
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+struct mlx5_ft_underlay_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
@@ -200,6 +210,7 @@ struct mlx5_flow_group {
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
+ struct ida fte_allocator;
u32 id;
struct rhashtable ftes_hash;
struct rhlist_head hash;
@@ -212,7 +223,7 @@ struct mlx5_flow_root_namespace {
struct mlx5_flow_table *root_ft;
/* Should be held when chaining flow tables */
struct mutex chain_lock;
- u32 underlay_qpn;
+ struct list_head underlay_qpns;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
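
The fs_node changes split teardown into del_hw_func (firmware-facing, guarded by the new active flag) and del_sw_func (unlink and free), which is what lets tree_put_node() issue the HW command before taking the parent lock. A minimal model of that two-phase destructor idea; every name in it is invented:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int active;
	void (*del_hw)(struct node *);
	void (*del_sw)(struct node *);
};

static void demo_del_hw(struct node *n)
{
	if (n->active)		/* skip the FW command if never created in HW */
		printf("destroy object in firmware\n");
}

static void demo_del_sw(struct node *n)
{
	printf("unlink and free\n");
	free(n);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->active = 1;
	n->del_hw = demo_del_hw;	/* HW phase runs first, lock-free */
	n->del_sw = demo_del_sw;	/* SW phase runs under the parent lock */
	n->del_hw(n);
	n->del_sw(n);
	return 0;
}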
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2c71557d1cee..5ef1b56b6a96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -106,6 +106,13 @@ static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
MLX5_MCAM_REGS_FIRST_128);
}
+static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_qcam_reg(dev, dev->caps.qcam,
+ MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_QCAM_REGS_FIRST_128);
+}
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -182,6 +189,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, qcam_reg))
+ mlx5_get_qcam_reg(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index db86e1506c8b..1a0e797ad001 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -285,9 +285,9 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
-static void poll_health(unsigned long data)
+static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
+ struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
u32 count;
@@ -320,15 +320,13 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- init_timer(&health->timer);
+ timer_setup(&health->timer, poll_health, 0);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.data = (unsigned long)dev;
- health->timer.function = poll_health;
health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
add_timer(&health->timer);
}
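
The health.c hunk is the stock timer_setup()/from_timer() conversion: instead of stashing the device pointer in timer.data, the callback receives the timer pointer and recovers its container via container_of(). A userspace sketch of the same recovery trick; struct timer and struct dev here are illustrative, not the kernel types:

#include <stddef.h>
#include <stdio.h>

struct timer { void (*fn)(struct timer *); };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dev { int health_counter; struct timer timer; };

static void poll_health(struct timer *t)
{
	/* equivalent of from_timer(dev, t, priv.health.timer) */
	struct dev *d = container_of(t, struct dev, timer);

	printf("counter=%d\n", d->health_counter);
}

int main(void)
{
	struct dev d = { .health_counter = 42 };

	d.timer.fn = poll_health;	/* timer_setup() equivalent */
	d.timer.fn(&d.timer);		/* the kernel fires this on expiry */
	return 0;
}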
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 43c126c63955..6f338a9219c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -250,3 +250,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
+
+const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 145e392ab849..d2a66dc4adc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -40,8 +40,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
-static int mlx5i_dev_init(struct net_device *dev);
-static void mlx5i_dev_cleanup(struct net_device *dev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -70,10 +68,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
@@ -108,11 +106,69 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv)
/* Do nothing .. */
}
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_qp *qp = &ipriv->qp;
+ struct mlx5_qp_context *context;
+ int ret;
+
+ /* QP states */
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
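+	/* The path-migration state (PM) field sits at bits 12:11 of the QPC flags */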
+ context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ context->pri_path.port = 1;
+ context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
+ context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+ memset(context, 0, sizeof(*context));
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ kfree(context);
+ return 0;
+
+err_qp_modify_to_err:
+	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
+ kfree(context);
+ return ret;
+}
+
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_qp_context context;
+ int err;
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
+ &ipriv->qp);
+ if (err)
+ mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+}
+
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
- struct mlx5_qp_context *context = NULL;
u32 *in = NULL;
void *addr_path;
int ret = 0;
@@ -140,43 +196,12 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
goto out;
}
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = -ENOMEM;
- goto out;
- }
-
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto out;
- }
- memset(context, 0, sizeof(*context));
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto out;
- }
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto out;
- }
-
out:
- kfree(context);
kvfree(in);
return ret;
}
-static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
mlx5_core_destroy_qp(mdev, qp);
}
@@ -195,10 +220,14 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
- return err;
+ goto err_destroy_underlay_qp;
}
return 0;
+
+err_destroy_underlay_qp:
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ return err;
}
static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
@@ -226,15 +255,24 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
+ err = mlx5e_create_inner_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
+
err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
- goto err_destroy_arfs_tables;
+ goto err_destroy_inner_ttc_table;
}
return 0;
+err_destroy_inner_ttc_table:
+ mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -244,12 +282,12 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
err = mlx5e_create_indirect_rqt(priv);
@@ -268,18 +306,12 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
- if (err)
- goto err_destroy_direct_tirs;
-
err = mlx5i_create_flow_steering(priv);
if (err)
- goto err_remove_rx_underlay_qpn;
+ goto err_destroy_direct_tirs;
return 0;
-err_remove_rx_underlay_qpn:
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
@@ -293,9 +325,6 @@ err_destroy_indirect_rqts:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
-
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
@@ -351,7 +380,7 @@ out:
return err;
}
-static int mlx5i_dev_init(struct net_device *dev)
+int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -361,6 +390,9 @@ static int mlx5i_dev_init(struct net_device *dev)
dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ /* Add QPN to net-device mapping to HT */
+	mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);
+
return 0;
}
@@ -378,63 +410,84 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-static void mlx5i_dev_cleanup(struct net_device *dev)
+void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_qp_context context;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_uninit_underlay_qp(priv);
- /* detach qp from flow-steering by reset it */
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp);
+ /* Delete QPN to net-device mapping from HT */
+ mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}
static int mlx5i_open(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
int err;
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- set_bit(MLX5E_STATE_OPENED, &priv->state);
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
- err = mlx5e_open_channels(priv, &priv->channels);
- if (err)
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
goto err_clear_state_opened_flag;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
+ goto err_reset_qp;
+ }
- mlx5e_refresh_tirs(priv, false);
- mlx5e_activate_priv_channels(priv);
- mlx5e_timestamp_init(priv);
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err)
+ goto err_remove_fs_underlay_qp;
- mutex_unlock(&priv->state_lock);
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mlx5e_timestamp_set(epriv);
+
+ mutex_unlock(&epriv->state_lock);
return 0;
+err_remove_fs_underlay_qp:
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_reset_qp:
+ mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mutex_unlock(&priv->state_lock);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
return err;
}
static int mlx5i_close(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g RX/TX queue size change) that involves close&open failed.
*/
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
goto unlock;
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
- mlx5e_timestamp_cleanup(priv);
- netif_carrier_off(priv->netdev);
- mlx5e_deactivate_priv_channels(priv);
- mlx5e_close_channels(&priv->channels);
+ netif_carrier_off(epriv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(epriv);
+ mlx5e_deactivate_priv_channels(epriv);
+	mlx5e_close_channels(&epriv->channels);
unlock:
- mutex_unlock(&priv->state_lock);
+ mutex_unlock(&epriv->state_lock);
return 0;
}
@@ -492,6 +545,13 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}
+static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ ipriv->pkey_index = (u16)id;
+}
+
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
@@ -510,12 +570,13 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
const char *name,
void (*setup)(struct net_device *))
{
- const struct mlx5e_profile *profile = &mlx5i_nic_profile;
- int nch = profile->max_nch(mdev);
+ const struct mlx5e_profile *profile;
struct net_device *netdev;
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
+ bool sub_interface;
+ int nch;
int err;
if (mlx5i_check_required_hca_cap(mdev)) {
@@ -523,10 +584,15 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
return ERR_PTR(-EOPNOTSUPP);
}
- /* This function should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
- if (err)
- return NULL;
+	/* TODO: Need to find a better way to check if this is a child device */
+ sub_interface = (mdev->mlx5e_res.pdn != 0);
+
+ if (sub_interface)
+ profile = mlx5i_pkey_get_profile();
+ else
+ profile = &mlx5i_nic_profile;
+
+ nch = profile->max_nch(mdev);
netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
name, NET_NAME_UNKNOWN,
@@ -535,7 +601,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
nch);
if (!netdev) {
mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- goto free_mdev_resources;
+ return NULL;
}
ipriv = netdev_priv(netdev);
@@ -545,6 +611,20 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
if (!epriv->wq)
goto err_free_netdev;
+ ipriv->sub_interface = sub_interface;
+ if (!ipriv->sub_interface) {
+ err = mlx5i_pkey_qpn_ht_init(netdev);
+ if (err) {
+ mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
+ goto destroy_wq;
+ }
+
+ /* This should only be called once per mdev */
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ goto destroy_ht;
+ }
+
profile->init(mdev, netdev, profile, ipriv);
mlx5e_attach_netdev(epriv);
@@ -556,13 +636,16 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
rn->send = mlx5i_xmit;
rn->attach_mcast = mlx5i_attach_mcast;
rn->detach_mcast = mlx5i_detach_mcast;
+ rn->set_id = mlx5i_set_pkey_index;
return netdev;
+destroy_ht:
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+destroy_wq:
+ destroy_workqueue(epriv->wq);
err_free_netdev:
free_netdev(netdev);
-free_mdev_resources:
- mlx5e_destroy_mdev_resources(mdev);
return NULL;
}
@@ -570,15 +653,18 @@ EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
void mlx5_rdma_netdev_free(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
const struct mlx5e_profile *profile = priv->profile;
- struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
destroy_workqueue(priv->wq);
- free_netdev(netdev);
- mlx5e_destroy_mdev_resources(mdev);
+ if (!ipriv->sub_interface) {
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ mlx5e_destroy_mdev_resources(priv->mdev);
+ }
+ free_netdev(netdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
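
The reworked mlx5i_open() above is a clean instance of the kernel's stacked-goto unwind idiom: each acquisition (underlay QP init, QPN attach to flow steering, channel open) gets a label that releases it, and a failure at step N falls through the releases of steps N-1..1 in reverse order. A generic sketch of the shape, all names being placeholders:

struct ctx;	/* placeholder context type */

int init_qp(struct ctx *c);		void uninit_qp(struct ctx *c);
int attach_qpn(struct ctx *c);		void detach_qpn(struct ctx *c);
int open_channels(struct ctx *c);

static int bring_up(struct ctx *c)
{
	int err;

	err = init_qp(c);
	if (err)
		return err;		/* nothing to unwind yet */

	err = attach_qpn(c);
	if (err)
		goto err_uninit_qp;

	err = open_channels(c);
	if (err)
		goto err_detach_qpn;

	return 0;

err_detach_qpn:				/* labels stack in reverse order */
	detach_qpn(c);
err_uninit_qp:
	uninit_qp(c);
	return err;
}
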
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index a0f405f520f7..49008022c306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -39,6 +39,7 @@
#define MLX5I_MAX_NUM_TC 1
extern const struct ethtool_ops mlx5i_ethtool_ops;
+extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
#define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4
@@ -49,10 +50,45 @@ extern const struct ethtool_ops mlx5i_ethtool_ops;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
struct mlx5_core_qp qp;
+ bool sub_interface;
u32 qkey;
+ u16 pkey_index;
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
char *mlx5e_priv[0];
};
+/* Underlay QP create/destroy functions */
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+
+/* Underlay QP state modification init/uninit functions */
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv);
+
+/* Allocate/Free underlay QPN to net-device hash table */
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev);
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev);
+
+/* Add/Remove an underlay QPN to net-device mapping to/from the hash table */
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn);
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
+
+/* Get the net-device corresponding to the given underlay QPN */
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
+
+/* Shared ndo functions */
+int mlx5i_dev_init(struct net_device *dev);
+void mlx5i_dev_cleanup(struct net_device *dev);
+
+/* Parent profile functions */
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+
+/* Get child interface nic profile */
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
+
/* Extract mlx5e_priv from IPoIB netdev */
#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
new file mode 100644
index 000000000000..531b02cc979b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hash.h>
+#include "ipoib.h"
+
+#define MLX5I_MAX_LOG_PKEY_SUP 7
+
+struct qpn_to_netdev {
+ struct net_device *netdev;
+ struct hlist_node hlist;
+ u32 underlay_qpn;
+};
+
+struct mlx5i_pkey_qpn_ht {
+ struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];
+ spinlock_t ht_lock; /* Synchronise with NAPI */
+};
+
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
+
+ qpn_htbl = kzalloc(sizeof(*qpn_htbl), GFP_KERNEL);
+ if (!qpn_htbl)
+ return -ENOMEM;
+
+ ipriv->qpn_htbl = qpn_htbl;
+ spin_lock_init(&qpn_htbl->ht_lock);
+
+ return 0;
+}
+
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ kfree(ipriv->qpn_htbl);
+}
+
+static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,
+ u32 qpn)
+{
+ struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
+ struct qpn_to_netdev *node;
+
+ hlist_for_each_entry(node, h, hlist) {
+ if (node->underlay_qpn == qpn)
+ return node;
+ }
+
+ return NULL;
+}
+
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP);
+ struct qpn_to_netdev *new_node;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->netdev = netdev;
+ new_node->underlay_qpn = qpn;
+ spin_lock_bh(&ht->ht_lock);
+ hlist_add_head(&new_node->hlist, &ht->buckets[key]);
+ spin_unlock_bh(&ht->ht_lock);
+
+ return 0;
+}
+
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
+ if (!node) {
+ mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ht->ht_lock);
+ hlist_del_init(&node->hlist);
+ spin_unlock_bh(&ht->ht_lock);
+ kfree(node);
+
+ return 0;
+}
+
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
+ if (!node)
+ return NULL;
+
+ return node->netdev;
+}
+
+static int mlx5i_pkey_open(struct net_device *netdev);
+static int mlx5i_pkey_close(struct net_device *netdev);
+static int mlx5i_pkey_dev_init(struct net_device *dev);
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+
+static const struct net_device_ops mlx5i_pkey_netdev_ops = {
+ .ndo_open = mlx5i_pkey_open,
+ .ndo_stop = mlx5i_pkey_close,
+ .ndo_init = mlx5i_pkey_dev_init,
+ .ndo_uninit = mlx5i_pkey_dev_cleanup,
+ .ndo_change_mtu = mlx5i_pkey_change_mtu,
+};
+
+/* Child NDOs */
+static int mlx5i_pkey_dev_init(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv, *parent_ipriv;
+ struct net_device *parent_dev;
+ int parent_ifindex;
+
+ ipriv = priv->ppriv;
+
+ /* Get QPN to netdevice hash table from parent */
+ parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
+ parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+ if (!parent_dev) {
+ mlx5_core_warn(priv->mdev, "failed to get parent device\n");
+ return -EINVAL;
+ }
+
+ parent_ipriv = netdev_priv(parent_dev);
+ ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
+ dev_put(parent_dev);
+
+ return mlx5i_dev_init(dev);
+}
+
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
+{
+	mlx5i_dev_cleanup(netdev);
+}
+
+static int mlx5i_pkey_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ int err;
+
+ mutex_lock(&epriv->state_lock);
+
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
+
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n", err);
+ goto err_release_lock;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
+		goto err_uninit_underlay_qp;
+ }
+
+ err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
+		goto err_remove_rx_underlay_qp;
+ }
+
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err) {
+ mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
+		goto err_destroy_tis;
+ }
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mutex_unlock(&epriv->state_lock);
+
+ return 0;
+
+err_destroy_tis:
+	mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+err_remove_rx_underlay_qp:
+	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_uninit_underlay_qp:
+ mlx5i_uninit_underlay_qp(epriv);
+err_release_lock:
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
+ return err;
+}
+
+static int mlx5i_pkey_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ netif_carrier_off(priv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(priv);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+ mlx5e_destroy_tis(mdev, priv->tisn[0]);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+/* Called directly after IPoIB netdevice was created to initialize SW structs */
+static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mlx5i_init(mdev, netdev, profile, ppriv);
+
+ /* Override parent ndo */
+ netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
+
+ /* Set child limited ethtool support */
+ netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
+
+ /* Use dummy rqs */
+ priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+}
+
+/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
+static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
+{
+ /* Do nothing .. */
+}
+
+static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+}
+
+static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
+{
+ /* Since the rx resources are shared between child and parent, the
+ * parent interface is taking care of rx resource allocation and init
+ */
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_rx(struct mlx5e_priv *priv)
+{
+ /* Since the rx resources are shared between child and parent, the
+ * parent interface is taking care of rx resource free and de-init
+ */
+}
+
+static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
+ .init = mlx5i_pkey_init,
+ .cleanup = mlx5i_pkey_cleanup,
+ .init_tx = mlx5i_pkey_init_tx,
+ .cleanup_tx = mlx5i_pkey_cleanup_tx,
+ .init_rx = mlx5i_pkey_init_rx,
+ .cleanup_rx = mlx5i_pkey_cleanup_rx,
+ .enable = NULL,
+ .disable = NULL,
+ .update_stats = NULL,
+ .max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .max_tc = MLX5I_MAX_NUM_TC,
+};
+
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
+{
+ return &mlx5i_pkey_nic_profile;
+}
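
The QPN hash table implemented above exists so that the shared RX path can steer a completion back to the child netdev that owns its underlay QPN. A hedged sketch of such a lookup (mlx5i_demux_rx and the cqe_qpn parameter are illustrative names; the real hook lives in the IPoIB RX handler):

/* Illustrative only: pick the net_device for an incoming completion.
 * 'parent' is the parent IPoIB netdev whose priv holds the hash table;
 * 'cqe_qpn' is the destination QPN extracted from the CQE.
 */
static struct net_device *mlx5i_demux_rx(struct net_device *parent,
					 u32 cqe_qpn)
{
	struct net_device *child = mlx5i_pkey_get_netdev(parent, cqe_qpn);

	/* Fall back to the parent if the QPN has no mapping */
	return child ? child : parent;
}
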
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
new file mode 100644
index 000000000000..fa8aed62b231
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/clocksource.h>
+#include "en.h"
+
+enum {
+ MLX5_CYCLES_SHIFT = 23
+};
+
+enum {
+ MLX5_PIN_MODE_IN = 0x0,
+ MLX5_PIN_MODE_OUT = 0x1,
+};
+
+enum {
+ MLX5_OUT_PATTERN_PULSE = 0x0,
+ MLX5_OUT_PATTERN_PERIODIC = 0x1,
+};
+
+enum {
+ MLX5_EVENT_MODE_DISABLE = 0x0,
+ MLX5_EVENT_MODE_REPETETIVE = 0x1,
+ MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+};
+
+enum {
+ MLX5_MTPPS_FS_ENABLE = BIT(0x0),
+ MLX5_MTPPS_FS_PATTERN = BIT(0x2),
+ MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
+ MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
+ MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+};
+
+static u64 read_internal_timer(const struct cyclecounter *cc)
+{
+ struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+
+ return mlx5_read_internal_timer(mdev) & cc->mask;
+}
+
+static void mlx5_pps_out(struct work_struct *work)
+{
+ struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
+ out_work);
+ struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
+ pps_info);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ u64 tstart;
+
+ write_lock_irqsave(&clock->lock, flags);
+ tstart = clock->pps_info.start[i];
+ clock->pps_info.start[i] = 0;
+ write_unlock_irqrestore(&clock->lock, flags);
+ if (!tstart)
+ continue;
+
+ MLX5_SET(mtpps_reg, in, pin, i);
+ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+ MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
+ mlx5_set_mtpps(mdev, in, sizeof(in));
+ }
+}
+
+static void mlx5_timestamp_overflow(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
+ overflow_work);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+ schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+}
+
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_init(&clock->tc, &clock->cycles, ns);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns;
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ ns = timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_adjtime(&clock->tc, delta);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ adj = clock->nominal_c_mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
+ clock->nominal_c_mult + diff;
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_extts_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ pin_mode = MLX5_PIN_MODE_IN;
+ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE;
+ } else {
+ pin = rq->extts.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5_perout_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u64 nsec_now, nsec_delta, time_stamp = 0;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+ s64 ns;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+
+ pin_mode = MLX5_PIN_MODE_OUT;
+ pattern = MLX5_OUT_PATTERN_PERIODIC;
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+
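+		/* Only a 1 sec period (a 1 Hz output) passes this check */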
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(mdev);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ write_unlock_irqrestore(&clock->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE |
+ MLX5_MTPPS_FS_TIME_STAMP;
+ } else {
+ pin = rq->perout.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+
+ clock->pps_info.enabled = !!on;
+ return 0;
+}
+
+static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mlx5_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5_perout_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return mlx5_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
+static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlx5_p2p",
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = mlx5_ptp_adjfreq,
+ .adjtime = mlx5_ptp_adjtime,
+ .gettime64 = mlx5_ptp_gettime,
+ .settime64 = mlx5_ptp_settime,
+ .enable = NULL,
+ .verify = NULL,
+};
+
+static int mlx5_init_pin_config(struct mlx5_clock *clock)
+{
+ int i;
+
+	clock->ptp_info.pin_config =
+		kcalloc(clock->ptp_info.n_pins,
+			sizeof(*clock->ptp_info.pin_config), GFP_KERNEL);
+ if (!clock->ptp_info.pin_config)
+ return -ENOMEM;
+ clock->ptp_info.enable = mlx5_ptp_enable;
+ clock->ptp_info.verify = mlx5_ptp_verify;
+ clock->ptp_info.pps = 1;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ snprintf(clock->ptp_info.pin_config[i].name,
+ sizeof(clock->ptp_info.pin_config[i].name),
+ "mlx5_pps%d", i);
+ clock->ptp_info.pin_config[i].index = i;
+ clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
+ clock->ptp_info.pin_config[i].chan = i;
+ }
+
+ return 0;
+}
+
+static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ mlx5_query_mtpps(mdev, out, sizeof(out));
+
+ clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+ cap_number_of_pps_pins);
+ clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_in_pins);
+ clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+ clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5_pps_event(struct mlx5_core_dev *mdev,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ u64 nsec_now, nsec_delta;
+ u64 cycles_now, cycles_delta;
+ int pin = eqe->data.pps.pin;
+ s64 ns;
+ unsigned long flags;
+
+ switch (clock->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ if (clock->pps_info.enabled) {
+ ptp_event.type = PTP_CLOCK_PPSUSR;
+ ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ } else {
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ }
+ ptp_clock_event(clock->ptp, &ptp_event);
+ break;
+ case PTP_PF_PEROUT:
+ mlx5_ptp_gettime(&clock->ptp_info, &ts);
+ cycles_now = mlx5_read_internal_timer(mdev);
+ ts.tv_sec += 1;
+ ts.tv_nsec = 0;
+ ns = timespec64_to_ns(&ts);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ clock->pps_info.start[pin] = cycles_now + cycles_delta;
+ schedule_work(&clock->pps_info.out_work);
+ write_unlock_irqrestore(&clock->lock, flags);
+ break;
+ default:
+ mlx5_core_err(mdev, " Unhandled event\n");
+ }
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u64 ns;
+ u64 frac = 0;
+ u32 dev_freq;
+
+ dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ if (!dev_freq) {
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return;
+ }
+ rwlock_init(&clock->lock);
+ clock->cycles.read = read_internal_timer;
+ clock->cycles.shift = MLX5_CYCLES_SHIFT;
+ clock->cycles.mult = clocksource_khz2mult(dev_freq,
+ clock->cycles.shift);
+ clock->nominal_c_mult = clock->cycles.mult;
+ clock->cycles.mask = CLOCKSOURCE_MASK(41);
+
+ timecounter_init(&clock->tc, &clock->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+	/* Calculate the overflow watchdog period, in jiffies, to make sure
+	 * the counter is read at least once per wrap around.
+	 */
+ ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ frac, &frac);
+ do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ clock->overflow_period = ns;
+
+ INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
+ INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
+ if (clock->overflow_period)
+ schedule_delayed_work(&clock->overflow_work, 0);
+ else
+ mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
+
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ /* Initialize 1PPS data structures */
+ if (MLX5_PPS_CAP(mdev))
+ mlx5_get_pps_caps(mdev);
+ if (clock->ptp_info.n_pins)
+ mlx5_init_pin_config(clock);
+
+ clock->ptp = ptp_clock_register(&clock->ptp_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(clock->ptp)) {
+ mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(clock->ptp));
+ clock->ptp = NULL;
+ }
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (clock->ptp) {
+ ptp_clock_unregister(clock->ptp);
+ clock->ptp = NULL;
+ }
+
+ cancel_work_sync(&clock->pps_info.out_work);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock->ptp_info.pin_config);
+}
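
mlx5_ptp_adjfreq() above applies the standard PHC frequency trim: the cyclecounter multiplier is moved off its nominal value by nominal_mult * |delta| / 10^9, where delta is in parts per billion. A standalone illustration of just that arithmetic (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the driver's adjfreq arithmetic: scale the nominal
 * multiplier by |delta| parts-per-billion and apply the signed trim.
 */
static uint32_t trimmed_mult(uint32_t nominal_mult, int32_t delta_ppb)
{
	int neg = delta_ppb < 0;
	uint64_t adj = (uint64_t)nominal_mult *
		       (neg ? -(int64_t)delta_ppb : delta_ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
	/* e.g. slow the clock by 100 ppm against an arbitrary nominal mult */
	printf("%u\n", trimmed_mult(4194304U, -100000));
	return 0;
}

In the driver, the write lock and the timecounter_read() around the mult update keep the timecounter consistent while the rate changes.
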
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
new file mode 100644
index 000000000000..a8eecedd46c2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_CLOCK_H__
+#define __LIB_CLOCK_H__
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev);
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ u64 nsec;
+
+ read_lock(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ read_unlock(&clock->lock);
+
+ return ns_to_ktime(nsec);
+}
+
+#endif
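
The inline helper above is what fast-path code would use to convert a raw hardware timestamp (for example one carried in a CQE) into a ktime_t. A hedged sketch of a caller (the function name and the cqe_ts argument are illustrative; <linux/skbuff.h> provides skb_hwtstamps()):

/* Illustrative: stamp a received skb with the HW timestamp from its
 * CQE, converted through the shared mlx5 timecounter.
 */
static void mlx5_rx_hwtstamp(struct mlx5_clock *clock, struct sk_buff *skb,
			     u64 cqe_ts)
{
	skb_hwtstamps(skb)->hwtstamp = mlx5_timecounter_cyc2time(clock, cqe_ts);
}
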
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06562c9a6b9c..5f323442cc5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -59,6 +59,7 @@
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "accel/ipsec.h"
+#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -889,6 +890,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_reserved_gids(dev);
+ mlx5_init_clock(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -949,6 +952,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b7c2900b75f9..ff4a0b889a6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -93,6 +93,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
@@ -121,6 +122,8 @@ int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e07061f565d6..c37d00cd472a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -98,6 +98,18 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
}
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(qcam_reg);
+
+ MLX5_SET(qcam_reg, in, feature_group, feature_group);
+ MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0);
+}
+
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -959,3 +971,102 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 1);
}
+
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
+ MLX5_SET(qpts_reg, in, local_port, 1);
+ MLX5_SET(qpts_reg, in, trust_state, trust_state);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 1);
+ return err;
+}
+
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
+ MLX5_SET(qpts_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 0);
+ if (!err)
+ *trust_state = MLX5_GET(qpts_reg, out, trust_state);
+
+ return err;
+}
+
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+ memcpy(in, out, sz);
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+
+ /* Update the corresponding dscp entry */
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[dscp]);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, prio);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+
+/* dscp2prio[i]: the priority that dscp value i is mapped to */
+#define MLX5E_SUPPORTED_DSCP 64
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+ int i;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+	for (i = 0; i < MLX5E_SUPPORTED_DSCP; i++) {
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]);
+ dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio);
+ }
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
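
The QPTS/QPDPM helpers added above are the building blocks for DSCP-based QoS: switch the port's trust state, then program entries of the 64-deep DSCP-to-priority map. A hedged usage sketch (MLX5_QPTS_TRUST_DSCP is assumed to be the DSCP trust value defined alongside these helpers; error handling trimmed):

/* Sketch: trust DSCP on the port and map EF traffic (dscp 46) to
 * priority 5. The trust-state constant is an assumption here; check
 * the mlx5 port headers for the authoritative values.
 */
static int mlx5_demo_trust_dscp(struct mlx5_core_dev *mdev)
{
	int err;

	err = mlx5_set_trust_state(mdev, MLX5_QPTS_TRUST_DSCP);
	if (err)
		return err;

	return mlx5_set_dscp2prio(mdev, 46, 5);
}
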
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9a5a1cc877b6..9463c3fa254f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -18,7 +18,9 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o
+ spectrum_ipip.o spectrum_acl_flex_actions.o \
+ spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum_qdisc.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5ae110172c22..6a979a09ab72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -399,23 +399,25 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
-void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
- if (WARN_ON(block->finished))
- return;
+ if (block->finished)
+ return -EINVAL;
mlxsw_afa_set_goto_set(block->cur_set,
MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
block->finished = true;
+ return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);
-void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
- if (WARN_ON(block->finished))
- return;
+ if (block->finished)
+ return -EINVAL;
mlxsw_afa_set_goto_set(block->cur_set,
MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
block->finished = true;
+ return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);
@@ -674,6 +676,7 @@ enum mlxsw_afa_trapdisc_trap_action {
MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
enum mlxsw_afa_trapdisc_forward_action {
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};
@@ -712,7 +715,7 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
-int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_TRAPDISC_CODE,
@@ -722,11 +725,27 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
return -ENOBUFS;
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
- MLXSW_TRAP_ID_ACL0);
+ trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
+ trap_id);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -891,3 +910,74 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
+
+/* MC Routing Action
+ * -----------------
+ * The multicast router action. Used by RMFT_V2, the Router Multicast
+ * Forwarding Table Version 2 register.
+ */
+
+#define MLXSW_AFA_MCROUTER_CODE 0x10
+#define MLXSW_AFA_MCROUTER_SIZE 2
+
+enum mlxsw_afa_mcrouter_rpf_action {
+ MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
+};
+
+/* afa_mcrouter_rpf_action */
+MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);
+
+/* afa_mcrouter_expected_irif */
+MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);
+
+/* afa_mcrouter_min_mtu */
+MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);
+
+enum mlxsw_afa_mrouter_vrmid {
+ MLXSW_AFA_MCROUTER_VRMID_INVALID,
+ MLXSW_AFA_MCROUTER_VRMID_VALID
+};
+
+/* afa_mcrouter_vrmid
+ * Valid RMID: rigr_rmid_index is used as RMID
+ */
+MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);
+
+/* afa_mcrouter_rigr_rmid_index
+ * When the vrmid field is set to invalid, the field is used as pointer to
+ * Router Interface Group (RIGR) Table in the KVD linear.
+ * When the vrmid is set to valid, the field is used as RMID index, ranged
+ * from 0 to max_mid - 1. The index is to the Port Group Table.
+ */
+MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_afa_mcrouter_pack(char *payload,
+ enum mlxsw_afa_mcrouter_rpf_action rpf_action,
+ u16 expected_irif, u16 min_mtu,
+ enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)
+{
+ mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
+ mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
+ mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
+ mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
+ mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
+}
+
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_MCROUTER_CODE,
+ MLXSW_AFA_MCROUTER_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ expected_irif, min_mtu, rmid_valid, kvdl_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index f99c341b2497..a8d3314c3a24 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -57,10 +57,12 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
-void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
-void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
-int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
@@ -68,5 +70,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index);
#endif
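
With the new append helper, a multicast route's action block is built through the usual afa sequence: create a block, append the mcrouter action, commit, and hand the encoded first set to the register that binds it. A condensed sketch against the exported API (error handling trimmed; the afa handle comes from the driver's earlier mlxsw_afa_create() setup):

/* Sketch: build a committed action block that multicast-routes via a
 * RIGR list at 'kvdl_index', trapping on an RPF mismatch against
 * 'expected_irif' (the append helper hardwires the trap RPF action).
 */
static struct mlxsw_afa_block *build_mr_block(struct mlxsw_afa *afa,
					      u16 expected_irif, u16 min_mtu,
					      u32 kvdl_index)
{
	struct mlxsw_afa_block *block;

	block = mlxsw_afa_block_create(afa);
	if (!block)
		return NULL;

	if (mlxsw_afa_block_append_mcrouter(block, expected_irif, min_mtu,
					    false, kvdl_index) ||
	    mlxsw_afa_block_commit(block)) {
		mlxsw_afa_block_destroy(block);
		return NULL;
	}

	/* mlxsw_afa_block_first_set(block) now yields the encoded actions */
	return block;
}
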
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5acfbe5b8b9d..6c4e08b8058a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1758,6 +1758,191 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* CWTP - Congestion WRED ECN TClass Profile
+ * ----------------------------------------
+ * Configures the profiles for queues of egress port and traffic class
+ */
+#define MLXSW_REG_CWTP_ID 0x2802
+#define MLXSW_REG_CWTP_BASE_LEN 0x28
+#define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08
+#define MLXSW_REG_CWTP_LEN 0x40
+
+MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
+
+/* reg_cwtp_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+
+/* reg_cwtp_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8);
+
+/* reg_cwtp_profile_min
+ * Minimum Average Queue Size of the profile in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false);
+
+/* reg_cwtp_profile_percent
+ * Percentage of WRED and ECN marking for maximum Average Queue size
+ * Range is 0 to 100, units of integer percentage
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN,
+ 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+/* reg_cwtp_profile_max
+ * Maximum Average Queue size of the profile in cells
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+#define MLXSW_REG_CWTP_MIN_VALUE 64
+#define MLXSW_REG_CWTP_MAX_PROFILE 2
+#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
+
+static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+ u8 traffic_class)
+{
+ int i;
+
+ MLXSW_REG_ZERO(cwtp, payload);
+ mlxsw_reg_cwtp_local_port_set(payload, local_port);
+ mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class);
+
+ for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) {
+ mlxsw_reg_cwtp_profile_min_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ mlxsw_reg_cwtp_profile_max_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ }
+}
+
+#define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) (profile - 1)
+
+static inline void
+mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max,
+ u32 probability)
+{
+ u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile);
+
+ mlxsw_reg_cwtp_profile_min_set(payload, index, min);
+ mlxsw_reg_cwtp_profile_max_set(payload, index, max);
+ mlxsw_reg_cwtp_profile_percent_set(payload, index, probability);
+}
+
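Editorial sketch, not part of the patch: a minimal example of how a caller might program one of these profiles, assuming the standard mlxsw_reg_write() transport used elsewhere in the driver. Port, traffic class and threshold values are illustrative.

/* Editorial sketch: program the default profile on local port 1,
 * traffic class 0, with a 128..1024 cell window and a 10% marking
 * probability at the maximum average queue size.
 */
static int example_cwtp_setup(struct mlxsw_core *mlxsw_core)
{
	char cwtp_pl[MLXSW_REG_CWTP_LEN];

	/* Zeroes the payload and resets all profiles to the minimum. */
	mlxsw_reg_cwtp_pack(cwtp_pl, 1, 0);
	mlxsw_reg_cwtp_profile_pack(cwtp_pl, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    128, 1024, 10);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(cwtp), cwtp_pl);
}
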
+/* CWTPM - Congestion WRED ECN TClass and Pool Mapping
+ * ---------------------------------------------------
+ * The CWTPM register maps each egress port and traffic class to a profile.
+ */
+#define MLXSW_REG_CWTPM_ID 0x2803
+#define MLXSW_REG_CWTPM_LEN 0x44
+
+MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
+
+/* reg_cwtpm_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+
+/* reg_cwtpm_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8);
+
+/* reg_cwtpm_ew
+ * Control enablement of WRED for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1);
+
+/* reg_cwtpm_ee
+ * Control enablement of ECN for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1);
+
+/* reg_cwtpm_tcp_g
+ * TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2);
+
+/* reg_cwtpm_tcp_y
+ * TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2);
+
+/* reg_cwtpm_tcp_r
+ * TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2);
+
+/* reg_cwtpm_ntcp_g
+ * Non-TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2);
+
+/* reg_cwtpm_ntcp_y
+ * Non-TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2);
+
+/* reg_cwtpm_ntcp_r
+ * Non-TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
+
+#define MLXSW_REG_CWTPM_RESET_PROFILE 0
+
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+ u8 traffic_class, u8 profile,
+ bool wred, bool ecn)
+{
+ MLXSW_REG_ZERO(cwtpm, payload);
+ mlxsw_reg_cwtpm_local_port_set(payload, local_port);
+ mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class);
+ mlxsw_reg_cwtpm_ew_set(payload, wred);
+ mlxsw_reg_cwtpm_ee_set(payload, ecn);
+ mlxsw_reg_cwtpm_tcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_r_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
+}
+
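A matching editorial sketch for the mapping side, under the same assumptions: once CWTP has programmed a profile, CWTPM binds it to the {port, traffic class} pair and enables WRED and/or ECN.

/* Editorial sketch: bind the default profile to port 1, TC 0, with
 * both WRED dropping and ECN marking enabled for all color classes.
 */
static int example_cwtpm_bind(struct mlxsw_core *mlxsw_core)
{
	char cwtpm_pl[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_pl, 1, 0, MLXSW_REG_CWTP_DEFAULT_PROFILE,
			     true, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(cwtpm), cwtpm_pl);
}
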
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2142,15 +2327,14 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
-#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8
+#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
* Action-set to perform when rule is matched.
* Must be zero padded if action set is shorter.
* Access: RW
*/
-MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08,
- MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
const char *flex_action_set)
@@ -2243,7 +2427,7 @@ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
- MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
@@ -3124,6 +3308,7 @@ static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
*/
#define MLXSW_REG_PPCNT_ID 0x5008
#define MLXSW_REG_PPCNT_LEN 0x100
+#define MLXSW_REG_PPCNT_COUNTERS_OFFSET 0x08
MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN);
@@ -3156,8 +3341,10 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
+ MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
};
/* reg_ppcnt_grp
@@ -3173,6 +3360,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
* 0x12: Physical Layer Counters
+ * 0x13: Per Traffic Class Congestion Counters
* Access: Index
*/
MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
@@ -3201,162 +3389,179 @@ MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
- 0x08 + 0x00, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_a_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
- 0x08 + 0x08, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
/* reg_ppcnt_a_frame_check_sequence_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
- 0x08 + 0x10, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
/* reg_ppcnt_a_alignment_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
- 0x08 + 0x18, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
/* reg_ppcnt_a_octets_transmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
- 0x08 + 0x20, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
/* reg_ppcnt_a_octets_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
- 0x08 + 0x28, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
/* reg_ppcnt_a_multicast_frames_xmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
- 0x08 + 0x30, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
/* reg_ppcnt_a_broadcast_frames_xmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
- 0x08 + 0x38, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
/* reg_ppcnt_a_multicast_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
- 0x08 + 0x40, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
/* reg_ppcnt_a_broadcast_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
- 0x08 + 0x48, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
/* reg_ppcnt_a_in_range_length_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
- 0x08 + 0x50, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
/* reg_ppcnt_a_out_of_range_length_field
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
- 0x08 + 0x58, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
/* reg_ppcnt_a_frame_too_long_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
- 0x08 + 0x60, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
/* reg_ppcnt_a_symbol_error_during_carrier
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
- 0x08 + 0x68, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
/* reg_ppcnt_a_mac_control_frames_transmitted
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
- 0x08 + 0x70, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
/* reg_ppcnt_a_mac_control_frames_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
- 0x08 + 0x78, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
/* reg_ppcnt_a_unsupported_opcodes_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
- 0x08 + 0x80, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
/* reg_ppcnt_a_pause_mac_ctrl_frames_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
- 0x08 + 0x88, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
- 0x08 + 0x90, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* Ethernet Extended Counter Group Counters */
+
+/* reg_ppcnt_ecn_marked
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_rx_frames
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
/* reg_ppcnt_tx_octets
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
/* reg_ppcnt_tx_frames
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
/* reg_ppcnt_rx_pause
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
/* reg_ppcnt_rx_pause_duration
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
/* reg_ppcnt_tx_pause
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
/* reg_ppcnt_tx_pause_duration
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
/* reg_ppcnt_tx_pause_transition
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
/* Ethernet Per Traffic Group Counters */
@@ -3366,14 +3571,24 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
* The field cannot be cleared.
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_tc_no_buffer_discard_uc
* The number of unicast packets dropped due to lack of shared
* buffer resources.
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* Ethernet Per Traffic Class Congestion Group Counters */
+
+/* reg_ppcnt_wred_discard
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, wred_discard,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
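Editorial sketch of why the shared MLXSW_REG_PPCNT_COUNTERS_OFFSET works: every counter group starts its counters at the same 0x08 payload offset, so which counter a getter decodes is determined solely by the grp/prio_tc pair packed into the request. The read path via mlxsw_reg_query() is an assumption of this sketch; the function name is illustrative.

/* Editorial sketch: fetch the per-TC WRED drop counter by selecting
 * the congestion counter group at pack time.
 */
static int example_read_wred_drops(struct mlxsw_core *mlxsw_core,
				   u8 local_port, u8 tclass, u64 *p_drops)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_TC_CONG_TC, tclass);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
	if (err)
		return err;
	*p_drops = mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
	return 0;
}
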
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
@@ -3682,12 +3897,15 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
@@ -3992,6 +4210,12 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+/* reg_ritr_ipv4_mc
+ * IPv4 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
+
enum mlxsw_reg_ritr_if_type {
/* VLAN interface. */
MLXSW_REG_RITR_VLAN_IF,
@@ -4049,6 +4273,14 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+/* reg_ritr_ipv4_mc_fe
+ * IPv4 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * remains enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
+
/* reg_ritr_lb_en
* Loop-back filter enable for unicast packets.
* If the flag is set then loop-back filter for unicast packets is
@@ -4271,11 +4503,13 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_enable_set(payload, enable);
mlxsw_reg_ritr_ipv4_set(payload, 1);
mlxsw_reg_ritr_ipv6_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
mlxsw_reg_ritr_type_set(payload, type);
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -4311,6 +4545,57 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+/* RTAR - Router TCAM Allocation Register
+ * --------------------------------------
+ * This register is used for allocation of regions in the TCAM table.
+ */
+#define MLXSW_REG_RTAR_ID 0x8004
+#define MLXSW_REG_RTAR_LEN 0x20
+
+MLXSW_REG_DEFINE(rtar, MLXSW_REG_RTAR_ID, MLXSW_REG_RTAR_LEN);
+
+enum mlxsw_reg_rtar_op {
+ MLXSW_REG_RTAR_OP_ALLOCATE,
+ MLXSW_REG_RTAR_OP_RESIZE,
+ MLXSW_REG_RTAR_OP_DEALLOCATE,
+};
+
+/* reg_rtar_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, op, 0x00, 28, 4);
+
+enum mlxsw_reg_rtar_key_type {
+ MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST = 1,
+ MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST = 3
+};
+
+/* reg_rtar_key_type
+ * TCAM key type for the region.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, key_type, 0x00, 0, 8);
+
+/* reg_rtar_region_size
+ * TCAM region size. When allocating/resizing this is the requested
+ * size, the response is the actual size.
+ * Note: Actual size may be larger than requested.
+ * Reserved for op = Deallocate
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, region_size, 0x04, 0, 16);
+
+static inline void mlxsw_reg_rtar_pack(char *payload,
+ enum mlxsw_reg_rtar_op op,
+ enum mlxsw_reg_rtar_key_type key_type,
+ u16 region_size)
+{
+ MLXSW_REG_ZERO(rtar, payload);
+ mlxsw_reg_rtar_op_set(payload, op);
+ mlxsw_reg_rtar_key_type_set(payload, key_type);
+ mlxsw_reg_rtar_region_size_set(payload, region_size);
+}
+
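Editorial sketch of the allocation flow, again assuming the usual mlxsw_reg_write() transport; in practice the region size would come from device resources rather than a literal.

/* Editorial sketch: allocate an IPv4 multicast route TCAM region.
 * Deallocation is the same call with OP_DEALLOCATE and size 0, since
 * the size field is reserved for that op.
 */
static int example_rtar_alloc(struct mlxsw_core *mlxsw_core, u16 size)
{
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST, size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rtar), rtar_pl);
}
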
/* RATR - Router Adjacency Table Register
* --------------------------------------
* The RATR register is used to configure the Router Adjacency (next-hop)
@@ -4480,6 +4765,27 @@ MLXSW_ITEM32(reg, ratr, ipip_ipv4_udip, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ratr, ipip_ipv6_ptr, 0x1C, 0, 24);
+enum mlxsw_reg_flow_counter_set_type {
+ /* No count */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_ratr_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_set_type, 0x28, 24, 8);
+
+/* reg_ratr_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_index, 0x28, 0, 24);
+
static inline void
mlxsw_reg_ratr_pack(char *payload,
enum mlxsw_reg_ratr_op op, bool valid,
@@ -4507,6 +4813,20 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
+ bool counter_enable)
+{
+ enum mlxsw_reg_flow_counter_set_type set_type;
+
+ if (counter_enable)
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES;
+ else
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT;
+
+ mlxsw_reg_ratr_counter_index_set(payload, counter_index);
+ mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
+}
+
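Editorially, the intended call order deserves spelling out: the counter fields live in the same RATR payload as the adjacency entry, so the counter pack is layered onto an already packed payload before a single write. A sketch with the base-entry packing elided:

/* Editorial sketch: @ratr_pl has already been packed with the
 * adjacency entry itself; attach a packets+bytes flow counter and
 * write both out in one shot.
 */
static int example_ratr_count(struct mlxsw_core *mlxsw_core, char *ratr_pl,
			      u64 counter_index)
{
	mlxsw_reg_ratr_counter_pack(ratr_pl, counter_index, true);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ratr), ratr_pl);
}
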
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -4630,6 +4950,65 @@ static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
}
+/* RRCR - Router Rules Copy Register Layout
+ * ----------------------------------------
+ * This register is used for moving and copying route entry rules.
+ */
+#define MLXSW_REG_RRCR_ID 0x800F
+#define MLXSW_REG_RRCR_LEN 0x24
+
+MLXSW_REG_DEFINE(rrcr, MLXSW_REG_RRCR_ID, MLXSW_REG_RRCR_LEN);
+
+enum mlxsw_reg_rrcr_op {
+ /* Move rules */
+ MLXSW_REG_RRCR_OP_MOVE,
+ /* Copy rules */
+ MLXSW_REG_RRCR_OP_COPY,
+};
+
+/* reg_rrcr_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, op, 0x00, 28, 4);
+
+/* reg_rrcr_offset
+ * Offset within the region from which to copy/move.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, offset, 0x00, 0, 16);
+
+/* reg_rrcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, size, 0x04, 0, 16);
+
+/* reg_rrcr_table_id
+ * Identifier of the table on which to perform the operation. Encoding is the
+ * same as in RTAR.key_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, table_id, 0x10, 0, 4);
+
+/* reg_rrcr_dest_offset
+ * Offset within the region to which to copy/move
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, dest_offset, 0x20, 0, 16);
+
+static inline void mlxsw_reg_rrcr_pack(char *payload, enum mlxsw_reg_rrcr_op op,
+ u16 offset, u16 size,
+ enum mlxsw_reg_rtar_key_type table_id,
+ u16 dest_offset)
+{
+ MLXSW_REG_ZERO(rrcr, payload);
+ mlxsw_reg_rrcr_op_set(payload, op);
+ mlxsw_reg_rrcr_offset_set(payload, offset);
+ mlxsw_reg_rrcr_size_set(payload, size);
+ mlxsw_reg_rrcr_table_id_set(payload, table_id);
+ mlxsw_reg_rrcr_dest_offset_set(payload, dest_offset);
+}
+
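Editorial sketch of a typical use: opening a hole for a new higher-priority rule by shifting a run of existing entries one slot down. Whether overlapping source and destination ranges are handled is device behavior this sketch assumes.

/* Editorial sketch: move @count IPv4 multicast rules from @offset to
 * @offset + 1, freeing the slot at @offset for an insertion.
 */
static int example_rrcr_open_hole(struct mlxsw_core *mlxsw_core,
				  u16 offset, u16 count)
{
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, offset, count,
			    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST,
			    offset + 1);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rrcr), rrcr_pl);
}
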
/* RALTA - Router Algorithmic LPM Tree Allocation Register
* -------------------------------------------------------
* RALTA is used to allocate the LPM trees of the SHSPM method.
@@ -5169,15 +5548,6 @@ enum mlxsw_reg_rauht_trap_id {
*/
MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
-enum mlxsw_reg_flow_counter_set_type {
- /* No count */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00,
- /* Count packets and bytes */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
- /* Count only packets */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05,
-};
-
/* reg_rauht_counter_set_type
* Counter set type for flow counters
* Access: RW
@@ -5596,6 +5966,360 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+/* RIGR-V2 - Router Interface Group Register Version 2
+ * ---------------------------------------------------
+ * The RIGR_V2 register is used to add, remove and query egress interface list
+ * of a multicast forwarding entry.
+ */
+#define MLXSW_REG_RIGR2_ID 0x8023
+#define MLXSW_REG_RIGR2_LEN 0xB0
+
+#define MLXSW_REG_RIGR2_MAX_ERIFS 32
+
+MLXSW_REG_DEFINE(rigr2, MLXSW_REG_RIGR2_ID, MLXSW_REG_RIGR2_LEN);
+
+/* reg_rigr2_rigr_index
+ * KVD Linear index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rigr2, rigr_index, 0x04, 0, 24);
+
+/* reg_rigr2_vnext
+ * Next RIGR Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vnext, 0x08, 31, 1);
+
+/* reg_rigr2_next_rigr_index
+ * Next RIGR Index. The index is to the KVD linear.
+ * Reserved when vnext = '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, next_rigr_index, 0x08, 0, 24);
+
+/* reg_rigr2_vrmid
+ * RMID Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vrmid, 0x20, 31, 1);
+
+/* reg_rigr2_rmid_index
+ * RMID Index.
+ * Range 0 .. max_mid - 1
+ * Reserved when vrmid = '0'.
+ * The index is to the Port Group Table (PGT)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, rmid_index, 0x20, 0, 16);
+
+/* reg_rigr2_erif_entry_v
+ * Egress Router Interface is valid.
+ * Note that low-entries must be set if high-entries are set. For
+ * example: if erif_entry[2].v is set then erif_entry[1].v and
+ * erif_entry[0].v must be set.
+ * Index can be from 0 to cap_mc_erif_list_entries-1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_v, 0x24, 31, 1, 4, 0, false);
+
+/* reg_rigr2_erif_entry_erif
+ * Egress Router Interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Index can be from 0 to MLXSW_REG_RIGR2_MAX_ERIFS - 1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_erif, 0x24, 0, 16, 4, 0, false);
+
+static inline void mlxsw_reg_rigr2_pack(char *payload, u32 rigr_index,
+ bool vnext, u32 next_rigr_index)
+{
+ MLXSW_REG_ZERO(rigr2, payload);
+ mlxsw_reg_rigr2_rigr_index_set(payload, rigr_index);
+ mlxsw_reg_rigr2_vnext_set(payload, vnext);
+ mlxsw_reg_rigr2_next_rigr_index_set(payload, next_rigr_index);
+ mlxsw_reg_rigr2_vrmid_set(payload, 0);
+ mlxsw_reg_rigr2_rmid_index_set(payload, 0);
+}
+
+static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index,
+ bool v, u16 erif)
+{
+ mlxsw_reg_rigr2_erif_entry_v_set(payload, index, v);
+ mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif);
+}
+
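Editorial sketch of a single-node egress list, honoring the low-entries-first rule documented above; a list longer than MLXSW_REG_RIGR2_MAX_ERIFS would chain further nodes via vnext/next_rigr_index.

/* Editorial sketch: write one RIGR node holding two egress RIFs, with
 * no chained next node and no RMID.
 */
static int example_rigr2_write(struct mlxsw_core *mlxsw_core,
			       u32 rigr_index, u16 erif0, u16 erif1)
{
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];

	mlxsw_reg_rigr2_pack(rigr2_pl, rigr_index, false, 0);
	mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, 0, true, erif0);
	mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, 1, true, erif1);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rigr2), rigr2_pl);
}
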
+/* RECR-V2 - Router ECMP Configuration Version 2 Register
+ * ------------------------------------------------------
+ */
+#define MLXSW_REG_RECR2_ID 0x8025
+#define MLXSW_REG_RECR2_LEN 0x38
+
+MLXSW_REG_DEFINE(recr2, MLXSW_REG_RECR2_ID, MLXSW_REG_RECR2_LEN);
+
+/* reg_recr2_pp
+ * Per-port configuration
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, recr2, pp, 0x00, 24, 1);
+
+/* reg_recr2_sh
+ * Symmetric hash
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, sh, 0x00, 8, 1);
+
+/* reg_recr2_seed
+ * Seed
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, seed, 0x08, 0, 32);
+
+enum {
+ /* Enable IPv4 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP = 3,
+ /* Enable IPv4 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV4_EN_TCP_UDP = 4,
+ /* Enable IPv6 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP = 5,
+ /* Enable IPv6 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV6_EN_TCP_UDP = 6,
+ /* Enable TCP/UDP header fields if packet is IPv4 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7,
+ /* Enable TCP/UDP header fields if packet is IPv6 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8,
+};
+
+/* reg_recr2_outer_header_enables
+ * Bit mask where each bit enables a specific layer to be included in
+ * the hash calculation.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_enables, 0x10, 0x04, 1);
+
+enum {
+ /* IPv4 Source IP */
+ MLXSW_REG_RECR2_IPV4_SIP0 = 9,
+ MLXSW_REG_RECR2_IPV4_SIP3 = 12,
+ /* IPv4 Destination IP */
+ MLXSW_REG_RECR2_IPV4_DIP0 = 13,
+ MLXSW_REG_RECR2_IPV4_DIP3 = 16,
+ /* IP Protocol */
+ MLXSW_REG_RECR2_IPV4_PROTOCOL = 17,
+ /* IPv6 Source IP */
+ MLXSW_REG_RECR2_IPV6_SIP0_7 = 21,
+ MLXSW_REG_RECR2_IPV6_SIP8 = 29,
+ MLXSW_REG_RECR2_IPV6_SIP15 = 36,
+ /* IPv6 Destination IP */
+ MLXSW_REG_RECR2_IPV6_DIP0_7 = 37,
+ MLXSW_REG_RECR2_IPV6_DIP8 = 45,
+ MLXSW_REG_RECR2_IPV6_DIP15 = 52,
+ /* IPv6 Next Header */
+ MLXSW_REG_RECR2_IPV6_NEXT_HEADER = 53,
+ /* IPv6 Flow Label */
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL = 57,
+ /* TCP/UDP Source Port */
+ MLXSW_REG_RECR2_TCP_UDP_SPORT = 74,
+ /* TCP/UDP Destination Port */
+ MLXSW_REG_RECR2_TCP_UDP_DPORT = 75,
+};
+
+/* reg_recr2_outer_header_fields_enable
+ * Packet fields to enable for ECMP hash, subject to outer_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);
+
+static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload)
+{
+ int i;
+
+ for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_SIP3; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload)
+{
+ int i;
+
+ for (i = MLXSW_REG_RECR2_IPV4_DIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload)
+{
+ int i = MLXSW_REG_RECR2_IPV6_SIP0_7;
+
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+
+ i = MLXSW_REG_RECR2_IPV6_SIP8;
+ for (; i <= MLXSW_REG_RECR2_IPV6_SIP15; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload)
+{
+ int i = MLXSW_REG_RECR2_IPV6_DIP0_7;
+
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+
+ i = MLXSW_REG_RECR2_IPV6_DIP8;
+ for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed)
+{
+ MLXSW_REG_ZERO(recr2, payload);
+ mlxsw_reg_recr2_pp_set(payload, false);
+ mlxsw_reg_recr2_sh_set(payload, true);
+ mlxsw_reg_recr2_seed_set(payload, seed);
+}
+
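Editorial sketch of a fuller configuration on top of the pack helper: the outer_header_enables bits gate whole layers, after which the per-field helpers above choose what actually feeds the hash.

/* Editorial sketch: hash TCP/UDP-over-IPv4 flows on source and
 * destination addresses.
 */
static int example_recr2_setup(struct mlxsw_core *mlxsw_core, u32 seed)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];

	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl,
						 MLXSW_REG_RECR2_IPV4_EN_TCP_UDP,
						 true);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(recr2), recr2_pl);
}
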
+/* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register
+ * --------------------------------------------------------------
+ * The RMFT_V2 register is used to configure and query the multicast table.
+ */
+#define MLXSW_REG_RMFT2_ID 0x8027
+#define MLXSW_REG_RMFT2_LEN 0x174
+
+MLXSW_REG_DEFINE(rmft2, MLXSW_REG_RMFT2_ID, MLXSW_REG_RMFT2_LEN);
+
+/* reg_rmft2_v
+ * Valid
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, v, 0x00, 31, 1);
+
+enum mlxsw_reg_rmft2_type {
+ MLXSW_REG_RMFT2_TYPE_IPV4,
+ MLXSW_REG_RMFT2_TYPE_IPV6
+};
+
+/* reg_rmft2_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, type, 0x00, 28, 2);
+
+enum mlxsw_sp_reg_rmft2_op {
+ /* For Write:
+ * Write operation. Used to write a new entry to the table. All RW
+	 * fields are relevant for the new entry. The activity bit is set for
+	 * new entries. Note that a write with v (Valid) cleared deletes the
+	 * entry.
+	 * For Query:
+	 * Read operation.
+ */
+ MLXSW_REG_RMFT2_OP_READ_WRITE,
+};
+
+/* reg_rmft2_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rmft2, op, 0x00, 20, 2);
+
+/* reg_rmft2_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the specific
+ * entry.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rmft2, a, 0x00, 16, 1);
+
+/* reg_rmft2_offset
+ * Offset within the multicast forwarding table to write to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, offset, 0x00, 0, 16);
+
+/* reg_rmft2_virtual_router
+ * Virtual Router ID. Range from 0..cap_max_virtual_routers-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, virtual_router, 0x04, 0, 16);
+
+enum mlxsw_reg_rmft2_irif_mask {
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE,
+ MLXSW_REG_RMFT2_IRIF_MASK_COMPARE
+};
+
+/* reg_rmft2_irif_mask
+ * Ingress RIF mask.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
+
+/* reg_rmft2_irif
+ * Ingress RIF index.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
+
+/* reg_rmft2_dip4
+ * Destination IPv4 address
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
+
+/* reg_rmft2_dip4_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
+
+/* reg_rmft2_sip4
+ * Source IPv4 address
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
+
+/* reg_rmft2_sip4_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
+
+/* reg_rmft2_flexible_action_set
+ * ACL action set. The only supported action types in this field and in any
+ * action-set pointed from here are as follows:
+ * 00h: ACTION_NULL
+ * 01h: ACTION_MAC_TTL, only TTL configuration is supported.
+ * 03h: ACTION_TRAP
+ * 06h: ACTION_QOS
+ * 08h: ACTION_POLICING_MONITORING
+ * 10h: ACTION_ROUTER_MC
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+ const char *flexible_action_set)
+{
+ MLXSW_REG_ZERO(rmft2, payload);
+ mlxsw_reg_rmft2_v_set(payload, v);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
+ mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
+ mlxsw_reg_rmft2_offset_set(payload, offset);
+ mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
+ mlxsw_reg_rmft2_irif_set(payload, irif);
+ mlxsw_reg_rmft2_dip4_set(payload, dip4);
+ mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
+ mlxsw_reg_rmft2_sip4_set(payload, sip4);
+ mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
+ if (flexible_action_set)
+ mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+ flexible_action_set);
+}
+
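Editorial sketch of a full (S,G) install on top of the pack helper. The flexible action set is assumed to have been built beforehand through the core_acl_flex_actions API (e.g. via mlxsw_afa_block_first_set()); per the op comment above, rewriting the same offset with v cleared deletes the entry again.

/* Editorial sketch: install an exact-match (S,G) IPv4 route for
 * virtual router @vr that only hits when arriving via @irif.
 */
static int example_rmft2_write(struct mlxsw_core *mlxsw_core, u16 offset,
			       u16 vr, u16 irif, u32 sip4, u32 dip4,
			       const char *act_set)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, offset, vr,
				  MLXSW_REG_RMFT2_IRIF_MASK_COMPARE, irif,
				  dip4, 0xffffffff, sip4, 0xffffffff,
				  act_set);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rmft2), rmft2_pl);
}
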
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -6885,6 +7609,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(cwtp),
+ MLXSW_REG(cwtpm),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -6911,9 +7637,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
MLXSW_REG(ritr),
+ MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
MLXSW_REG(ricnt),
+ MLXSW_REG(rrcr),
MLXSW_REG(ralta),
MLXSW_REG(ralst),
MLXSW_REG(raltb),
@@ -6921,6 +7649,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rauht),
MLXSW_REG(raleu),
MLXSW_REG(rauhtd),
+ MLXSW_REG(rigr2),
+ MLXSW_REG(recr2),
+ MLXSW_REG(rmft2),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 9556d934714b..087aad52c195 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -63,6 +63,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
+ MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES,
/* Internal resources.
@@ -100,6 +101,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+ [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 696b99e65a5a..2d46ec84ebdf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -53,6 +53,7 @@
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -69,11 +70,12 @@
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
+#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1420
-#define MLXSW_FWREV_SUBMINOR 122
+#define MLXSW_FWREV_MINOR 1530
+#define MLXSW_FWREV_SUBMINOR 152
static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
.major = MLXSW_FWREV_MAJOR,
@@ -1322,20 +1324,54 @@ out:
return err;
}
+static void
+mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
+ struct mlxsw_sp_port_xstats *xstats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err, i;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ ppcnt_pl);
+ if (!err)
+ xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
+
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev,
+ MLXSW_REG_PPCNT_TC_CONG_TC,
+ i, ppcnt_pl);
+ if (!err)
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->backlog[i] =
+ mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
+ xstats->tail_drop[i] =
+ mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
+ }
+}
+
static void update_stats_cache(struct work_struct *work)
{
struct mlxsw_sp_port *mlxsw_sp_port =
container_of(work, struct mlxsw_sp_port,
- hw_stats.update_dw.work);
+ periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
- mlxsw_sp_port->hw_stats.cache);
+ &mlxsw_sp_port->periodic_hw_stats.stats);
+ mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
+ &mlxsw_sp_port->periodic_hw_stats.xstats);
out:
- mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
MLXSW_HW_STATS_UPDATE_TIME);
}
@@ -1348,7 +1384,7 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+ memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1695,17 +1731,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f)
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
{
- bool ingress;
-
- if (is_classid_clsact_ingress(f->common.classid))
- ingress = true;
- else if (is_classid_clsact_egress(f->common.classid))
- ingress = false;
- else
- return -EOPNOTSUPP;
-
if (f->common.chain_index)
return -EOPNOTSUPP;
@@ -1723,17 +1751,9 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ bool ingress)
{
- bool ingress;
-
- if (is_classid_clsact_ingress(f->common.classid))
- ingress = true;
- else if (is_classid_clsact_egress(f->common.classid))
- ingress = false;
- else
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
@@ -1747,16 +1767,72 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
+
+ if (!tc_can_offload(mlxsw_sp_port->dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
+ ingress);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
+ ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_block_offload *f)
+{
+ tc_setup_cb_t *cb;
+
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ cb = mlxsw_sp_setup_tc_block_cb_ig;
+ else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ cb = mlxsw_sp_setup_tc_block_cb_eg;
+ else
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
switch (type) {
- case TC_SETUP_CLSMATCHALL:
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
- case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
+ case TC_SETUP_BLOCK:
+ return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_RED:
+ return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -2868,14 +2944,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_sample;
}
- mlxsw_sp_port->hw_stats.cache =
- kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
-
- if (!mlxsw_sp_port->hw_stats.cache) {
- err = -ENOMEM;
- goto err_alloc_hw_stats;
- }
- INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache);
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
@@ -2974,6 +3043,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
+ err = PTR_ERR(mlxsw_sp_port_vlan);
goto err_port_vlan_get;
}
@@ -2989,7 +3059,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
mlxsw_sp_port, dev, mlxsw_sp_port->split,
module);
- mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
err_register_netdev:
@@ -3012,8 +3082,6 @@ err_dev_addr_init:
err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
- kfree(mlxsw_sp_port->hw_stats.cache);
-err_alloc_hw_stats:
kfree(mlxsw_sp_port->sample);
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -3028,7 +3096,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -3038,7 +3106,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
- kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
@@ -3311,6 +3378,14 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
+static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
+ u8 local_port, void *priv)
+{
+ skb->offload_mr_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
+}
+
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
@@ -3354,6 +3429,10 @@ out:
MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
+#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
#define MLXSW_SP_EVENTL(_func, _trap_id) \
MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
@@ -3420,6 +3499,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
false, SP_IP2ME, DISCARD),
/* ACL trap */
MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
+ MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3445,6 +3529,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
rate = 128;
burst_size = 7;
break;
@@ -3460,6 +3546,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
rate = 1024;
burst_size = 7;
break;
@@ -3505,6 +3592,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
priority = 5;
tc = 5;
break;
@@ -3521,12 +3609,14 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
priority = 2;
tc = 2;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
priority = 1;
tc = 1;
break;
@@ -3642,6 +3732,9 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3663,10 +3756,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_kvdl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
+ return err;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
- return err;
+ goto err_fids_init;
}
err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -3693,12 +3792,34 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_switchdev_init;
}
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
+ err = mlxsw_sp_afa_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
+ goto err_afa_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
}
+ /* Initialize netdevice notifier after router is initialized, so that
+ * the event handler can use router structures.
+ */
+ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
+ err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
+ goto err_netdev_notifier;
+ }
+
err = mlxsw_sp_span_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
@@ -3711,12 +3832,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
- err = mlxsw_sp_counter_pool_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
- goto err_counter_pool_init;
- }
-
err = mlxsw_sp_dpipe_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
@@ -3734,14 +3849,18 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_ports_create:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- mlxsw_sp_counter_pool_fini(mlxsw_sp);
-err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
+ unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_afa_fini(mlxsw_sp);
+err_afa_init:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
mlxsw_sp_lag_fini(mlxsw_sp);
@@ -3751,6 +3870,8 @@ err_buffers_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
mlxsw_sp_fids_fini(mlxsw_sp);
+err_fids_init:
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
return err;
}
@@ -3760,15 +3881,18 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
+ unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_afa_fini(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
}
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -3791,8 +3915,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_pkey = 0,
.used_kvd_split_data = 1,
.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
- .kvd_hash_single_parts = 2,
- .kvd_hash_double_parts = 1,
+ .kvd_hash_single_parts = 59,
+ .kvd_hash_double_parts = 41,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
.swid_config = {
{
@@ -3986,14 +4110,21 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
- struct netdev_lag_upper_info *lag_upper_info)
+ struct netdev_lag_upper_info *lag_upper_info,
+ struct netlink_ext_ack *extack)
{
u16 lag_id;
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
+ if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Exceeded number of supported LAG devices");
return false;
- if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ }
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: LAG device using unsupported Tx type");
return false;
+ }
return true;
}
@@ -4198,6 +4329,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
{
struct netdev_notifier_changeupper_info *info;
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct netlink_ext_ack *extack;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
@@ -4205,6 +4337,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
info = ptr;
+ extack = netdev_notifier_info_to_extack(&info->info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4212,25 +4345,43 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev))
+ !netif_is_ovs_master(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Unknown upper device type");
return -EINVAL;
+ }
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev))
+ if (netdev_has_any_upper_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
+ }
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
- info->upper_info))
+ info->upper_info, extack))
return -EINVAL;
- if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Master device is a LAG master and this device has a VLAN");
return -EINVAL;
+ }
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
- !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Can not put a VLAN on a LAG port");
return -EINVAL;
- if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
+ }
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Master device is an OVS master and this device has a VLAN");
return -EINVAL;
- if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
+ }
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Can not put a VLAN on an OVS port");
return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4238,7 +4389,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
lower_dev,
@@ -4329,18 +4481,25 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
struct net_device *upper_dev;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
+ if (!netif_is_bridge_master(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
return -EINVAL;
+ }
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev))
+ if (netdev_has_any_upper_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4348,7 +4507,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
@@ -4411,13 +4571,21 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
return netif_is_l3_master(info->upper_dev);
}
-static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp *mlxsw_sp;
int err = 0;
- if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+ mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
err = mlxsw_sp_netdevice_router_port_event(dev);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
@@ -4431,21 +4599,20 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
- .notifier_call = mlxsw_sp_netdevice_event,
+static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_valid_event,
};
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inetaddr_event,
- .priority = 10, /* Must be called before FIB notifier block */
};
-static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inet6addr_event,
+static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inet6addr_valid_event,
};
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
- .notifier_call = mlxsw_sp_router_netevent_event,
+static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inet6addr_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
@@ -4462,10 +4629,10 @@ static int __init mlxsw_sp_module_init(void)
{
int err;
- register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
@@ -4480,10 +4647,10 @@ static int __init mlxsw_sp_module_init(void)
err_pci_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
- unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
return err;
}
@@ -4491,10 +4658,10 @@ static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
- unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
module_init(mlxsw_sp_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 84ce83acdc19..58cf222fb985 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -48,6 +48,7 @@
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
+#include <net/red.h>
#include "port.h"
#include "core.h"
@@ -62,7 +63,7 @@
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
-#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
+#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
struct mlxsw_sp_port;
@@ -94,7 +95,8 @@ struct mlxsw_sp_mid {
unsigned char addr[ETH_ALEN];
u16 fid;
u16 mid;
- unsigned int ref_count;
+ bool in_hw;
+ unsigned long *ports_in_mid; /* bits array */
};
enum mlxsw_sp_span_type {
@@ -138,9 +140,11 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
+struct mlxsw_sp_mr;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
+struct mlxsw_sp_kvdl;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -152,11 +156,12 @@ struct mlxsw_sp {
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
+ struct mlxsw_sp_mr *mr;
+ struct mlxsw_afa *afa;
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
- struct {
- DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
- } kvdl;
+ struct mlxsw_sp_kvdl *kvdl;
+ struct notifier_block netdevice_nb;
struct mlxsw_sp_counter_pool *counter_pool;
struct {
@@ -199,6 +204,37 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ enum mlxsw_sp_qdisc_type type;
+ struct red_stats xstats_base;
+ union {
+ struct {
+ u64 tail_drop_base;
+ u64 ecn_base;
+ u64 wred_drop_base;
+ } red;
+ } xstats;
+
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+};
+
+/* No need for an internal lock; at worst we miss a single periodic iteration */
+struct mlxsw_sp_port_xstats {
+ u64 ecn;
+ u64 wred_drop[TC_MAX_QUEUE];
+ u64 tail_drop[TC_MAX_QUEUE];
+ u64 backlog[TC_MAX_QUEUE];
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
@@ -227,11 +263,13 @@ struct mlxsw_sp_port {
struct list_head mall_tc_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
- struct rtnl_link_stats64 *cache;
+ struct rtnl_link_stats64 stats;
+ struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
- } hw_stats;
+ } periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
+ struct mlxsw_sp_qdisc root_qdisc;
};
static inline bool
@@ -322,7 +360,8 @@ void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev);
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
@@ -380,23 +419,43 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
/* spectrum_router.c */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
+bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info);
+int
+mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
+int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -466,9 +525,9 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
enum mlxsw_afk_element element,
const char *key_value,
const char *mask_value, unsigned int len);
-void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
-void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
- u16 group_id);
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -521,6 +580,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
+/* spectrum_qdisc.c */
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p);
+
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
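
The new mlxsw_sp_qdisc struct above keeps base snapshots of the hardware counters (xstats_base, tx_bytes and friends) taken when a qdisc is offloaded, so user-visible stats start from zero at that point even though the hardware counters never reset. A minimal stand-alone model of the snapshot-and-subtract scheme (struct and names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct hw_counters {
	uint64_t ecn;
	uint64_t wred_drop;
	uint64_t tail_drop;
};

/* Snapshot taken when the qdisc is offloaded, like xstats_base above. */
static struct hw_counters base;

static void snapshot_base(const struct hw_counters *hw)
{
	base = *hw;
}

/* Reported stats are deltas against the snapshot, so the qdisc appears
 * to start counting from zero at offload time. */
static uint64_t ecn_since_offload(const struct hw_counters *hw)
{
	return hw->ecn - base.ecn;
}

int main(void)
{
	struct hw_counters hw = { .ecn = 100, .wred_drop = 7, .tail_drop = 3 };

	snapshot_base(&hw);		/* RED offloaded here */
	hw.ecn = 150;			/* hardware keeps counting */
	printf("ecn marks since offload: %llu\n",
	       (unsigned long long)ecn_since_offload(&hw));
	return 0;
}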
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 4b2455e3e079..93dcd315f7d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -52,7 +52,6 @@
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
- struct mlxsw_afa *afa;
struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
@@ -333,7 +332,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
- rulei->act_block = mlxsw_afa_block_create(acl->afa);
+ rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);
goto err_afa_block_create;
@@ -379,15 +378,15 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
key_value, mask_value, len);
}
-void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
- mlxsw_afa_block_continue(rulei->act_block);
+ return mlxsw_afa_block_continue(rulei->act_block);
}
-void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
- u16 group_id)
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id)
{
- mlxsw_afa_block_jump(rulei->act_block, group_id);
+ return mlxsw_afa_block_jump(rulei->act_block, group_id);
}
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
@@ -397,7 +396,8 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
- return mlxsw_afa_block_append_trap(rulei->act_block);
+ return mlxsw_afa_block_append_trap(rulei->act_block,
+ MLXSW_TRAP_ID_ACL0);
}
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -653,85 +653,6 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
-
-static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
- char pefa_pl[MLXSW_REG_PEFA_LEN];
- u32 kvdl_index;
- int err;
-
- /* The first action set of a TCAM entry is stored directly in TCAM,
- * not KVD linear area.
- */
- if (is_first)
- return 0;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
- &kvdl_index);
- if (err)
- return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
- if (err)
- goto err_pefa_write;
- *p_kvdl_index = kvdl_index;
- return 0;
-
-err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
- return err;
-}
-
-static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
- bool is_first)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- if (is_first)
- return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
-}
-
-static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
- u8 local_port)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
- char ppbs_pl[MLXSW_REG_PPBS_LEN];
- u32 kvdl_index;
- int err;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
- if (err)
- return err;
- mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
- if (err)
- goto err_ppbs_write;
- *p_kvdl_index = kvdl_index;
- return 0;
-
-err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
- return err;
-}
-
-static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
-}
-
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
- .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
- .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
- .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
-};
-
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
@@ -753,14 +674,6 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
goto err_afk_create;
}
- acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
- if (IS_ERR(acl->afa)) {
- err = PTR_ERR(acl->afa);
- goto err_afa_create;
- }
-
err = rhashtable_init(&acl->ruleset_ht,
&mlxsw_sp_acl_ruleset_ht_params);
if (err)
@@ -792,8 +705,6 @@ err_acl_ops_init:
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
- mlxsw_afa_destroy(acl->afa);
-err_afa_create:
mlxsw_afk_destroy(acl->afk);
err_afk_create:
kfree(acl);
@@ -810,7 +721,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
- mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
kfree(acl);
}
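
The change above turns mlxsw_sp_acl_rulei_act_continue()/_act_jump() from void into int because appending to the flexible-actions block can now fail, and callers must propagate that. A toy model of this void-to-int conversion and the check it forces at call sites (all names hypothetical, the failure condition is invented for the sketch):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct act_block {
	bool full;	/* pretend the block can run out of space */
};

/* Previously "void append_continue(...)": failures were silently lost. */
static int append_continue(struct act_block *block)
{
	if (block->full)
		return -ENOBUFS;
	/* ... emit the "continue" opcode into the block ... */
	return 0;
}

static int build_rule(struct act_block *block)
{
	int err;

	err = append_continue(block);
	if (err)
		return err;	/* propagate instead of ignoring */
	return 0;
}

int main(void)
{
	struct act_block block = { .full = true };

	printf("build_rule: %d\n", build_rule(&block));
	return 0;
}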
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
new file mode 100644
index 000000000000..4d3340ed0291
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -0,0 +1,129 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spectrum_acl_flex_actions.h"
+#include "core_acl_flex_actions.h"
+
+#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
+
+static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ u32 kvdl_index;
+ int err;
+
+ /* The first action set of a TCAM entry is stored directly in TCAM,
+ * not in the KVD linear area.
+ */
+ if (is_first)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
+ &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_pefa_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
+ bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ if (is_first)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
+ u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char ppbs_pl[MLXSW_REG_PPBS_LEN];
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
+ if (err)
+ goto err_ppbs_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_ppbs_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+};
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_ACTIONS_PER_SET),
+ &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
+}
+
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_afa_destroy(mlxsw_sp->afa);
+}
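
mlxsw_sp_afa_init() above leans on the kernel's ERR_PTR convention: the constructor returns either a valid pointer or an errno encoded into the pointer value, and PTR_ERR_OR_ZERO() collapses that to 0 or -errno. A simplified stand-alone model of the idiom (these are re-implementations for illustration, not the kernel macros):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)		{ return (void *)err; }
static long ptr_err(const void *ptr)	{ return (long)ptr; }

static int is_err(const void *ptr)
{
	/* errnos live in the last page of the address space */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *ptr)
{
	return is_err(ptr) ? ptr_err(ptr) : 0;
}

static int fake_obj;			/* stands in for a real allocation */

static void *obj_create(int should_fail)
{
	return should_fail ? err_ptr(-ENOMEM) : &fake_obj;
}

int main(void)
{
	printf("ok:   %ld\n", ptr_err_or_zero(obj_create(0)));	/* 0 */
	printf("fail: %ld\n", ptr_err_or_zero(obj_create(1)));	/* -12 */
	return 0;
}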
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
new file mode 100644
index 000000000000..2726192836ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -0,0 +1,44 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+
+#include "spectrum.h"
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50b40de1fb91..7e8284b46968 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -608,7 +608,10 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- mlxsw_sp_acl_rulei_act_continue(rulei);
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+
err = mlxsw_sp_acl_rulei_commit(rulei);
if (err)
goto err_rulei_commit;
@@ -623,6 +626,7 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
err_rule_insert:
err_rulei_commit:
+err_rulei_act_continue:
mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
parman_item_remove(region->parman, parman_prio, parman_item);
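
The new err_rulei_act_continue label above slots into the standard kernel unwind ladder: each failing step jumps to a label that runs the cleanups for every step that already succeeded, in reverse order. A compact model of the idiom (the steps and names are made up for the sketch):

#include <errno.h>
#include <stdlib.h>

static int step_b_done;

static int do_a(void **res)	{ *res = malloc(16); return *res ? 0 : -ENOMEM; }
static int do_b(void)		{ step_b_done = 1; return 0; }
static void undo_b(void)	{ step_b_done = 0; }
static int do_c(void)		{ return -EIO; }	/* pretend C fails */

static int setup(void)
{
	void *a;
	int err;

	err = do_a(&a);
	if (err)
		return err;
	err = do_b();
	if (err)
		goto err_b;
	err = do_c();
	if (err)
		goto err_c;	/* unwinds B, then A */
	return 0;

err_c:
	undo_b();
err_b:
	free(a);
	return err;
}

int main(void) { return setup() ? 1 : 0; }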
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 51e6846da72b..96fdba78acab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -43,21 +43,42 @@ enum mlxsw_sp_field_metadata_id {
MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
};
static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
- { .name = "erif_port",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
- .bitwidth = 32,
- .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+ {
+ .name = "erif_port",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ .bitwidth = 32,
+ .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
},
- { .name = "l3_forward",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
- .bitwidth = 1,
+ {
+ .name = "l3_forward",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ .bitwidth = 1,
},
- { .name = "l3_drop",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
- .bitwidth = 1,
+ {
+ .name = "l3_drop",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ .bitwidth = 1,
+ },
+ {
+ .name = "adj_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_size",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_hash_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
+ .bitwidth = 32,
},
};
@@ -826,6 +847,390 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
+static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+ int err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static int mlxsw_sp_dpipe_table_adj_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &devlink_dpipe_header_ethernet;
+ action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ u64 size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
+ if (mlxsw_sp_nexthop_offload(nh) &&
+ !mlxsw_sp_nexthop_group_has_ipip(nh))
+ size++;
+ return size;
+}
+
+enum mlxsw_sp_dpipe_table_adj_match {
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT,
+};
+
+enum mlxsw_sp_dpipe_table_adj_action {
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT,
+};
+
+static void
+mlxsw_sp_dpipe_table_adj_match_action_prepare(struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_action *actions)
+{
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &devlink_dpipe_header_ethernet;
+ action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_values,
+ struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_value *action_values,
+ struct devlink_dpipe_action *actions)
+{
+ struct devlink_dpipe_value *action_value;
+ struct devlink_dpipe_value *match_value;
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ entry->match_values = match_values;
+ entry->match_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT;
+
+ entry->action_values = action_values;
+ entry->action_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u64);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+__mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry,
+ u32 adj_index, u32 adj_size,
+ u32 adj_hash_index, unsigned char *ha,
+ struct mlxsw_sp_rif *rif)
+{
+ struct devlink_dpipe_value *value;
+ u32 *p_rif_value;
+ u32 *p_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ p_index = value->value;
+ *p_index = adj_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ p_index = value->value;
+ *p_index = adj_size;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ p_index = value->value;
+ *p_index = adj_hash_index;
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ ether_addr_copy(value->value, ha);
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ p_rif_value = value->value;
+ *p_rif_value = mlxsw_sp_rif_index(rif);
+ value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ value->mapping_valid = true;
+}
+
+static void mlxsw_sp_dpipe_table_adj_entry_fill(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct devlink_dpipe_entry *entry)
+{
+ struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
+ unsigned char *ha = mlxsw_sp_nexthop_ha(nh);
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+ int err;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, &adj_hash_index);
+ __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, adj_size,
+ adj_hash_index, ha, rif);
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter);
+ if (!err)
+ entry->counter_valid = true;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int entry_index = 0;
+ int nh_count_max;
+ int nh_count = 0;
+ int nh_skip;
+ int j;
+ int err;
+
+ rtnl_lock();
+ nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ nh_skip = nh_count;
+ nh_count = 0;
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_offload(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ if (nh_count < nh_skip)
+ goto skip;
+
+ mlxsw_sp_dpipe_table_adj_entry_fill(mlxsw_sp, nh, entry);
+ entry->index = entry_index;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ entry_index++;
+ j++;
+skip:
+ nh_count++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (nh_count != nh_count_max)
+ goto start_again;
+ rtnl_unlock();
+
+ return 0;
+
+err_ctx_prepare:
+err_entry_append:
+ rtnl_unlock();
+ return err;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_action actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_match matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ memset(matches, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(matches[0]));
+ memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(match_values[0]));
+ memset(actions, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(actions[0]));
+ memset(action_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(action_values[0]));
+
+ mlxsw_sp_dpipe_table_adj_match_action_prepare(matches, actions);
+ err = mlxsw_sp_dpipe_table_adj_entry_prepare(&entry,
+ match_values, matches,
+ action_values, actions);
+ if (err)
+ goto out;
+
+ err = mlxsw_sp_dpipe_table_adj_entries_get(mlxsw_sp, &entry,
+ counters_enabled, dump_ctx);
+out:
+ devlink_dpipe_entry_clear(&entry);
+ return err;
+}
+
+static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_nexthop *nh;
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_offload(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
+ &adj_hash_index);
+ if (enable)
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_update(mlxsw_sp,
+ adj_index + adj_hash_index, nh);
+ }
+ return 0;
+}
+
+static u64
+mlxsw_sp_dpipe_table_adj_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u64 size;
+
+ rtnl_lock();
+ size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+ rtnl_unlock();
+
+ return size;
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_adj_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_adj_size_get,
+};
+
+static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ return devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+}
+
+static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+}
+
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -846,8 +1251,14 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_dpipe_host6_table_init(mlxsw_sp);
if (err)
goto err_host6_table_init;
- return 0;
+ err = mlxsw_sp_dpipe_adj_table_init(mlxsw_sp);
+ if (err)
+ goto err_adj_table_init;
+
+ return 0;
+err_adj_table_init:
+ mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
err_host6_table_init:
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
err_host4_table_init:
@@ -861,6 +1272,7 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ mlxsw_sp_dpipe_adj_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
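
mlxsw_sp_dpipe_table_adj_entries_get() above pages its dump across netlink messages: when a message fills up (-EMSGSIZE), it closes the message and re-walks the next-hop list, skipping the nh_skip entries it already emitted. A self-contained model of that skip-and-resume loop (the constants are invented for the demo):

#include <stdio.h>

#define TOTAL   10	/* entries in the "table" */
#define PER_MSG 4	/* entries that fit in one message */

int main(void)
{
	int emitted = 0;

	while (emitted < TOTAL) {
		int skip = emitted;	/* resume point, like nh_skip */
		int in_msg = 0;
		int seen = 0;
		int i;

		printf("new message:");
		for (i = 0; i < TOTAL; i++) {
			if (seen++ < skip)
				continue;	/* already sent earlier */
			if (in_msg == PER_MSG)
				break;		/* message "full": -EMSGSIZE */
			printf(" entry %d", i);
			in_msg++;
			emitted++;
		}
		printf("\n");
	}
	return 0;
}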
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index 283fde4e6783..815d543cf114 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -56,5 +56,6 @@ static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6"
+#define MLXSW_SP_DPIPE_TABLE_NAME_ADJ "mlxsw_adj"
#endif /* _MLXSW_PIPELINE_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 8aace9a06a5d..2f0e57857ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -63,7 +63,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- if (is_tcf_gact_shot(a)) {
+ if (is_tcf_gact_ok(a)) {
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (err)
+ return err;
+ } else if (is_tcf_gact_shot(a)) {
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err)
return err;
@@ -84,7 +88,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(ruleset);
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
- mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
+ err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
+ if (err)
+ return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
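
The flower parser above now recognizes gact "ok" in addition to "shot": "ok" maps to the hardware "continue" action (proceed to the next lookup) while "shot" maps to drop. A minimal dispatch model of that mapping (hypothetical enums echoing the tc action names, not the tc API):

#include <errno.h>
#include <stdio.h>

enum tc_act { TC_ACT_OK, TC_ACT_SHOT };
enum hw_act { HW_ACT_CONTINUE, HW_ACT_DROP };

static int parse_action(enum tc_act act, enum hw_act *hw)
{
	switch (act) {
	case TC_ACT_OK:
		*hw = HW_ACT_CONTINUE;	/* like ..._rulei_act_continue() */
		return 0;
	case TC_ACT_SHOT:
		*hw = HW_ACT_DROP;	/* like ..._rulei_act_drop() */
		return 0;
	default:
		return -EOPNOTSUPP;	/* not offloadable */
	}
}

int main(void)
{
	enum hw_act hw;

	if (!parse_action(TC_ACT_OK, &hw))
		printf("gact ok -> hw action %d\n", hw);
	return 0;
}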
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 702fe945227c..7502e53447bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -36,36 +36,123 @@
#include "spectrum_ipip.h"
-static bool
-mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
- return !!(tun->parms.i_flags & TUNNEL_KEY);
+ return tun->parms;
}
-static bool
-mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return !!(parms.i_flags & TUNNEL_KEY);
+}
- return !!(tun->parms.o_flags & TUNNEL_KEY);
+static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms)
+{
+ return !!(parms.o_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
+static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return mlxsw_sp_ipip_parms_has_ikey(parms) ?
+ be32_to_cpu(parms.i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms)
+{
+ return mlxsw_sp_ipip_parms_has_okey(parms) ?
+ be32_to_cpu(parms.o_key) : 0;
+}
- return mlxsw_sp_ipip_netdev_has_ikey(ol_dev) ?
- be32_to_cpu(tun->parms.i_key) : 0;
+static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms)
+{
+ return parms.iph.saddr;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto,
+ struct ip_tunnel_parm parms)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = mlxsw_sp_ipip_parms_saddr4(parms),
+ };
+ case MLXSW_SP_L3_PROTO_IPV6:
+ break;
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = 0,
+ };
+}
+
+static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms)
+{
+ return parms.iph.daddr;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto,
+ struct ip_tunnel_parm parms)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = mlxsw_sp_ipip_parms_daddr4(parms),
+ };
+ case MLXSW_SP_L3_PROTO_IPV6:
+ break;
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = 0,
+ };
+}
+
+static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
}
static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_saddr(proto,
+ mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
- return mlxsw_sp_ipip_netdev_has_okey(ol_dev) ?
- be32_to_cpu(tun->parms.o_key) : 0;
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_daddr(proto,
+ mlxsw_sp_ipip_netdev_parms(ol_dev));
}
static int
@@ -200,6 +287,73 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
};
}
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ union mlxsw_sp_l3addr old_saddr, new_saddr;
+ union mlxsw_sp_l3addr old_daddr, new_daddr;
+ struct ip_tunnel_parm new_parms;
+ bool update_tunnel = false;
+ bool update_decap = false;
+ bool update_nhs = false;
+ int err = 0;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+
+ new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+ new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+
+ if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+
+ /* Since the local address has changed, if there is another
+ * tunnel with a matching saddr, both need to be demoted.
+ */
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
+ MLXSW_SP_L3_PROTO_IPV4,
+ new_saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ update_tunnel = true;
+ } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_okey(new_parms)) ||
+ ipip_entry->parms.link != new_parms.link) {
+ update_tunnel = true;
+ } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ update_nhs = true;
+ } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ update_decap = true;
+ }
+
+ if (update_tunnel)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, true,
+ extack);
+ else if (update_nhs)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true,
+ extack);
+ else if (update_decap)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, false,
+ extack);
+
+ ipip_entry->parms = new_parms;
+ return err;
+}
+
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
@@ -207,6 +361,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
+ .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 1c2db831d83b..04b08d9d76e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -38,6 +38,13 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev);
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev);
+
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
MLXSW_SP_IPIP_TYPE_MAX,
@@ -47,9 +54,9 @@ struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
- unsigned int ref_count; /* Number of next hops using the tunnel. */
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
+ struct ip_tunnel_parm parms;
};
struct mlxsw_sp_ipip_ops {
@@ -72,6 +79,10 @@ struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry,
enum mlxsw_reg_ralue_op op,
u32 tunnel_index);
+
+ int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack);
};
extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
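
mlxsw_sp_ipip_ol_netdev_change_gre4() above ranks the changed tunnel parameters by how invasive the required update is: a new saddr, okey or link rebuilds the whole tunnel, a new daddr only refreshes the next hops, and a new ikey only refreshes the decap entry. A compact model of that classification (fields flattened to plain integers for the sketch):

#include <stdio.h>

struct tun_parms {
	unsigned int saddr, daddr, ikey, okey, link;
};

enum tun_update {
	TUN_UPDATE_NONE,
	TUN_UPDATE_DECAP,	/* ikey: decap match key only */
	TUN_UPDATE_NHS,		/* daddr: next hops only */
	TUN_UPDATE_TUNNEL,	/* saddr/okey/link: rebuild everything */
};

static enum tun_update classify(const struct tun_parms *old,
				const struct tun_parms *cur)
{
	if (cur->saddr != old->saddr ||
	    cur->okey != old->okey || cur->link != old->link)
		return TUN_UPDATE_TUNNEL;
	if (cur->daddr != old->daddr)
		return TUN_UPDATE_NHS;
	if (cur->ikey != old->ikey)
		return TUN_UPDATE_DECAP;
	return TUN_UPDATE_NONE;
}

int main(void)
{
	struct tun_parms old = { 1, 2, 3, 4, 5 };
	struct tun_parms cur = old;

	cur.daddr = 9;
	printf("update class: %d\n", classify(&old, &cur));
	return 0;
}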
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 26c26cd30c3d..310c38247b5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -39,55 +39,276 @@
#define MLXSW_SP_KVDL_SINGLE_BASE 0
#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_SINGLE_END \
+ (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
+
#define MLXSW_SP_KVDL_CHUNKS_BASE \
(MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP_KVDL_CHUNKS_END \
+ (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
+
#define MLXSW_SP_CHUNK_MAX 32
+#define MLXSW_SP_LARGE_CHUNK_MAX 512
+
+struct mlxsw_sp_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+};
+
+struct mlxsw_sp_kvdl_part {
+ struct list_head list;
+ const struct mlxsw_sp_kvdl_part_info *info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp_kvdl {
+ struct list_head parts_list;
+};
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
+
+ list_for_each_entry(part, &kvdl->parts_list, list) {
+ if (alloc_size <= part->info->alloc_size &&
+ (!min_part ||
+ part->info->alloc_size <= min_part->info->alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ list_for_each_entry(part, &kvdl->parts_list, list) {
+ if (kvdl_index >= part->info->start_index &&
+ kvdl_index <= part->info->end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
+
+static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp_kvdl_part_info *info = part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info,
+ entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
+ u32 kvdl_index)
+{
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp_kvdl_index_entry_index(part->info,
+ kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index)
{
- int entry_index;
- int size;
- int type_base;
- int type_size;
- int type_entries;
-
- if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
- return -EINVAL;
- } else if (entry_count == 1) {
- type_base = MLXSW_SP_KVDL_SINGLE_BASE;
- type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
- type_entries = 1;
- } else {
- type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
- type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
- type_entries = MLXSW_SP_CHUNK_MAX;
+ struct mlxsw_sp_kvdl_part *part;
+
+ /* Find partition with smallest allocation size satisfying the
+ * requested size.
+ */
+ part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp_kvdl_part_free(part, entry_index);
+}
+
+int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info->alloc_size;
+
+ return 0;
+}
+
+static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
+ {
+ .part_index = 0,
+ .start_index = MLXSW_SP_KVDL_SINGLE_BASE,
+ .end_index = MLXSW_SP_KVDL_SINGLE_END,
+ .alloc_size = 1,
+ },
+ {
+ .part_index = 1,
+ .start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
+ .end_index = MLXSW_SP_KVDL_CHUNKS_END,
+ .alloc_size = MLXSW_SP_CHUNK_MAX,
+ },
+ {
+ .part_index = 2,
+ .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
+ .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
+ .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
+ },
+};
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) {
+ if (part->info->part_index == part_index)
+ return part;
}
- entry_index = type_base;
- size = type_base + type_size;
- for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
- int i;
+ return NULL;
+}
+
+static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ unsigned int part_index)
+{
+ const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part *part;
+ unsigned int nr_entries;
+ size_t usage_size;
+
+ info = &kvdl_parts_info[part_index];
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return -ENOMEM;
+
+ part->info = info;
+ list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+
+ return 0;
+}
+
+static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
+ unsigned int part_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index);
+ if (!part)
+ return;
+
+ list_del(&part->list);
+ kfree(part);
+}
+
+static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err, i;
+
+ INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list);
- for (i = 0; i < type_entries; i++)
- set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
- *p_entry_index = entry_index;
- return 0;
+ for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) {
+ err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i);
+ if (err)
+ goto err_kvdl_part_init;
}
- return -ENOBUFS;
+
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+ return err;
}
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
{
- int type_entries;
int i;
- if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
- type_entries = 1;
- else
- type_entries = MLXSW_SP_CHUNK_MAX;
- for (i = 0; i < type_entries; i++)
- clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+ for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--)
+ mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+}
+
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl *kvdl;
+ int err;
+
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ mlxsw_sp->kvdl = kvdl;
+
+ err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
+ if (err)
+ goto err_kvdl_parts_init;
+
+ return 0;
+
+err_kvdl_parts_init:
+ kfree(mlxsw_sp->kvdl);
+ return err;
+}
+
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
+ kfree(mlxsw_sp->kvdl);
}
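
The rewritten KVDL code above splits the linear space into parts with fixed allocation granules (1, 32 and 512 entries) and allocates from the part with the smallest granule that still fits the request. A self-contained model of one part, with a plain flag array standing in for the kernel bitmap helpers (the 16384-entry size and granule of 1 match the "singles" part, everything else is illustrative):

#include <errno.h>
#include <stdio.h>

struct part {
	unsigned int start, end, alloc_size;
	unsigned char used[16384];	/* one flag per granule */
};

static int part_alloc(struct part *p, unsigned int *index)
{
	unsigned int n = (p->end - p->start + 1) / p->alloc_size;
	unsigned int i;

	for (i = 0; i < n; i++) {	/* find_first_zero_bit() stand-in */
		if (!p->used[i]) {
			p->used[i] = 1;
			*index = p->start + i * p->alloc_size;
			return 0;
		}
	}
	return -ENOBUFS;		/* part exhausted */
}

static void part_free(struct part *p, unsigned int index)
{
	p->used[(index - p->start) / p->alloc_size] = 0;
}

int main(void)
{
	/* The "singles" part: entries 0..16383, granule of 1 entry. */
	static struct part singles = {
		.start = 0, .end = 16383, .alloc_size = 1,
	};
	unsigned int idx;

	if (!part_alloc(&singles, &idx))
		printf("allocated KVDL index %u\n", idx);
	part_free(&singles, idx);
	return 0;
}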
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
new file mode 100644
index 000000000000..d20b143de3b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -0,0 +1,1012 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/rhashtable.h>
+
+#include "spectrum_mr.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_mr {
+ const struct mlxsw_sp_mr_ops *mr_ops;
+ void *catchall_route_priv;
+ struct delayed_work stats_update_dw;
+ struct list_head table_list;
+#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_mr_vif {
+ struct net_device *dev;
+ const struct mlxsw_sp_rif *rif;
+ unsigned long vif_flags;
+
+ /* A list of route_vif_entry structs that point to routes in which the
+ * VIF instance is used as one of the egress VIFs
+ */
+ struct list_head route_evif_list;
+
+ /* A list of route_vif_entry structs that point to routes in which the
+ * VIF instance is used as the ingress VIF
+ */
+ struct list_head route_ivif_list;
+};
+
+struct mlxsw_sp_mr_route_vif_entry {
+ struct list_head vif_node;
+ struct list_head route_node;
+ struct mlxsw_sp_mr_vif *mr_vif;
+ struct mlxsw_sp_mr_route *mr_route;
+};
+
+struct mlxsw_sp_mr_table {
+ struct list_head node;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 vr_id;
+ struct mlxsw_sp_mr_vif vifs[MAXVIFS];
+ struct list_head route_list;
+ struct rhashtable route_ht;
+ char catchall_route_priv[0];
+ /* catchall_route_priv has to be always the last item */
+};
+
+struct mlxsw_sp_mr_route {
+ struct list_head node;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mr_route_key key;
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 min_mtu;
+ struct mfc_cache *mfc4;
+ void *route_priv;
+ const struct mlxsw_sp_mr_table *mr_table;
+ /* A list of route_vif_entry structs that point to the egress VIFs */
+ struct list_head evif_list;
+ /* A route_vif_entry struct that points to the ingress VIF */
+ struct mlxsw_sp_mr_route_vif_entry ivif;
+};
+
+static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_mr_route_key),
+ .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
+ .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
+ .automatic_shrinking = true,
+};
+
+static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
+}
+
+static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
+{
+ return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
+}
+
+static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
+{
+ return vif->dev;
+}
+
+static bool
+mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
+{
+ vifi_t ivif;
+
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ ivif = mr_route->mfc4->mfc_parent;
+ return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return false;
+}
+
+static int
+mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ int valid_evifs;
+
+ valid_evifs = 0;
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ valid_evifs++;
+ return valid_evifs;
+}
+
+static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
+{
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return false;
+}
+
+static enum mlxsw_sp_mr_route_action
+mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ /* If the ingress port is not regular and resolved, trap the route */
+ if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* The kernel does not match a (*,G) route whose ingress interface is
+ * not one of the egress interfaces, so trap these kinds of routes.
+ */
+ if (mlxsw_sp_mr_route_starg(mr_route) &&
+ !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If the route has no valid eVIFs, trap it. */
+ if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If one of the eVIFs has no RIF, trap-and-forward the route as there
+ * is still some routing left to do in software.
+ */
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;
+
+ return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
+}
+
+static enum mlxsw_sp_mr_route_prio
+mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mlxsw_sp_mr_route_starg(mr_route) ?
+ MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
+}
+
+static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ const struct mfc_cache *mfc)
+{
+ bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = mr_table->proto;
+ key->group.addr4 = mfc->mfc_mcastgrp;
+ key->group_mask.addr4 = htonl(0xffffffff);
+ key->source.addr4 = mfc->mfc_origin;
+ key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
+}
+
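+/* For illustration (hypothetical addresses): an IPv4 (*,G) entry with
+ * mfc_mcastgrp 239.1.1.1 and mfc_origin 0.0.0.0 yields a key matching
+ * group 239.1.1.1/32 and source 0.0.0.0/0, while an (S,G) entry with
+ * mfc_origin 192.0.2.1 yields source 192.0.2.1/32. The source mask is
+ * what lets both route types share a single lookup key format.
+ */
+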
+static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ rve = kzalloc(sizeof(*rve), GFP_KERNEL);
+ if (!rve)
+ return -ENOMEM;
+ rve->mr_route = mr_route;
+ rve->mr_vif = mr_vif;
+ list_add_tail(&rve->route_node, &mr_route->evif_list);
+ list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ list_del(&rve->route_node);
+ list_del(&rve->vif_node);
+ kfree(rve);
+}
+
+static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ mr_route->ivif.mr_route = mr_route;
+ mr_route->ivif.mr_vif = mr_vif;
+ list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
+}
+
+static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
+{
+ list_del(&mr_route->ivif.vif_node);
+}
+
+static int
+mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ u16 *erif_indices;
+ u16 irif_index;
+ u16 erif = 0;
+
+ erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
+ GFP_KERNEL);
+ if (!erif_indices)
+ return -ENOMEM;
+
+ list_for_each_entry(rve, &mr_route->evif_list, route_node) {
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+
+ erif_indices[erif++] = rifi;
+ }
+ }
+
+ if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
+ else
+ irif_index = 0;
+
+ route_info->irif_index = irif_index;
+ route_info->erif_indices = erif_indices;
+ route_info->min_mtu = mr_route->min_mtu;
+ route_info->route_action = mr_route->route_action;
+ route_info->erif_num = erif;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
+{
+ kfree(route_info->erif_indices);
+}
+
+static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ bool replace)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_info route_info;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ int err;
+
+ err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
+ if (err)
+ return err;
+
+ if (!replace) {
+ struct mlxsw_sp_mr_route_params route_params;
+
+ mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_route->route_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ route_params.key = mr_route->key;
+ route_params.value = route_info;
+ route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_route->route_priv,
+ &route_params);
+ if (err)
+ kfree(mr_route->route_priv);
+ } else {
+ err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
+ &route_info);
+ }
+out:
+ mlxsw_sp_mr_route_info_destroy(&route_info);
+ return err;
+}
+
+static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
+ kfree(mr_route->route_priv);
+}
+
+static struct mlxsw_sp_mr_route *
+mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err = 0;
+ int i;
+
+ /* Allocate and init a new route and fill it with parameters */
+ mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
+ if (!mr_route)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&mr_route->evif_list);
+ mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
+
+ /* Find min_mtu and link iVIF and eVIFs */
+ mr_route->min_mtu = ETH_MAX_MTU;
+ ipmr_cache_hold(mfc);
+ mr_route->mfc4 = mfc;
+ mr_route->mr_table = mr_table;
+ for (i = 0; i < MAXVIFS; i++) {
+ if (mfc->mfc_un.res.ttls[i] != 255) {
+ err = mlxsw_sp_mr_route_evif_link(mr_route,
+ &mr_table->vifs[i]);
+ if (err)
+ goto err;
+ if (mr_table->vifs[i].dev &&
+ mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
+ mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
+ }
+ }
+ mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
+
+ mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
+ return mr_route;
+err:
+ ipmr_cache_put(mfc);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+
+ mlxsw_sp_mr_route_ivif_unlink(mr_route);
+ ipmr_cache_put(mr_route->mfc4);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+}
+
+static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ switch (mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
+ bool offload)
+{
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ if (offload)
+ mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
+ else
+ mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
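+/* A route is reported to the kernel as offloaded whenever the hardware
+ * forwards packets itself, i.e. its action is FORWARD or
+ * TRAP_AND_FORWARD; a trapped route is handled entirely in software.
+ */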
+static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
+{
+ bool offload;
+
+ offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
+}
+
+static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ mlxsw_sp_mr_mfc_offload_set(mr_route, false);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+ rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_route->node);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
+}
+
+int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc, bool replace)
+{
+ struct mlxsw_sp_mr_route *mr_orig_route = NULL;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err;
+
+ /* If the route is a (*,*) route, abort, as such routes are used for
+ * proxy routes.
+ */
+ if (mfc->mfc_origin == htonl(INADDR_ANY) &&
+ mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Create a new route */
+ mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
+ if (IS_ERR(mr_route))
+ return PTR_ERR(mr_route);
+
+ /* Find any route with a matching key */
+ mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
+ &mr_route->key,
+ mlxsw_sp_mr_route_ht_params);
+ if (replace) {
+ /* In the replace case, make the new route point to the original
+ * route's route_priv.
+ */
+ if (WARN_ON(!mr_orig_route)) {
+ err = -ENOENT;
+ goto err_no_orig_route;
+ }
+ mr_route->route_priv = mr_orig_route->route_priv;
+ } else if (mr_orig_route) {
+ /* In the non-replace case, if another route with the same key was
+ * found, abort, as duplicate routes are used for proxy routes.
+ */
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ err = -EINVAL;
+ goto err_duplicate_route;
+ }
+
+ /* Put it in the table data structures */
+ list_add_tail(&mr_route->node, &mr_table->route_list);
+ err = rhashtable_insert_fast(&mr_table->route_ht,
+ &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Write the route to the hardware */
+ err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
+ if (err)
+ goto err_mr_route_write;
+
+ /* Destroy the original route */
+ if (replace) {
+ rhashtable_remove_fast(&mr_table->route_ht,
+ &mr_orig_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_orig_route->node);
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
+ }
+
+ mlxsw_sp_mr_mfc_offload_update(mr_route);
+ return 0;
+
+err_mr_route_write:
+ rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+err_rhashtable_insert:
+ list_del(&mr_route->node);
+err_no_orig_route:
+err_duplicate_route:
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ return err;
+}
+
+void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc)
+{
+ struct mlxsw_sp_mr_route *mr_route;
+ struct mlxsw_sp_mr_route_key key;
+
+ mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
+ mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
+ mlxsw_sp_mr_route_ht_params);
+ if (mr_route)
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+}
+
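+/* Typical usage from a (hypothetical) MFC notification handler:
+ *
+ *	err = mlxsw_sp_mr_route4_add(mr_table, mfc, replace);
+ *	...
+ *	mlxsw_sp_mr_route4_del(mr_table, mfc);
+ *
+ * Deletion is keyed, so the same mfc_cache that was used for the add is
+ * enough to locate and remove the hardware route.
+ */
+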
+/* Should be called after the VIF struct is updated */
+static int
+mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 irif_index;
+ int err;
+
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return 0;
+
+ /* rve->mr_vif->rif is guaranteed to be valid at this stage */
+ irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
+ irif_index);
+ if (err)
+ return err;
+
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+ /* No need to roll back here because the iRIF change only
+ * takes effect once the action has been updated.
+ */
+ return err;
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP);
+ rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
+/* Should be called after the RIF struct is updated */
+static int
+mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 erif_index = 0;
+ int err;
+
+ /* Update the route action, as the new eVIF can be a tunnel or a pimreg
+ * device, which would require updating the action.
+ */
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action) {
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+ return err;
+ }
+
+ /* Add the eRIF */
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_erif_add(mlxsw_sp,
+ rve->mr_route->route_priv,
+ erif_index);
+ if (err)
+ goto err_route_erif_add;
+ }
+
+ /* Update the minimum MTU */
+ if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
+ err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->min_mtu);
+ if (err)
+ goto err_route_min_mtu_update;
+ }
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+
+err_route_min_mtu_update:
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+ erif_index);
+err_route_erif_add:
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->route_action);
+ return err;
+}
+
+/* Should be called before the RIF struct is updated */
+static void
+mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 rifi;
+
+ /* If the VIF being unresolved was not valid, there is nothing to delete */
+ if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ return;
+
+ /* Update the route action: if there is only one valid eVIF in the
+ * route, set the action to trap, as the VIF deletion will lead to zero
+ * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
+ * determine the route action.
+ */
+ if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
+ route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ else
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+
+ /* Delete the eRIF from the route */
+ rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
+static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
+ int err;
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = rif;
+ mr_vif->vif_flags = vif_flags;
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
+ err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
+ if (err)
+ goto err_irif_unresolve;
+ }
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
+ err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
+ if (err)
+ goto err_erif_unresolve;
+ }
+ return 0;
+
+err_erif_unresolve:
+ list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
+ vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
+err_irif_unresolve:
+ list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
+ vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
+ mr_vif->rif = NULL;
+ return err;
+}
+
+static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = NULL;
+}
+
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return -EINVAL;
+ if (mr_vif->dev)
+ return -EEXIST;
+ return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
+}
+
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return;
+ if (WARN_ON(!mr_vif->dev))
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
+}
+
+static struct mlxsw_sp_mr_vif *
+mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
+ const struct net_device *dev)
+{
+ vifi_t vif_index;
+
+ for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
+ if (mr_table->vifs[vif_index].dev == dev)
+ return &mr_table->vifs[vif_index];
+ return NULL;
+}
+
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return 0;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return 0;
+ return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
+ mr_vif->vif_flags, rif);
+}
+
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
+}
+
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+ /* Search for a VIF that uses that RIF */
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+
+ /* Update all the routes that use that VIF as an eVIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
+ if (mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = mtu;
+ mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ mtu);
+ }
+ }
+}
+
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 vr_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_mr_route_params catchall_route_params = {
+ .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ .key = {
+ .vrid = vr_id,
+ },
+ .value = {
+ .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ }
+ };
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_table *mr_table;
+ int err;
+ int i;
+
+ mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_table)
+ return ERR_PTR(-ENOMEM);
+
+ mr_table->vr_id = vr_id;
+ mr_table->mlxsw_sp = mlxsw_sp;
+ mr_table->proto = proto;
+ INIT_LIST_HEAD(&mr_table->route_list);
+
+ err = rhashtable_init(&mr_table->route_ht,
+ &mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_route_rhashtable_init;
+
+ for (i = 0; i < MAXVIFS; i++) {
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
+ }
+
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_table->catchall_route_priv,
+ &catchall_route_params);
+ if (err)
+ goto err_ops_route_create;
+ list_add_tail(&mr_table->node, &mr->table_list);
+ return mr_table;
+
+err_ops_route_create:
+ rhashtable_destroy(&mr_table->route_ht);
+err_route_rhashtable_init:
+ kfree(mr_table);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
+ list_del(&mr_table->node);
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
+ &mr_table->catchall_route_priv);
+ rhashtable_destroy(&mr_table->route_ht);
+ kfree(mr_table);
+}
+
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp_mr_route *mr_route, *tmp;
+ int i;
+
+ list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+
+ for (i = 0; i < MAXVIFS; i++) {
+ mr_table->vifs[i].dev = NULL;
+ mr_table->vifs[i].rif = NULL;
+ }
+}
+
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
+{
+ int i;
+
+ for (i = 0; i < MAXVIFS; i++)
+ if (mr_table->vifs[i].dev)
+ return false;
+ return list_empty(&mr_table->route_list);
+}
+
+static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u64 packets, bytes;
+
+ if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return;
+
+ mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
+ &bytes);
+
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ if (mr_route->mfc4->mfc_un.res.pkt != packets)
+ mr_route->mfc4->mfc_un.res.lastuse = jiffies;
+ mr_route->mfc4->mfc_un.res.pkt = packets;
+ mr_route->mfc4->mfc_un.res.bytes = bytes;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+{
+ struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
+ stats_update_dw.work);
+ struct mlxsw_sp_mr_table *mr_table;
+ struct mlxsw_sp_mr_route *mr_route;
+ unsigned long interval;
+
+ rtnl_lock();
+ list_for_each_entry(mr_table, &mr->table_list, node)
+ list_for_each_entry(mr_route, &mr_table->route_list, node)
+ mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
+ mr_route);
+ rtnl_unlock();
+
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+}
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops)
+{
+ struct mlxsw_sp_mr *mr;
+ unsigned long interval;
+ int err;
+
+ mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
+ if (!mr)
+ return -ENOMEM;
+ mr->mr_ops = mr_ops;
+ mlxsw_sp->mr = mr;
+ INIT_LIST_HEAD(&mr->table_list);
+
+ err = mr_ops->init(mlxsw_sp, mr->priv);
+ if (err)
+ goto err;
+
+ /* Create the delayed work for counter updates */
+ INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+ return 0;
+err:
+ kfree(mr);
+ return err;
+}
+
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ cancel_delayed_work_sync(&mr->stats_update_dw);
+ mr->mr_ops->fini(mr->priv);
+ kfree(mr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
new file mode 100644
index 000000000000..5d26a122af49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -0,0 +1,134 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_H
+#define _MLXSW_SPECTRUM_MCROUTER_H
+
+#include <linux/mroute.h>
+#include "spectrum_router.h"
+#include "spectrum.h"
+
+enum mlxsw_sp_mr_route_action {
+ MLXSW_SP_MR_ROUTE_ACTION_FORWARD,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
+};
+
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key {
+ int vrid;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr group;
+ union mlxsw_sp_l3addr group_mask;
+ union mlxsw_sp_l3addr source;
+ union mlxsw_sp_l3addr source_mask;
+};
+
+struct mlxsw_sp_mr_route_info {
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 irif_index;
+ u16 *erif_indices;
+ size_t erif_num;
+ u16 min_mtu;
+};
+
+struct mlxsw_sp_mr_route_params {
+ struct mlxsw_sp_mr_route_key key;
+ struct mlxsw_sp_mr_route_info value;
+ enum mlxsw_sp_mr_route_prio prio;
+};
+
+struct mlxsw_sp_mr_ops {
+ int priv_size;
+ int route_priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info);
+ int (*route_stats)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u64 *packets, u64 *bytes);
+ int (*route_action_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action);
+ int (*route_min_mtu_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 min_mtu);
+ int (*route_irif_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 irif_index);
+ int (*route_erif_add)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ int (*route_erif_del)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv);
+ void (*fini)(void *priv);
+};
+
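+/* A backend fills this ops struct; the core allocates ->priv_size bytes
+ * that are handed to init() as @priv, and ->route_priv_size bytes per
+ * route that are handed to route_create() as @route_priv. A minimal
+ * sketch (names hypothetical):
+ *
+ *	static const struct mlxsw_sp_mr_ops my_mr_ops = {
+ *		.priv_size = sizeof(struct my_mr_priv),
+ *		.route_priv_size = sizeof(struct my_mr_route),
+ *		.init = my_mr_init,
+ *		...
+ *	};
+ *
+ * See mlxsw_sp_mr_tcam_ops for the TCAM-based implementation.
+ */
+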
+struct mlxsw_sp_mr;
+struct mlxsw_sp_mr_table;
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops);
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc, bool replace);
+void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc);
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index);
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu);
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto);
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table);
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table);
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
new file mode 100644
index 000000000000..34a0b632e5dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -0,0 +1,839 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/parman.h>
+
+#include "spectrum_mr_tcam.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp_mr_tcam {
+ struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
+};
+
+/* This struct maps to one RIGR2 register entry */
+struct mlxsw_sp_mr_erif_sublist {
+ struct list_head list;
+ u32 rigr2_kvdl_index;
+ int num_erifs;
+ u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
+ bool synced;
+};
+
+struct mlxsw_sp_mr_tcam_erif_list {
+ struct list_head erif_sublists;
+ u32 kvdl_index;
+};
+
+static bool
+mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MC_ERIF_LIST_ENTRIES);
+
+ return erif_sublist->num_erifs == erif_list_entries;
+}
+
+static void
+mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ INIT_LIST_HEAD(&erif_list->erif_sublists);
+}
+
+#define MLXSW_SP_KVDL_RIGR2_SIZE 1
+
+static struct mlxsw_sp_mr_erif_sublist *
+mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ int err;
+
+ erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
+ if (!erif_sublist)
+ return ERR_PTR(-ENOMEM);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
+ &erif_sublist->rigr2_kvdl_index);
+ if (err) {
+ kfree(erif_sublist);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
+ return erif_sublist;
+}
+
+static void
+mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ list_del(&erif_sublist->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ kfree(erif_sublist);
+}
+
+static int
+mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ u16 erif_index)
+{
+ struct mlxsw_sp_mr_erif_sublist *sublist;
+
+ /* If there is no eRIF sublist yet, or the last one is full, allocate a
+ * new one.
+ */
+ if (list_empty(&erif_list->erif_sublists)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ erif_list->kvdl_index = sublist->rigr2_kvdl_index;
+ } else {
+ sublist = list_last_entry(&erif_list->erif_sublists,
+ struct mlxsw_sp_mr_erif_sublist,
+ list);
+ sublist->synced = false;
+ if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
+ erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ }
+ }
+
+ /* Append the eRIF at the last sublist's next free index */
+ sublist->erif_indices[sublist->num_erifs++] = erif_index;
+ return 0;
+}
+
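+/* For example (entry count hypothetical), with MC_ERIF_LIST_ENTRIES of 4,
+ * adding six eRIFs produces two chained sublists:
+ *
+ *	[e0 e1 e2 e3] -> [e4 e5]
+ *
+ * Each sublist occupies one RIGR2 register entry, and the first
+ * sublist's KVDL index is the one programmed into the route's action.
+ */
+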
+static void
+mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
+
+ list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
+ list)
+ mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
+}
+
+static int
+mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *curr_sublist;
+ char rigr2_pl[MLXSW_REG_RIGR2_LEN];
+ int err;
+ int i;
+
+ list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
+ if (curr_sublist->synced)
+ continue;
+
+ /* Pack the next sublist's index, unless this sublist is the
+ * last one.
+ */
+ if (list_is_last(&curr_sublist->list,
+ &erif_list->erif_sublists)) {
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ false, 0);
+ } else {
+ struct mlxsw_sp_mr_erif_sublist *next_sublist;
+
+ next_sublist = list_next_entry(curr_sublist, list);
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ true,
+ next_sublist->rigr2_kvdl_index);
+ }
+
+ /* Pack all the eRIFs */
+ for (i = 0; i < curr_sublist->num_erifs; i++) {
+ u16 erif_index = curr_sublist->erif_indices[i];
+
+ mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
+ erif_index);
+ }
+
+ /* Write the entry */
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
+ rigr2_pl);
+ if (err)
+ /* No need for a rollback here because nothing
+ * should be pointing at this hardware entry yet.
+ */
+ return err;
+ curr_sublist->synced = true;
+ }
+ return 0;
+}
+
+static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
+ struct mlxsw_sp_mr_tcam_erif_list *from)
+{
+ list_splice(&from->erif_sublists, &to->erif_sublists);
+ to->kvdl_index = from->kvdl_index;
+}
+
+struct mlxsw_sp_mr_tcam_route {
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ u32 counter_index;
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+ enum mlxsw_sp_mr_route_action action;
+ struct mlxsw_sp_mr_route_key key;
+ u16 irif_index;
+ u16 min_mtu;
+};
+
+static struct mlxsw_afa_block *
+mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_mr_route_action route_action,
+ u16 irif_index, u32 counter_index,
+ u16 min_mtu,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ if (err)
+ goto err;
+
+ switch (route_action) {
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
+ err = mlxsw_afa_block_append_trap(afa_block,
+ MLXSW_TRAP_ID_ACL1);
+ if (err)
+ goto err;
+ break;
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
+ case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
+ /* If we are about to append a multicast router action, commit
+ * the erif_list.
+ */
+ err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
+ if (err)
+ goto err;
+
+ err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
+ min_mtu, false,
+ erif_list->kvdl_index);
+ if (err)
+ goto err;
+
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
+ err = mlxsw_afa_block_append_trap_and_forward(afa_block,
+ MLXSW_TRAP_ID_ACL2);
+ if (err)
+ goto err;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mlxsw_afa_block_commit(afa_block);
+ if (err)
+ goto err;
+ return afa_block;
+err:
+ mlxsw_afa_block_destroy(afa_block);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
+{
+ mlxsw_afa_block_destroy(afa_block);
+}
+
+static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
+ struct parman_item *parman_item)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
+ 0, 0, 0, 0, 0, 0, NULL);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int
+mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < route_info->erif_num; i++) {
+ u16 erif_index = route_info->erif_indices[i];
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
+ erif_index);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
+ struct mlxsw_sp_mr_tcam_route *route,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct parman_prio *parman_prio = NULL;
+ int err;
+
+ switch (route->key.proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
+ err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
+ parman_prio, &route->parman_item);
+ if (err)
+ return err;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ route->parman_prio = parman_prio;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
+ struct mlxsw_sp_mr_tcam_route *route)
+{
+ switch (route->key.proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
+ route->parman_prio, &route->parman_item);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static int
+mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ int err;
+
+ route->key = route_params->key;
+ route->irif_index = route_params->value.irif_index;
+ route->min_mtu = route_params->value.min_mtu;
+ route->action = route_params->value.route_action;
+
+ /* Create the egress RIFs list */
+ mlxsw_sp_mr_erif_list_init(&route->erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
+ &route_params->value);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flow counter */
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ /* Create the flexible action block */
+ route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(route->afa_block)) {
+ err = PTR_ERR(route->afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Allocate a slot in the TCAM */
+ err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
+ route_params->prio);
+ if (err)
+ goto err_parman_item_add;
+
+ /* Write the route to the TCAM */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, route->afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+err_parman_item_add:
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+err_afa_block_create:
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+err_erif_populate:
+err_counter_alloc:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ return err;
+}
+
+static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
+ void *priv, void *route_priv)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
+ &route->parman_item);
+ mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+}
+
+static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u64 *packets,
+ u64 *bytes)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
+ packets, bytes);
+}
+
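+/* The updates below follow a make-before-break pattern: a new flexible
+ * action block is created and committed, the TCAM entry is rewritten to
+ * point at it, and only then is the old block destroyed, so traffic
+ * never hits a half-updated rule.
+ */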
+static int
+mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->action = route_action;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 min_mtu)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->min_mtu = min_mtu;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 irif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return -EINVAL;
+ route->irif_index = irif_index;
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ int err;
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
+ erif_index);
+ if (err)
+ return err;
+
+ /* Commit the erif_list only if the route action is not TRAP */
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
+ &route->erif_list);
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+ int i;
+
+ /* Create a copy of the original erif_list without the deleted entry */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
+ for (i = 0; i < erif_sublist->num_erifs; i++) {
+ u16 curr_erif = erif_sublist->erif_indices[i];
+
+ if (curr_erif == erif_index)
+ continue;
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
+ curr_erif);
+ if (err)
+ goto err_erif_list_add;
+ }
+ }
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_list_add:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new erif_list */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route_info->route_action,
+ route_info->irif_index,
+ route->counter_index,
+ route_info->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ route->action = route_info->route_action;
+ route->irif_index = route_info->irif_index;
+ route->min_mtu = route_info->min_mtu;
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_populate:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
+#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
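+/* The region is managed as a priority-sorted linear TCAM (lsort): lower
+ * parman priorities sit at lower indices and thus match first, so (S,G)
+ * routes take precedence over (*,G) routes, which in turn take
+ * precedence over the per-VRF catch-all that traps unmatched packets.
+ */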
+static int
+mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &mr_tcam->ipv4_tcam_region,
+ MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
+}
+
+static void mlxsw_sp_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
+}
+
+const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp_mr_tcam),
+ .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
+ .init = mlxsw_sp_mr_tcam_init,
+ .route_create = mlxsw_sp_mr_tcam_route_create,
+ .route_update = mlxsw_sp_mr_tcam_route_update,
+ .route_stats = mlxsw_sp_mr_tcam_route_stats,
+ .route_action_update = mlxsw_sp_mr_tcam_route_action_update,
+ .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
+ .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
+ .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
+ .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
+ .route_destroy = mlxsw_sp_mr_tcam_route_destroy,
+ .fini = mlxsw_sp_mr_tcam_fini,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
new file mode 100644
index 000000000000..f9b59ee25406
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
new file mode 100644
index 000000000000..c33beac5def0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -0,0 +1,276 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/pkt_cls.h>
+#include <net/red.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static int
+mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num, u32 min, u32 max,
+ u32 probability, bool is_ecn)
+{
+ char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
+
+ mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
+ mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
+ roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
+ roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
+ probability);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
+ if (err)
+ return err;
+
+ mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+}
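
[Editorial note] mlxsw_sp_tclass_congestion_enable() above sizes one scratch buffer with max_t() so the same array can carry either the CWTP or the CWTPM payload. A minimal standalone sketch of that idiom, with illustrative sizes (not the real register lengths):

#include <string.h>

#define REG_CWTP_LEN  32 /* illustrative sizes only */
#define REG_CWTPM_LEN 24
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	char payload[MAX(REG_CWTP_LEN, REG_CWTPM_LEN)];

	memset(payload, 0, REG_CWTP_LEN);  /* first write: CWTP payload */
	memset(payload, 0, REG_CWTPM_LEN); /* buffer reused for CWTPM */
	return 0;
}
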
+
+static int
+mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num)
+{
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+
+ mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
+ mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+
+ switch (mlxsw_sp_qdisc->type) {
+ case MLXSW_SP_QDISC_RED:
+ xstats_base->prob_mark = xstats->ecn;
+ xstats_base->prob_drop = xstats->wred_drop[tclass_num];
+ xstats_base->pdrop = xstats->tail_drop[tclass_num];
+
+ mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
+ xstats_base->prob_mark;
+ mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
+ xstats_base->pdrop;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->handle != handle)
+ return 0;
+
+ err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
+
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num,
+ struct tc_red_qopt_offload_params *p)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 min, max;
+ u64 prob;
+ int err = 0;
+
+ if (p->min > p->max) {
+ dev_err(mlxsw_sp->bus_info->dev,
+			"spectrum: RED: min %u is bigger than max %u\n", p->min,
+ p->max);
+ goto err_bad_param;
+ }
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: max value %u is too big\n", p->max);
+ goto err_bad_param;
+ }
+ if (p->min == 0 || p->max == 0) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: 0 value is illegal for min and max\n");
+ goto err_bad_param;
+ }
+
+	/* p->probability is a fixed-point fraction scaled by 2^32; convert it
+	 * to a percentage. The division by 2^32 is split into two 1 << 16
+	 * steps because a 1 << 32 shift would overflow a 32-bit constant.
+	 */
+ prob = p->probability;
+ prob *= 100;
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
+ max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
+ err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+ if (err)
+ goto err_config;
+
+ mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
+ if (mlxsw_sp_qdisc->handle != handle)
+ mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ tclass_num);
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+ err = -EINVAL;
+err_config:
+ mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
+ mlxsw_sp_qdisc, tclass_num);
+ return err;
+}
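
[Editorial note] The conversion above treats p->probability as a fraction scaled by 2^32 (the RED max_P encoding). A standalone sketch of the arithmetic under that assumption; the DIV_ROUND_UP macro mirrors the kernel's:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t prob = 1ULL << 31;		/* 0.5 scaled by 2^32 */

	prob *= 100;				/* to percent, still scaled */
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);	/* two 2^16 steps == one 2^32 */
	printf("%llu%%\n", (unsigned long long)prob);	/* prints 50% */
}
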
+
+static int
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num, struct red_stats *res)
+{
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+
+ if (mlxsw_sp_qdisc->handle != handle ||
+ mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
+ return -EOPNOTSUPP;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+
+ res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ res->prob_mark = xstats->ecn - xstats_base->prob_mark;
+ res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num,
+ struct tc_red_qopt_offload_stats *res)
+{
+ u64 tx_bytes, tx_packets, overlimits, drops;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+
+ if (mlxsw_sp_qdisc->handle != handle ||
+ mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
+ return -EOPNOTSUPP;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+
+ tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
+ tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
+ mlxsw_sp_qdisc->overlimits;
+ drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
+ mlxsw_sp_qdisc->drops;
+
+ _bstats_update(res->bstats, tx_bytes, tx_packets);
+ res->qstats->overlimits += overlimits;
+ res->qstats->drops += drops;
+ res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ xstats->backlog[tclass_num]);
+
+ mlxsw_sp_qdisc->drops += drops;
+ mlxsw_sp_qdisc->overlimits += overlimits;
+ mlxsw_sp_qdisc->tx_bytes += tx_bytes;
+ mlxsw_sp_qdisc->tx_packets += tx_packets;
+ return 0;
+}
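
[Editorial note] Both stats paths report deltas against a per-qdisc snapshot: mlxsw_sp_setup_tc_qdisc_clean_stats() records the current hardware counters, and later dumps add only what accumulated since. A minimal standalone sketch of that pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* 'last_seen' plays the role of mlxsw_sp_qdisc->drops: it starts at the
 * snapshot taken when the qdisc is attached and advances to the latest HW
 * reading on each dump, so only new events reach the cumulative counters.
 */
static uint64_t report_delta(uint64_t *last_seen, uint64_t hw_now)
{
	uint64_t delta = hw_now - *last_seen;

	*last_seen = hw_now;
	return delta;
}

int main(void)
{
	uint64_t last_seen = 1000;	/* HW counter at attach time */

	printf("%llu\n", (unsigned long long)report_delta(&last_seen, 1100)); /* 100 */
	printf("%llu\n", (unsigned long long)report_delta(&last_seen, 1150)); /* 50 */
}
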
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int tclass_num;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
+ tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ switch (p->command) {
+ case TC_RED_REPLACE:
+ return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ &p->set);
+ case TC_RED_DESTROY:
+ return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num);
+ case TC_RED_XSTATS:
+ return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ p->xstats);
+ case TC_RED_STATS:
+ return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 5189022a1c8c..632c7b229054 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -46,6 +46,8 @@
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
+#include <linux/gcd.h>
+#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -65,6 +67,8 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
+#include "spectrum_mr.h"
+#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
struct mlxsw_sp_vr;
@@ -78,6 +82,7 @@ struct mlxsw_sp_router {
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
struct rhashtable nexthop_ht;
+ struct list_head nexthop_list;
struct {
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
@@ -92,6 +97,7 @@ struct mlxsw_sp_router {
struct list_head ipip_list;
bool aborted;
struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
@@ -458,6 +464,7 @@ struct mlxsw_sp_vr {
unsigned int rif_count;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
+ struct mlxsw_sp_mr_table *mr4_table;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -652,7 +659,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
- return !!vr->fib4 || !!vr->fib6;
+ return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -692,8 +699,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
- /* For our purpose, squash main and local table into one */
- if (tb_id == RT_TABLE_LOCAL)
+ /* For our purpose, squash main, default and local tables into one */
+ if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
tb_id = RT_TABLE_MAIN;
return tb_id;
}
@@ -727,14 +734,17 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id)
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
- if (!vr)
+ if (!vr) {
+ NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
+ }
vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
@@ -743,9 +753,18 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
}
+ vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->mr4_table)) {
+ err = PTR_ERR(vr->mr4_table);
+ goto err_mr_table_create;
+ }
vr->tb_id = tb_id;
return vr;
+err_mr_table_create:
+ mlxsw_sp_fib_destroy(vr->fib6);
+ vr->fib6 = NULL;
err_fib6_create:
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
@@ -754,27 +773,31 @@ err_fib6_create:
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
+ mlxsw_sp_mr_table_destroy(vr->mr4_table);
+ vr->mr4_table = NULL;
mlxsw_sp_fib_destroy(vr->fib6);
vr->fib6 = NULL;
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
}
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_vr *vr;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
if (!vr)
- vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
return vr;
}
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
- list_empty(&vr->fib6->node_list))
+ list_empty(&vr->fib6->node_list) &&
+ mlxsw_sp_mr_table_empty(vr->mr4_table))
mlxsw_sp_vr_destroy(vr);
}
@@ -920,7 +943,7 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
return __dev_get_by_index(net, tun->parms.link);
}
-static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
@@ -932,12 +955,14 @@ static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif_params *params);
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack);
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_ipip_type ipipt,
- struct net_device *ol_dev)
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_params_ipip_lb lb_params;
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -950,7 +975,7 @@ mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
};
- rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
if (IS_ERR(rif))
return ERR_CAST(rif);
return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
@@ -969,7 +994,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
- ol_dev);
+ ol_dev, NULL);
if (IS_ERR(ipip_entry->ol_lb)) {
ret = ERR_CAST(ipip_entry->ol_lb);
goto err_ol_ipip_lb_create;
@@ -977,6 +1002,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
return ipip_entry;
@@ -986,72 +1012,12 @@ err_ol_ipip_lb_create:
}
static void
-mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
- WARN_ON(ipip_entry->ref_count > 0);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
-static __be32
-mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
-
- return tun->parms.iph.saddr;
-}
-
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
-{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- };
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
-
- return tun->parms.iph.daddr;
-}
-
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
-{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- };
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
- const union mlxsw_sp_l3addr *addr2)
-{
- return !memcmp(addr1, addr2, sizeof(*addr1));
-}
-
static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
const enum mlxsw_sp_l3proto ul_proto,
@@ -1184,60 +1150,28 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_ipip_entry *
-mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_ipip_type ipipt,
- struct net_device *ol_dev)
+mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt,
+ struct net_device *ol_dev)
{
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
- struct mlxsw_sp_router *router = mlxsw_sp->router;
- struct mlxsw_sp_fib_entry *decap_fib_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
- enum mlxsw_sp_l3proto ul_proto;
- union mlxsw_sp_l3addr saddr;
-
- list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
- ipip_list_node) {
- if (ipip_entry->ol_dev == ol_dev)
- goto inc_ref_count;
-
- /* The configuration where several tunnels have the same local
- * address in the same underlay table needs special treatment in
- * the HW. That is currently not implemented in the driver.
- */
- ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
- if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
- ul_tb_id, ipip_entry))
- return ERR_PTR(-EEXIST);
- }
ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
if (IS_ERR(ipip_entry))
return ipip_entry;
- decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
- if (decap_fib_entry)
- mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
- decap_fib_entry);
-
list_add_tail(&ipip_entry->ipip_list_node,
&mlxsw_sp->router->ipip_list);
-inc_ref_count:
- ++ipip_entry->ref_count;
return ipip_entry;
}
static void
-mlxsw_sp_ipip_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
- if (--ipip_entry->ref_count == 0) {
- list_del(&ipip_entry->ipip_list_node);
- if (ipip_entry->decap_fib_entry)
- mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
- mlxsw_sp_ipip_entry_destroy(ipip_entry);
- }
+ list_del(&ipip_entry->ipip_list_node);
+ mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}
static bool
@@ -1279,6 +1213,455 @@ mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev,
+ enum mlxsw_sp_ipip_type *p_type)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ enum mlxsw_sp_ipip_type ipipt;
+
+ for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
+ ipip_ops = router->ipip_ops_arr[ipipt];
+ if (dev->type == ipip_ops->dev_type) {
+ if (p_type)
+ *p_type = ipipt;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node)
+ if (ipip_entry->ol_dev == ol_dev)
+ return ipip_entry;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ul_dev,
+ struct mlxsw_sp_ipip_entry *start)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
+ ipip_list_node);
+ list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ipip_ul_dev =
+ __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+
+ if (ipip_ul_dev == ul_dev)
+ return ipip_entry;
+ }
+
+ return NULL;
+}
+
+bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+}
+
+static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ops
+ = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* For deciding whether decap should be offloaded, we don't care about
+ * overlay protocol, so ask whether either one is supported.
+ */
+ return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
+ ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ enum mlxsw_sp_l3proto ul_proto;
+ enum mlxsw_sp_ipip_type ipipt;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
+
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
+ if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ NULL)) {
+ ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
+ ol_dev);
+ if (IS_ERR(ipip_entry))
+ return PTR_ERR(ipip_entry);
+ }
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct mlxsw_sp_fib_entry *decap_fib_entry;
+
+ decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
+ if (decap_fib_entry)
+ mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
+ decap_fib_entry);
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+static int
+mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool keep_encap,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
+ struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
+
+ new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
+ ipip_entry->ipipt,
+ ipip_entry->ol_dev,
+ extack);
+ if (IS_ERR(new_lb_rif))
+ return PTR_ERR(new_lb_rif);
+ ipip_entry->ol_lb = new_lb_rif;
+
+ if (keep_encap) {
+ list_splice_init(&old_lb_rif->common.nexthop_list,
+ &new_lb_rif->common.nexthop_list);
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
+ }
+
+ mlxsw_sp_rif_destroy(&old_lb_rif->common);
+
+ return 0;
+}
+
+/**
+ * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload related to an
+ * IPIP entry. This always updates decap, and in addition to that it also:
+ * @recreate_loopback: recreates the associated loopback RIF
+ * @keep_encap: updates next hops that use the tunnel netdevice. This is only
+ * relevant when recreate_loopback is true.
+ * @update_nexthops: updates next hops, keeping the current loopback RIF. This
+ * is only relevant when recreate_loopback is false.
+ */
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* RIFs can't be edited, so to update loopback, we need to destroy and
+ * recreate it. That creates a window of opportunity where RALUE and
+ * RATR registers end up referencing a RIF that's already gone. RATRs
+ * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
+ * of RALUE, demote the decap route back.
+ */
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+
+ if (recreate_loopback) {
+ err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
+ keep_encap, extack);
+ if (err)
+ return err;
+ } else if (update_nexthops) {
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp,
+ &ipip_entry->ol_lb->common);
+ }
+
+ if (ipip_entry->ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry =
+ mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+
+ if (!ipip_entry)
+ return 0;
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, false, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ struct netlink_ext_ack *extack)
+{
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ /* A down underlay device causes encapsulated packets to not be
+ * forwarded, but decap still works. So refresh next hops without
+ * touching anything else.
+ */
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ int err;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!ipip_entry)
+ /* A change might make a tunnel eligible for offloading, but
+ * that is currently not implemented. What falls to slow path
+ * stays there.
+ */
+ return 0;
+
+ /* A change might make a tunnel not eligible for offloading. */
+ if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
+ ipip_entry->ipipt)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
+ return err;
+}
+
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+
+ if (ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+/* The configuration where several tunnels have the same local address in the
+ * same underlay table needs special treatment in the HW. That is currently not
+ * implemented in the driver. This function finds and demotes the first tunnel
+ * with a given source address, except the one passed in the argument
+ * `except'.
+ */
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ if (ipip_entry != except &&
+ mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
+ ul_tb_id, ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ipip_ul_dev =
+ __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+
+ if (ipip_ul_dev == ul_dev)
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ }
+}
+
+int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ case NETDEV_UNREGISTER:
+ mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_UP:
+ mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_DOWN:
+ mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
+ ol_dev,
+ extack);
+ return 0;
+ case NETDEV_CHANGE:
+ extack = info->extack;
+ return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
+ }
+ return 0;
+}
+
+static int
+__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev,
+ extack);
+ break;
+
+ case NETDEV_UP:
+ return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
+ ul_dev);
+ case NETDEV_DOWN:
+ return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev);
+ }
+ return 0;
+}
+
+int
+mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ int err;
+
+ while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
+ ul_dev,
+ ipip_entry))) {
+ err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
+ ul_dev, event, info);
+ if (err) {
+ mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
+ ul_dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
struct mlxsw_sp_neigh_key {
struct neighbour *n;
};
@@ -1316,7 +1699,7 @@ mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
typeof(*neigh_entry),
rif_list_node);
}
- if (neigh_entry->rif_list_node.next == &rif->neigh_list)
+ if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
return NULL;
return list_next_entry(neigh_entry, rif_list_node);
}
@@ -1664,7 +2047,7 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
rauhtd_pl);
if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
break;
}
num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
@@ -1857,7 +2240,7 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}
-struct mlxsw_sp_neigh_event_work {
+struct mlxsw_sp_netevent_work {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
struct neighbour *n;
@@ -1865,11 +2248,11 @@ struct mlxsw_sp_neigh_event_work {
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
- struct mlxsw_sp_neigh_event_work *neigh_work =
- container_of(work, struct mlxsw_sp_neigh_event_work, work);
- struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct neighbour *n = neigh_work->n;
+ struct neighbour *n = net_work->n;
unsigned char ha[ETH_ALEN];
bool entry_connected;
u8 nud_state, dead;
@@ -1905,18 +2288,32 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
out:
rtnl_unlock();
neigh_release(n);
- kfree(neigh_work);
+ kfree(net_work);
}
-int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ mlxsw_sp_mp_hash_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- struct mlxsw_sp_neigh_event_work *neigh_work;
+ struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
+ struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -1950,24 +2347,39 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
if (!mlxsw_sp_port)
return NOTIFY_DONE;
- neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
- if (!neigh_work) {
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work) {
mlxsw_sp_port_dev_put(mlxsw_sp_port);
return NOTIFY_BAD;
}
- INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
- neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- neigh_work->n = n;
+ INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
+ net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ net_work->n = n;
/* Take a reference to ensure the neighbour won't be
* destructed until we drop the reference in delayed
* work.
*/
neigh_clone(n);
- mlxsw_core_schedule_work(&neigh_work->work);
+ mlxsw_core_schedule_work(&net_work->work);
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
+ case NETEVENT_MULTIPATH_HASH_UPDATE:
+ net = ptr;
+
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ break;
}
return NOTIFY_DONE;
@@ -2004,16 +2416,25 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
+static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif *rif)
+{
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+ mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
+ rif->rif_index, rif->addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
- rif_list_node) {
- mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
+ rif_list_node)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
- }
}
enum mlxsw_sp_nexthop_type {
@@ -2028,6 +2449,7 @@ struct mlxsw_sp_nexthop_key {
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head rif_list_node;
+ struct list_head router_list_node;
struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
* this belongs to
*/
@@ -2035,6 +2457,9 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_nexthop_key key;
unsigned char gw_addr[sizeof(struct in6_addr)];
int ifindex;
+ int nh_weight;
+ int norm_nh_weight;
+ int num_adj_entries;
struct mlxsw_sp_rif *rif;
u8 should_offload:1, /* set indicates this neigh is connected and
* should be put to KVD linear area of this group.
@@ -2050,6 +2475,8 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
};
+ unsigned int counter_index;
+ bool counter_valid;
};
struct mlxsw_sp_nexthop_group {
@@ -2062,10 +2489,118 @@ struct mlxsw_sp_nexthop_group {
u32 adj_index;
u16 ecmp_size;
u16 count;
+ int sum_norm_weight;
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
+ return;
+
+ if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
+ return;
+
+ nh->counter_valid = true;
+}
+
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->counter_valid)
+ return;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
+ nh->counter_valid = false;
+}
+
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter)
+{
+ if (!nh->counter_valid)
+ return -EINVAL;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
+ p_counter, NULL);
+}
+
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh) {
+ if (list_empty(&router->nexthop_list))
+ return NULL;
+ else
+ return list_first_entry(&router->nexthop_list,
+ typeof(*nh), router_list_node);
+ }
+ if (list_is_last(&nh->router_list_node, &router->nexthop_list))
+ return NULL;
+ return list_next_entry(nh, router_list_node);
+}
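
[Editorial note] mlxsw_sp_nexthop_next() is a cursor-style iterator: a NULL cursor returns the first nexthop, and the last nexthop returns NULL. A standalone sketch of the same contract over a plain linked list (illustrative types, not driver code):

#include <stddef.h>
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* NULL cursor yields the first node; the last node yields NULL. */
static struct node *node_next(struct node *head, struct node *cur)
{
	return cur ? cur->next : head;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *cur = NULL;

	while ((cur = node_next(&a, cur)))
		printf("%d\n", cur->val);	/* prints 1, 2, 3 */
}
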
+
+bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
+{
+ return nh->offloaded;
+}
+
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->offloaded)
+ return NULL;
+ return nh->neigh_entry->ha;
+}
+
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ u32 adj_hash_index = 0;
+ int i;
+
+ if (!nh->offloaded || !nh_grp->adj_index_valid)
+ return -EINVAL;
+
+ *p_adj_index = nh_grp->adj_index;
+ *p_adj_size = nh_grp->ecmp_size;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+
+ if (nh_iter == nh)
+ break;
+ if (nh_iter->offloaded)
+ adj_hash_index += nh_iter->num_adj_entries;
+ }
+
+ *p_adj_hash_index = adj_hash_index;
+ return 0;
+}
+
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
+{
+ return nh->rif;
+}
+
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ int i;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+
+ if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
+ return true;
+ }
+ return false;
+}
+
static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
@@ -2323,8 +2858,8 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
char ratr_pl[MLXSW_REG_RATR_LEN];
@@ -2333,12 +2868,33 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
true, MLXSW_REG_RATR_TYPE_ETHERNET,
adj_index, neigh_entry->rif);
mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ if (nh->counter_valid)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ else
+ mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
+
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
-static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
- u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -2346,6 +2902,24 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
}
+static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
+ nh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
@@ -2367,7 +2941,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
- err = mlxsw_sp_nexthop_mac_update
+ err = mlxsw_sp_nexthop_update
(mlxsw_sp, adj_index, nh);
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
@@ -2380,7 +2954,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
nh->update = 0;
nh->offloaded = 1;
}
- adj_index++;
+ adj_index += nh->num_adj_entries;
}
return 0;
}
@@ -2425,17 +2999,118 @@ mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
}
}
+static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
+{
+ /* Valid sizes for an adjacency group are:
+ * 1-64, 512, 1024, 2048 and 4096.
+ */
+ if (*p_adj_grp_size <= 64)
+ return;
+ else if (*p_adj_grp_size <= 512)
+ *p_adj_grp_size = 512;
+ else if (*p_adj_grp_size <= 1024)
+ *p_adj_grp_size = 1024;
+ else if (*p_adj_grp_size <= 2048)
+ *p_adj_grp_size = 2048;
+ else
+ *p_adj_grp_size = 4096;
+}
+
+static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
+ unsigned int alloc_size)
+{
+ if (alloc_size >= 4096)
+ *p_adj_grp_size = 4096;
+ else if (alloc_size >= 2048)
+ *p_adj_grp_size = 2048;
+ else if (alloc_size >= 1024)
+ *p_adj_grp_size = 1024;
+ else if (alloc_size >= 512)
+ *p_adj_grp_size = 512;
+}
+
+static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
+{
+ unsigned int alloc_size;
+ int err;
+
+ /* Round up the requested group size to the next size supported
+ * by the device and make sure the request can be satisfied.
+ */
+ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
+ err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
+ &alloc_size);
+ if (err)
+ return err;
+ /* It is possible the allocation results in more allocated
+	 * entries than requested. Try to use as many of them as
+ * possible.
+ */
+ mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
+
+ return 0;
+}
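
[Editorial note] A standalone sketch of the two-step sizing above, assuming the same supported group sizes (1-64, 512, 1024, 2048, 4096) and a hypothetical allocator result:

#include <stdio.h>

static unsigned int round_up_grp(unsigned int n)
{
	if (n <= 64)
		return n;
	if (n <= 512)
		return 512;
	if (n <= 1024)
		return 1024;
	if (n <= 2048)
		return 2048;
	return 4096;
}

/* Use the largest supported size the actual allocation can hold. */
static unsigned int round_down_grp(unsigned int size, unsigned int alloc)
{
	if (alloc >= 4096)
		return 4096;
	if (alloc >= 2048)
		return 2048;
	if (alloc >= 1024)
		return 1024;
	if (alloc >= 512)
		return 512;
	return size;
}

int main(void)
{
	unsigned int size = round_up_grp(120);	/* 120 -> 512 */

	/* Pretend the KVDL allocator handed back 1024 entries. */
	printf("%u\n", round_down_grp(size, 1024));	/* uses all 1024 */
}
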
+
+static void
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i, g = 0, sum_norm_weight = 0;
+ struct mlxsw_sp_nexthop *nh;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ if (g > 0)
+ g = gcd(nh->nh_weight, g);
+ else
+ g = nh->nh_weight;
+ }
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ nh->norm_nh_weight = nh->nh_weight / g;
+ sum_norm_weight += nh->norm_nh_weight;
+ }
+
+ nh_grp->sum_norm_weight = sum_norm_weight;
+}
+
+static void
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int total = nh_grp->sum_norm_weight;
+ u16 ecmp_size = nh_grp->ecmp_size;
+ int i, weight = 0, lower_bound = 0;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ int upper_bound;
+
+ if (!nh->should_offload)
+ continue;
+ weight += nh->norm_nh_weight;
+ upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
+ nh->num_adj_entries = upper_bound - lower_bound;
+ lower_bound = upper_bound;
+ }
+}
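
[Editorial note] Putting normalize and rebalance together: weights are first reduced by their GCD, then adjacency entries are split proportionally using the running DIV_ROUND_CLOSEST upper bounds. A standalone worked example, assuming weights 20/30/50:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int gcd(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	int weight[] = { 20, 30, 50 };	/* nexthop weights from the FIB */
	int n = 3, g = 0, sum = 0, i;
	int norm[3];

	for (i = 0; i < n; i++)
		g = g ? gcd(weight[i], g) : weight[i];	/* g = 10 */

	for (i = 0; i < n; i++) {
		norm[i] = weight[i] / g;	/* 2, 3, 5 */
		sum += norm[i];			/* sum_norm_weight = 10 */
	}

	/* sum <= 64, so ecmp_size stays 10; split entries by weight. */
	int ecmp_size = sum, lower = 0, acc = 0;

	for (i = 0; i < n; i++) {
		acc += norm[i];
		int upper = DIV_ROUND_CLOSEST(ecmp_size * acc, sum);

		printf("nh%d: %d adjacency entries\n", i, upper - lower);
		lower = upper;	/* prints 2, 3, 5 */
	}
}
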
+
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ u16 ecmp_size, old_ecmp_size;
struct mlxsw_sp_nexthop *nh;
bool offload_change = false;
u32 adj_index;
- u16 ecmp_size = 0;
bool old_adj_index_valid;
u32 old_adj_index;
- u16 old_ecmp_size;
int i;
int err;
@@ -2452,8 +3127,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
if (nh->should_offload)
nh->update = 1;
}
- if (nh->should_offload)
- ecmp_size++;
}
if (!offload_change) {
/* Nothing was added or removed, so no need to reallocate. Just
@@ -2466,12 +3139,19 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
}
return;
}
- if (!ecmp_size)
+ mlxsw_sp_nexthop_group_normalize(nh_grp);
+ if (!nh_grp->sum_norm_weight)
/* No neigh of this group is connected so we just set
 		 * the trap and let everything flow through kernel.
*/
goto set_trap;
+ ecmp_size = nh_grp->sum_norm_weight;
+ err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
+ if (err)
+ /* No valid allocation size available. */
+ goto set_trap;
+
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
@@ -2486,6 +3166,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nh_grp);
err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
@@ -2655,38 +3336,28 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
neigh_release(n);
}
-static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev,
- enum mlxsw_sp_ipip_type *p_type)
+static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
- const struct mlxsw_sp_ipip_ops *ipip_ops;
- enum mlxsw_sp_ipip_type ipipt;
+ struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
- for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
- ipip_ops = router->ipip_ops_arr[ipipt];
- if (dev->type == ipip_ops->dev_type) {
- if (p_type)
- *p_type = ipipt;
- return true;
- }
- }
- return false;
+ return ul_dev ? (ul_dev->flags & IFF_UP) : true;
}
static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_ipip_type ipipt,
struct mlxsw_sp_nexthop *nh,
struct net_device *ol_dev)
{
+ bool removing;
+
if (!nh->nh_grp->gateway || nh->ipip_entry)
return 0;
- nh->ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev);
- if (IS_ERR(nh->ipip_entry))
- return PTR_ERR(nh->ipip_entry);
+ nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!nh->ipip_entry)
+ return -ENOENT;
- __mlxsw_sp_nexthop_neigh_update(nh, false);
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
return 0;
}
@@ -2699,7 +3370,6 @@ static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
return;
__mlxsw_sp_nexthop_neigh_update(nh, true);
- mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry);
nh->ipip_entry = NULL;
}
@@ -2743,7 +3413,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
if (err)
return err;
mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
@@ -2784,11 +3454,19 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
nh->nh_grp = nh_grp;
nh->key.fib_nh = fib_nh;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ nh->nh_weight = fib_nh->nh_weight;
+#else
+ nh->nh_weight = 1;
+#endif
memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+
if (!dev)
return 0;
@@ -2812,6 +3490,8 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
@@ -2841,6 +3521,30 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool removing;
+
+ list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ removing = false;
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
+ break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -3121,7 +3825,7 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
return;
if (mlxsw_sp_fib_entry_should_offload(fib_entry))
mlxsw_sp_fib_entry_offload_set(fib_entry);
- else if (!mlxsw_sp_fib_entry_should_offload(fib_entry))
+ else
mlxsw_sp_fib_entry_offload_unset(fib_entry);
return;
default:
@@ -3576,7 +4280,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
if (IS_ERR(vr))
return ERR_CAST(vr);
fib = mlxsw_sp_vr_fib(vr, proto);
@@ -4000,7 +4704,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV6)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
if (err)
return err;
mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
@@ -4038,7 +4742,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
+ nh->nh_weight = 1;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!dev)
return 0;
@@ -4051,6 +4759,8 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
@@ -4601,6 +5311,75 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info,
+ bool replace)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
+}
+
+static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
+ mlxsw_sp_vr_put(vr);
+}
+
+static int
+mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
+ return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
+ ven_info->vif_index,
+ ven_info->vif_flags, rif);
+}
+
+static void
+mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
+ mlxsw_sp_vr_put(vr);
+}
+
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
@@ -4611,6 +5390,10 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
+	/* The multicast router code does not need an abort trap as, by default,
+ * packets that don't match any routes are trapped to the CPU.
+ */
+
proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
@@ -4692,6 +5475,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp_vr_is_used(vr))
continue;
+
+ mlxsw_sp_mr_table_flush(vr->mr4_table);
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no
@@ -4724,6 +5509,8 @@ struct mlxsw_sp_fib_event_work {
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
+ struct mfc_entry_notifier_info men_info;
+ struct vif_entry_notifier_info ven_info;
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
@@ -4734,7 +5521,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- struct fib_rule *rule;
bool replace, append;
int err;
@@ -4756,12 +5542,11 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- rule = fib_work->fr_info.rule;
- if (!fib4_rule_default(rule) && !rule->l3mdev)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_rule_put(rule);
+ case FIB_EVENT_RULE_ADD:
+		/* If we get here, a rule was added that we do not support,
+		 * so just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
@@ -4779,7 +5564,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- struct fib_rule *rule;
bool replace;
int err;
@@ -4798,12 +5582,58 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- rule = fib_work->fr_info.rule;
- if (!fib6_rule_default(rule) && !rule->l3mdev)
+ case FIB_EVENT_RULE_ADD:
+		/* If we get here, a rule was added that we do not support,
+		 * so just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ break;
+ }
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ bool replace;
+ int err;
+
+ rtnl_lock();
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD:
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
+ if (err)
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ ipmr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ ipmr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD:
+ err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
+ &fib_work->ven_info);
+ if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_rule_put(rule);
+ dev_put(fib_work->ven_info.dev);
+ break;
+ case FIB_EVENT_VIF_DEL:
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
+ break;
+ case FIB_EVENT_RULE_ADD:
+		/* If we get here, a rule was added that we do not support,
+		 * so just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
break;
}
rtnl_unlock();
@@ -4813,25 +5643,27 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_nh_notifier_info *fnh_info;
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info));
- /* Take referece on fib_info to prevent it from being
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ fib_work->fen_info = *fen_info;
+ /* Take reference on fib_info to prevent it from being
* freed while work is queued. Release it afterwards.
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
- fib_rule_get(fib_work->fr_info.rule);
- break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
- memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info));
+ fnh_info = container_of(info, struct fib_nh_notifier_info,
+ info);
+ fib_work->fnh_info = *fnh_info;
fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
@@ -4840,21 +5672,79 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
+ struct fib6_entry_notifier_info *fen6_info;
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info));
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ fib_work->fen6_info = *fen6_info;
rt6_hold(fib_work->fen6_info.rt);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
- fib_rule_get(fib_work->fr_info.rule);
+ }
+}
+
+static void
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
+{
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ ipmr_cache_hold(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD: /* fall through */
+ case FIB_EVENT_VIF_DEL:
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
+
+static int mlxsw_sp_router_fib_rule_event(unsigned long event,
+ struct fib_notifier_info *info,
+ struct mlxsw_sp *mlxsw_sp)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct fib_rule_notifier_info *fr_info;
+ struct fib_rule *rule;
+ int err = 0;
+
+ /* nothing to do at the moment */
+ if (event == FIB_EVENT_RULE_DEL)
+ return 0;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ fr_info = container_of(info, struct fib_rule_notifier_info, info);
+ rule = fr_info->rule;
+
+ switch (info->family) {
+ case AF_INET:
+ if (!fib4_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ case AF_INET6:
+ if (!fib6_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ case RTNL_FAMILY_IPMR:
+ if (!ipmr_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ }
+
+ if (err < 0)
+ NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
+
+ return err;
+}
+
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
@@ -4862,16 +5752,28 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
+ int err;
if (!net_eq(info->net, &init_net) ||
- (info->family != AF_INET && info->family != AF_INET6))
+ (info->family != AF_INET && info->family != AF_INET6 &&
+ info->family != RTNL_FAMILY_IPMR))
return NOTIFY_DONE;
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ err = mlxsw_sp_router_fib_rule_event(event, info,
+ router->mlxsw_sp);
+ if (!err)
+ return NOTIFY_DONE;
+ }
+
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
if (WARN_ON(!fib_work))
return NOTIFY_BAD;
- router = container_of(nb, struct mlxsw_sp_router, fib_nb);
fib_work->mlxsw_sp = router->mlxsw_sp;
fib_work->event = event;
@@ -4884,6 +5786,10 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
mlxsw_sp_router_fib6_event(fib_work, info);
break;
+ case RTNL_FAMILY_IPMR:
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
+ break;
}
mlxsw_core_schedule_work(&fib_work->work);
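
The notifier above runs in atomic (RCU) context, so it only validates the event, copies the notification info, takes references, and defers the real processing to a work item that may sleep and take RTNL. A minimal, self-contained sketch of that pattern follows; it assumes a FIB_EVENT_ENTRY_* event, and every my_* name is illustrative rather than driver API:

struct my_fib_work {
	struct work_struct work;
	unsigned long event;
	struct fib_entry_notifier_info fen_info;
};

static void my_fib_work_func(struct work_struct *work)
{
	struct my_fib_work *w = container_of(work, struct my_fib_work, work);

	rtnl_lock();
	/* ... program the hardware based on w->event and w->fen_info ... */
	rtnl_unlock();
	fib_info_put(w->fen_info.fi);	/* drop the reference taken below */
	kfree(w);
}

static int my_fib_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct fib_notifier_info *info = ptr;
	struct fib_entry_notifier_info *fen_info =
		container_of(info, struct fib_entry_notifier_info, info);
	struct my_fib_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* atomic context */
	if (!w)
		return NOTIFY_BAD;
	INIT_WORK(&w->work, my_fib_work_func);
	w->event = event;
	w->fen_info = *fen_info;
	/* Keep the fib_info alive until the work item has run. */
	fib_info_hold(w->fen_info.fi);
	schedule_work(&w->work);
	return NOTIFY_DONE;
}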
@@ -5044,9 +5950,15 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
return rif->dev->ifindex;
}
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif_params *params)
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
{
u32 tb_id = l3mdev_fib_table(params->dev);
const struct mlxsw_sp_rif_ops *ops;
@@ -5060,14 +5972,16 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
ops = mlxsw_sp->router->rif_ops_arr[type];
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
return ERR_CAST(vr);
vr->rif_count++;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
+ }
rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
if (!rif) {
@@ -5093,11 +6007,17 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
+ err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
+ if (err)
+ goto err_mr_rif_add;
+
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
return rif;
+err_mr_rif_add:
+ ops->deconfigure(rif);
err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
@@ -5122,6 +6042,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
+ mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
ops->deconfigure(rif);
if (fid)
/* Loopback RIFs are not associated with a FID. */
@@ -5147,7 +6068,8 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev)
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -5163,7 +6085,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
};
mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
}
@@ -5218,7 +6140,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
struct net_device *port_dev,
- unsigned long event, u16 vid)
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -5230,7 +6153,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
- l3_dev);
+ l3_dev, extack);
case NETDEV_DOWN:
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
@@ -5240,19 +6163,22 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (netif_is_bridge_port(port_dev) ||
netif_is_lag_port(port_dev) ||
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
+ extack);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
struct net_device *lag_dev,
- unsigned long event, u16 vid)
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct net_device *port_dev;
struct list_head *iter;
@@ -5262,7 +6188,8 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
if (mlxsw_sp_port_dev_check(port_dev)) {
err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
port_dev,
- event, vid);
+ event, vid,
+ extack);
if (err)
return err;
}
@@ -5272,16 +6199,19 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (netif_is_bridge_port(lag_dev))
return 0;
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
+ extack);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_rif_params params = {
@@ -5291,7 +6221,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
break;
@@ -5305,7 +6235,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
@@ -5315,27 +6246,28 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
- event, vid);
+ event, vid, extack);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
- vid);
+ vid, extack);
else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
return 0;
}
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_inetaddr_port_event(dev, event);
+ return mlxsw_sp_inetaddr_port_event(dev, event, extack);
else if (netif_is_lag_master(dev))
- return mlxsw_sp_inetaddr_lag_event(dev, event);
+ return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
- return mlxsw_sp_inetaddr_vlan_event(dev, event);
+ return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
else
return 0;
}
@@ -5349,6 +6281,32 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
struct mlxsw_sp_rif *rif;
int err = 0;
+ /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
+ if (event == NETDEV_UP)
+ goto out;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *) ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
goto out;
@@ -5357,7 +6315,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event);
+ err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
out:
return notifier_from_errno(err);
}
@@ -5386,7 +6344,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(dev, event);
+ __mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
rtnl_unlock();
dev_put(dev);
@@ -5401,6 +6359,10 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
struct net_device *dev = if6->idev->dev;
+ /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
+ if (event == NETDEV_UP)
+ return NOTIFY_DONE;
+
if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
return NOTIFY_DONE;
@@ -5417,6 +6379,28 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
+ struct net_device *dev = i6vi->i6vi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
const char *mac, int mtu)
{
@@ -5463,6 +6447,17 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (err)
goto err_rif_fdb_op;
+ if (rif->mtu != dev->mtu) {
+ struct mlxsw_sp_vr *vr;
+
+ /* The RIF is relevant only to its mr_table instance, as,
+ * unlike in unicast routing, a RIF cannot be shared between
+ * several multicast routing tables.
+ */
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
+ mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
+ }
+
ether_addr_copy(rif->addr, dev->dev_addr);
rif->mtu = dev->mtu;
@@ -5478,7 +6473,8 @@ err_rif_edit:
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif *rif;
@@ -5487,9 +6483,9 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
- return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
+ return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -5500,7 +6496,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
}
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -5516,10 +6512,14 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
case NETDEV_PRECHANGEUPPER:
return 0;
case NETDEV_CHANGEUPPER:
- if (info->linking)
- err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
- else
+ if (info->linking) {
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
+ } else {
mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+ }
break;
}
@@ -5625,7 +6625,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
@@ -5826,7 +6826,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *ul_vr;
int err;
- ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id);
+ ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
if (IS_ERR(ul_vr))
return PTR_ERR(ul_vr);
@@ -5930,6 +6930,64 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
+{
+ mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
+}
+
+static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
+{
+ mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
+}
+
+static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
+{
+ bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
+
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
+ mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
+ mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
+ if (only_l3)
+ return;
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
+}
+
+static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
+{
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
+ mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
+ mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
+}
+
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char recr2_pl[MLXSW_REG_RECR2_LEN];
+ u32 seed;
+
+ get_random_bytes(&seed, sizeof(seed));
+ mlxsw_reg_recr2_pack(recr2_pl, seed);
+ mlxsw_sp_mp4_hash_init(recr2_pl);
+ mlxsw_sp_mp6_hash_init(recr2_pl);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
+}
+#else
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return 0;
+}
+#endif
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -5990,10 +7048,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_nexthop_group_ht_init;
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
err = mlxsw_sp_lpm_init(mlxsw_sp);
if (err)
goto err_lpm_init;
+ err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
+ if (err)
+ goto err_mr_init;
+
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
@@ -6002,6 +7065,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_neigh_init;
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ err = mlxsw_sp_mp_hash_init(mlxsw_sp);
+ if (err)
+ goto err_mp_hash_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -6011,10 +7084,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_mp_hash_init:
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+err_register_netevent_notifier:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
+ mlxsw_sp_mr_fini(mlxsw_sp);
+err_mr_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
@@ -6034,8 +7112,10 @@ err_router_init:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
+ mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
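
A change that recurs throughout this file is threading struct netlink_ext_ack from the notifiers down into mlxsw_sp_vr_get() and mlxsw_sp_rif_create(), so that a refused offload is reported back to the user instead of failing silently. A hedged sketch of the shape, where my_rif_create() merely stands in for the real function:

static struct my_rif *my_rif_create(struct my_router *router,
				    struct netlink_ext_ack *extack)
{
	if (router->rif_count == router->max_rifs) {
		/* The message surfaces to userspace in the netlink ack. */
		NL_SET_ERR_MSG(extack, "mydrv: Exceeded number of supported router interfaces");
		return ERR_PTR(-ENOBUFS);
	}
	/* ... allocate and configure the RIF ... */
	return ERR_PTR(-ENOSYS);	/* placeholder for this sketch */
}

Call paths with no netlink context (e.g. the address-deletion handlers above) simply pass NULL, which NL_SET_ERR_MSG tolerates.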
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 345fcc4f38e9..1fb82246ce96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -62,13 +62,18 @@ enum mlxsw_sp_rif_counter_dir {
};
struct mlxsw_sp_neigh_entry;
+struct mlxsw_sp_nexthop;
+struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
@@ -100,12 +105,44 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
bool adding);
bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry);
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev);
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev);
-__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev);
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except);
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh);
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh);
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index);
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
+#define mlxsw_sp_nexthop_for_each(nh, router) \
+ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
+ nh = mlxsw_sp_nexthop_next(router, nh))
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter);
+int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+
+static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
+ const union mlxsw_sp_l3addr *addr2)
+{
+ return !memcmp(addr1, addr2, sizeof(*addr1));
+}
#endif /* _MLXSW_ROUTER_H_*/
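
The accessors and the mlxsw_sp_nexthop_for_each() iterator exported above give other parts of the driver read access to the router's nexthop database. A hypothetical consumer might look as follows; only the iterator and the accessors are real, my_dump_nexthops() itself is illustrative:

static void my_dump_nexthops(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_router *router)
{
	struct mlxsw_sp_nexthop *nh;
	u64 packets;

	mlxsw_sp_nexthop_for_each(nh, router) {
		/* Skip entries that are not offloaded to the adjacency table. */
		if (!mlxsw_sp_nexthop_offload(nh))
			continue;
		if (!mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets))
			pr_info("nexthop %pM: %llu packets\n",
				mlxsw_sp_nexthop_ha(nh), packets);
	}
}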
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index d39ffbfcc436..7b8548e25ae7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -46,8 +46,10 @@
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
+#include "spectrum_router.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"
@@ -67,7 +69,6 @@ struct mlxsw_sp_bridge {
u32 ageing_time;
bool vlan_enabled_exists;
struct list_head bridges_list;
- struct list_head mids_list;
DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
@@ -77,8 +78,10 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
+ struct list_head mids_list;
u8 vlan_enabled:1,
- multicast_enabled:1;
+ multicast_enabled:1,
+ mrouter:1;
const struct mlxsw_sp_bridge_ops *ops;
};
@@ -107,7 +110,8 @@ struct mlxsw_sp_bridge_vlan {
struct mlxsw_sp_bridge_ops {
int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
@@ -121,6 +125,20 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
u16 fid_index);
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port);
+
+static void
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_device *bridge_device);
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add);
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
const struct net_device *br_dev)
@@ -154,6 +172,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ bridge_device->mrouter = br_multicast_router(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
bridge->vlan_enabled_exists = true;
@@ -161,6 +180,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
+ INIT_LIST_HEAD(&bridge_device->mids_list);
list_add(&bridge_device->list, &bridge->bridges_list);
return bridge_device;
@@ -174,6 +194,7 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
+ WARN_ON(!list_empty(&bridge_device->mids_list));
kfree(bridge_device);
}
@@ -249,7 +270,8 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
bridge_port->dev = brport_dev;
bridge_port->bridge_device = bridge_device;
bridge_port->stp_state = BR_STATE_DISABLED;
- bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
@@ -455,7 +477,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
&attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
+ BR_MCAST_FLOOD;
break;
default:
return -EOPNOTSUPP;
@@ -640,8 +663,18 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
+ if (bridge_port->bridge_device->multicast_enabled)
+ goto out;
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ brport_flags &
+ BR_MCAST_FLOOD);
+ if (err)
+ return err;
+
+out:
+ memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
return 0;
}
@@ -699,10 +732,10 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
-static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- struct net_device *orig_dev,
- bool is_port_mc_router)
+static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
@@ -720,15 +753,26 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
- is_port_mc_router);
+ is_port_mrouter);
if (err)
return err;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
out:
- bridge_port->mrouter = is_port_mc_router;
+ bridge_port->mrouter = is_port_mrouter;
return 0;
}
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
+{
+ const struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = bridge_port->bridge_device;
+ return bridge_device->multicast_enabled ? bridge_port->mrouter :
+ bridge_port->flags & BR_MCAST_FLOOD;
+}
+
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
@@ -749,9 +793,15 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
+ if (bridge_device->multicast_enabled != !mc_disabled) {
+ bridge_device->multicast_enabled = !mc_disabled;
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
+ bridge_device);
+ }
+
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
- bool member = mc_disabled ? true : bridge_port->mrouter;
+ bool member = mlxsw_sp_mc_flood(bridge_port);
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
bridge_port,
@@ -765,6 +815,60 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
+ u16 mid_idx, bool add)
+{
+ char *smid_pl;
+ int err;
+
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid_pack(smid_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool add)
+{
+ struct mlxsw_sp_mid *mid;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list)
+ mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+}
+
+static int
+mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool is_mrouter)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ if (bridge_device->mrouter != is_mrouter)
+ mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
+ is_mrouter);
+ bridge_device->mrouter = is_mrouter;
+ return 0;
+}
+
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -793,15 +897,20 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
- attr->orig_dev,
- attr->u.mrouter);
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -810,14 +919,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
-static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
-{
- const struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = bridge_port->bridge_device;
- return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
-}
-
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct mlxsw_sp_bridge_port *bridge_port)
@@ -955,24 +1056,28 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last;
+ bool last_port, last_vlan;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
- last = list_is_singular(&bridge_vlan->port_vlan_list);
+ last_port = list_is_singular(&bridge_vlan->port_vlan_list);
list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
mlxsw_sp_bridge_vlan_put(bridge_vlan);
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
- if (last)
+ if (last_port)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
+ if (last_vlan)
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
@@ -1182,7 +1287,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid, bool adding)
+ u16 fid, u16 mid_idx, bool adding)
{
char *sfd_pl;
int err;
@@ -1193,16 +1298,16 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+ MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
return err;
}
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
- bool add, bool clear_all_ports)
+static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
+ long *ports_bitmap,
+ bool set_router_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *smid_pl;
int err, i;
@@ -1210,66 +1315,208 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
if (!smid_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
- if (clear_all_ports) {
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
+ mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
+ if (mlxsw_sp->ports[i])
+ mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
}
+
+ mlxsw_reg_smid_port_mask_set(smid_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
+
+ for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
+ mlxsw_reg_smid_port_set(smid_pl, i, 1);
+
+ mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 mid_idx, bool add)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *smid_pl;
+ int err;
+
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
kfree(smid_pl);
return err;
}
-static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mid *
+__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid)
{
struct mlxsw_sp_mid *mid;
- list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
return NULL;
}
-static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- const unsigned char *addr,
- u16 fid)
+static void
+mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ unsigned long *ports_bitmap)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u64 max_lag_members, i;
+ int lag_id;
+
+ if (!bridge_port->lagged) {
+ set_bit(bridge_port->system_port, ports_bitmap);
+ } else {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ lag_id = bridge_port->lag_id;
+ for (i = 0; i < max_lag_members; i++) {
+ mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
+ lag_id, i);
+ if (mlxsw_sp_port)
+ set_bit(mlxsw_sp_port->local_port,
+ ports_bitmap);
+ }
+ }
+}
+
+static void
+mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->mrouter) {
+ mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
+ bridge_port,
+ flood_bitmap);
+ }
+ }
+}
+
+static bool
+mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mid *mid,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ long *flood_bitmap;
+ int num_of_ports;
+ int alloc_size;
u16 mid_idx;
+ int err;
mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
MLXSW_SP_MID_MAX);
if (mid_idx == MLXSW_SP_MID_MAX)
- return NULL;
+ return false;
+
+ num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
+ flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
+ if (!flood_bitmap)
+ return false;
+
+ bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
+ mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+
+ mid->mid = mid_idx;
+ err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
+ bridge_device->mrouter);
+ kfree(flood_bitmap);
+ if (err)
+ return false;
+
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
+ true);
+ if (err)
+ return false;
+
+ set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
+ mid->in_hw = true;
+ return true;
+}
+
+static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mid *mid)
+{
+ if (!mid->in_hw)
+ return 0;
+
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ mid->in_hw = false;
+ return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
+ false);
+}
+
+static struct mlxsw_sp_mid *
+__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid)
+{
+ struct mlxsw_sp_mid *mid;
+ size_t alloc_size;
mid = kzalloc(sizeof(*mid), GFP_KERNEL);
if (!mid)
return NULL;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
+ alloc_size = sizeof(unsigned long) *
+ BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
+
+ mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mid->ports_in_mid)
+ goto err_ports_in_mid_alloc;
+
ether_addr_copy(mid->addr, addr);
mid->fid = fid;
- mid->mid = mid_idx;
- mid->ref_count = 0;
- list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);
+ mid->in_hw = false;
+
+ if (!bridge_device->multicast_enabled)
+ goto out;
+ if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
+ goto err_write_mdb_entry;
+
+out:
+ list_add_tail(&mid->list, &bridge_device->mids_list);
return mid;
+
+err_write_mdb_entry:
+ kfree(mid->ports_in_mid);
+err_ports_in_mid_alloc:
+ kfree(mid);
+ return NULL;
}
-static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mid *mid)
{
- if (--mid->ref_count == 0) {
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err = 0;
+
+ clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
+ if (bitmap_empty(mid->ports_in_mid,
+ mlxsw_core_max_ports(mlxsw_sp->core))) {
+ err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ kfree(mid->ports_in_mid);
kfree(mid);
- return 1;
}
- return 0;
+ return err;
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1302,39 +1549,72 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
+ fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
}
}
- mid->ref_count++;
+ set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
+
+ if (!bridge_device->multicast_enabled)
+ return 0;
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
- mid->ref_count == 1);
+ if (bridge_port->mrouter)
+ return 0;
+
+ err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set SMID\n");
goto err_out;
}
- if (mid->ref_count == 1) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
- mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set MC SFD\n");
- goto err_out;
- }
- }
-
return 0;
err_out:
- __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
+ mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
return err;
}
+static void
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_mid *mid;
+ bool mc_enabled;
+
+ mc_enabled = bridge_device->multicast_enabled;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
+ if (mc_enabled)
+ mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
+ bridge_device);
+ else
+ mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ }
+}
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_mid *mid;
+
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
+ if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
+ mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ }
+}
+
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
@@ -1399,6 +1679,30 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int
+__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_mid *mid)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ if (bridge_port->bridge_device->multicast_enabled) {
+ err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
+ false);
+ if (err)
+ netdev_err(dev, "Unable to remove port from SMID\n");
+ }
+
+ err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
+ if (err)
+ netdev_err(dev, "Unable to remove MC SFD\n");
+
+ return err;
+}
+
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1410,8 +1714,6 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
u16 fid_index;
- u16 mid_idx;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1426,25 +1728,33 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
+ return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+}
- mid_idx = mid->mid;
- if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
- mid_idx, false);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
- }
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_mid *mid, *tmp;
- return err;
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
+ if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
+ __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
+ mid);
+ } else if (bridge_device->multicast_enabled &&
+ bridge_port->mrouter) {
+ mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
+ }
+ }
}
static int mlxsw_sp_port_obj_del(struct net_device *dev,
@@ -1497,12 +1807,15 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (is_vlan_dev(bridge_port->dev))
+ if (is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Cannot enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
+ }
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_port_vlan))
@@ -1559,13 +1872,16 @@ mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev))
+ if (!is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
return -EINVAL;
+ }
vid = vlan_dev_vlan_id(bridge_port->dev);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -1573,7 +1889,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ NL_SET_ERR_MSG(extack, "spectrum: Cannot bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1616,7 +1932,8 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
@@ -1629,7 +1946,7 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_device = bridge_port->bridge_device;
err = bridge_device->ops->port_join(bridge_device, bridge_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, extack);
if (err)
goto err_port_join;
@@ -1981,17 +2298,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
}
-static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_mid *mid, *tmp;
-
- list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
- list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- kfree(mid);
- }
-}
-
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -2003,7 +2309,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->mlxsw_sp = mlxsw_sp;
INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
- INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
@@ -2014,7 +2319,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
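
The switchdev rework above replaces the per-MID reference count with an explicit port bitmap (ports_in_mid); the flood set written to hardware is then that bitmap plus all mrouter ports. A condensed, generic sketch of the bitmap lifecycle, with every my_* name being illustrative:

struct my_mid {
	unsigned long *ports_in_mid;	/* one bit per local port */
};

static void my_mid_port_join(struct my_mid *mid, u8 local_port)
{
	set_bit(local_port, mid->ports_in_mid);
}

/* Returns true when the last member left and the hardware entry
 * can therefore be removed.
 */
static bool my_mid_port_leave(struct my_mid *mid, u8 local_port,
			      unsigned int max_ports)
{
	clear_bit(local_port, mid->ports_in_mid);
	return bitmap_empty(mid->ports_in_mid, max_ports);
}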
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index f396a1fef633..ec6cef8267ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -62,6 +62,8 @@ enum {
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
+ MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61,
@@ -89,6 +91,10 @@ enum {
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
+ /* Multicast trap used for routes with trap action */
+ MLXSW_TRAP_ID_ACL1 = 0x1C1,
+ /* Multicast trap used for routes with trap-and-forward action */
+ MLXSW_TRAP_ID_ACL2 = 0x1C2,
MLXSW_TRAP_ID_MAX = 0x1FF
};