| author | James Morris <james.l.morris@oracle.com> | 2017-11-29 12:47:41 +1100 |
|---|---|---|
| committer | James Morris <james.l.morris@oracle.com> | 2017-11-29 12:47:41 +1100 |
| commit | cf40a76e7d5874bb25f4404eecc58a2e033af885 (patch) | |
| tree | 8fd81cbea03c87b3d41d7ae5b1d11eadd35d6ef5 /drivers/net/ethernet/broadcom | |
| parent | ab5348c9c23cd253f5902980d2d8fe067dc24c82 (diff) | |
| parent | 4fbd8d194f06c8a3fd2af1ce560ddb31f7ec8323 (diff) | |
Merge tag 'v4.15-rc1' into next-seccomp
Linux 4.15-rc1
Diffstat (limited to 'drivers/net/ethernet/broadcom')
42 files changed, 4911 insertions, 909 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 96413808c726..af75156919ed 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,10 +61,12 @@ config BCM63XX_ENET
 config BCMGENET
 	tristate "Broadcom GENET internal MAC support"
+	depends on OF && HAS_IOMEM
 	select MII
 	select PHYLIB
 	select FIXED_PHY
 	select BCM7XXX_PHY
+	select MDIO_BCM_UNIMAC
 	help
 	  This driver supports the built-in Ethernet MACs found in the
 	  Broadcom BCM7xxx Set Top Box family chipset.
@@ -182,6 +184,7 @@ config BGMAC_PLATFORM
 config SYSTEMPORT
 	tristate "Broadcom SYSTEMPORT internal MAC support"
 	depends on OF
+	depends on NET_DSA || !NET_DSA
 	select MII
 	select PHYLIB
 	select FIXED_PHY
@@ -193,6 +196,7 @@ config SYSTEMPORT
 config BNXT
 	tristate "Broadcom NetXtreme-C/E support"
 	depends on PCI
+	depends on MAY_USE_DEVLINK
 	select FW_LOADER
 	select LIBCRC32C
 	---help---
@@ -209,6 +213,15 @@ config BNXT_SRIOV
 	  Virtualization support in the NetXtreme-C/E products. This
 	  allows for virtual function acceleration in virtual environments.
 
+config BNXT_FLOWER_OFFLOAD
+	bool "TC Flower offload support for NetXtreme-C/E"
+	depends on BNXT
+	default y
+	---help---
+	  This configuration parameter enables TC Flower packet classifier
+	  offload for eswitch.  This option enables SR-IOV switchdev eswitch
+	  offload.
+
 config BNXT_DCB
 	bool "Data Center Bridging (DCB) Support"
 	default n
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 79f2372c66ec..7046ad6d3d0e 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Broadcom network device drivers.
 #
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..e445ab724827 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
 	}
 }
 
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
 {
-	struct b44 *bp = (struct b44 *) __opaque;
+	struct b44 *bp = from_timer(bp, t, timer);
 
 	spin_lock_irq(&bp->lock);
 
@@ -1474,10 +1474,8 @@ static int b44_open(struct net_device *dev)
 		goto out;
 	}
 
-	init_timer(&bp->timer);
+	timer_setup(&bp->timer, b44_timer, 0);
 	bp->timer.expires = jiffies + HZ;
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = b44_timer;
 	add_timer(&bp->timer);
 
 	b44_enable_ints(bp);
@@ -2368,6 +2366,7 @@ static int b44_init_one(struct ssb_device *sdev,
 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
 
 	spin_lock_init(&bp->lock);
+	u64_stats_init(&bp->hw_stats.syncp);
 
 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 89d2cf341163..b3e36ca0fd19 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _B44_H
 #define _B44_H
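The b44 hunks above are part of the tree-wide conversion to the timer API introduced for 4.15: the callback now receives the `timer_list` pointer and recovers its container via `from_timer()` (a `container_of()` wrapper) instead of being handed a cast `unsigned long` cookie. A minimal sketch of the pattern, with `my_priv`/`my_timer_fn` as illustrative names rather than code from this tree:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list timer;	/* must be embedded in the struct */
	unsigned long ticks;
};

/* New-style callback: gets the timer itself, not an opaque cookie */
static void my_timer_fn(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, timer);

	priv->ticks++;
	mod_timer(&priv->timer, jiffies + HZ);	/* re-arm, as b44_timer() does */
}

static void my_timer_start(struct my_priv *priv)
{
	/* replaces init_timer() plus manual .function/.data assignment */
	timer_setup(&priv->timer, my_timer_fn, 0);
	priv->timer.expires = jiffies + HZ;
	add_timer(&priv->timer);
}
```

The same mechanical transformation appears again below in bcm63xx_enet.c, bnx2.c, and bnx2x_main.c.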
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 61a88b64bd39..d9346e2ac720 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -295,16 +295,13 @@ static int bcm_enet_refill_rx(struct net_device *dev)
 /*
  * timer callback to defer refill rx queue in case we're OOM
  */
-static void bcm_enet_refill_rx_timer(unsigned long data)
+static void bcm_enet_refill_rx_timer(struct timer_list *t)
 {
-	struct net_device *dev;
-	struct bcm_enet_priv *priv;
-
-	dev = (struct net_device *)data;
-	priv = netdev_priv(dev);
+	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+	struct net_device *dev = priv->net_dev;
 
 	spin_lock(&priv->rx_lock);
-	bcm_enet_refill_rx((struct net_device *)data);
+	bcm_enet_refill_rx(dev);
 	spin_unlock(&priv->rx_lock);
 }
 
@@ -1062,7 +1059,8 @@ static int bcm_enet_open(struct net_device *dev)
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	if (priv->dma_has_sram)
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
 			 ENETDMAC_CHANCFG, priv->rx_chan);
 
@@ -1721,10 +1719,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	const char *clk_name;
 	int i, ret;
 
-	/* stop if shared driver failed, assume driver->probe will be
-	 * called in the same order we register devices (correct ?) */
 	if (!bcm_enet_shared_base[0])
-		return -ENODEV;
+		return -EPROBE_DEFER;
 
 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
@@ -1768,12 +1764,14 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		clk_name = "enet1";
 	}
 
-	priv->mac_clk = clk_get(&pdev->dev, clk_name);
+	priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
 	if (IS_ERR(priv->mac_clk)) {
 		ret = PTR_ERR(priv->mac_clk);
 		goto out;
 	}
-	clk_prepare_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out;
 
 	/* initialize default and fetch platform data */
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1801,13 +1799,15 @@ static int bcm_enet_probe(struct platform_device *pdev)
 
 	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
 		/* using internal PHY, enable clock */
-		priv->phy_clk = clk_get(&pdev->dev, "ephy");
+		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
 		if (IS_ERR(priv->phy_clk)) {
 			ret = PTR_ERR(priv->phy_clk);
 			priv->phy_clk = NULL;
-			goto out_put_clk_mac;
+			goto out_disable_clk_mac;
 		}
-		clk_prepare_enable(priv->phy_clk);
+		ret = clk_prepare_enable(priv->phy_clk);
+		if (ret)
+			goto out_disable_clk_mac;
 	}
 
 	/* do minimal hardware init to be able to probe mii bus */
@@ -1857,9 +1857,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->rx_lock);
 
 	/* init rx timeout (used for oom) */
-	init_timer(&priv->rx_timeout);
-	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
-	priv->rx_timeout.data = (unsigned long)dev;
+	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
 
 	/* init the mib update lock&work */
 	mutex_init(&priv->mib_update_lock);
@@ -1901,14 +1899,10 @@ out_free_mdio:
 out_uninit_hw:
 	/* turn off mdc clock */
 	enet_writel(priv, 0, ENET_MIISC_REG);
-	if (priv->phy_clk) {
-		clk_disable_unprepare(priv->phy_clk);
-		clk_put(priv->phy_clk);
-	}
+	clk_disable_unprepare(priv->phy_clk);
 
-out_put_clk_mac:
+out_disable_clk_mac:
 	clk_disable_unprepare(priv->mac_clk);
-	clk_put(priv->mac_clk);
 out:
 	free_netdev(dev);
 	return ret;
@@ -1944,12 +1938,8 @@ static int bcm_enet_remove(struct platform_device *pdev)
 	}
 
 	/* disable hw block clocks */
-	if (priv->phy_clk) {
-		clk_disable_unprepare(priv->phy_clk);
-		clk_put(priv->phy_clk);
-	}
+	clk_disable_unprepare(priv->phy_clk);
 	clk_disable_unprepare(priv->mac_clk);
-	clk_put(priv->mac_clk);
 
 	free_netdev(dev);
 	return 0;
@@ -2021,9 +2011,9 @@ static inline int bcm_enet_port_is_rgmii(int portid)
 /*
  * enet sw PHY polling
  */
-static void swphy_poll_timer(unsigned long data)
+static void swphy_poll_timer(struct timer_list *t)
 {
-	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
+	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
 	unsigned int i;
 
 	for (i = 0; i < priv->num_ports; i++) {
@@ -2332,11 +2322,8 @@ static int bcm_enetsw_open(struct net_device *dev)
 	}
 
 	/* start phy polling timer */
-	init_timer(&priv->swphy_poll);
-	priv->swphy_poll.function = swphy_poll_timer;
-	priv->swphy_poll.data = (unsigned long)priv;
-	priv->swphy_poll.expires = jiffies;
-	add_timer(&priv->swphy_poll);
+	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
+	mod_timer(&priv->swphy_poll, jiffies);
 
 	return 0;
 
 out:
@@ -2674,7 +2661,7 @@ static int bcm_enetsw_set_ringparam(struct net_device *dev,
 	return 0;
 }
 
-static struct ethtool_ops bcm_enetsw_ethtool_ops = {
+static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
 	.get_strings		= bcm_enetsw_get_strings,
 	.get_sset_count		= bcm_enetsw_get_sset_count,
 	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
@@ -2692,11 +2679,8 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	struct resource *res_mem;
 	int ret, irq_rx, irq_tx;
 
-	/* stop if shared driver failed, assume driver->probe will be
-	 * called in the same order we register devices (correct ?)
-	 */
 	if (!bcm_enet_shared_base[0])
-		return -ENODEV;
+		return -EPROBE_DEFER;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq_rx = platform_get_irq(pdev, 0);
@@ -2735,33 +2719,27 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 	if (ret)
 		goto out;
 
-	if (!request_mem_region(res_mem->start, resource_size(res_mem),
-				"bcm63xx_enetsw")) {
-		ret = -EBUSY;
+	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
 		goto out;
 	}
 
-	priv->base = ioremap(res_mem->start, resource_size(res_mem));
-	if (priv->base == NULL) {
-		ret = -ENOMEM;
-		goto out_release_mem;
-	}
-
-	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
+	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
 	if (IS_ERR(priv->mac_clk)) {
 		ret = PTR_ERR(priv->mac_clk);
-		goto out_unmap;
+		goto out;
 	}
-	clk_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out;
 
 	priv->rx_chan = 0;
 	priv->tx_chan = 1;
 	spin_lock_init(&priv->rx_lock);
 
 	/* init rx timeout (used for oom) */
-	init_timer(&priv->rx_timeout);
-	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
-	priv->rx_timeout.data = (unsigned long)dev;
+	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
 
 	/* register netdevice */
 	dev->netdev_ops = &bcm_enetsw_ops;
@@ -2773,7 +2751,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 
 	ret = register_netdev(dev);
 	if (ret)
-		goto out_put_clk;
+		goto out_disable_clk;
 
 	netif_carrier_off(dev);
 	platform_set_drvdata(pdev, dev);
@@ -2782,14 +2760,8 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 
 	return 0;
 
-out_put_clk:
-	clk_put(priv->mac_clk);
-
-out_unmap:
-	iounmap(priv->base);
-
-out_release_mem:
-	release_mem_region(res_mem->start, resource_size(res_mem));
+out_disable_clk:
+	clk_disable_unprepare(priv->mac_clk);
 out:
 	free_netdev(dev);
 	return ret;
@@ -2801,17 +2773,13 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
 {
 	struct bcm_enet_priv *priv;
 	struct net_device *dev;
-	struct resource *res;
 
 	/* stop netdevice */
 	dev = platform_get_drvdata(pdev);
 	priv = netdev_priv(dev);
 	unregister_netdev(dev);
 
-	/* release device resources */
-	iounmap(priv->base);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
+	clk_disable_unprepare(priv->mac_clk);
 
 	free_netdev(dev);
 	return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0a1b7b2e55bd..5a66728d4776 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef BCM63XX_ENET_H_
 #define BCM63XX_ENET_H_
 
@@ -8,7 +9,6 @@
 #include <linux/platform_device.h>
 
 #include <bcm63xx_regs.h>
-#include <bcm63xx_irq.h>
 #include <bcm63xx_io.h>
 #include <bcm63xx_iudma.h>
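Two probe-time idioms recur in the bcm63xx_enet hunks above: returning -EPROBE_DEFER so the driver core retries the probe once a dependency (here, the shared DMA block) has registered, instead of failing hard with -ENODEV; and devm_clk_get() plus a checked clk_prepare_enable(), which lets the managed-resource layer drop the clk_put() calls from every error path. A hedged sketch of the combined pattern; shared_base_ready() is a hypothetical stand-in for the driver's bcm_enet_shared_base[0] check:

```c
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* hypothetical stand-in for the driver's dependency check */
static bool shared_base_ready(void)
{
	return false;	/* pretend the shared block has not probed yet */
}

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	if (!shared_base_ready())
		return -EPROBE_DEFER;	/* core re-queues this probe later */

	clk = devm_clk_get(&pdev->dev, "enet");	/* released automatically on detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* can fail; check it, as the patch now does */
	if (ret)
		return ret;

	return 0;
}
```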
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5333601f855f..087f01b4dc3a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -32,13 +32,13 @@
 #define BCM_SYSPORT_IO_MACRO(name, offset) \
 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
 {									\
-	u32 reg = __raw_readl(priv->base + offset + off);		\
+	u32 reg = readl_relaxed(priv->base + offset + off);		\
 	return reg;							\
 }									\
 static inline void name##_writel(struct bcm_sysport_priv *priv,	\
 				  u32 val, u32 off)			\
 {									\
-	__raw_writel(val, priv->base + offset + off);			\
+	writel_relaxed(val, priv->base + offset + off);			\
 }									\
 
 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
@@ -59,14 +59,14 @@ static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
 {
 	if (priv->is_lite && off >= RDMA_STATUS)
 		off += 4;
-	return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
 }
 
 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
 {
 	if (priv->is_lite && off >= RDMA_STATUS)
 		off += 4;
-	__raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
 }
 
 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
@@ -110,10 +110,10 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
 				     dma_addr_t addr)
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
-	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
 		     d + DESC_ADDR_HI_STATUS_LEN);
 #endif
-	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
 }
 
 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
@@ -201,10 +201,10 @@ static int bcm_sysport_set_features(struct net_device *dev,
  */
 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 	/* general stats */
-	STAT_NETDEV(rx_packets),
-	STAT_NETDEV(tx_packets),
-	STAT_NETDEV(rx_bytes),
-	STAT_NETDEV(tx_bytes),
+	STAT_NETDEV64(rx_packets),
+	STAT_NETDEV64(tx_packets),
+	STAT_NETDEV64(rx_bytes),
+	STAT_NETDEV64(tx_bytes),
 	STAT_NETDEV(rx_errors),
 	STAT_NETDEV(tx_errors),
 	STAT_NETDEV(rx_dropped),
@@ -316,6 +316,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
 {
 	switch (type) {
 	case BCM_SYSPORT_STAT_NETDEV:
+	case BCM_SYSPORT_STAT_NETDEV64:
 	case BCM_SYSPORT_STAT_RXCHK:
 	case BCM_SYSPORT_STAT_RBUF:
 	case BCM_SYSPORT_STAT_SOFT:
@@ -398,6 +399,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 		s = &bcm_sysport_gstrings_stats[i];
 		switch (s->type) {
 		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_NETDEV64:
 		case BCM_SYSPORT_STAT_SOFT:
 			continue;
 		case BCM_SYSPORT_STAT_MIB_RX:
@@ -430,15 +432,44 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
 }
 
+static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
+					u64 *tx_bytes, u64 *tx_packets)
+{
+	struct bcm_sysport_tx_ring *ring;
+	u64 bytes = 0, packets = 0;
+	unsigned int start;
+	unsigned int q;
+
+	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
+		ring = &priv->tx_rings[q];
+		do {
+			start = u64_stats_fetch_begin_irq(&priv->syncp);
+			bytes = ring->bytes;
+			packets = ring->packets;
+		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+
+		*tx_bytes += bytes;
+		*tx_packets += packets;
+	}
+}
+
 static void bcm_sysport_get_stats(struct net_device *dev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
+	struct u64_stats_sync *syncp = &priv->syncp;
 	struct bcm_sysport_tx_ring *ring;
+	u64 tx_bytes = 0, tx_packets = 0;
+	unsigned int start;
 	int i, j;
 
-	if (netif_running(dev))
+	if (netif_running(dev)) {
 		bcm_sysport_update_mib_counters(priv);
+		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
+		stats64->tx_bytes = tx_bytes;
+		stats64->tx_packets = tx_packets;
+	}
 
 	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
 		const struct bcm_sysport_stats *s;
@@ -447,10 +478,23 @@ static void bcm_sysport_get_stats(struct net_device *dev,
 		s = &bcm_sysport_gstrings_stats[i];
 		if (s->type == BCM_SYSPORT_STAT_NETDEV)
 			p = (char *)&dev->stats;
+		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
+			p = (char *)stats64;
 		else
 			p = (char *)priv;
+
+		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
+			continue;
 		p += s->stat_offset;
-		data[j] = *(unsigned long *)p;
+
+		if (s->stat_sizeof == sizeof(u64) &&
+		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
+			do {
+				start = u64_stats_fetch_begin_irq(syncp);
+				data[i] = *(u64 *)p;
+			} while (u64_stats_fetch_retry_irq(syncp, start));
+		} else
+			data[i] = *(u32 *)p;
 		j++;
 	}
 
@@ -593,7 +637,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
 
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
-	dev_kfree_skb_any(cb->skb);
+	dev_consume_skb_any(cb->skb);
 	cb->skb = NULL;
 	dma_unmap_addr_set(cb, dma_addr, 0);
 }
@@ -662,6 +706,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 					unsigned int budget)
 {
+	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
 	struct net_device *ndev = priv->netdev;
 	unsigned int processed = 0, to_process;
 	struct bcm_sysport_cb *cb;
@@ -765,6 +810,10 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		skb->protocol = eth_type_trans(skb, ndev);
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += len;
+		u64_stats_update_begin(&priv->syncp);
+		stats64->rx_packets++;
+		stats64->rx_bytes += len;
+		u64_stats_update_end(&priv->syncp);
 
 		napi_gro_receive(&priv->napi, skb);
 next:
@@ -787,17 +836,15 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 	struct device *kdev = &priv->pdev->dev;
 
 	if (cb->skb) {
-		ring->bytes += cb->skb->len;
 		*bytes_compl += cb->skb->len;
 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
 				 dma_unmap_len(cb, dma_len),
 				 DMA_TO_DEVICE);
-		ring->packets++;
 		(*pkts_compl)++;
 		bcm_sysport_free_cb(cb);
 	/* SKB fragment */
 	} else if (dma_unmap_addr(cb, dma_addr)) {
-		ring->bytes += dma_unmap_len(cb, dma_len);
+		*bytes_compl += dma_unmap_len(cb, dma_len);
 		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
 			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
 		dma_unmap_addr_set(cb, dma_addr, 0);
@@ -808,9 +855,9 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 					     struct bcm_sysport_tx_ring *ring)
 {
-	struct net_device *ndev = priv->netdev;
 	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
+	struct net_device *ndev = priv->netdev;
 	struct bcm_sysport_cb *cb;
 	u32 hw_ind;
 
@@ -849,6 +896,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 		last_c_index &= (num_tx_cbs - 1);
 	}
 
+	u64_stats_update_begin(&priv->syncp);
+	ring->packets += pkts_compl;
+	ring->bytes += bytes_compl;
+	u64_stats_update_end(&priv->syncp);
+
 	ring->c_index = c_index;
 
 	netif_dbg(priv, tx_done, ndev,
@@ -1342,6 +1394,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 
 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
 	if (!ring->cbs) {
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
 		return -ENOMEM;
 	}
@@ -1362,9 +1416,37 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
 	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
-	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+
+	/* Configure QID and port mapping */
+	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
+	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
+	if (ring->inspect) {
+		reg |= ring->switch_queue & RING_QID_MASK;
+		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
+	} else {
+		reg |= RING_IGNORE_STATUS;
+	}
+	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
 
+	/* Enable ACB algorithm 2 */
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	reg |= tdma_control_bit(priv, ACB_ALGO);
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
+	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
+	 * with the original definition of ACB_ALGO
+	 */
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	if (priv->is_lite)
+		reg &= ~BIT(TSB_SWAP1);
+	/* Set a correct TSB format based on host endian */
+	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		reg |= tdma_control_bit(priv, TSB_SWAP0);
+	else
+		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
 	/* Program the number of descriptors as MAX_THRESHOLD and half of
 	 * its size for the hysteresis trigger
 	 */
@@ -1380,8 +1462,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	napi_enable(&ring->napi);
 
 	netif_dbg(priv, hw, priv->netdev,
-		  "TDMA cfg, size=%d, desc_cpu=%p\n",
-		  ring->size, ring->desc_cpu);
+		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
+		  ring->size, ring->desc_cpu, ring->switch_queue,
+		  ring->switch_port);
 
 	return 0;
 }
@@ -1671,22 +1754,23 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
 	return 0;
 }
 
-static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+static void bcm_sysport_get_stats64(struct net_device *dev,
+				    struct rtnl_link_stats64 *stats)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	unsigned long tx_bytes = 0, tx_packets = 0;
-	struct bcm_sysport_tx_ring *ring;
-	unsigned int q;
+	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
+	unsigned int start;
 
-	for (q = 0; q < dev->num_tx_queues; q++) {
-		ring = &priv->tx_rings[q];
-		tx_bytes += ring->bytes;
-		tx_packets += ring->packets;
-	}
+	netdev_stats_to_stats64(stats, &dev->stats);
+
+	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
+				    &stats->tx_packets);
 
-	dev->stats.tx_bytes = tx_bytes;
-	dev->stats.tx_packets = tx_packets;
-	return &dev->stats;
+	do {
+		start = u64_stats_fetch_begin_irq(&priv->syncp);
+		stats->rx_packets = stats64->rx_packets;
+		stats->rx_bytes = stats64->rx_bytes;
+	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
 }
 
 static void bcm_sysport_netif_start(struct net_device *dev)
@@ -1718,10 +1802,14 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
 	reg = rbuf_readl(priv, RBUF_CONTROL);
 	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
 	/* Set a correct RSB format on SYSTEMPORT Lite */
-	if (priv->is_lite) {
+	if (priv->is_lite)
 		reg &= ~RBUF_RSB_SWAP1;
+
+	/* Set a correct RSB format based on host endian */
+	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		reg |= RBUF_RSB_SWAP0;
-	}
+	else
+		reg &= ~RBUF_RSB_SWAP0;
 	rbuf_writel(priv, reg, RBUF_CONTROL);
 }
 
@@ -1737,15 +1825,17 @@ static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
 
 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
 {
-	u32 __maybe_unused reg;
+	u32 reg;
 
-	/* Include Broadcom tag in pad extension */
+	reg = gib_readl(priv, GIB_CONTROL);
+	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
 	if (netdev_uses_dsa(priv->netdev)) {
-		reg = gib_readl(priv, GIB_CONTROL);
 		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
 		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
-		gib_writel(priv, reg, GIB_CONTROL);
 	}
+	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
+	reg |= 12 << GIB_IPG_LEN_SHIFT;
+	gib_writel(priv, reg, GIB_CONTROL);
 }
 
 static int bcm_sysport_open(struct net_device *dev)
@@ -1939,6 +2029,29 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
+static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
+				    void *accel_priv,
+				    select_queue_fallback_t fallback)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u16 queue = skb_get_queue_mapping(skb);
+	struct bcm_sysport_tx_ring *tx_ring;
+	unsigned int q, port;
+
+	if (!netdev_uses_dsa(dev))
+		return fallback(dev, skb);
+
+	/* DSA tagging layer will have configured the correct queue */
+	q = BRCM_TAG_GET_QUEUE(queue);
+	port = BRCM_TAG_GET_PORT(queue);
+	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
+
+	if (unlikely(!tx_ring))
+		return fallback(dev, skb);
+
+	return tx_ring->index;
+}
+
 static const struct net_device_ops bcm_sysport_netdev_ops = {
 	.ndo_start_xmit		= bcm_sysport_xmit,
 	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
@@ -1950,9 +2063,80 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= bcm_sysport_poll_controller,
 #endif
-	.ndo_get_stats		= bcm_sysport_get_nstats,
+	.ndo_get_stats64	= bcm_sysport_get_stats64,
+	.ndo_select_queue	= bcm_sysport_select_queue,
 };
 
+static int bcm_sysport_map_queues(struct net_device *dev,
+				  struct dsa_notifier_register_info *info)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_tx_ring *ring;
+	struct net_device *slave_dev;
+	unsigned int num_tx_queues;
+	unsigned int q, start, port;
+
+	/* We can't be setting up queue inspection for non directly attached
+	 * switches
+	 */
+	if (info->switch_number)
+		return 0;
+
+	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+		return 0;
+
+	port = info->port_number;
+	slave_dev = info->info.dev;
+
+	/* On SYSTEMPORT Lite we have twice as less queues, so we cannot do a
+	 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
+	 * per-port (slave_dev) network devices queue, we achieve just that.
+	 * This need to happen now before any slave network device is used such
+	 * it accurately reflects the number of real TX queues.
+	 */
+	if (priv->is_lite)
+		netif_set_real_num_tx_queues(slave_dev,
+					     slave_dev->num_tx_queues / 2);
+	num_tx_queues = slave_dev->real_num_tx_queues;
+
+	if (priv->per_port_num_tx_queues &&
+	    priv->per_port_num_tx_queues != num_tx_queues)
+		netdev_warn(slave_dev, "asymetric number of per-port queues\n");
+
+	priv->per_port_num_tx_queues = num_tx_queues;
+
+	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
+	for (q = 0; q < num_tx_queues; q++) {
+		ring = &priv->tx_rings[q + start];
+
+		/* Just remember the mapping actual programming done
+		 * during bcm_sysport_init_tx_ring
+		 */
+		ring->switch_queue = q;
+		ring->switch_port = port;
+		ring->inspect = true;
+		priv->ring_map[q + port * num_tx_queues] = ring;
+
+		/* Set all queues as being used now */
+		set_bit(q + start, &priv->queue_bitmap);
+	}
+
+	return 0;
+}
+
+static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+				    unsigned long event, void *ptr)
+{
+	struct dsa_notifier_register_info *info;
+
+	if (event != DSA_PORT_REGISTER)
+		return NOTIFY_DONE;
+
+	info = ptr;
+
+	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+}
+
 #define REV_FMT	"v%2x.%02x"
 
 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
@@ -2098,10 +2282,20 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	/* libphy will adjust the link state accordingly */
 	netif_carrier_off(dev);
 
+	u64_stats_init(&priv->syncp);
+
+	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+
+	ret = register_dsa_notifier(&priv->dsa_notifier);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register DSA notifier\n");
+		goto err_deregister_fixed_link;
+	}
+
 	ret = register_netdev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register net_device\n");
-		goto err_deregister_fixed_link;
+		goto err_deregister_notifier;
 	}
 
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -2114,6 +2308,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_deregister_notifier:
+	unregister_dsa_notifier(&priv->dsa_notifier);
 err_deregister_fixed_link:
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
@@ -2125,11 +2321,13 @@ err_free_netdev:
 static int bcm_sysport_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct device_node *dn = pdev->dev.of_node;
 
 	/* Not much to do, ndo_close has been called
 	 * and we use managed allocations
 	 */
+	unregister_dsa_notifier(&priv->dsa_notifier);
 	unregister_netdev(dev);
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 77a51c167a69..f5a984c1c986 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -404,7 +404,7 @@ struct bcm_rsb {
 #define  RING_CONS_INDEX_MASK		0xffff
 
 #define RING_MAPPING			0x14
-#define  RING_QID_MASK			0x3
+#define  RING_QID_MASK			0x7
 #define  RING_PORT_ID_SHIFT		3
 #define  RING_PORT_ID_MASK		0x7
 #define  RING_IGNORE_STATUS		(1 << 6)
@@ -449,7 +449,8 @@ struct bcm_rsb {
 /* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we
  * keep the SYSTEMPORT layout here and adjust with tdma_control_bit()
  */
-#define  TSB_SWAP			2
+#define  TSB_SWAP0			2
+#define  TSB_SWAP1			3
 #define  ACB_ALGO			3
 #define  BUF_DATA_OFFSET_SHIFT		4
 #define  BUF_DATA_OFFSET_MASK		0x3ff
@@ -603,6 +604,7 @@ struct bcm_sysport_mib {
 /* HW maintains a large list of counters */
 enum bcm_sysport_stat_type {
 	BCM_SYSPORT_STAT_NETDEV = -1,
+	BCM_SYSPORT_STAT_NETDEV64,
 	BCM_SYSPORT_STAT_MIB_RX,
 	BCM_SYSPORT_STAT_MIB_TX,
 	BCM_SYSPORT_STAT_RUNT,
@@ -619,6 +621,13 @@ enum bcm_sysport_stat_type {
 	.type = BCM_SYSPORT_STAT_NETDEV, \
 }
 
+#define STAT_NETDEV64(m) { \
+	.stat_string = __stringify(m), \
+	.stat_sizeof = sizeof(((struct bcm_sysport_stats64 *)0)->m), \
+	.stat_offset = offsetof(struct bcm_sysport_stats64, m), \
+	.type = BCM_SYSPORT_STAT_NETDEV64, \
+}
+
 #define STAT_MIB(str, m, _type) { \
 	.stat_string = str, \
 	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
@@ -659,6 +668,14 @@ struct bcm_sysport_stats {
 	u16 reg_offset;
 };
 
+struct bcm_sysport_stats64 {
+	/* 64bit stats on 32bit/64bit Machine */
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+};
+
 /* Software house keeping helper structure */
 struct bcm_sysport_cb {
 	struct sk_buff	*skb;		/* SKB for RX packets */
@@ -695,6 +712,9 @@ struct bcm_sysport_tx_ring {
 	struct bcm_sysport_priv *priv;	/* private context backpointer */
 	unsigned long	packets;	/* packets statistics */
 	unsigned long	bytes;		/* bytes statistics */
+	unsigned int	switch_queue;	/* switch port queue number */
+	unsigned int	switch_port;	/* switch port queue number */
+	bool		inspect;	/* inspect switch port and queue */
 };
 
 /* Driver private structure */
@@ -743,5 +763,17 @@ struct bcm_sysport_priv {
 
 	/* Ethtool */
 	u32			msg_enable;
+
+	struct bcm_sysport_stats64	stats64;
+
+	/* For atomic update generic 64bit value on 32bit Machine */
+	struct u64_stats_sync	syncp;
+
+	/* map information between switch port queues and local queues */
+	struct notifier_block	dsa_notifier;
+	unsigned int		per_port_num_tx_queues;
+	unsigned long		queue_bitmap;
+	struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
+
 };
 
 #endif /* __BCM_SYSPORT_H */
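The SYSTEMPORT changes above move the general rx/tx counters to u64 and guard them with a u64_stats_sync, so 32-bit hosts cannot observe a torn 64-bit read; on 64-bit hosts the sync collapses to nothing. A minimal sketch of the writer/reader pairing used throughout the driver (struct my_stats and the helper names are illustrative):

```c
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;	/* seqcount on 32-bit, no-op on 64-bit */
};

/* Writer side, e.g. the NAPI poll loop */
static void my_rx_account(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. ndo_get_stats64: retry if a writer raced with us */
static void my_rx_read(struct my_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
```

The sync must be initialized with u64_stats_init(&s->syncp) before first use, which is exactly what the probe hunks above add.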
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6322594ab260..6fe074c1588b 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -184,13 +184,19 @@ static int bgmac_probe(struct bcma_device *core)
 
 	if (!bgmac_is_bcm4707_family(core) &&
 	    !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
+		struct phy_device *phydev;
+
 		mii_bus = bcma_mdio_mii_register(bgmac);
 		if (IS_ERR(mii_bus)) {
 			err = PTR_ERR(mii_bus);
 			goto err;
 		}
-
 		bgmac->mii_bus = mii_bus;
+
+		phydev = mdiobus_get_phy(bgmac->mii_bus, bgmac->phyaddr);
+		if (ci->id == BCMA_CHIP_ID_BCM53573 && phydev &&
+		    (phydev->drv->phy_id & phydev->drv->phy_id_mask) == PHY_ID_BCM54210E)
+			phydev->dev_flags |= PHY_BRCM_EN_MASTER_MODE;
 	}
 
 	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index d937083db9a4..894eda5b13cf 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -131,6 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
 	switch (bgmac->net_dev->phydev->speed) {
 	default:
 		netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n");
+		/* fall through */
 	case SPEED_1000:
 		val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT;
 		break;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 48d672b204a4..1d96cd594ade 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -15,6 +15,7 @@
 #include <linux/bcm47xx_nvram.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
+#include <net/dsa.h>
 #include "bgmac.h"
 
 static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
@@ -127,6 +128,8 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 }
 
+#define ENET_BRCM_TAG_LEN	4
+
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 				    struct bgmac_dma_ring *ring,
 				    struct sk_buff *skb)
@@ -139,6 +142,18 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 	u32 flags;
 	int i;
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (netdev_uses_dsa(net_dev)) {
+		if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
+			goto err_stats;
+	}
+
 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
 		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
 		goto err_drop;
@@ -225,6 +240,7 @@ err_dma_head:
 
 err_drop:
 	dev_kfree_skb(skb);
+err_stats:
 	net_dev->stats.tx_dropped++;
 	net_dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 443d57b10264..4040d846da8e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BGMAC_H
 #define _BGMAC_H
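The bgmac change above pads DSA master transmissions so frames still satisfy the 64-byte Ethernet minimum after the switch strips the 4-byte Broadcom tag. A sketch of the check in isolation; MY_BRCM_TAG_LEN mirrors the patch's local ENET_BRCM_TAG_LEN, and the function name is illustrative:

```c
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dsa.h>

#define MY_BRCM_TAG_LEN	4	/* bytes consumed by the Broadcom tag */

static int my_pad_for_switch(struct net_device *dev, struct sk_buff *skb)
{
	/* ETH_ZLEN (60) plus FCS is the wire minimum; the switch validates
	 * length only after stripping the tag, so tag-bearing frames must be
	 * padded to ETH_ZLEN + tag length.  skb_put_padto() frees the skb
	 * on failure, so the caller only bumps drop counters.
	 */
	if (netdev_uses_dsa(dev))
		return skb_put_padto(skb, ETH_ZLEN + MY_BRCM_TAG_LEN);

	return 0;
}
```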
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e3af1f3cb61f..7919f6112ecf 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
 }
 
 static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
 {
-	struct bnx2 *bp = (struct bnx2 *) data;
+	struct bnx2 *bp = from_timer(bp, t, timer);
 
 	if (!netif_running(bp->dev))
 		return;
@@ -8462,10 +8462,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bnx2_set_default_link(bp);
 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
-	init_timer(&bp->timer);
+	timer_setup(&bp->timer, bnx2_timer, 0);
 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2_timer;
 
 #ifdef BCM_CNIC
 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 67fe3d826566..4c739d5355d2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4284,15 +4284,17 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 	return 0;
 }
 
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
-		     __be16 proto, struct tc_to_netdev *tc)
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data)
 {
-	if (tc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
+	struct tc_mqprio_qopt *mqprio = type_data;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EOPNOTSUPP;
 
-	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 
-	return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
+	return bnx2x_setup_tc(dev, mqprio->num_tc);
 }
 
 /* called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c26688d2f326..a5265e1344f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
 /* setup_tc callback */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
-		     __be16 proto, struct tc_to_netdev *tc);
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+		     void *type_data);
 
 int bnx2x_get_vf_config(struct net_device *dev, int vf,
 			struct ifla_vf_info *ivi);
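The bnx2x hunks above adopt the reworked ->ndo_setup_tc() calling convention from this merge window: the old handle/chain_index/protocol/tc_to_netdev argument list is replaced by an enum tc_setup_type discriminator plus an opaque type_data payload, and unsupported types now return -EOPNOTSUPP rather than -EINVAL. A sketch of the dispatch shape; my_configure_tcs() is a hypothetical driver helper standing in for bnx2x_setup_tc():

```c
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/pkt_cls.h>

/* hypothetical driver-specific traffic-class programming */
static int my_configure_tcs(struct net_device *dev, u8 num_tc)
{
	return 0;
}

static int my_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;	/* signals "offload type not handled" */

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;	/* claim hardware TC offload */
	return my_configure_tcs(dev, mqprio->num_tc);
}
```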
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c12b4d3e946e..91e2a7560b48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
 		 bp->fw_drv_pulse_wr_seq);
 }
 
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
 {
-	struct bnx2x *bp = (struct bnx2x *) data;
+	struct bnx2x *bp = from_timer(bp, t, timer);
 
 	if (!netif_running(bp->dev))
 		return;
@@ -9332,7 +9332,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* Schedule the rx_mode command */
 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
 		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
-	else
+	else if (bp->slowpath)
 		bnx2x_set_storm_rx_mode(bp);
 
 	/* Cleanup multicast configuration */
@@ -10271,8 +10271,15 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 		smp_mb();
 
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
-		bnx2x_nic_load(bp, LOAD_NORMAL);
-
+		/* When ret value shows failure of allocation failure,
+		 * the nic is rebooted again. If open still fails, a error
+		 * message to notify the user.
+		 */
+		if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
+			bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+			if (bnx2x_nic_load(bp, LOAD_NORMAL))
+				BNX2X_ERR("Open the NIC fails again!\n");
+		}
 		rtnl_unlock();
 		return;
 	}
@@ -12414,10 +12421,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
 
-	init_timer(&bp->timer);
+	timer_setup(&bp->timer, bnx2x_timer, 0);
 	bp->timer.expires = jiffies + bp->current_interval;
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2x_timer;
 
 	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
 	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ca994d0bab6..3591077a5f6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1074,11 +1074,6 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	}
 }
 
-static int bnx2x_ari_enabled(struct pci_dev *dev)
-{
-	return dev->bus->self && dev->bus->self->ari_enabled;
-}
-
 static int
 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
@@ -1212,7 +1207,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 	err = -EIO;
 	/* verify ari is enabled */
-	if (!bnx2x_ari_enabled(bp->pdev)) {
+	if (!pci_ari_enabled(bp->pdev->bus)) {
 		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
 		return 0;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index a7ca45b251cb..59c8ec9c1cad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_BNXT) += bnxt_en.o
 
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..c5c38d4b7d1c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -33,6 +33,7 @@
 #include <linux/mii.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
 #include <linux/rtc.h>
 #include <linux/bpf.h>
 #include <net/ip.h>
@@ -48,6 +49,8 @@
 #include <linux/aer.h>
 #include <linux/bitmap.h>
 #include <linux/cpu_rmap.h>
+#include <linux/cpumask.h>
+#include <net/pkt_cls.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -56,6 +59,9 @@
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
 #include "bnxt_xdp.h"
+#include "bnxt_vfr.h"
+#include "bnxt_tc.h"
+#include "bnxt_devlink.h"
 
 #define BNXT_TX_TIMEOUT		(5 * HZ)
 
@@ -101,47 +107,56 @@ enum board_idx {
 	BCM57416_NPAR,
 	BCM57452,
 	BCM57454,
+	BCM58802,
+	BCM58804,
+	BCM58808,
 	NETXTREME_E_VF,
 	NETXTREME_C_VF,
+	NETXTREME_S_VF,
 };
 
 /* indexed by enum above */
 static const struct {
 	char *name;
 } board_info[] = {
-	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
-	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
-	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
-	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
-	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
-	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
-	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
-	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
-	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
-	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
-	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
-	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
-	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
-	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
-	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
-	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
-	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
-	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
-	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
-	{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
-	{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
-	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
-	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
+	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
+	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
+	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
+	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
+	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
+	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
+	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
+	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
+	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
@@ -172,8 +187,10 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
-	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
+	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
+	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
 #ifdef CONFIG_BNXT_SRIOV
 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
@@ -183,6 +200,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
 #endif
 	{ 0 }
 };
@@ -203,9 +221,12 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
-	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
+	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+		idx == NETXTREME_S_VF);
 }
 
 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
@@ -243,6 +264,16 @@ const u16 bnxt_lhint_arr[] = {
 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
 };
 
+static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
+		return 0;
+
+	return md_dst->u.port_info.port_id;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -287,7 +318,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_buf->nr_frags = last_frag;
 
 	vlan_tag_flags = 0;
-	cfa_action = 0;
+	cfa_action = bnxt_xmit_get_cfa_action(skb);
 	if (skb_vlan_tag_present(skb)) {
 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
 				 skb_vlan_tag_get(skb);
@@ -322,7 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_push1->tx_bd_hsize_lflags = 0;
 
 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
-		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+		tx_push1->tx_bd_cfa_action =
+			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
 
 		end = pdata + length;
 		end = PTR_ALIGN(end, 8) - 1;
@@ -427,7 +459,8 @@ normal_tx:
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 
 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
-	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+	txbd1->tx_bd_cfa_action =
+			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
 	for (i = 0; i < last_frag; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -1001,12 +1034,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
 	return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		flush_workqueue(bnxt_pf_wq);
+	else
+		cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
 	if (!rxr->bnapi->in_reset) {
 		rxr->bnapi->in_reset = true;
 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 	rxr->rx_next_cons = 0xffff;
 }
@@ -1032,7 +1081,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		bnxt_sched_reset(bp, rxr);
 		return;
 	}
-
+	/* Store cfa_code in tpa_info to use in tpa_end
+	 * completion processing.
+	 */
+	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
 	prod_rx_buf->data = tpa_info->data;
 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
 
@@ -1267,6 +1319,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
 	return skb;
 }
 
+/* Given the cfa_code of a received packet determine which
+ * netdev (vf-rep or PF) the packet is destined to.
+ */
+static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
+{
+	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
+
+	/* if vf-rep dev is NULL, the must belongs to the PF */
+	return dev ? dev : bp->dev;
+}
+
 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 					   struct bnxt_napi *bnapi,
 					   u32 *raw_cons,
@@ -1360,7 +1423,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 			return NULL;
 		}
 	}
-	skb->protocol = eth_type_trans(skb, bp->dev);
+
+	skb->protocol =
+		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
 
 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
@@ -1387,6 +1452,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 	return skb;
 }
 
+static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
+			     struct sk_buff *skb)
+{
+	if (skb->dev != bp->dev) {
+		/* this packet belongs to a vf-rep */
+		bnxt_vf_rep_rx(bp, skb);
+		return;
+	}
+	skb_record_rx_queue(skb, bnapi->index);
+	napi_gro_receive(&bnapi->napi, skb);
+}
+
 /* returns the following:
  * 1 - 1 packet successfully received
  * 0 - successful TPA_START, packet not completed yet
@@ -1403,7 +1480,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	struct rx_cmp *rxcmp;
 	struct rx_cmp_ext *rxcmp1;
 	u32 tmp_raw_cons = *raw_cons;
-	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
 	struct bnxt_sw_rx_bd *rx_buf;
 	unsigned int len;
 	u8 *data_ptr, agg_bufs, cmp_type;
@@ -1440,13 +1517,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 				   (struct rx_tpa_end_cmp *)rxcmp,
 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
 
-		if (unlikely(IS_ERR(skb)))
+		if (IS_ERR(skb))
 			return -EBUSY;
 
 		rc = -ENOMEM;
 		if (likely(skb)) {
-			skb_record_rx_queue(skb, bnapi->index);
-			napi_gro_receive(&bnapi->napi, skb);
+			bnxt_deliver_skb(bp, bnapi, skb);
 			rc = 1;
 		}
 		*event |= BNXT_RX_EVENT;
@@ -1535,7 +1611,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
 	}
 
-	skb->protocol = eth_type_trans(skb, dev);
+	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
+	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
 
 	if ((rxcmp1->rx_cmp_flags2 &
 	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
@@ -1560,8 +1637,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 		}
 	}
 
-	skb_record_rx_queue(skb, bnapi->index);
-	napi_gro_receive(&bnapi->napi, skb);
+	bnxt_deliver_skb(bp, bnapi, skb);
 	rc = 1;
 
 next_rx:
@@ -1667,7 +1743,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	default:
 		goto async_event_process_exit;
 	}
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 async_event_process_exit:
 	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
@@ -1701,7 +1777,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 		break;
 
 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -1802,6 +1878,13 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 						 &event);
 			if (likely(rc >= 0))
 				rx_pkts += rc;
+			/* Increment rx_pkts when rc is -ENOMEM to count towards
+			 * the NAPI budget.  Otherwise, we may potentially loop
+			 * here forever if we consistently cannot allocate
+			 * buffers.
+			 */
+			else if (rc == -ENOMEM)
+				rx_pkts++;
 			else if (rc == -EBUSY)	/* partial completion */
 				break;
 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
@@ -2752,7 +2835,8 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 	if (page_mode) {
 		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
 			return -EOPNOTSUPP;
-		bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+		bp->dev->max_mtu =
+			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
 		bp->dev->hw_features &= ~NETIF_F_LRO;
@@ -2760,7 +2844,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 		bp->rx_dir = DMA_BIDIRECTIONAL;
 		bp->rx_skb_func = bnxt_rx_page_skb;
 	} else {
-		bp->dev->max_mtu = BNXT_MAX_MTU;
+		bp->dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
 		bp->rx_dir = DMA_FROM_DEVICE;
 		bp->rx_skb_func = bnxt_rx_skb;
@@ -3391,6 +3475,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+			      int timeout)
+{
+	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
 	int rc;
@@ -4420,22 +4510,73 @@ static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
 	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (!rc)
+		bp->tx_reserved_rings = *tx_rings;
 	return rc;
 }
 
-static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
-				      u32 buf_tmrs, u16 flags,
+static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10801)
+		return 0;
+
+	if (BNXT_VF(bp))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+	return 0;
+}
+
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
 {
+	u16 val, tmr, max, flags;
+
+	max = hw_coal->bufs_per_record * 128;
+	if (hw_coal->budget)
+		max = hw_coal->bufs_per_record * hw_coal->budget;
+
+	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
+	req->num_cmpl_aggr_int = cpu_to_le16(val);
+
+	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+	val = min_t(u16, val, 63);
+	req->num_cmpl_dma_aggr = cpu_to_le16(val);
+
+	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
+	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
+
+	tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
+	tmr = max_t(u16, tmr, 1);
+	req->int_lat_tmr_max = cpu_to_le16(tmr);
+
+	/* min timer set to 1/2 of interrupt timer */
+	val = tmr / 2;
+	req->int_lat_tmr_min = cpu_to_le16(val);
+
+	/* buf timer set to 1/4 of interrupt timer */
+	val = max_t(u16, tmr / 4, 1);
+	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
+
+	tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
+	tmr = max_t(u16, tmr, 1);
+	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+
+	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+	if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
 	req->flags = cpu_to_le16(flags);
-	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
-	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
-	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
-	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
-	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
-	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
-	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
-	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
 }
 
 int bnxt_hwrm_set_coal(struct bnxt *bp)
@@ -4443,51 +4584,14 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
 	int i, rc = 0;
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
 							   req_tx = {0}, *req;
-	u16 max_buf, max_buf_irq;
-	u16 buf_tmr, buf_tmr_irq;
-	u32 flags;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
 	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
 
-	/* Each rx completion (2 records) should be DMAed immediately.
-	 * DMA 1/4 of the completion buffers at a time.
-	 */
-	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
-	/* max_buf must not be zero */
-	max_buf = clamp_t(u16, max_buf, 1, 63);
-	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
-	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
-	/* buf timer set to 1/4 of interrupt timer */
-	buf_tmr = max_t(u16, buf_tmr / 4, 1);
-	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
-	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-
-	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
-
-	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
-	 * if coal_ticks is less than 25 us.
-	 */
-	if (bp->rx_coal_ticks < 25)
-		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
-
-	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
-				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
-
-	/* max_buf must not be zero */
-	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
-	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
-	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
-	/* buf timer set to 1/4 of interrupt timer */
-	buf_tmr = max_t(u16, buf_tmr / 4, 1);
-	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
-	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-
-	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
-	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
-				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
+	bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
+	bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
 
 	mutex_lock(&bp->hwrm_cmd_lock);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -4577,6 +4681,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u16 flags;
 	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
@@ -4593,15 +4698,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
 	}
 #endif
-	if (BNXT_PF(bp)) {
-		u16 flags = le16_to_cpu(resp->flags);
-
-		if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
-			     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
-			bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
-		if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
-			bp->flags |= BNXT_FLAG_MULTI_HOST;
+	flags = le16_to_cpu(resp->flags);
+	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
+		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
+			bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
 	}
+	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
+		bp->flags |= BNXT_FLAG_MULTI_HOST;
 
 	switch (resp->port_partition_type) {
 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -4610,6 +4715,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 		bp->port_partition_type = resp->port_partition_type;
 		break;
 	}
+	if (bp->hwrm_spec_code < 0x10707 ||
+	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
+		bp->br_mode = BRIDGE_MODE_VEB;
+	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
+		bp->br_mode = BRIDGE_MODE_VEPA;
+	else
+		bp->br_mode = BRIDGE_MODE_UNDEF;
+
+	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+	if (!bp->max_mtu)
+		bp->max_mtu = BNXT_MAX_MTU;
 
 func_qcfg_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4647,7 +4763,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->port_id = le16_to_cpu(resp->port_id);
 		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
-		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
 		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4802,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 
 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
-		mutex_unlock(&bp->hwrm_cmd_lock);
-
-		if (is_valid_ether_addr(vf->mac_addr)) {
-			/* overwrite netdev dev_adr with admin VF MAC */
-			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
-		} else {
-			eth_hw_addr_random(bp->dev);
-			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
-		}
-		return rc;
 #endif
 	}
 
@@ -4782,9 +4887,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
 			    resp->hwrm_intf_upd);
 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
 	}
-	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
+	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
 		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
-		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+		 resp->hwrm_fw_rsvd);
 
 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
 	if (!bp->hwrm_cmd_timeout)
@@ -4810,16 +4915,14 @@ hwrm_ver_get_exit:
 
 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
 {
-#if IS_ENABLED(CONFIG_RTC_LIB)
 	struct hwrm_fw_set_time_input req = {0};
-	struct rtc_time tm;
-	struct timeval tv;
+	struct tm tm;
+	time64_t now = ktime_get_real_seconds();
 
 	if (bp->hwrm_spec_code < 0x10400)
 		return -EOPNOTSUPP;
 
-	do_gettimeofday(&tv);
-	rtc_time_to_tm(tv.tv_sec, &tm);
+	time64_to_tm(now, 0, &tm);
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
 	req.year = cpu_to_le16(1900 + tm.tm_year);
 	req.month = 1 + tm.tm_mon;
@@ -4828,9 +4931,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
 	req.minute = tm.tm_min;
 	req.second = tm.tm_sec;
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-#else
-	return -EOPNOTSUPP;
-#endif
 }
 
 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
@@ -4911,6 +5011,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
 	}
 }
 
+static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
+	if (br_mode == BRIDGE_MODE_VEB)
+		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
+	else if (br_mode == BRIDGE_MODE_VEPA)
+		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
+	else
+		return -EINVAL;
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	return rc;
+}
+
 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5046,6 +5166,15 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 				   rc);
 			goto err_out;
 		}
+		if (bp->tx_reserved_rings != bp->tx_nr_rings) {
+			int tx = bp->tx_nr_rings;
+
+			if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
+			    tx < bp->tx_nr_rings) {
+				rc = -ENOMEM;
+				goto err_out;
+			}
+		}
 	}
 
 	rc = bnxt_hwrm_ring_alloc(bp);
@@ -5452,8 +5581,15 @@ static void bnxt_free_irq(struct bnxt *bp)
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		irq = &bp->irq_tbl[i];
-		if (irq->requested)
+		if (irq->requested) {
+			if (irq->have_cpumask) {
+				irq_set_affinity_hint(irq->vector, NULL);
+				free_cpumask_var(irq->cpu_mask);
+				irq->have_cpumask = 0;
+			}
 			free_irq(irq->vector, bp->bnapi[i]);
+		}
+
 		irq->requested = 0;
 	}
 }
@@ -5486,6 +5622,21 @@ static int bnxt_request_irq(struct bnxt *bp)
 			break;
 
 		irq->requested = 1;
+
+		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+			int numa_node = dev_to_node(&bp->pdev->dev);
+
+			irq->have_cpumask = 1;
+			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+					irq->cpu_mask);
+			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+			if (rc) {
+				netdev_warn(bp->dev,
+					    "Set affinity failed, IRQ = %d\n",
+					    irq->vector);
+				break;
+			}
+		}
 	}
 	return rc;
 }
@@ -5559,12 +5710,10 @@ void bnxt_tx_disable(struct bnxt *bp)
 {
 	int i;
 	struct bnxt_tx_ring_info *txr;
-	struct netdev_queue *txq;
 
 	if (bp->tx_ring) {
 		for (i = 0; i < bp->tx_nr_rings; i++) {
 			txr = &bp->tx_ring[i];
-			txq = netdev_get_tx_queue(bp->dev, i);
txr->dev_state = BNXT_DEV_STATE_CLOSING; } } @@ -5577,11 +5726,9 @@ void bnxt_tx_enable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } netif_tx_wake_all_queues(bp->dev); @@ -5646,7 +5793,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; - if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); @@ -5661,6 +5808,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); + bp->port_count = resp->port_cnt; + hwrm_phy_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -5686,13 +5835,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); link_info->phy_link_status = resp->link; - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; + if (bp->hwrm_spec_code >= 0x10800) + link_info->duplex = resp->duplex_state; link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex; + link_info->duplex_setting = resp->duplex_cfg; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else @@ -6198,7 +6349,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } if (link_re_init) { + mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); + mutex_unlock(&bp->link_lock); if (rc) netdev_warn(bp->dev, "failed to update phy settings\n"); } @@ -6214,6 +6367,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Poll link status and check for SFP+ module status */ bnxt_get_port_module_status(bp); + /* VF-reps may need to be re-opened after the PF is re-opened */ + if (BNXT_PF(bp)) + bnxt_vf_reps_open(bp); return 0; open_err: @@ -6302,6 +6458,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); } + + /* Close the VF-reps before closing PF */ + if (BNXT_PF(bp)) + bnxt_vf_reps_close(bp); #endif /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); @@ -6511,7 +6671,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) vnic->rx_mask = mask; set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } } @@ -6784,7 +6944,7 @@ static void bnxt_tx_timeout(struct net_device *dev) netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -6802,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev) } #endif -static void bnxt_timer(unsigned long data) +static void bnxt_timer(struct timer_list *t) { - struct bnxt *bp = (struct bnxt *)data; + struct bnxt *bp = from_timer(bp, t, timer); struct net_device *dev = bp->dev; if (!netif_running(dev)) @@ -6813,9 +6973,15 @@ static void bnxt_timer(unsigned long data) if 
(atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && + bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); + } + + if (bnxt_tc_flower_enabled(bp)) { + set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); @@ -6888,30 +7054,32 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); - /* These functions below will clear BNXT_STATE_IN_SP_TASK. They - * must be the last functions to be called before exiting. - */ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { - int rc = 0; + int rc; + mutex_lock(&bp->link_lock); if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event)) bnxt_hwrm_phy_qcaps(bp); - bnxt_rtnl_lock_sp(bp); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - rc = bnxt_update_link(bp, true); - bnxt_rtnl_unlock_sp(bp); + rc = bnxt_update_link(bp, true); + mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); } if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { - bnxt_rtnl_lock_sp(bp); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_get_port_module_status(bp); - bnxt_rtnl_unlock_sp(bp); + mutex_lock(&bp->link_lock); + bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); } + + if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) + bnxt_tc_flow_stats_work(bp); + + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They + * must be the last functions to be called before exiting. + */ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, false); @@ -6923,8 +7091,8 @@ static void bnxt_sp_task(struct work_struct *work) } /* Under rtnl_lock */ -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, - int tx_xdp) +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp) { int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; @@ -6944,10 +7112,7 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; - if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || - tx_rings_needed < (tx * tx_sets + tx_xdp)) - return -ENOMEM; - return 0; + return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -6975,6 +7140,32 @@ static void bnxt_cleanup_pci(struct bnxt *bp) pci_disable_device(bp->pdev); } +static void bnxt_init_dflt_coal(struct bnxt *bp) +{ + struct bnxt_coal *coal; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. 
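+ * For example, with the RX defaults below (coal_ticks = 14,
+ * coal_bufs = 30, bufs_per_record = 2), an interrupt fires after
+ * roughly 15 packets (30 bufs / 2 bufs per record) or 14 usec,
+ * whichever comes first; BNXT_USEC_TO_COAL_TIMER(14) = 14 * 25 / 2
+ * = 175 hardware timer ticks.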
+ */ + coal = &bp->rx_coal; + coal->coal_ticks = 14; + coal->coal_bufs = 30; + coal->coal_ticks_irq = 1; + coal->coal_bufs_irq = 2; + coal->idle_thresh = 25; + coal->bufs_per_record = 2; + coal->budget = 64; /* NAPI budget */ + + coal = &bp->tx_coal; + coal->coal_ticks = 28; + coal->coal_bufs = 30; + coal->coal_ticks_irq = 2; + coal->coal_bufs_irq = 2; + coal->bufs_per_record = 1; + + bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -7043,22 +7234,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; - /* tick values in micro seconds */ - bp->rx_coal_ticks = 12; - bp->rx_coal_bufs = 30; - bp->rx_coal_ticks_irq = 1; - bp->rx_coal_bufs_irq = 2; + bnxt_init_dflt_coal(bp); - bp->tx_coal_ticks = 25; - bp->tx_coal_bufs = 30; - bp->tx_coal_ticks_irq = 2; - bp->tx_coal_bufs_irq = 2; - - bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; - - init_timer(&bp->timer); - bp->timer.data = (unsigned long)bp; - bp->timer.function = bnxt_timer; + timer_setup(&bp->timer, bnxt_timer, 0); bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); @@ -7085,13 +7263,13 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + rc = bnxt_approve_mac(bp, addr->sa_data); if (rc) return rc; - if (ether_addr_equal(addr->sa_data, dev->dev_addr)) - return 0; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (netif_running(dev)) { bnxt_close_nic(bp, false, false); @@ -7136,8 +7314,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - sh, tc, bp->tx_nr_rings_xdp); + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + sh, tc, bp->tx_nr_rings_xdp); if (rc) return rc; @@ -7152,6 +7330,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) bp->tx_nr_rings = bp->tx_nr_rings_per_tc; netdev_reset_tc(dev); } + bp->tx_nr_rings += bp->tx_nr_rings_xdp; bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; @@ -7162,15 +7341,58 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { - if (ntc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct bnxt *bp = cb_priv; + + if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct bnxt *bp = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, + bp, bp); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return bnxt_setup_tc_block(dev, type_data); + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_qopt *mqprio = type_data; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); + return bnxt_setup_mq_tc(dev, mqprio->num_tc); + } + default: + return -EOPNOTSUPP; + } } #ifdef CONFIG_RFS_ACCEL @@ -7280,7 +7502,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_unlock_bh(&bp->ntp_fltr_lock); set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); return new_fltr->sw_id; @@ -7363,7 +7585,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, if (bp->vxlan_port_cnt == 1) { bp->vxlan_port = ti->port; set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } break; case UDP_TUNNEL_TYPE_GENEVE: @@ -7380,7 +7602,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, return; } - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); } static void bnxt_udp_tunnel_del(struct net_device *dev, @@ -7419,9 +7641,105 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, return; } - schedule_work(&bp->sp_task); + bnxt_queue_sp_work(bp); +} + +static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct bnxt *bp = netdev_priv(dev); + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, + nlflags, filter_mask, NULL); +} + +static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem, rc = 0; + + if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode == bp->br_mode) + break; + + rc = 
bnxt_hwrm_set_br_mode(bp, mode);
+		if (!rc)
+			bp->br_mode = mode;
+		break;
+	}
+	return rc;
+}
+
+static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
+				   size_t len)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+
+	/* The PF and its VF-reps only support the switchdev framework */
+	if (!BNXT_PF(bp))
+		return -EOPNOTSUPP;
+
+	rc = snprintf(buf, len, "p%d", bp->pf.port_id);
+
+	if (rc >= len)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
+{
+	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+		return -EOPNOTSUPP;
+
+	/* The PF and its VF-reps only support the switchdev framework */
+	if (!BNXT_PF(bp))
+		return -EOPNOTSUPP;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		/* In SRIOV each PF-pool (PF + child VFs) serves as a
+		 * switching domain; the PF's perm mac-addr can be used
+		 * as the unique parent-id
+		 */
+		attr->u.ppid.id_len = ETH_ALEN;
+		ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int bnxt_swdev_port_attr_get(struct net_device *dev,
+				    struct switchdev_attr *attr)
+{
+	return bnxt_port_attr_get(netdev_priv(dev), attr);
+}
+
+static const struct switchdev_ops bnxt_switchdev_ops = {
+	.switchdev_port_attr_get	= bnxt_swdev_port_attr_get
+};
+
 static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_open		= bnxt_open,
 	.ndo_start_xmit		= bnxt_start_xmit,
@@ -7452,7 +7770,10 @@ static const struct net_device_ops bnxt_netdev_ops = {
 #endif
 	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
 	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
-	.ndo_xdp		= bnxt_xdp,
+	.ndo_bpf		= bnxt_xdp,
+	.ndo_bridge_getlink	= bnxt_bridge_getlink,
+	.ndo_bridge_setlink	= bnxt_bridge_setlink,
+	.ndo_get_phys_port_name = bnxt_get_phys_port_name
 };

 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnxt *bp = netdev_priv(dev);

-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
 		bnxt_sriov_disable(bp);
+		bnxt_dl_unregister(bp);
+	}

 	pci_disable_pcie_error_reporting(pdev);
 	unregister_netdev(dev);
-	cancel_work_sync(&bp->sp_task);
+	bnxt_shutdown_tc(bp);
+	bnxt_cancel_sp_work(bp);
 	bp->sp_event = 0;

 	bnxt_clear_int_mode(bp);
@@ -7493,6 +7817,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
 			   rc);
 		return rc;
 	}
+	mutex_init(&bp->link_lock);

 	rc = bnxt_update_link(bp, false);
 	if (rc) {
@@ -7633,6 +7958,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 	if (sh)
 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	dflt_rings = netif_get_num_default_rss_queues();
+	/* Reduce default rings to reduce memory usage on multi-port cards */
+	if (bp->port_count > 1)
+		dflt_rings = min_t(int, dflt_rings, 4);
 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 	if (rc)
 		return rc;
@@ -7661,12 +7989,34 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
 	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
 }

+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+	int rc = 0;
+
+	if (BNXT_PF(bp)) {
+		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		if (is_valid_ether_addr(vf->mac_addr)) {
+			/* overwrite netdev dev_addr with admin VF MAC */
+			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+		} else {
+			eth_hw_addr_random(bp->dev);
+			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+		}
+#endif
+	}
+	return rc;
+}
+
 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 {
 	enum
pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; - if (pcie_get_minimum_link(bp->pdev, &speed, &width) || + if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) || speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); else @@ -7710,6 +8060,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -7758,12 +8109,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_UNICAST_FLT; - /* MTU range: 60 - 9500 */ - dev->min_mtu = ETH_ZLEN; - dev->max_mtu = BNXT_MAX_MTU; - #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); + mutex_init(&bp->sriov_lock); #endif bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4_PLUS(bp)) @@ -7789,7 +8137,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = -1; goto init_err_pci_clean; } - + rc = bnxt_init_mac_addr(bp); + if (rc) { + dev_err(&pdev->dev, "Unable to initialize mac address.\n"); + rc = -EADDRNOTAVAIL; + goto init_err_pci_clean; + } rc = bnxt_hwrm_queue_qportcfg(bp); if (rc) { netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", @@ -7803,6 +8156,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_ethtool_init(bp); bnxt_dcb_init(bp); + /* MTU range: 60 - FW defined max */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = bp->max_mtu; + + rc = bnxt_probe_phy(bp); + if (rc) + goto init_err_pci_clean; + bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); @@ -7837,10 +8198,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; - rc = bnxt_probe_phy(bp); - if (rc) - goto init_err_pci_clean; - rc = bnxt_init_int_mode(bp); if (rc) goto init_err_pci_clean; @@ -7851,9 +8208,24 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else device_set_wakeup_capable(&pdev->dev, false); + if (BNXT_PF(bp)) { + if (!bnxt_pf_wq) { + bnxt_pf_wq = + create_singlethread_workqueue("bnxt_pf_wq"); + if (!bnxt_pf_wq) { + dev_err(&pdev->dev, "Unable to create workqueue.\n"); + goto init_err_pci_clean; + } + } + bnxt_init_tc(bp); + } + rc = register_netdev(dev); if (rc) - goto init_err_clr_int; + goto init_err_cleanup_tc; + + if (BNXT_PF(bp)) + bnxt_dl_register(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, @@ -7863,7 +8235,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -init_err_clr_int: +init_err_cleanup_tc: + bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); init_err_pci_clean: @@ -8081,4 +8454,17 @@ static struct pci_driver bnxt_pci_driver = { #endif }; -module_pci_driver(bnxt_pci_driver); +static int __init bnxt_init(void) +{ + return pci_register_driver(&bnxt_pci_driver); +} + +static void __exit bnxt_exit(void) +{ + pci_unregister_driver(&bnxt_pci_driver); + if (bnxt_pf_wq) + destroy_workqueue(bnxt_pf_wq); +} + +module_init(bnxt_init); +module_exit(bnxt_exit); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f34691f85602..5359a1f0045f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,13 +12,17 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.7.0" +#define DRV_MODULE_VERSION "1.8.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 7 +#define DRV_VER_MIN 8 #define DRV_VER_UPD 0 #include <linux/interrupt.h> +#include <linux/rhashtable.h> +#include <net/devlink.h> +#include <net/dst_metadata.h> +#include <net/switchdev.h> struct tx_bd { __le32 tx_bd_len_flags_type; @@ -242,6 +246,10 @@ struct rx_cmp_ext { ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) +#define RX_CMP_CFA_CODE(rxcmpl1) \ + ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) + struct rx_agg_cmp { __le32 rx_agg_cmp_len_flags_type; #define RX_AGG_CMP_TYPE (0x3f << 0) @@ -311,6 +319,10 @@ struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_hdr_info; }; +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -618,6 +630,8 @@ struct bnxt_tpa_info { #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) + + u16 cfa_code; /* cfa_code in TPA start compl */ }; struct bnxt_rx_ring_info { @@ -688,8 +702,10 @@ struct bnxt_napi { struct bnxt_irq { irq_handler_t handler; unsigned int vector; - u8 requested; + u8 requested:1; + u8 have_cpumask:1; char name[IFNAMSIZ + 2]; + cpumask_var_t cpu_mask; }; #define HWRM_RING_ALLOC_TX 0x1 @@ -825,8 +841,8 @@ struct bnxt_link_info { u8 loop_back; u8 link_up; u8 duplex; -#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF -#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL u8 pause; #define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX @@ -928,6 +944,78 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 +struct bnxt_coal { + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + /* RING_IDLE enabled when coal ticks < idle_thresh */ + u16 idle_thresh; + u8 bufs_per_record; + u8 budget; +}; + +struct bnxt_tc_flow_stats { + u64 packets; + u64 bytes; +}; + +struct bnxt_tc_info { + bool enabled; + + /* hash table to store TC offloaded flows */ + struct rhashtable flow_table; + struct rhashtable_params flow_ht_params; + + /* hash table to store L2 keys of TC flows */ + struct rhashtable l2_table; + struct rhashtable_params l2_ht_params; + /* hash table to store L2 keys for TC tunnel decap */ + struct rhashtable decap_l2_table; + struct rhashtable_params decap_l2_ht_params; + /* hash table to store tunnel decap entries */ + struct rhashtable decap_table; + struct rhashtable_params decap_ht_params; + /* hash table to store tunnel encap entries */ + struct rhashtable encap_table; + struct rhashtable_params encap_ht_params; + + /* lock to atomically add/del an l2 node when a flow is + * added or deleted. 
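+	 * (Several flows may hash to the same l2 node, so node
+	 * add/del must be serialized with flow add/del.)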
+ */ + struct mutex lock; + + /* Fields used for batching stats query */ + struct rhashtable_iter iter; +#define BNXT_FLOW_STATS_BATCH_MAX 10 + struct bnxt_tc_stats_batch { + void *flow_node; + struct bnxt_tc_flow_stats hw_stats; + } stats_batch[BNXT_FLOW_STATS_BATCH_MAX]; + + /* Stat counter mask (width) */ + u64 bytes_mask; + u64 packets_mask; +}; + +struct bnxt_vf_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct bnxt_vf_rep { + struct bnxt *bp; + struct net_device *dev; + struct metadata_dst *dst; + u16 vf_idx; + u16 tx_cfa_action; + u16 rx_cfa_code; + + struct bnxt_vf_rep_stats rx_stats; + struct bnxt_vf_rep_stats tx_stats; +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -957,6 +1045,10 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 +#define CHIP_NUM_58802 0xd802 +#define CHIP_NUM_58804 0xd804 +#define CHIP_NUM_58808 0xd808 + #define BNXT_CHIP_NUM_5730X(chip_num) \ ((chip_num) >= CHIP_NUM_57301 && \ (chip_num) <= CHIP_NUM_57304) @@ -988,6 +1080,11 @@ struct bnxt { #define BNXT_CHIP_NUM_57X1X(chip_num) \ (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) +#define BNXT_CHIP_NUM_588XX(chip_num) \ + ((chip_num) == CHIP_NUM_58802 || \ + (chip_num) == CHIP_NUM_58804 || \ + (chip_num) == CHIP_NUM_58808) + struct net_device *dev; struct pci_dev *pdev; @@ -1027,6 +1124,7 @@ struct bnxt { #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_SHORT_CMD 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 + #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1045,6 +1143,7 @@ struct bnxt { #define BNXT_CHIP_P4_PLUS(bp) \ (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \ BNXT_CHIP_NUM_5745X((bp)->chip_num) || \ + BNXT_CHIP_NUM_588XX((bp)->chip_num) || \ (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ !BNXT_CHIP_TYPE_NITRO_A0(bp))) @@ -1086,6 +1185,7 @@ struct bnxt { int tx_nr_rings; int tx_nr_rings_per_tc; int tx_nr_rings_xdp; + int tx_reserved_rings; int tx_wake_thresh; int tx_push_thresh; @@ -1105,6 +1205,7 @@ struct bnxt { int nr_vnics; u32 rss_hash_cfg; + u16 max_mtu; u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; @@ -1164,15 +1265,11 @@ struct bnxt { u8 nge_port_cnt; __le16 nge_fw_dst_port_id; u8 port_partition_type; + u8 port_count; + u16 br_mode; - u16 rx_coal_ticks; - u16 rx_coal_ticks_irq; - u16 rx_coal_bufs; - u16 rx_coal_bufs_irq; - u16 tx_coal_ticks; - u16 tx_coal_ticks_irq; - u16 tx_coal_bufs; - u16 tx_coal_bufs_irq; + struct bnxt_coal rx_coal; + struct bnxt_coal tx_coal; #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) @@ -1198,6 +1295,7 @@ struct bnxt { #define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 #define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 +#define BNXT_FLOW_STATS_SP_EVENT 15 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV @@ -1206,6 +1304,12 @@ struct bnxt { wait_queue_head_t sriov_cfg_wait; bool sriov_cfg; #define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) + + /* lock to protect VF-rep creation/cleanup via + * multiple paths such as ->sriov_configure() and + * devlink ->eswitch_mode_set() + */ + struct mutex sriov_lock; #endif #define BNXT_NTP_FLTR_MAX_FLTR 4096 @@ -1217,6 +1321,10 @@ struct bnxt { unsigned long *ntp_fltr_bmap; int ntp_fltr_count; + /* To protect link related settings during link changes and + * ethtool settings changes. 
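+	 * Taken by bnxt_sp_task(), __bnxt_open_nic() and the
+	 * ethtool get/set_link_ksettings handlers.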
+ */ + struct mutex link_lock; struct bnxt_link_info link_info; struct ethtool_eee eee; u32 lpi_tmr_lo; @@ -1232,6 +1340,13 @@ struct bnxt { struct bnxt_led_info leds[BNXT_MAX_LED]; struct bpf_prog *xdp_prog; + + /* devlink interface and vf-rep structs */ + struct devlink *dl; + enum devlink_eswitch_mode eswitch_mode; + struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ + u16 *cfa_code_map; /* cfa_code -> vf_idx map */ + struct bnxt_tc_info *tc_info; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1278,6 +1393,7 @@ void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); +int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); int hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, @@ -1301,9 +1417,10 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, - int tx_xdp); +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 5c6dd0ce209f..fed37cd9ae1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i, j; @@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) } } } + mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -93,6 +96,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS; cos2bw.bw_weight = ets->tc_tx_bw[i]; + /* older firmware requires min_bw to be set to the + * same weight value in percent. 
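+		 * e.g. a weight of 25 is sent as min_bw = 2500 with
+		 * the BW_VALUE_UNIT_PERCENT1_100 bit set, i.e. units
+		 * of 1/100th of a percent.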
+ */ + cos2bw.min_bw = + cpu_to_le32((ets->tc_tx_bw[i] * 100) | + BW_VALUE_UNIT_PERCENT1_100); } memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (i == 0) { @@ -113,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) int rc, i; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); return rc; + } data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { @@ -137,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) } } } + mutex_unlock(&bp->hwrm_cmd_lock); return 0; } @@ -234,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); return rc; + } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; + mutex_unlock(&bp->hwrm_cmd_lock); return 0; } @@ -549,13 +568,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct bnxt *bp = netdev_priv(dev); - /* only support IEEE */ - if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) + /* All firmware DCBX settings are set in NVRAM */ + if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return 1; if (mode & DCB_CAP_DCBX_HOST) { if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) return 1; + + /* only support IEEE */ + if ((mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE)) + return 1; } if (mode == bp->dcbx_cap) @@ -584,7 +608,7 @@ void bnxt_dcb_init(struct bnxt *bp) bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; - else + else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT) bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; bp->dev->dcbnl_ops = &dcbnl_ops; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index ecd0a5e46a49..d2e0af960bf5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c new file mode 100644 index 000000000000..402fa32f7a88 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -0,0 +1,65 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_devlink.h" + +static const struct devlink_ops bnxt_dl_ops = { +#ifdef CONFIG_BNXT_SRIOV + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get, +#endif /* CONFIG_BNXT_SRIOV */ +}; + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + return -ENOTSUPP; + } + + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed"); + return -ENOMEM; + } + + bnxt_link_bp_to_dl(bp, dl); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); + if (rc) { + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + return rc; + } + + return 0; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + if (!dl) + return; + + devlink_unregister(dl); + devlink_free(dl); +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h new file mode 100644 index 000000000000..e92a35d8b642 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -0,0 +1,39 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_DEVLINK_H +#define BNXT_DEVLINK_H + +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +/* To clear devlink pointer from bp, pass NULL dl */ +static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) +{ + bp->dl = dl; + + /* add a back pointer in dl to bp */ + if (dl) { + struct bnxt_dl *bp_dl = devlink_priv(dl); + + bp_dl->bp = bp; + } +} + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); + +#endif /* BNXT_DEVLINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index be6acadcb202..7ce1d4b7e67d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -26,8 +26,6 @@ #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen); - static u32 bnxt_get_msglevel(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -46,19 +44,24 @@ static int bnxt_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + struct bnxt_coal *hw_coal; + u16 mult; memset(coal, 0, sizeof(*coal)); - coal->rx_coalesce_usecs = bp->rx_coal_ticks; - /* 2 completion records per rx packet */ - coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2; - coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq; - coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2; 
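+	/* ethtool reports frames, but the hardware counts completion
+	 * records; bufs_per_record (2 records per RX packet, 1 per TX
+	 * packet) converts between the two.
+	 */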
+ hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + coal->rx_coalesce_usecs = hw_coal->coal_ticks; + coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; - coal->tx_coalesce_usecs = bp->tx_coal_ticks; - coal->tx_max_coalesced_frames = bp->tx_coal_bufs; - coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; - coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + coal->tx_coalesce_usecs = hw_coal->coal_ticks; + coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; @@ -70,25 +73,32 @@ static int bnxt_set_coalesce(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); bool update_stats = false; + struct bnxt_coal *hw_coal; int rc = 0; - - bp->rx_coal_ticks = coal->rx_coalesce_usecs; - /* 2 completion records per rx packet */ - bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2; - bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq; - bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; - - bp->tx_coal_ticks = coal->tx_coalesce_usecs; - bp->tx_coal_bufs = coal->tx_max_coalesced_frames; - bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; - bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; + u16 mult; + + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->rx_coalesce_usecs; + hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; + + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->tx_coalesce_usecs; + hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; - stats_ticks = clamp_t(u32, stats_ticks, - BNXT_MIN_STATS_COAL_TICKS, - BNXT_MAX_STATS_COAL_TICKS); + /* Allow 0, which means disable. 
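+		 * Non-zero values are clamped to
+		 * [BNXT_MIN_STATS_COAL_TICKS, BNXT_MAX_STATS_COAL_TICKS]
+		 * and then rounded down to a multiple of the minimum.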
*/ + if (stats_ticks) + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); bp->stats_coal_ticks = stats_ticks; update_stats = true; @@ -198,19 +208,23 @@ static const struct { #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +static int bnxt_get_num_stats(struct bnxt *bp) +{ + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; +} + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; - - if (bp->flags & BNXT_FLAG_PORT_STATS) - num_stats += BNXT_NUM_PORT_STATS; - - return num_stats; - } + case ETH_SS_STATS: + return bnxt_get_num_stats(bp); case ETH_SS_TEST: if (!bp->num_tests) return -EOPNOTSUPP; @@ -225,11 +239,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; - memset(buf, 0, buf_size); - if (!bp->bnapi) return; @@ -432,8 +443,7 @@ static int bnxt_set_channels(struct net_device *dev, } tx_xdp = req_rx_rings; } - rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, - tx_xdp); + rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); if (rc) { netdev_warn(dev, "Unable to allocate the requested rings\n"); return rc; @@ -520,7 +530,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) struct flow_keys *fkeys; int i, rc = -EINVAL; - if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) return rc; for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { @@ -820,28 +830,17 @@ static void bnxt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnxt *bp = netdev_priv(dev); - char *pkglog; - char *pkgver = NULL; - pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); - if (pkglog) - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) - snprintf(info->fw_version, sizeof(info->fw_version) - 1, - "%s pkg %s", bp->fw_ver_str, pkgver); - else - strlcpy(info->fw_version, bp->fw_ver_str, - sizeof(info->fw_version)); + strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->n_stats = bnxt_get_num_stats(bp); info->testinfo_len = bp->num_tests; /* TODO CHIMP_FW: eeprom dump details */ info->eedump_len = 0; /* TODO CHIMP FW: reg dump details */ info->regdump_len = 0; - kfree(pkglog); } static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1050,6 +1049,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, u32 ethtool_speed; ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + mutex_lock(&bp->link_lock); bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); @@ -1097,6 +1097,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, base->port = PORT_FIBRE; } base->phy_address = link_info->phy_addr; + 
mutex_unlock(&bp->link_lock); return 0; } @@ -1188,6 +1189,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, if (!BNXT_SINGLE_PF(bp)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); if (base->autoneg == AUTONEG_ENABLE) { BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, advertising); @@ -1232,6 +1234,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); set_setting_exit: + mutex_unlock(&bp->link_lock); return rc; } @@ -1344,7 +1347,6 @@ static int bnxt_firmware_reset(struct net_device *dev, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ /* (e.g. when firmware isn't already running) */ switch (dir_type) { @@ -1370,6 +1372,10 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNX_DIR_TYPE_BONO_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; break; + case BNXT_FW_RESET_CHIP: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + break; default: return -EINVAL; } @@ -1767,6 +1773,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, dma_addr_t dma_handle; struct hwrm_nvm_read_input req = {0}; + if (!length) + return -EINVAL; + buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, GFP_KERNEL); if (!buf) { @@ -1803,7 +1812,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, req.dir_ordinal = cpu_to_le16(ordinal); req.dir_ext = cpu_to_le16(ext); req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -1812,6 +1822,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } + mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -2487,13 +2498,59 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } } +static int bnxt_reset(struct net_device *dev, u32 *flags) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "Reset is not supported from a VF\n"); + return -EOPNOTSUPP; + } + + if (pci_vfs_assigned(bp->pdev)) { + netdev_err(dev, + "Reset not allowed when VFs are assigned to VMs\n"); + return -EBUSY; + } + + if (*flags == ETH_RESET_ALL) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10803) + return -EOPNOTSUPP; + + rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); + if (!rc) + netdev_info(dev, "Reset request successful. 
Reload driver to complete reset\n"); + } else { + rc = -EINVAL; + } + + return rc; +} + void bnxt_ethtool_init(struct bnxt *bp) { struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_selftest_qlist_input req = {0}; struct bnxt_test_info *test_info; + struct net_device *dev = bp->dev; + char *pkglog; int i, rc; + pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); + if (pkglog) { + char *pkgver; + int len; + + pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", pkgver); + } + kfree(pkglog); + } if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; @@ -2584,4 +2641,5 @@ const struct ethtool_ops bnxt_ethtool_ops = { .nway_reset = bnxt_nway_reset, .set_phys_id = bnxt_set_phys_id, .self_test = bnxt_self_test, + .reset = bnxt_reset, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index f1bc90b6fb5b..ff601b42fcc8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -34,6 +34,8 @@ struct bnxt_led_cfg { #define BNXT_LED_DFLT_ENABLES(x) \ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) +#define BNXT_FW_RESET_CHIP 0xffff + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7dc71bb95837..c99f4d0880e4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,21 +11,21 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.7.6 */ +/* HSI and HWRM Specification 1.8.3 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 6 +#define HWRM_VERSION_MINOR 8 +#define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */ +#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */ -#define HWRM_VERSION_STR "1.7.6.2" +#define HWRM_VERSION_STR "1.8.3.1" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. */ #define HWRM_NA_SIGNATURE ((__le32)(-1)) #define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */ +#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ #define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. 
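 * i.e. 2^7 = 128 (0x80) entries.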
*/ #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ @@ -111,6 +111,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL __le32 event_data2; u8 opaque_v; @@ -813,7 +814,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL - #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -835,9 +836,7 @@ struct hwrm_func_qcfg_output { u8 port_pf_cnt; #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL __le16 dflt_vnic_id; - u8 host_cnt; - #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL - u8 unused_0; + __le16 max_mtu_configured; __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 @@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL - u8 unused_1; + u8 unused_0; __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; + u8 unused_1; + u8 valid; +}; + +/* hwrm_func_vlan_cfg */ +/* Input (48 bytes) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0; + u8 unused_1; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; + u8 unused_2; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_3; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + __le32 unused_4; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; u8 unused_2; + u8 unused_3; u8 valid; }; @@ -902,6 +945,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1456,9 +1501,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL - u8 duplex; - #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL - #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL u8 pause; #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL @@ -1573,6 +1618,9 @@ struct hwrm_port_phy_qcfg_output { #define 
PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -1651,14 +1699,16 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL u8 unused_1; - u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_3; + __le32 unused_2; + u8 unused_3; u8 unused_4; u8 unused_5; - u8 unused_6; u8 valid; }; @@ -1744,6 +1794,51 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; +/* hwrm_port_mac_ptp_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0; + __le16 unused_1; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1874,11 +1969,16 @@ struct hwrm_port_phy_qcaps_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 eee_supported; - #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 - u8 unused_0; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL __le16 supported_speeds_force_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL @@ -3152,6 +3252,95 @@ struct hwrm_queue_cos2bw_cfg_output { u8 valid; }; +/* hwrm_queue_dscp_qcaps */ +/* Input (24 bytes) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg */ +/* Input (32 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_vnic_alloc */ /* Input (24 bytes) */ struct hwrm_vnic_alloc_input { @@ -3218,6 +3407,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -3274,6 +3464,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL __le32 unused_2; u8 unused_3; u8 unused_4; @@ -3805,6 +3996,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 unused_7; __le16 dst_id; @@ -3933,6 +4125,14 @@ struct hwrm_cfa_l2_set_rx_mask_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + u8 unused_0[7]; +}; + /* hwrm_cfa_tunnel_filter_alloc */ /* Input (88 bytes) */ struct hwrm_cfa_tunnel_filter_alloc_input { @@ -3972,6 +4172,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 unused_0; __le32 vni; @@ -4038,7 +4239,7 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL u8 unused_0; __le16 unused_1; - __le32 
encap_data[16]; + __le32 encap_data[20]; }; /* Output (16 bytes) */ @@ -4120,8 +4321,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL u8 ip_protocol; #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; @@ -4134,6 +4335,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 pri_hint; #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL @@ -4166,6 +4368,14 @@ struct hwrm_cfa_ntuple_filter_alloc_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL + u8 unused_0[7]; +}; + /* hwrm_cfa_ntuple_filter_free */ /* Input (24 bytes) */ struct hwrm_cfa_ntuple_filter_free_input { @@ -4224,6 +4434,326 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_decap_filter_alloc */ +/* Input (104 bytes) */ +struct hwrm_cfa_decap_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define 
CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2; + u8 unused_3; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + u8 unused_4; + u8 unused_5; + u8 unused_6[3]; + u8 unused_7; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_decap_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 decap_filter_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_decap_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_decap_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_flow_alloc */ +/* Input (128 bytes) */ +struct hwrm_cfa_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + __le16 dst_fid; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 
ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + +/* hwrm_cfa_flow_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + __le16 unused_0[3]; +}; + +/* Output (32 bytes) */ +struct hwrm_cfa_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet; + __le64 byte; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_flow_stats */ +/* Input (40 bytes) */ +struct hwrm_cfa_flow_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + __le16 flow_handle_9; + __le16 unused_0; +}; + +/* Output (176 bytes) */ +struct hwrm_cfa_flow_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + __le32 unused_0; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_vfr_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_tunnel_dst_port_query */ /* Input (24 bytes) */ struct hwrm_tunnel_dst_port_query_input { @@ -4235,6 +4765,7 @@ struct hwrm_tunnel_dst_port_query_input { u8 tunnel_type; #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0[7]; }; @@ -4263,9 +4794,10 @@ struct hwrm_tunnel_dst_port_alloc_input { u8 tunnel_type; #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define 
TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0; __be16 tunnel_dst_port_val; - __le32 unused_1; + __be32 unused_1; }; /* Output (16 bytes) */ @@ -4294,6 +4826,7 @@ struct hwrm_tunnel_dst_port_free_input { u8 tunnel_type; #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0; __le16 tunnel_dst_port_id; __le32 unused_1; @@ -4448,12 +4981,15 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - __le16 unused_0[3]; + u8 host_idx; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4487,7 +5023,9 @@ struct hwrm_fw_qstatus_input { #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL u8 unused_0[7]; }; @@ -4572,6 +5110,16 @@ struct hwrm_fw_set_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_fw_get_structured_data */ /* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { @@ -4611,6 +5159,14 @@ struct hwrm_fw_get_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { @@ -4906,6 +5462,32 @@ struct hwrm_wol_reason_qcfg_output { u8 valid; }; +/* hwrm_dbg_read_direct */ +/* Input (32 bytes) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* Output (16 bytes) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_nvm_read */ /* Input (40 bytes) */ struct hwrm_nvm_read_input { @@ -5258,6 +5840,105 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +/* hwrm_nvm_get_variable */ +/* Input (40 bytes) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + 
__le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable */ +/* Input (40 bytes) */ +struct hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + u8 unused_0[7]; +}; + /* hwrm_selftest_qlist */ /* Input (16 bytes) */ struct hwrm_selftest_qlist_input { @@ -5268,7 +5949,7 @@ struct hwrm_selftest_qlist_input { __le64 resp_addr; }; -/* Output (248 bytes) */ +/* Output (280 bytes) */ struct hwrm_selftest_qlist_output { __le16 error_code; __le16 req_type; @@ -5280,11 +5961,15 @@ struct hwrm_selftest_qlist_output { #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 offline_tests; #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 
unused_0; __le16 test_timeout; u8 unused_1; @@ -5297,6 +5982,11 @@ struct hwrm_selftest_qlist_output { char test5_name[32]; char test6_name[32]; char test7_name[32]; + __le32 unused_3; + u8 unused_4; + u8 unused_5; + u8 unused_6; + u8 valid; }; /* hwrm_selftest_exec */ @@ -5312,6 +6002,8 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL u8 unused_0[7]; }; @@ -5326,12 +6018,21 @@ struct hwrm_selftest_exec_output { #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 test_success; #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL - __le16 unused_0[3]; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; }; /* hwrm_selftest_irq */ @@ -5344,12 +6045,50 @@ struct hwrm_selftest_irq_input { __le64 resp_addr; }; -/* Output (8 bytes) */ +/* Output (16 bytes) */ struct hwrm_selftest_irq_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_selftest_retrieve_serdes_data */ +/* Input (32 bytes) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 data_len; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; }; /* Hardware Resource Manager Specification */ @@ -5411,7 +6150,7 @@ struct cmd_nums { #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) - #define RESERVED7 (0x29UL) + #define HWRM_PORT_MAC_PTP_QCFG (0x29UL) #define HWRM_PORT_PHY_QCAPS (0x2aUL) #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) #define HWRM_PORT_PHY_I2C_READ (0x2cUL) @@ -5421,14 +6160,17 @@ struct cmd_nums { #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) - #define RESERVED2 (0x33UL) - #define RESERVED3 (0x34UL) + #define HWRM_FUNC_VLAN_CFG (0x33UL) + #define HWRM_FUNC_VLAN_QCFG (0x34UL) #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) #define 
HWRM_QUEUE_COS2BW_QCFG (0x39UL) #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) + #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL) + #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL) + #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL) #define HWRM_VNIC_ALLOC (0x40UL) #define HWRM_VNIC_FREE (0x41UL) #define HWRM_VNIC_CFG (0x42UL) @@ -5455,7 +6197,7 @@ struct cmd_nums { #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define RESERVED4 (0x94UL) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -5494,6 +6236,8 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) + #define HWRM_CFA_VFR_ALLOC (0xfdUL) + #define HWRM_CFA_VFR_FREE (0xfeUL) #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) #define HWRM_CFA_VF_PAIR_FREE (0x101UL) #define HWRM_CFA_VF_PAIR_INFO (0x102UL) @@ -5502,14 +6246,29 @@ struct cmd_nums { #define HWRM_CFA_FLOW_FLUSH (0x105UL) #define HWRM_CFA_FLOW_STATS (0x106UL) #define HWRM_CFA_FLOW_INFO (0x107UL) + #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) + #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL) + #define HWRM_CFA_PAIR_ALLOC (0x10dUL) + #define HWRM_CFA_PAIR_FREE (0x10eUL) + #define HWRM_CFA_PAIR_INFO (0x10fUL) + #define HWRM_FW_IPC_MSG (0x110UL) #define HWRM_SELFTEST_QLIST (0x200UL) #define HWRM_SELFTEST_EXEC (0x201UL) #define HWRM_SELFTEST_IRQ (0x202UL) + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_DBG_ERASE_NVM (0xff15UL) + #define HWRM_DBG_CFG (0xff16UL) + #define HWRM_DBG_COREDUMP_LIST (0xff17UL) + #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL) + #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_FLUSH (0xfff0UL) @@ -5684,6 +6443,58 @@ struct rx_port_stats { __le64 rx_stat_err; }; +/* VXLAN IPv4 encapsulation structure (16 bytes) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* VXLAN IPv6 encapsulation structure (32 bytes) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* VXLAN encapsulation structure (72 bytes) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 
dst_mac_addr[6]; + u8 num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + __be16 src_port; + __be16 dst_port; + __be32 vni; +}; + /* Periodic Statistics Context DMA to host (160 bytes) */ struct ctx_hw_stats { __le64 rx_ucast_pkts; @@ -5720,6 +6531,7 @@ struct hwrm_struct_hdr { #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL __le16 len; u8 version; u8 count; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index b8e7248294d9..5ee18660bc33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -18,6 +18,7 @@ #include "bnxt.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" +#include "bnxt_vfr.h" #include "bnxt_ethtool.h" #ifdef CONFIG_BNXT_SRIOV @@ -501,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) int rc = 0, vfs_supported; int min_rx_rings, min_tx_rings, min_rss_ctxs; int tx_ok = 0, rx_ok = 0, rss_ok = 0; + int avail_cp, avail_stat; /* Check if we can enable requested num of vf's. At a minimum * we require 1 RX 1 TX rings for each VF. In this minimum conf */ vfs_supported = *num_vfs; + avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings; + avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs; + avail_cp = min_t(int, avail_cp, avail_stat); + while (vfs_supported) { min_rx_rings = vfs_supported; min_tx_rings = vfs_supported; @@ -522,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) min_rx_rings) rx_ok = 1; } - if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) + if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings || + avail_cp < min_rx_rings) rx_ok = 0; - if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) + if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings && + avail_cp >= min_tx_rings) tx_ok = 1; if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) @@ -587,6 +595,10 @@ void bnxt_sriov_disable(struct bnxt *bp) if (!num_vfs) return; + /* synchronize VF and VF-rep create and destroy */ + mutex_lock(&bp->sriov_lock); + bnxt_vf_reps_destroy(bp); + if (pci_vfs_assigned(bp->pdev)) { bnxt_hwrm_fwd_async_event_cmpl( bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); @@ -597,6 +609,7 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Free the HW resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, num_vfs); } + mutex_unlock(&bp->sriov_lock); bnxt_free_vf_resources(bp); @@ -794,8 +807,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_LINK; phy_qcfg_resp.link_speed = cpu_to_le16( PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); - phy_qcfg_resp.duplex = - PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.duplex_cfg = + PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; phy_qcfg_resp.pause = (PORT_PHY_QCFG_RESP_PAUSE_TX | PORT_PHY_QCFG_RESP_PAUSE_RX); @@ -804,7 +819,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) /* force link down */ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; phy_qcfg_resp.link_speed = 0; - phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; phy_qcfg_resp.pause = 0; } rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
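The bnxt_sriov_enable() hunk above searches downward from the requested VF count until each VF's minimum ring demand fits the PF's spare resources, with this patch adding a shared cap on completion rings and stat contexts. A minimal standalone sketch of that search, using hypothetical resource counts and collapsing the per-resource checks into one condition:

#include <stdio.h>

/* Illustrative only -- not driver code. Mirrors the shape of the
 * bnxt_sriov_enable() loop: each VF needs at least one RX ring, one
 * TX ring and (after this patch) one completion/stat context, so the
 * loop settles on the largest VF count that still fits.
 */
int main(void)
{
	int avail_rx = 12, avail_tx = 10, avail_cp = 8;	/* hypothetical */
	int vfs_supported = 16;				/* requested VFs */

	while (vfs_supported) {
		int min_rings = vfs_supported;	/* one ring of each per VF */

		if (avail_rx >= min_rings && avail_tx >= min_rings &&
		    avail_cp >= min_rings)
			break;			/* this many VFs will fit */
		vfs_supported--;
	}
	printf("vfs_supported = %d\n", vfs_supported);	/* prints 8 */
	return 0;
}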
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c new file mode 100644 index 000000000000..d5031f436f83 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -0,0 +1,1616 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/if_vlan.h> +#include <net/flow_dissector.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_skbedit.h> +#include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_vlan.h> +#include <net/tc_act/tc_tunnel_key.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_sriov.h" +#include "bnxt_tc.h" +#include "bnxt_vfr.h" + +#define BNXT_FID_INVALID 0xffff +#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) + +/* Return the dst fid of the func for flow forwarding + * For PFs: src_fid is the fid of the PF + * For VF-reps: src_fid is the fid of the VF + */ +static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) +{ + struct bnxt *bp; + + /* check if dev belongs to the same switch */ + if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) { + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", + dev->ifindex); + return BNXT_FID_INVALID; + } + + /* Is dev a VF-rep? */ + if (dev != pf_bp->dev) + return bnxt_vf_rep_get_fid(dev); + + bp = netdev_priv(dev); + return bp->pf.fw_fid; +} + +static int bnxt_tc_parse_redir(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + u16 dst_fid; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); + if (!dev) { + netdev_info(bp->dev, "no dev for ifindex=%d", ifindex); + return -EINVAL; + } + + /* find the FID from dev */ + dst_fid = bnxt_flow_get_dst_fid(bp, dev); + if (dst_fid == BNXT_FID_INVALID) { + netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex); + return -EINVAL; + } + + actions->flags |= BNXT_TC_ACTION_FLAG_FWD; + actions->dst_fid = dst_fid; + actions->dst_dev = dev; + return 0; +} + +static void bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { + actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; + } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { + actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; + actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); + actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + } +} + +static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); + struct ip_tunnel_key *tun_key = &tun_info->key; + + if (ip_tunnel_info_af(tun_info) != AF_INET) { + netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); + return -EOPNOTSUPP; + } + + actions->tun_encap_key = *tun_key; + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP; + return 0; +} +
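bnxt_tc_parse_vlan() above stores the pushed VLAN ID using the VLAN_TCI() macro defined at the top of this file. As a quick worked example (standalone and illustrative only), packing VLAN ID 100 at priority 3 yields TCI 0x6064:

#include <stdio.h>

/* Same arithmetic as the driver's VLAN_TCI() macro; VLAN_PRIO_SHIFT is
 * 13 in <linux/if_vlan.h>, since the 3 PCP bits sit above the 12-bit
 * VID (and below the DEI bit) in the tag control information field.
 */
#define VLAN_PRIO_SHIFT 13
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))

int main(void)
{
	printf("0x%04x\n", VLAN_TCI(100, 3));	/* prints 0x6064 */
	return 0;
}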
+static int bnxt_tc_parse_actions(struct bnxt *bp, + struct bnxt_tc_actions *actions, + struct tcf_exts *tc_exts) +{ + const struct tc_action *tc_act; + LIST_HEAD(tc_actions); + int rc = 0; + + if (!tcf_exts_has_actions(tc_exts)) { + netdev_info(bp->dev, "no actions"); + return -EINVAL; + } + + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { + /* Drop action */ + if (is_tcf_gact_shot(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_DROP; + return 0; /* don't bother with other actions */ + } + + /* Redirect action */ + if (is_tcf_mirred_egress_redirect(tc_act)) { + rc = bnxt_tc_parse_redir(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Push/pop VLAN */ + if (is_tcf_vlan(tc_act)) { + bnxt_tc_parse_vlan(bp, actions, tc_act); + continue; + } + + /* Tunnel encap */ + if (is_tcf_tunnel_set(tc_act)) { + rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel decap */ + if (is_tcf_tunnel_release(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; + continue; + } + } + + if (rc) + return rc; + + /* Tunnel encap/decap action must be accompanied by a redirect action */ + if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP || + actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) && + !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) { + netdev_info(bp->dev, + "error: no redir action along with encap/decap"); + return -EINVAL; + } + + return rc; +} + +#define GET_KEY(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->key) +#define GET_MASK(flow_cmd, key_type) \ + skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ + (flow_cmd)->mask) + +static int bnxt_tc_parse_flow(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd, + struct bnxt_tc_flow *flow) +{ + struct flow_dissector *dissector = tc_flow_cmd->dissector; + u16 addr_type = 0; + + /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ + if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + struct flow_dissector_key_basic *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + + flow->l2_key.ether_type = key->n_proto; + flow->l2_mask.ether_type = mask->n_proto; + + if (key->n_proto == htons(ETH_P_IP) || + key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = key->ip_proto; + flow->l4_mask.ip_proto = mask->ip_proto; + } + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + struct flow_dissector_key_eth_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; + ether_addr_copy(flow->l2_key.dmac, key->dst); + ether_addr_copy(flow->l2_mask.dmac, mask->dst); + ether_addr_copy(flow->l2_key.smac, key->src); + ether_addr_copy(flow->l2_mask.smac, mask->src); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + struct flow_dissector_key_vlan *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + + flow->l2_key.inner_vlan_tci = + cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); + flow->l2_mask.inner_vlan_tci = + cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); + flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); + flow->l2_mask.inner_vlan_tpid = htons(0xffff); + flow->l2_key.num_vlans = 1; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; + flow->l3_key.ipv4.daddr.s_addr = key->dst; + flow->l3_mask.ipv4.daddr.s_addr = mask->dst; + flow->l3_key.ipv4.saddr.s_addr = key->src; + flow->l3_mask.ipv4.saddr.s_addr = mask->src; + } else if (dissector_uses_key(dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_dissector_key_ipv6_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + struct flow_dissector_key_ipv6_addrs *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; + flow->l3_key.ipv6.daddr = key->dst; + flow->l3_mask.ipv6.daddr = mask->dst; + flow->l3_key.ipv6.saddr = key->src; + flow->l3_mask.ipv6.saddr = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; + flow->l4_key.ports.dport = key->dst; + flow->l4_mask.ports.dport = mask->dst; + flow->l4_key.ports.sport = key->src; + flow->l4_mask.ports.sport = mask->src; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_dissector_key_icmp *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + struct flow_dissector_key_icmp *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + + flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; + flow->l4_key.icmp.type = key->type; + flow->l4_key.icmp.code = key->code; + flow->l4_mask.icmp.type = mask->type; + flow->l4_mask.icmp.code = mask->code; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_control *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; + flow->tun_key.u.ipv4.dst = key->dst; + flow->tun_mask.u.ipv4.dst = mask->dst; + flow->tun_key.u.ipv4.src = key->src; + flow->tun_mask.u.ipv4.src = mask->src; + } else if (dissector_uses_key(dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + struct flow_dissector_key_keyid *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; + flow->tun_key.tun_id = 
key32_to_tunnel_id(key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; + flow->tun_key.tp_dst = key->dst; + flow->tun_mask.tp_dst = mask->dst; + flow->tun_key.tp_src = key->src; + flow->tun_mask.tp_src = mask->src; + } + + return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); +} + +static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) +{ + struct hwrm_cfa_flow_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); + req.flow_handle = flow_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", + __func__, flow_handle, rc); + return rc; +} + +static int ipv6_mask_len(struct in6_addr *mask) +{ + int mask_len = 0, i; + + for (i = 0; i < 4; i++) + mask_len += inet_mask_len(mask->s6_addr32[i]); + + return mask_len; +} + +static bool is_wildcard(void *mask, int len) +{ + const u8 *p = mask; + int i; + + for (i = 0; i < len; i++) { + if (p[i] != 0) + return false; + } + return true; +} +
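ipv6_mask_len() above simply sums the per-word prefix lengths of the four 32-bit words of an IPv6 netmask. A standalone sketch of the same computation, where mask_len32() is a hypothetical stand-in for the kernel's inet_mask_len():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for inet_mask_len(): counts the leading one
 * bits of a contiguous netmask word (value given in host bit order
 * here, purely for illustration).
 */
static int mask_len32(uint32_t mask)
{
	int len = 0;

	while (mask & 0x80000000u) {
		len++;
		mask <<= 1;
	}
	return len;
}

int main(void)
{
	/* A /64 IPv6 mask split into four 32-bit words, the way
	 * ipv6_mask_len() sees it: 32 + 32 + 0 + 0 = 64.
	 */
	uint32_t words[4] = { 0xffffffffu, 0xffffffffu, 0, 0 };
	int i, len = 0;

	for (i = 0; i < 4; i++)
		len += mask_len32(words[i]);
	printf("%d\n", len);	/* prints 64 */
	return 0;
}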
+static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, + __le16 ref_flow_handle, + __le32 tunnel_handle, __le16 *flow_handle) +{ + struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_tc_actions *actions = &flow->actions; + struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; + struct bnxt_tc_l3_key *l3_key = &flow->l3_key; + struct hwrm_cfa_flow_alloc_input req = { 0 }; + u16 flow_flags = 0, action_flags = 0; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + + req.src_fid = cpu_to_le16(flow->src_fid); + req.ref_flow_handle = ref_flow_handle; + + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || + actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + req.tunnel_handle = tunnel_handle; + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; + } + + req.ethertype = flow->l2_key.ether_type; + req.ip_proto = flow->l4_key.ip_proto; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { + memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + } + + if (flow->l2_key.num_vlans > 0) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE; + /* FW expects the inner_vlan_tci value to be set + * in outer_vlan_tci when num_vlans is 1 (which is + * always the case in TC.) + */ + req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + } + + /* If all IP and L4 fields are wildcarded then this is an L2 flow */ + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && + is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; + } else { + flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
+ CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 : + CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { + req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req.ip_dst_mask_len = + inet_mask_len(l3_mask->ipv4.daddr.s_addr); + req.ip_src[0] = l3_key->ipv4.saddr.s_addr; + req.ip_src_mask_len = + inet_mask_len(l3_mask->ipv4.saddr.s_addr); + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { + memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req.ip_dst)); + req.ip_dst_mask_len = + ipv6_mask_len(&l3_mask->ipv6.daddr); + memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req.ip_src)); + req.ip_src_mask_len = + ipv6_mask_len(&l3_mask->ipv6.saddr); + } + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { + req.l4_src_port = flow->l4_key.ports.sport; + req.l4_src_port_mask = flow->l4_mask.ports.sport; + req.l4_dst_port = flow->l4_key.ports.dport; + req.l4_dst_port_mask = flow->l4_mask.ports.dport; + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { + /* l4 ports serve as type/code when ip_proto is ICMP */ + req.l4_src_port = htons(flow->l4_key.icmp.type); + req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req.l4_dst_port = htons(flow->l4_key.icmp.code); + req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + } + req.flags = cpu_to_le16(flow_flags); + + if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; + } else { + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; + req.dst_fid = cpu_to_le16(actions->dst_fid); + } + if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req.l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); + memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + } + if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + /* Rewrite config with tpid = 0 implies vlan pop */ + req.l2_rewrite_vlan_tpid = 0; + memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); + memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + } + } + req.action_flags = cpu_to_le16(action_flags); + + mutex_lock(&bp->hwrm_cmd_lock); + + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *flow_handle = resp->flow_handle; + + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_info, + __le32 ref_decap_handle, + __le32 *decap_filter_handle) +{ + struct hwrm_cfa_decap_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; + struct ip_tunnel_key *tun_key = &flow->tun_key; + u32 enables = 0; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + + req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; + req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; + /* tunnel_id is wrongly defined in hsi defn.
as __le32 */ + req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR; + ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req.src_macaddr, l2_info->smac); + } + if (l2_info->num_vlans) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; + req.t_ivlan_vid = l2_info->inner_vlan_tci; + } + + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; + req.ethertype = htons(ETH_P_IP); + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; + req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req.dst_ipaddr[0] = tun_key->u.ipv4.dst; + req.src_ipaddr[0] = tun_key->u.ipv4.src; + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; + req.dst_port = tun_key->tp_dst; + } + + /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc + * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. + */ + req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req.enables = cpu_to_le32(enables); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *decap_filter_handle = resp->decap_filter_id; + else + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int hwrm_cfa_decap_filter_free(struct bnxt *bp, + __le32 decap_filter_handle) +{ + struct hwrm_cfa_decap_filter_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); + req.decap_filter_id = decap_filter_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + return rc; +} + +static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, + struct ip_tunnel_key *encap_key, + struct bnxt_tc_l2_key *l2_info, + __le32 *encap_record_handle) +{ + struct hwrm_cfa_encap_record_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_encap_record_alloc_input req = { 0 }; + struct hwrm_cfa_encap_data_vxlan *encap = + (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = + (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); + + req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + + ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); + ether_addr_copy(encap->src_mac_addr, l2_info->smac); + if (l2_info->num_vlans) { + encap->num_vlan_tags = l2_info->num_vlans; + encap->ovlan_tci = l2_info->inner_vlan_tci; + encap->ovlan_tpid = l2_info->inner_vlan_tpid; + } + + encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; + encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; + encap_ipv4->ttl = encap_key->ttl; + + encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst; + encap_ipv4->src_ip_addr = encap_key->u.ipv4.src; + encap_ipv4->protocol = IPPROTO_UDP; + + encap->dst_port = encap_key->tp_dst; + encap->vni = tunnel_id_to_key32(encap_key->tun_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *encap_record_handle = resp->encap_record_id; + else + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; +} + +static int hwrm_cfa_encap_record_free(struct bnxt *bp, + __le32 encap_record_handle) +{ + struct hwrm_cfa_encap_record_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); + req.encap_record_id = encap_record_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + return rc; +} +
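The two ver_hlen shifts in hwrm_cfa_encap_record_alloc() above compose the standard first octet of an IPv4 header, using the VXLAN_IPV4_HDR_VER_HLEN_* shifts from bnxt_hsi.h (VERSION_SFT is 4, HEADER_LENGTH_SFT is 0). A standalone sketch of that arithmetic, illustrative only:

#include <stdio.h>

/* Mirrors the ver_hlen math above: version 4 in the high nibble,
 * header length 5 (5 * 4 = 20 bytes, no IP options) in the low nibble,
 * giving the familiar 0x45 first byte of a plain IPv4 header.
 */
int main(void)
{
	unsigned char ver_hlen = (4 << 4) | (5 << 0);

	printf("0x%02x\n", ver_hlen);	/* prints 0x45 */
	return 0;
}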
+static int bnxt_tc_put_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* remove flow_node from the L2 shared flow list */ + list_del(&flow_node->l2_list_node); + if (--l2_node->refcount == 0) { + rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node, + tc_info->l2_ht_params); + if (rc) + netdev_err(bp->dev, + "Error: %s: rhashtable_remove_fast: %d", + __func__, rc); + kfree_rcu(l2_node, rcu); + } + return 0; +} + +static struct bnxt_tc_l2_node * +bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, + struct rhashtable_params ht_params, + struct bnxt_tc_l2_key *l2_key) +{ + struct bnxt_tc_l2_node *l2_node; + int rc; + + l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params); + if (!l2_node) { + l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL); + if (!l2_node) { + rc = -ENOMEM; + return NULL; + } + + l2_node->key = *l2_key; + rc = rhashtable_insert_fast(l2_table, &l2_node->node, + ht_params); + if (rc) { + kfree_rcu(l2_node, rcu); + netdev_err(bp->dev, + "Error: %s: rhashtable_insert_fast: %d", + __func__, rc); + return NULL; + } + INIT_LIST_HEAD(&l2_node->common_l2_flows); + } + return l2_node; +} + +/* Get the ref_flow_handle for a flow by checking if there are any other + * flows that share the same L2 key as this flow. + */ +static int +bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le16 *ref_flow_handle) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *ref_flow_node; + struct bnxt_tc_l2_node *l2_node; + + l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table, + tc_info->l2_ht_params, + &flow->l2_key); + if (!l2_node) + return -1; + + /* If any other flow is using this l2_node, use its flow_handle + * as the ref_flow_handle + */ + if (l2_node->refcount > 0) { + ref_flow_node = list_first_entry(&l2_node->common_l2_flows, + struct bnxt_tc_flow_node, + l2_list_node); + *ref_flow_handle = ref_flow_node->flow_handle; + } else { + *ref_flow_handle = cpu_to_le16(0xffff); + } + + /* Insert the l2_node into the flow_node so that subsequent flows + * with a matching l2 key can use the flow_handle of this flow + * as their ref_flow_handle + */ + flow_node->l2_node = l2_node; + list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows); + l2_node->refcount++; + return 0; +} + +/* After the flow parsing is done, this routine is used for checking + * if there are any aspects of the flow that prevent it from being + * offloaded.
+ */ +static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) +{ + /* If L4 ports are specified then ip_proto must be TCP or UDP */ + if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && + (flow->l4_key.ip_proto != IPPROTO_TCP && + flow->l4_key.ip_proto != IPPROTO_UDP)) { + netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports", + flow->l4_key.ip_proto); + return false; + } + + return true; +} + +/* Returns the final refcount of the node on success + * or a -ve error code on failure + */ +static int bnxt_tc_put_tunnel_node(struct bnxt *bp, + struct rhashtable *tunnel_table, + struct rhashtable_params *ht_params, + struct bnxt_tc_tunnel_node *tunnel_node) +{ + int rc; + + if (--tunnel_node->refcount == 0) { + rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, + *ht_params); + if (rc) { + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + rc = -1; + } + kfree_rcu(tunnel_node, rcu); + return rc; + } else { + return tunnel_node->refcount; + } +} + +/* Get (or add) either encap or decap tunnel node from/to the supplied + * hash table. + */ +static struct bnxt_tc_tunnel_node * +bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table, + struct rhashtable_params *ht_params, + struct ip_tunnel_key *tun_key) +{ + struct bnxt_tc_tunnel_node *tunnel_node; + int rc; + + tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params); + if (!tunnel_node) { + tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL); + if (!tunnel_node) { + rc = -ENOMEM; + goto err; + } + + tunnel_node->key = *tun_key; + tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE; + rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node, + *ht_params); + if (rc) { + kfree_rcu(tunnel_node, rcu); + goto err; + } + } + tunnel_node->refcount++; + return tunnel_node; +err: + netdev_info(bp->dev, "error rc=%d", rc); + return NULL; +} + +static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_key, + struct bnxt_tc_flow_node *flow_node, + __le32 *ref_decap_handle) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *ref_flow_node; + struct bnxt_tc_l2_node *decap_l2_node; + + decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table, + tc_info->decap_l2_ht_params, + l2_key); + if (!decap_l2_node) + return -1; + + /* If any other flow is using this decap_l2_node, use its decap_handle + * as the ref_decap_handle + */ + if (decap_l2_node->refcount > 0) { + ref_flow_node = + list_first_entry(&decap_l2_node->common_l2_flows, + struct bnxt_tc_flow_node, + decap_l2_list_node); + *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle; + } else { + *ref_decap_handle = INVALID_TUNNEL_HANDLE; + } + + /* Insert the l2_node into the flow_node so that subsequent flows + * with a matching decap l2 key can use the decap_filter_handle of + * this flow as their ref_decap_handle + */ + flow_node->decap_l2_node = decap_l2_node; + list_add(&flow_node->decap_l2_list_node, + &decap_l2_node->common_l2_flows); + decap_l2_node->refcount++; + return 0; +} + +static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* remove flow_node from the decap L2 sharing flow list */ + list_del(&flow_node->decap_l2_list_node); + if (--decap_l2_node->refcount == 0) { + rc = rhashtable_remove_fast(&tc_info->decap_l2_table, + &decap_l2_node->node, +
tc_info->decap_l2_ht_params); + if (rc) + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + kfree_rcu(decap_l2_node, rcu); + } +} + +static void bnxt_tc_put_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + __le32 decap_handle = flow_node->decap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + if (flow_node->decap_l2_node) + bnxt_tc_put_decap_l2_node(bp, flow_node); + + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + if (!rc && decap_handle != INVALID_TUNNEL_HANDLE) + hwrm_cfa_decap_filter_free(bp, decap_handle); +} + +static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct net_device *real_dst_dev) +{ +#ifdef CONFIG_INET + struct flowi4 flow = { {0} }; + struct net_device *dst_dev; + struct neighbour *nbr; + struct rtable *rt; + int rc; + + flow.flowi4_proto = IPPROTO_UDP; + flow.fl4_dport = tun_key->tp_dst; + flow.daddr = tun_key->u.ipv4.dst; + + rt = ip_route_output_key(dev_net(real_dst_dev), &flow); + if (IS_ERR(rt)) { + netdev_info(bp->dev, "no route to %pI4b", &flow.daddr); + return -EOPNOTSUPP; + } + + /* The route must either point to the real_dst_dev or a dst_dev that + * uses the real_dst_dev. + */ + dst_dev = rt->dst.dev; + if (is_vlan_dev(dst_dev)) { +#if IS_ENABLED(CONFIG_VLAN_8021Q) + struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev); + + if (vlan->real_dev != real_dst_dev) { + netdev_info(bp->dev, + "dst_dev(%s) doesn't use PF-if(%s)", + netdev_name(dst_dev), + netdev_name(real_dst_dev)); + rc = -EOPNOTSUPP; + goto put_rt; + } + l2_info->inner_vlan_tci = htons(vlan->vlan_id); + l2_info->inner_vlan_tpid = vlan->vlan_proto; + l2_info->num_vlans = 1; +#endif + } else if (dst_dev != real_dst_dev) { + netdev_info(bp->dev, + "dst_dev(%s) for %pI4b is not PF-if(%s)", + netdev_name(dst_dev), &flow.daddr, + netdev_name(real_dst_dev)); + rc = -EOPNOTSUPP; + goto put_rt; + } + + nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); + if (!nbr) { + netdev_info(bp->dev, "can't lookup neighbor for %pI4b", + &flow.daddr); + rc = -EOPNOTSUPP; + goto put_rt; + } + + tun_key->u.ipv4.src = flow.saddr; + tun_key->ttl = ip4_dst_hoplimit(&rt->dst); + neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev); + ether_addr_copy(l2_info->smac, dst_dev->dev_addr); + neigh_release(nbr); + ip_rt_put(rt); + + return 0; +put_rt: + ip_rt_put(rt); + return rc; +#else + return -EOPNOTSUPP; +#endif +} + +static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *decap_filter_handle) +{ + struct ip_tunnel_key *decap_key = &flow->tun_key; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_l2_key l2_info = { {0} }; + struct bnxt_tc_tunnel_node *decap_node; + struct ip_tunnel_key tun_key = { 0 }; + struct bnxt_tc_l2_key *decap_l2_info; + __le32 ref_decap_handle; + int rc; + + /* Check if there's another flow using the same tunnel decap. 
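[Editor's note: tunnel nodes are refcounted and keyed by the full ip_tunnel_key, so at most one firmware decap filter exists per tunnel and is shared by every flow that matches it; bnxt_tc_put_decap_handle() above frees the filter only when the last reference drops. Note below how the decap route lookup is reversed relative to encap.]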
+ * If not, add this tunnel to the table and resolve the other + * tunnel header fields + */ + decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + decap_key); + if (!decap_node) + return -ENOMEM; + + flow_node->decap_node = decap_node; + + if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) + goto done; + + /* Resolve the L2 fields for tunnel decap + * Resolve the route for remote vtep (saddr) of the decap key + * Find its next-hop mac addrs + */ + tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; + tun_key.tp_dst = flow->tun_key.tp_dst; + rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev); + if (rc) + goto put_decap; + + decap_key->ttl = tun_key.ttl; + decap_l2_info = &decap_node->l2_info; + ether_addr_copy(decap_l2_info->dmac, l2_info.smac); + ether_addr_copy(decap_l2_info->smac, l2_info.dmac); + if (l2_info.num_vlans) { + decap_l2_info->num_vlans = l2_info.num_vlans; + decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; + decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci; + } + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; + + /* For getting a decap_filter_handle we first need to check if + * there are any other decap flows that share the same tunnel L2 + * key and if so, pass that flow's decap_filter_handle as the + * ref_decap_handle for this flow. + */ + rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node, + &ref_decap_handle); + if (rc) + goto put_decap; + + /* Issue the hwrm cmd to allocate a decap filter handle */ + rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info, + ref_decap_handle, + &decap_node->tunnel_handle); + if (rc) + goto put_decap_l2; + +done: + *decap_filter_handle = decap_node->tunnel_handle; + return 0; + +put_decap_l2: + bnxt_tc_put_decap_l2_node(bp, flow_node); +put_decap: + bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + return rc; +} + +static void bnxt_tc_put_encap_handle(struct bnxt *bp, + struct bnxt_tc_tunnel_node *encap_node) +{ + __le32 encap_handle = encap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, encap_node); + if (!rc && encap_handle != INVALID_TUNNEL_HANDLE) + hwrm_cfa_encap_record_free(bp, encap_handle); +} + +/* Lookup the tunnel encap table and check if there's an encap_handle + * alloc'd already. + * If not, query L2 info via a route lookup and issue an encap_record_alloc + * cmd to FW. + */ +static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *encap_handle) +{ + struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node; + int rc; + + /* Check if there's another flow using the same tunnel encap.
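[Editor's note: the encap path below is the simpler direction: the route lookup runs naturally toward encap_key->u.ipv4.dst and the resolved addresses are used as-is. The decap path above instead looked up the route toward the flow's outer source address (the remote VTEP) and stored smac/dmac swapped, because decapsulated packets arrive from that neighbor rather than being sent to it.]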
+ * If not, add this tunnel to the table and resolve the other + * tunnel header fields + */ + encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, + encap_key); + if (!encap_node) + return -ENOMEM; + + flow_node->encap_node = encap_node; + + if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) + goto done; + + rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info, + flow->actions.dst_dev); + if (rc) + goto put_encap; + + /* Allocate a new tunnel encap record */ + rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info, + &encap_node->tunnel_handle); + if (rc) + goto put_encap; + +done: + *encap_handle = encap_node->tunnel_handle; + return 0; + +put_encap: + bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, encap_node); + return rc; +} + +static void bnxt_tc_put_tunnel_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node) +{ + if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) + bnxt_tc_put_decap_handle(bp, flow_node); + else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) + bnxt_tc_put_encap_handle(bp, flow_node->encap_node); +} + +static int bnxt_tc_get_tunnel_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *tunnel_handle) +{ + if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) + return bnxt_tc_get_decap_handle(bp, flow, flow_node, + tunnel_handle); + else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) + return bnxt_tc_get_encap_handle(bp, flow, flow_node, + tunnel_handle); + else + return 0; +} +static int __bnxt_tc_del_flow(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* send HWRM cmd to free the flow-id */ + bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle); + + mutex_lock(&tc_info->lock); + + /* release references to any tunnel encap/decap nodes */ + bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); + + /* release reference to l2 node */ + bnxt_tc_put_l2_node(bp, flow_node); + + mutex_unlock(&tc_info->lock); + + rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, + tc_info->flow_ht_params); + if (rc) + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d", + __func__, rc); + + kfree_rcu(flow_node, rcu); + return 0; +} + +/* Add a new flow or replace an existing flow. + * Notes on locking: + * There are essentially two critical sections here. + * 1. while adding a new flow + * a) lookup l2-key + * b) issue HWRM cmd and get flow_handle + * c) link l2-key with flow + * 2. while deleting a flow + * a) unlinking l2-key from flow + * A lock is needed to protect these two critical sections. + * + * The hash-tables are already protected by the rhashtable API.
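[Editor's note: in other words, tc_info->lock serializes only the l2-node-to-flow linkage between add and delete; the flow, L2 and tunnel hash tables themselves need no extra locking beyond what rhashtable provides.]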
+ */ +static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_flow_node *new_node, *old_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow *flow; + __le32 tunnel_handle = 0; + __le16 ref_flow_handle; + int rc; + + /* allocate memory for the new flow and its node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) { + rc = -ENOMEM; + goto done; + } + new_node->cookie = tc_flow_cmd->cookie; + flow = &new_node->flow; + + rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); + if (rc) + goto free_node; + flow->src_fid = src_fid; + + if (!bnxt_tc_can_offload(bp, flow)) { + rc = -ENOSPC; + goto free_node; + } + + /* If a flow exists with the same cookie, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (old_node) + __bnxt_tc_del_flow(bp, old_node); + + /* Check if the L2 part of the flow has been offloaded already. + * If so, bump up its refcnt and get its reference handle. + */ + mutex_lock(&tc_info->lock); + rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle); + if (rc) + goto unlock; + + /* If the flow involves tunnel encap/decap, get tunnel_handle */ + rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle); + if (rc) + goto put_l2; + + /* send HWRM cmd to alloc the flow */ + rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, + tunnel_handle, &new_node->flow_handle); + if (rc) + goto put_tunnel; + + flow->lastused = jiffies; + spin_lock_init(&flow->stats_lock); + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, + tc_info->flow_ht_params); + if (rc) + goto hwrm_flow_free; + + mutex_unlock(&tc_info->lock); + return 0; + +hwrm_flow_free: + bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); +put_tunnel: + bnxt_tc_put_tunnel_handle(bp, flow, new_node); +put_l2: + bnxt_tc_put_l2_node(bp, new_node); +unlock: + mutex_unlock(&tc_info->lock); +free_node: + kfree_rcu(new_node, rcu); +done: + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", + __func__, tc_flow_cmd->cookie, rc); + return rc; +} + +static int bnxt_tc_del_flow(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *flow_node; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", + tc_flow_cmd->cookie); + return -EINVAL; + } + + return __bnxt_tc_del_flow(bp, flow_node); +} + +static int bnxt_tc_get_flow_stats(struct bnxt *bp, + struct tc_cls_flower_offload *tc_flow_cmd) +{ + struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *flow_node; + struct bnxt_tc_flow *flow; + unsigned long lastused; + + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &tc_flow_cmd->cookie, + tc_info->flow_ht_params); + if (!flow_node) { + netdev_info(bp->dev, "Error: no flow_node for cookie %lx", + tc_flow_cmd->cookie); + return -1; + } + + flow = &flow_node->flow; + curr_stats = &flow->stats; + prev_stats = &flow->prev_stats; + + spin_lock(&flow->stats_lock); + stats.packets = curr_stats->packets - prev_stats->packets; + stats.bytes = curr_stats->bytes - prev_stats->bytes; + *prev_stats = *curr_stats; + lastused = flow->lastused; + spin_unlock(&flow->stats_lock); + +
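[Editor's note: only the delta since the previous query is handed to TC below; tcf_exts_stats_update() accumulates on the TC side, which is why prev_stats is re-snapshotted under stats_lock above.]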
tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, + lastused); + return 0; +} + +static int +bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_flow_stats_input req = { 0 }; + __le16 *req_flow_handles = &req.flow_handle_0; + int rc, i; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); + req.num_flows = cpu_to_le16(num_flows); + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + + req_flow_handles[i] = flow_node->flow_handle; + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + __le64 *resp_packets = &resp->packet_0; + __le64 *resp_bytes = &resp->byte_0; + + for (i = 0; i < num_flows; i++) { + stats_batch[i].hw_stats.packets = + le64_to_cpu(resp_packets[i]); + stats_batch[i].hw_stats.bytes = + le64_to_cpu(resp_bytes[i]); + } + } else { + netdev_info(bp->dev, "error rc=%d", rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +/* Add val to accum while handling a possible wraparound + * of val. Even though val is of type u64, its actual width + * is denoted by mask and will wrap-around beyond that width. + */ +static void accumulate_val(u64 *accum, u64 val, u64 mask) +{ +#define low_bits(x, mask) ((x) & (mask)) +#define high_bits(x, mask) ((x) & ~(mask)) + bool wrapped = val < low_bits(*accum, mask); + + *accum = high_bits(*accum, mask) + val; + if (wrapped) + *accum += (mask + 1); +} + +/* The HW counters' width is much less than 64bits. + * Handle possible wrap-around while updating the stat counters + */ +static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info, + struct bnxt_tc_flow_stats *acc_stats, + struct bnxt_tc_flow_stats *hw_stats) +{ + accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask); + accumulate_val(&acc_stats->packets, hw_stats->packets, + tc_info->packets_mask); +} + +static int +bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc, i; + + rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch); + if (rc) + return rc; + + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + struct bnxt_tc_flow *flow = &flow_node->flow; + + spin_lock(&flow->stats_lock); + bnxt_flow_stats_accum(tc_info, &flow->stats, + &stats_batch[i].hw_stats); + if (flow->stats.packets != flow->prev_stats.packets) + flow->lastused = jiffies; + spin_unlock(&flow->stats_lock); + } + + return 0; +} + +static int +bnxt_tc_flow_stats_batch_prep(struct bnxt *bp, + struct bnxt_tc_stats_batch stats_batch[], + int *num_flows) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct rhashtable_iter *iter = &tc_info->iter; + void *flow_node; + int rc, i; + + rc = rhashtable_walk_start(iter); + if (rc && rc != -EAGAIN) { + i = 0; + goto done; + } + + rc = 0; + for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { + flow_node = rhashtable_walk_next(iter); + if (IS_ERR(flow_node)) { + i = 0; + if (PTR_ERR(flow_node) == -EAGAIN) { + continue; + } else { + rc = PTR_ERR(flow_node); + goto done; + } + } + + /* No more flows */ + if (!flow_node) + goto done; + + stats_batch[i].flow_node = flow_node; + } +done: + rhashtable_walk_stop(iter); + *num_flows = i; + return rc; +} + +void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ + struct
bnxt_tc_info *tc_info = bp->tc_info; + int num_flows, rc; + + num_flows = atomic_read(&tc_info->flow_table.nelems); + if (!num_flows) + return; + + rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter); + + for (;;) { + rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch, + &num_flows); + if (rc) { + if (rc == -EAGAIN) + continue; + break; + } + + if (!num_flows) + break; + + bnxt_tc_flow_stats_batch_update(bp, num_flows, + tc_info->stats_batch); + } + + rhashtable_walk_exit(&tc_info->iter); +} + +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower) +{ + int rc = 0; + + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); + break; + + case TC_CLSFLOWER_DESTROY: + rc = bnxt_tc_del_flow(bp, cls_flower); + break; + + case TC_CLSFLOWER_STATS: + rc = bnxt_tc_get_flow_stats(bp, cls_flower); + break; + } + return rc; +} + +static const struct rhashtable_params bnxt_tc_flow_ht_params = { + .head_offset = offsetof(struct bnxt_tc_flow_node, node), + .key_offset = offsetof(struct bnxt_tc_flow_node, cookie), + .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + .key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + .key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_tunnel_ht_params = { + .head_offset = offsetof(struct bnxt_tc_tunnel_node, node), + .key_offset = offsetof(struct bnxt_tc_tunnel_node, key), + .key_len = sizeof(struct ip_tunnel_key), + .automatic_shrinking = true +}; + +/* convert counter width in bits to a mask */ +#define mask(width) ((u64)~0 >> (64 - (width))) + +int bnxt_init_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info; + int rc; + + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, + "Firmware does not support TC flower offload.\n"); + return -ENOTSUPP; + } + + tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL); + if (!tc_info) + return -ENOMEM; + mutex_init(&tc_info->lock); + + /* Counter widths are programmed by FW */ + tc_info->bytes_mask = mask(36); + tc_info->packets_mask = mask(28); + + tc_info->flow_ht_params = bnxt_tc_flow_ht_params; + rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); + if (rc) + goto free_tc_info; + + tc_info->l2_ht_params = bnxt_tc_l2_ht_params; + rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); + if (rc) + goto destroy_flow_table; + + tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params; + rc = rhashtable_init(&tc_info->decap_l2_table, + &tc_info->decap_l2_ht_params); + if (rc) + goto destroy_l2_table; + + tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->decap_table, + &tc_info->decap_ht_params); + if (rc) + goto destroy_decap_l2_table; + + tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->encap_table, + &tc_info->encap_ht_params); + if (rc) + goto destroy_decap_table; + + tc_info->enabled = true; + bp->dev->hw_features |= NETIF_F_HW_TC; + bp->dev->features |= NETIF_F_HW_TC; + 
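[Editor's note: the 36-bit byte and 28-bit packet masks above feed the wrap-around handling in accumulate_val() earlier in this file. A minimal userspace sketch of that arithmetic, using a hypothetical 8-bit counter so the wrap is easy to see:

    #include <stdio.h>
    #include <stdint.h>

    /* Same logic as the driver's accumulate_val(): 'mask' is the true
     * width of the HW counter, and 'val' wraps beyond that width. */
    static void accumulate_val(uint64_t *accum, uint64_t val, uint64_t mask)
    {
            int wrapped = val < (*accum & mask);

            *accum = (*accum & ~mask) + val;
            if (wrapped)
                    *accum += mask + 1;
    }

    int main(void)
    {
            uint64_t acc = 0;
            const uint64_t mask = 0xff;        /* pretend 8-bit HW counter */

            accumulate_val(&acc, 200, mask);   /* HW count: 200 */
            accumulate_val(&acc, 50, mask);    /* HW wrapped past 256 to 50 */
            printf("%llu\n", (unsigned long long)acc);   /* prints 306 */
            return 0;
    }

The accumulator keeps the high bits it has already earned, re-reads only the hardware-visible low bits, and adds one full period (mask + 1) whenever the new reading is smaller than the previous low bits.]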
bp->tc_info = tc_info; + return 0; + +destroy_decap_table: + rhashtable_destroy(&tc_info->decap_table); +destroy_decap_l2_table: + rhashtable_destroy(&tc_info->decap_l2_table); +destroy_l2_table: + rhashtable_destroy(&tc_info->l2_table); +destroy_flow_table: + rhashtable_destroy(&tc_info->flow_table); +free_tc_info: + kfree(tc_info); + return rc; +} + +void bnxt_shutdown_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + + if (!bnxt_tc_flower_enabled(bp)) + return; + + rhashtable_destroy(&tc_info->flow_table); + rhashtable_destroy(&tc_info->l2_table); + rhashtable_destroy(&tc_info->decap_l2_table); + rhashtable_destroy(&tc_info->decap_table); + rhashtable_destroy(&tc_info->encap_table); + kfree(tc_info); + bp->tc_info = NULL; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h new file mode 100644 index 000000000000..97e09a880693 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -0,0 +1,230 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_TC_H +#define BNXT_TC_H + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +#include <net/ip_tunnels.h> + +/* Structs used for storing the filter/actions of the TC cmd. + */ +struct bnxt_tc_l2_key { + u8 dmac[ETH_ALEN]; + u8 smac[ETH_ALEN]; + __be16 inner_vlan_tpid; + __be16 inner_vlan_tci; + __be16 ether_type; + u8 num_vlans; +}; + +struct bnxt_tc_l3_key { + union { + struct { + struct in_addr daddr; + struct in_addr saddr; + } ipv4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } ipv6; + }; +}; + +struct bnxt_tc_l4_key { + u8 ip_proto; + union { + struct { + __be16 sport; + __be16 dport; + } ports; + struct { + u8 type; + u8 code; + } icmp; + }; +}; + +struct bnxt_tc_tunnel_key { + struct bnxt_tc_l2_key l2; + struct bnxt_tc_l3_key l3; + struct bnxt_tc_l4_key l4; + __be32 id; +}; + +struct bnxt_tc_actions { + u32 flags; +#define BNXT_TC_ACTION_FLAG_FWD BIT(0) +#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1) +#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3) +#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4) +#define BNXT_TC_ACTION_FLAG_DROP BIT(5) +#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6) +#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7) + + u16 dst_fid; + struct net_device *dst_dev; + __be16 push_vlan_tpid; + __be16 push_vlan_tci; + + /* tunnel encap */ + struct ip_tunnel_key tun_encap_key; +}; + +struct bnxt_tc_flow { + u32 flags; +#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1) +#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2) +#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3) +#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4) +#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5) +#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8) +#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9) +#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10) +#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\ + BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\ + BNXT_TC_FLOW_FLAGS_TUNL_ID) + + /* flow applicable to pkts ingressing on this fid */ + u16 src_fid; + struct bnxt_tc_l2_key l2_key; + struct bnxt_tc_l2_key l2_mask; + struct bnxt_tc_l3_key l3_key; + struct bnxt_tc_l3_key l3_mask; + struct bnxt_tc_l4_key 
l4_key; + struct bnxt_tc_l4_key l4_mask; + struct ip_tunnel_key tun_key; + struct ip_tunnel_key tun_mask; + + struct bnxt_tc_actions actions; + + /* updated stats accounting for hw-counter wrap-around */ + struct bnxt_tc_flow_stats stats; + /* previous snap-shot of stats */ + struct bnxt_tc_flow_stats prev_stats; + unsigned long lastused; /* jiffies */ + /* for calculating delta from prev_stats and + * updating prev_stats atomically. + */ + spinlock_t stats_lock; +}; + +/* Tunnel encap/decap hash table + * This table is used to maintain a list of flows that use + * the same tunnel encap/decap params (ip_daddrs, vni, udp_dport) + * and the FW returned handle. + * A separate table is maintained for encap and decap + */ +struct bnxt_tc_tunnel_node { + struct ip_tunnel_key key; + struct rhash_head node; + + /* tunnel l2 info */ + struct bnxt_tc_l2_key l2_info; + +#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff) + /* tunnel handle returned by FW */ + __le32 tunnel_handle; + + u32 refcount; + struct rcu_head rcu; +}; + +/* L2 hash table + * The same data-struct is used for L2-flow table and L2-tunnel table. + * The L2 part of a flow or tunnel is stored in a hash table. + * A flow that shares the same L2 key/mask with an + * already existing flow/tunnel must refer to its flow handle or + * decap_filter_id respectively. + */ +struct bnxt_tc_l2_node { + /* hash key: first 16b of key */ +#define BNXT_TC_L2_KEY_LEN 16 + struct bnxt_tc_l2_key key; + struct rhash_head node; + + /* a linked list of flows that share the same l2 key */ + struct list_head common_l2_flows; + + /* number of flows/tunnels sharing the l2 key */ + u16 refcount; + + struct rcu_head rcu; +}; + +struct bnxt_tc_flow_node { + /* hash key: provided by TC */ + unsigned long cookie; + struct rhash_head node; + + struct bnxt_tc_flow flow; + + __le16 flow_handle; + + /* L2 node in l2 hashtable that shares flow's l2 key */ + struct bnxt_tc_l2_node *l2_node; + /* for the shared_flows list maintained in l2_node */ + struct list_head l2_list_node; + + /* tunnel encap related */ + struct bnxt_tc_tunnel_node *encap_node; + + /* tunnel decap related */ + struct bnxt_tc_tunnel_node *decap_node; + /* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */ + struct bnxt_tc_l2_node *decap_l2_node; + /* for the shared_flows list maintained in tunnel decap l2_node */ + struct list_head decap_l2_list_node; + + struct rcu_head rcu; +}; + +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower); +int bnxt_init_tc(struct bnxt *bp); +void bnxt_shutdown_tc(struct bnxt *bp); +void bnxt_tc_flow_stats_work(struct bnxt *bp); + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return bp->tc_info && bp->tc_info->enabled; +} + +#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct tc_cls_flower_offload *cls_flower) +{ + return -EOPNOTSUPP; +} + +static inline int bnxt_init_tc(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_shutdown_tc(struct bnxt *bp) +{ +} + +static inline void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ +} + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return false; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ +#endif /* BNXT_TC_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 77da75a55c02..997e10e8b863 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); + if (ulp->msix_requested) + edev->en_ops->bnxt_free_msix(edev, ulp_id); } if (ulp->max_async_event_id) bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c new file mode 100644 index 000000000000..69186d188c43 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -0,0 +1,503 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/jhash.h> +#include <net/pkt_cls.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_devlink.h" +#include "bnxt_tc.h" + +#ifdef CONFIG_BNXT_SRIOV + +#define CFA_HANDLE_INVALID 0xffff +#define VF_IDX_INVALID 0xffff + +static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, + u16 *tx_cfa_action, u16 *rx_cfa_code) +{ + struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_vfr_alloc_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); + req.vf_id = cpu_to_le16(vf_idx); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } else { + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) +{ + struct hwrm_cfa_vfr_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + return rc; +} + +static int bnxt_vf_rep_open(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + /* Enable link and TX only if the parent PF is open. 
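[Editor's note: a VF-rep netdev owns no rings of its own; as the netdev-init code later in this file states, it rides on the RX/TX rings of the parent PF. Its carrier and TX-queue state must therefore track the PF, which is also why bnxt_vf_reps_open()/bnxt_vf_reps_close() below mirror the PF's state onto every rep.]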
*/ + if (netif_running(bp->dev)) { + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + } + return 0; +} + +static int bnxt_vf_rep_close(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_tx_disable(dev); + + return 0; +} + +static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc, len = skb->len; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)vf_rep->dst); + skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); + skb->dev = vf_rep->dst->u.port_info.lower_dev; + + rc = dev_queue_xmit(skb); + if (!rc) { + vf_rep->tx_stats.packets++; + vf_rep->tx_stats.bytes += len; + } + return rc; +} + +static void +bnxt_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + stats->rx_packets = vf_rep->rx_stats.packets; + stats->rx_bytes = vf_rep->rx_stats.bytes; + stats->tx_packets = vf_rep->tx_stats.packets; + stats->tx_bytes = vf_rep->tx_stats.bytes; +} + +static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + struct bnxt_vf_rep *vf_rep = cb_priv; + struct bnxt *bp = vf_rep->bp; + int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + + if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, vf_fid, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + bnxt_vf_rep_setup_tc_block_cb, + vf_rep, vf_rep); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + bnxt_vf_rep_setup_tc_block_cb, vf_rep); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return bnxt_vf_rep_setup_tc_block(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + u16 vf_idx; + + if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { + vf_idx = bp->cfa_code_map[cfa_code]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + return NULL; +} + +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); + struct bnxt_vf_rep_stats *rx_stats; + + rx_stats = &vf_rep->rx_stats; + vf_rep->rx_stats.bytes += skb->len; + vf_rep->rx_stats.packets++; + + netif_receive_skb(skb); +} + +static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct pci_dev *pf_pdev = vf_rep->bp->pdev; + int rc; + + rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn), + vf_rep->vf_idx); + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + +static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + 
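[Editor's note: the two halves of the rep mux shown above: on TX the rep attaches a METADATA_HW_PORT_MUX dst whose port_id carries the VF's tx_cfa_action, and on RX bnxt_get_vf_rep() demuxes by looking the packet's cfa_code up in the cfa_code_map published at rep-creation time.]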
+ /* as only PORT_PARENT_ID is supported currently, use common code + * between PF and VF-rep for now. + */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; + +static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { + .get_drvinfo = bnxt_vf_rep_get_drvinfo +}; + +static const struct net_device_ops bnxt_vf_rep_netdev_ops = { + .ndo_open = bnxt_vf_rep_open, + .ndo_stop = bnxt_vf_rep_close, + .ndo_start_xmit = bnxt_vf_rep_xmit, + .ndo_get_stats64 = bnxt_vf_rep_get_stats64, + .ndo_setup_tc = bnxt_vf_rep_setup_tc, + .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +}; + +/* Called when the parent PF interface is closed: + * As the mode transition from SWITCHDEV to LEGACY + * happens under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_close(struct bnxt *bp) +{ + struct bnxt_vf_rep *vf_rep; + u16 num_vfs, i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + num_vfs = pci_num_vf(bp->pdev); + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (netif_running(vf_rep->dev)) + bnxt_vf_rep_close(vf_rep->dev); + } +} + +/* Called when the parent PF interface is opened (re-opened): + * As the mode transition from SWITCHDEV to LEGACY + * happens under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_open(struct bnxt *bp) +{ + int i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + for (i = 0; i < pci_num_vf(bp->pdev); i++) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); +} + +static void __bnxt_vf_reps_destroy(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int i; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (vf_rep) { + dst_release((struct dst_entry *)vf_rep->dst); + + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) + hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); + + if (vf_rep->dev) { + /* if register_netdev failed, then netdev_ops + * would have been set to NULL + */ + if (vf_rep->dev->netdev_ops) + unregister_netdev(vf_rep->dev); + free_netdev(vf_rep->dev); + } + } + } + + kfree(bp->vf_reps); + bp->vf_reps = NULL; +} + +void bnxt_vf_reps_destroy(struct bnxt *bp) +{ + bool closed = false; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + if (!bp->vf_reps) + return; + + /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced + * before proceeding with VF-rep cleanup.
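[Editor's note: the RX path dereferences cfa_code_map and the rep netdevs locklessly, hence the ordering below: close the PF to quiesce the shared rings, un-publish the map, re-open the PF, and only then free the reps outside the rtnl_lock, since unregister_netdev() itself takes it.]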
+ */ + rtnl_lock(); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + closed = true; + } + /* un-publish cfa_code_map so that RX path can't see it anymore */ + kfree(bp->cfa_code_map); + bp->cfa_code_map = NULL; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + if (closed) + bnxt_open_nic(bp, false, false); + rtnl_unlock(); + + /* Need to call vf_reps_destroy() outside of rtnl_lock + * as unregister_netdev takes rtnl_lock + */ + __bnxt_vf_reps_destroy(bp); +} + +/* Use the OUI of the PF's perm addr and report the same mac addr + * for the same VF-rep each time + */ +static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac) +{ + u32 addr; + + ether_addr_copy(mac, src_mac); + + addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx; + mac[3] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[5] = (u8)((addr >> 16) & 0xFF); +} + +static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + struct net_device *dev) +{ + struct net_device *pf_dev = bp->dev; + + dev->netdev_ops = &bnxt_vf_rep_netdev_ops; + dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); + /* Just inherit all the features of the parent PF as the VF-R + * uses the RX/TX rings of the parent PF + */ + dev->hw_features = pf_dev->hw_features; + dev->gso_partial_features = pf_dev->gso_partial_features; + dev->vlan_features = pf_dev->vlan_features; + dev->hw_enc_features = pf_dev->hw_enc_features; + dev->features |= pf_dev->features; + bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, + dev->perm_addr); + ether_addr_copy(dev->dev_addr, dev->perm_addr); +} + +static int bnxt_vf_reps_create(struct bnxt *bp) +{ + u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; + int rc, i; + + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); + if (!bp->vf_reps) + return -ENOMEM; + + /* storage for cfa_code to vf-idx mapping */ + cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE, + GFP_KERNEL); + if (!cfa_code_map) { + rc = -ENOMEM; + goto err; + } + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + for (i = 0; i < num_vfs; i++) { + dev = alloc_etherdev(sizeof(*vf_rep)); + if (!dev) { + rc = -ENOMEM; + goto err; + } + + vf_rep = netdev_priv(dev); + bp->vf_reps[i] = vf_rep; + vf_rep->dev = dev; + vf_rep->bp = bp; + vf_rep->vf_idx = i; + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + + /* get cfa handles from FW */ + rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, + &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code); + if (rc) { + rc = -ENOLINK; + goto err; + } + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf_rep->dst) { + rc = -ENOMEM; + goto err; + } + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + rc = register_netdev(dev); + if (rc) { + /* no need for unregister_netdev in cleanup */ + dev->netdev_ops = NULL; + goto err; + } + } + + /* publish cfa_code_map only after all VF-reps have been initialized */ + bp->cfa_code_map = cfa_code_map; + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + netif_keep_dst(bp->dev); + return 0; + +err: + netdev_info(bp->dev, "%s error=%d", __func__, rc); + kfree(cfa_code_map); + __bnxt_vf_reps_destroy(bp); + return rc; +} + +/* Devlink related
routines */ +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + + *mode = bp->eswitch_mode; + return 0; +} + +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + int rc = 0; + + mutex_lock(&bp->sriov_lock); + if (bp->eswitch_mode == mode) { + netdev_info(bp->dev, "already in %s eswitch mode", + mode == DEVLINK_ESWITCH_MODE_LEGACY ? + "legacy" : "switchdev"); + rc = -EINVAL; + goto done; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + bnxt_vf_reps_destroy(bp); + break; + + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (pci_num_vf(bp->pdev) == 0) { + netdev_info(bp->dev, + "Enable VFs before setting switchdev mode"); + rc = -EPERM; + goto done; + } + rc = bnxt_vf_reps_create(bp); + break; + + default: + rc = -EINVAL; + goto done; + } +done: + mutex_unlock(&bp->sriov_lock); + return rc; +} + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h new file mode 100644 index 000000000000..fb06bbe70e42 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -0,0 +1,58 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_VFR_H +#define BNXT_VFR_H + +#ifdef CONFIG_BNXT_SRIOV + +#define MAX_CFA_CODE 65536 + +void bnxt_vf_reps_destroy(struct bnxt *bp); +void bnxt_vf_reps_close(struct bnxt *bp); +void bnxt_vf_reps_open(struct bnxt *bp); +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + return bp->pf.vf[vf_rep->vf_idx].fw_fid; +} + +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); + +#else + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + return 0; +} +#endif /* CONFIG_BNXT_SRIOV */ +#endif /* BNXT_VFR_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 3961a6807454..261e5847557a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -94,6 +94,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, xdp.data_hard_start = *data_ptr - offset; xdp.data = *data_ptr; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = *data_ptr + *len; orig_data = xdp.data; mapping = rx_buf->mapping - bp->rx_dma_offset; @@ -169,8 +170,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) tc = netdev_get_num_tc(dev); if (!tc) tc = 1; - rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - true, tc, tx_xdp); + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + true, tc, tx_xdp); if (rc) { netdev_warn(dev, "Unable to reserve enough TX rings to 
support XDP.\n"); return rc; @@ -207,7 +208,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) return 0; } -int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp) +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct bnxt *bp = netdev_priv(dev); int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 12a5ad66b564..414b748038ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); -int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp); +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); #endif diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index cec94bbb2ea5..8bc126a156e8 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); if (ret) - return -ENOMEM; + goto error; n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; for (i = 0, j = 0; i < cp->max_cid_space; i++) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7b0b399aaedd..24b4f4ceceef 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -72,23 +72,42 @@ #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ TOTAL_DESC * DMA_DESC_SIZE) +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. 
+ */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, void __iomem *d, u32 value) { - __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); } static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, void __iomem *d) { - return __raw_readl(d + DMA_DESC_LENGTH_STATUS); + return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS); } static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr) { - __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); /* Register writes to GISB bus can take couple hundred nanoseconds * and are done for each packet, save these expensive writes unless @@ -96,7 +115,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, */ #ifdef CONFIG_PHYS_ADDR_T_64BIT if (priv->hw_params->flags & GENET_HAS_40BITS) - __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); #endif } @@ -113,7 +132,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, { dma_addr_t addr; - addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); /* Register writes to GISB bus can take couple hundred nanoseconds * and are done for each packet, save these expensive writes unless @@ -121,7 +140,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, */ #ifdef CONFIG_PHYS_ADDR_T_64BIT if (priv->hw_params->flags & GENET_HAS_40BITS) - addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; #endif return addr; } @@ -156,8 +175,8 @@ static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) if (GENET_IS_V1(priv)) return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); else - return __raw_readl(priv->base + - priv->hw_params->tbuf_offset + TBUF_CTRL); + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); } static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) @@ -165,7 +184,7 @@ static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) if (GENET_IS_V1(priv)) bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); else - __raw_writel(val, priv->base + + bcmgenet_writel(val, priv->base + priv->hw_params->tbuf_offset + TBUF_CTRL); } @@ -174,8 +193,8 @@ static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) if (GENET_IS_V1(priv)) return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); else - return __raw_readl(priv->base + - priv->hw_params->tbuf_offset + TBUF_BP_MC); + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); } static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) @@ -183,7 +202,7 @@ static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) if (GENET_IS_V1(priv)) bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); else - __raw_writel(val, priv->base + + bcmgenet_writel(val, priv->base + priv->hw_params->tbuf_offset + TBUF_BP_MC); } @@ -326,28 +345,28 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) static 
inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, enum dma_reg r) { - return __raw_readl(priv->base + GENET_TDMA_REG_OFF + - DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, u32 val, enum dma_reg r) { - __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, enum dma_reg r) { - return __raw_readl(priv->base + GENET_RDMA_REG_OFF + - DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, u32 val, enum dma_reg r) { - __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); } @@ -418,16 +437,16 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, unsigned int ring, enum dma_ring_reg r) { - return __raw_readl(priv->base + GENET_TDMA_REG_OFF + - (DMA_RING_SIZE * ring) + - genet_dma_ring_regs[r]); + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); } static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, unsigned int ring, u32 val, enum dma_ring_reg r) { - __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + (DMA_RING_SIZE * ring) + genet_dma_ring_regs[r]); } @@ -436,16 +455,16 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, unsigned int ring, enum dma_ring_reg r) { - return __raw_readl(priv->base + GENET_RDMA_REG_OFF + - (DMA_RING_SIZE * ring) + - genet_dma_ring_regs[r]); + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); } static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, unsigned int ring, u32 val, enum dma_ring_reg r) { - __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + (DMA_RING_SIZE * ring) + genet_dma_ring_regs[r]); } @@ -469,15 +488,13 @@ static void bcmgenet_complete(struct net_device *dev) static int bcmgenet_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - phy_ethtool_ksettings_get(priv->phydev, cmd); + phy_ethtool_ksettings_get(dev->phydev, cmd); return 0; } @@ -485,15 +502,13 @@ static int bcmgenet_get_link_ksettings(struct net_device *dev, static int bcmgenet_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - return phy_ethtool_ksettings_set(priv->phydev, cmd); + return phy_ethtool_ksettings_set(dev->phydev, cmd); } static int bcmgenet_set_rx_csum(struct net_device *dev, @@ -991,12 +1006,12 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); /* Enable EEE and switch to a 27Mhz clock automatically */ - reg = __raw_readl(priv->base + off); + reg = 
bcmgenet_readl(priv->base + off); if (enable) reg |= TBUF_EEE_EN | TBUF_PM_EN; else reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); - __raw_writel(reg, priv->base + off); + bcmgenet_writel(reg, priv->base + off); /* Do the same thing for RBUF */ reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); @@ -1023,11 +1038,14 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) if (GENET_IS_V1(priv)) return -EOPNOTSUPP; + if (!dev->phydev) + return -ENODEV; + e->eee_enabled = p->eee_enabled; e->eee_active = p->eee_active; e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); - return phy_ethtool_get_eee(priv->phydev, e); + return phy_ethtool_get_eee(dev->phydev, e); } static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) @@ -1039,12 +1057,15 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) if (GENET_IS_V1(priv)) return -EOPNOTSUPP; + if (!dev->phydev) + return -ENODEV; + p->eee_enabled = e->eee_enabled; if (!p->eee_enabled) { bcmgenet_eee_enable_set(dev, false); } else { - ret = phy_init_eee(priv->phydev, 0); + ret = phy_init_eee(dev->phydev, 0); if (ret) { netif_err(priv, hw, dev, "EEE initialization failed\n"); return ret; @@ -1054,7 +1075,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) bcmgenet_eee_enable_set(dev, true); } - return phy_ethtool_set_eee(priv->phydev, e); + return phy_ethtool_set_eee(dev->phydev, e); } /* standard ethtool support functions. */ @@ -1088,7 +1109,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_CABLE_SENSE: - phy_detach(priv->phydev); + phy_detach(priv->dev->phydev); break; case GENET_POWER_WOL_MAGIC: @@ -1153,7 +1174,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); bcmgenet_phy_power_set(priv->dev, true); - bcmgenet_mii_reset(priv->dev); break; case GENET_POWER_CABLE_SENSE: @@ -1174,15 +1194,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, /* ioctl handles special commands that are not present in ethtool.
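[Editor's note: in practice these are the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG), which phy_mii_ioctl() services against the PHY attached to the netdev.]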
*/ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, @@ -1360,7 +1378,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, if (skb) { pkts_compl++; bytes_compl += GENET_CB(skb)->bytes_sent; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } txbds_processed++; @@ -1386,11 +1404,10 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { unsigned int released; - unsigned long flags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock_bh(&ring->lock); released = __bcmgenet_tx_reclaim(dev, ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_bh(&ring->lock); return released; } @@ -1401,15 +1418,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) container_of(napi, struct bcmgenet_tx_ring, napi); unsigned int work_done = 0; struct netdev_queue *txq; - unsigned long flags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); netif_tx_wake_queue(txq); } - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); if (work_done == 0) { napi_complete(napi); @@ -1504,7 +1520,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) struct bcmgenet_tx_ring *ring = NULL; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; - unsigned long flags = 0; int nr_frags, index; dma_addr_t mapping; unsigned int size; @@ -1531,7 +1546,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) nr_frags = skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); if (ring->free_bds <= (nr_frags + 1)) { if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); @@ -1565,8 +1580,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i <= nr_frags; i++) { tx_cb_ptr = bcmgenet_get_txcb(priv, ring); - if (unlikely(!tx_cb_ptr)) - BUG(); + BUG_ON(!tx_cb_ptr); if (!i) { /* Transmit single SKB or head of fragment list */ @@ -1626,7 +1640,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); out: - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); return ret; @@ -1875,7 +1889,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, cb = ring->cbs + i; skb = bcmgenet_rx_refill(priv, cb); if (skb) - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); if (!cb->skb) return -ENOMEM; } @@ -1894,7 +1908,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); if (skb) - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } } @@ -1916,12 +1930,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) usleep_range(1000, 2000); } -static int reset_umac(struct bcmgenet_priv *priv) +static void reset_umac(struct bcmgenet_priv *priv) { - struct device *kdev = &priv->pdev->dev; - unsigned int timeout = 0; - u32 reg; - /* 7358a0/7552a0: bad default in 
RBUF_FLUSH_CTRL.umac_sw_rst */ bcmgenet_rbuf_ctrl_set(priv, 0); udelay(10); @@ -1929,23 +1939,10 @@ static int reset_umac(struct bcmgenet_priv *priv) /* disable MAC while updating its registers */ bcmgenet_umac_writel(priv, 0, UMAC_CMD); - /* issue soft reset, wait for it to complete */ - bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); - while (timeout++ < 1000) { - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (!(reg & CMD_SW_RESET)) - return 0; - - udelay(1); - } - - if (timeout == 1000) { - dev_err(kdev, - "timeout waiting for MAC to come out of reset\n"); - return -ETIMEDOUT; - } - - return 0; + /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ + bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); + udelay(2); + bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -1975,20 +1972,16 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); } -static int init_umac(struct bcmgenet_priv *priv) +static void init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; - int ret; u32 reg; u32 int0_enable = 0; dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); - ret = reset_umac(priv); - if (ret) - return ret; + reset_umac(priv); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); /* clear tx/rx counter */ bcmgenet_umac_writel(priv, MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, @@ -2027,8 +2020,6 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); dev_dbg(kdev, "done init umac\n"); - - return 0; } /* Initialize a Tx ring along with corresponding hardware registers */ @@ -2085,6 +2076,10 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); + + /* Initialize Tx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ @@ -2116,6 +2111,10 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, if (ret) return ret; + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); @@ -2140,50 +2139,27 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, return ret; } -static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) -{ - unsigned int i; - struct bcmgenet_tx_ring *ring; - - for (i = 0; i < priv->hw_params->tx_queues; ++i) { - ring = &priv->tx_rings[i]; - netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); - } - - ring = &priv->tx_rings[DESC_INDEX]; - netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); -} - static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_enable = UMAC_IRQ_TXDMA_DONE; - u32 int1_enable = 0; struct bcmgenet_tx_ring *ring; for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_enable(&ring->napi); - int1_enable |= (1 << i); + ring->int_enable(ring); } ring = &priv->tx_rings[DESC_INDEX]; napi_enable(&ring->napi); - - bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); - bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + ring->int_enable(ring); } static void 
bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_disable = UMAC_IRQ_TXDMA_DONE; - u32 int1_disable = 0xffff; struct bcmgenet_tx_ring *ring; - bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET); - bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET); - for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_disable(&ring->napi); @@ -2267,9 +2243,6 @@ static void bcmgenet_init_tx_queues(struct net_device *dev) bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); - /* Initialize Tx NAPI */ - bcmgenet_init_tx_napi(priv); - /* Enable Tx queues */ bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); @@ -2279,50 +2252,27 @@ static void bcmgenet_init_tx_queues(struct net_device *dev) bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); } -static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) -{ - unsigned int i; - struct bcmgenet_rx_ring *ring; - - for (i = 0; i < priv->hw_params->rx_queues; ++i) { - ring = &priv->rx_rings[i]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); - } - - ring = &priv->rx_rings[DESC_INDEX]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); -} - static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_enable = UMAC_IRQ_RXDMA_DONE; - u32 int1_enable = 0; struct bcmgenet_rx_ring *ring; for (i = 0; i < priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_enable(&ring->napi); - int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); + ring->int_enable(ring); } ring = &priv->rx_rings[DESC_INDEX]; napi_enable(&ring->napi); - - bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); - bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + ring->int_enable(ring); } static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_disable = UMAC_IRQ_RXDMA_DONE; - u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT; struct bcmgenet_rx_ring *ring; - bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET); - bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET); - for (i = 0; i < priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_disable(&ring->napi); @@ -2395,9 +2345,6 @@ static int bcmgenet_init_rx_queues(struct net_device *dev) ring_cfg |= (1 << DESC_INDEX); dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); - /* Initialize Rx NAPI */ - bcmgenet_init_rx_napi(priv); - /* Enable rings */ bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); @@ -2486,9 +2433,6 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); - /* disable DMA */ - bcmgenet_dma_teardown(priv); - for (i = 0; i < priv->num_tx_bds; i++) { cb = priv->tx_cbs + i; skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb); @@ -2571,27 +2515,20 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) /* Interrupt bottom half */ static void bcmgenet_irq_task(struct work_struct *work) { - unsigned long flags; unsigned int status; struct bcmgenet_priv *priv = container_of( work, struct bcmgenet_priv, bcmgenet_irq_work); netif_dbg(priv, intr, priv->dev, "%s\n", __func__); - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irq(&priv->lock); status = priv->irq0_stat; priv->irq0_stat = 0; - spin_unlock_irqrestore(&priv->lock, flags); - - if (status & UMAC_IRQ_MPD_R) { - netif_dbg(priv, wol, priv->dev, - "magic packet detected, waking up\n"); 
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); - } + spin_unlock_irq(&priv->lock); /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) - phy_mac_interrupt(priv->phydev, + phy_mac_interrupt(priv->dev->phydev, !!(status & UMAC_IRQ_LINK_UP)); } @@ -2679,23 +2616,13 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } } - if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | - UMAC_IRQ_PHY_DET_F | - UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_HFB_SM | - UMAC_IRQ_HFB_MM)) { - /* all other interested interrupts handled in bottom half */ - schedule_work(&priv->bcmgenet_irq_work); - } - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { wake_up(&priv->wq); } /* all other interested interrupts handled in bottom half */ - status &= (UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_MPD_R); + status &= UMAC_IRQ_LINK_EVENT; if (status) { /* Save irq status for bottom-half processing. */ spin_lock_irqsave(&priv->lock, flags); @@ -2830,16 +2757,16 @@ static void bcmgenet_netif_start(struct net_device *dev) /* Start the network engine */ bcmgenet_enable_rx_napi(priv); - bcmgenet_enable_tx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); netif_tx_start_all_queues(dev); + bcmgenet_enable_tx_napi(priv); /* Monitor link interrupts now */ bcmgenet_link_intr_enable(priv); - phy_start(priv->phydev); + phy_start(dev->phydev); } static int bcmgenet_open(struct net_device *dev) @@ -2863,12 +2790,7 @@ static int bcmgenet_open(struct net_device *dev) /* take MAC out of reset */ bcmgenet_umac_reset(priv); - ret = init_umac(priv); - if (ret) - goto err_clk_disable; - - /* disable ethernet MAC while updating its registers */ - umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); + init_umac(priv); /* Make sure we reflect the value of CRC_CMD_FWD */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); @@ -2927,6 +2849,7 @@ err_irq1: err_irq0: free_irq(priv->irq0, priv); err_fini_dma: + bcmgenet_dma_teardown(priv); bcmgenet_fini_dma(priv); err_clk_disable: if (priv->internal_phy) @@ -2939,11 +2862,20 @@ static void bcmgenet_netif_stop(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + bcmgenet_disable_tx_napi(priv); netif_tx_stop_all_queues(dev); - phy_stop(priv->phydev); - bcmgenet_intr_disable(priv); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + phy_stop(dev->phydev); bcmgenet_disable_rx_napi(priv); - bcmgenet_disable_tx_napi(priv); + bcmgenet_intr_disable(priv); /* Wait for pending work items to complete. Since interrupts are * disabled no new work will be scheduled. @@ -2954,33 +2886,23 @@ static void bcmgenet_netif_stop(struct net_device *dev) priv->old_speed = -1; priv->old_duplex = -1; priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); } static int bcmgenet_close(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - int ret; + int ret = 0; netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); bcmgenet_netif_stop(dev); /* Really kill the PHY state machine and disconnect from it */ - phy_disconnect(priv->phydev); - - /* Disable MAC receive */ - umac_enable_set(priv, CMD_RX_EN, false); - - ret = bcmgenet_dma_teardown(priv); - if (ret) - return ret; - - /* Disable MAC transmit. 
TX DMA disabled must be done before this */ - umac_enable_set(priv, CMD_TX_EN, false); - - /* tx reclaim */ - bcmgenet_tx_reclaim_all(dev); - bcmgenet_fini_dma(priv); + phy_disconnect(dev->phydev); free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); @@ -2999,7 +2921,6 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) u32 p_index, c_index, intsts, intmsk; struct netdev_queue *txq; unsigned int free_bds; - unsigned long flags; bool txq_stopped; if (!netif_msg_tx_err(priv)) @@ -3007,7 +2928,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) txq = netdev_get_tx_queue(priv->dev, ring->queue); - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); if (ring->index == DESC_INDEX) { intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; @@ -3019,7 +2940,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); txq_stopped = netif_tx_queue_stopped(txq); free_bds = ring->free_bds; - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" "TX queue status: %s, interrupts: %s\n" @@ -3545,9 +3466,7 @@ static int bcmgenet_probe(struct platform_device *pdev) !strcasecmp(phy_mode_str, "internal")) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - err = reset_umac(priv); - if (err) - goto err_clk_disable; + reset_umac(priv); err = bcmgenet_mii_init(dev); if (err) @@ -3595,7 +3514,7 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - int ret; + int ret = 0; if (!netif_running(dev)) return 0; @@ -3603,24 +3522,10 @@ static int bcmgenet_suspend(struct device *d) bcmgenet_netif_stop(dev); if (!device_may_wakeup(d)) - phy_suspend(priv->phydev); + phy_suspend(dev->phydev); netif_device_detach(dev); - /* Disable MAC receive */ - umac_enable_set(priv, CMD_RX_EN, false); - - ret = bcmgenet_dma_teardown(priv); - if (ret) - return ret; - - /* Disable MAC transmit. 
TX DMA disabled must be done before this */ - umac_enable_set(priv, CMD_TX_EN, false); - - /* tx reclaim */ - bcmgenet_tx_reclaim_all(dev); - bcmgenet_fini_dma(priv); - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); @@ -3659,20 +3564,16 @@ static int bcmgenet_resume(struct device *d) bcmgenet_umac_reset(priv); - ret = init_umac(priv); - if (ret) - goto out_clk_disable; + init_umac(priv); /* From WOL-enabled suspend, switch to regular clock */ if (priv->wolopts) clk_disable_unprepare(priv->clk_wol); - phy_init_hw(priv->phydev); - /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); + phy_init_hw(dev->phydev); - /* disable ethernet MAC while updating its registers */ - umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); + /* Speed settings must be restored */ + bcmgenet_mii_config(priv->dev, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); @@ -3701,7 +3602,7 @@ static int bcmgenet_resume(struct device *d) netif_device_attach(dev); if (!device_may_wakeup(d)) - phy_resume(priv->phydev); + phy_resume(dev->phydev); if (priv->eee.eee_enabled) bcmgenet_eee_enable_set(dev, true); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b9344de669f8..3c50431ccd2a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -617,7 +617,6 @@ struct bcmgenet_priv { /* MDIO bus variables */ wait_queue_head_t wq; - struct phy_device *phydev; bool internal_phy; struct device_node *phy_dn; struct device_node *mdio_dn; @@ -657,6 +656,7 @@ struct bcmgenet_priv { struct clk *clk; struct platform_device *pdev; + struct platform_device *mii_pdev; /* WOL */ struct clk *clk_wol; @@ -671,12 +671,21 @@ struct bcmgenet_priv { static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ u32 off) \ { \ - return __raw_readl(priv->base + offset + off); \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. 
\ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ } \ static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ u32 val, u32 off) \ { \ - __raw_writel(val, priv->base + offset + off); \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ } GENET_IO_MACRO(ext, GENET_EXT_OFF); @@ -698,10 +707,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); -void bcmgenet_mii_reset(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 071fcbd14e6a..5333274a283c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -24,69 +24,17 @@ #include <linux/of_net.h> #include <linux/of_mdio.h> #include <linux/platform_data/bcmgenet.h> +#include <linux/platform_data/mdio-bcm-unimac.h> #include "bcmgenet.h" -/* read a value from the MII */ -static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) -{ - int ret; - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); - /* Start MDIO transaction*/ - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) - & MDIO_START_BUSY), - HZ / 100); - ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - - /* Some broken devices are known not to release the line during - * turn-around, e.g: Broadcom BCM53125 external switches, so check for - * that condition here and ignore the MDIO controller read failure - * indication. 
- */ - if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL)) - return -EIO; - - return ret & 0xffff; -} - -/* write a value to the MII */ -static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, - int location, u16 val) -{ - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT) | (0xffff & val)), - UMAC_MDIO_CMD); - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & - MDIO_START_BUSY), - HZ / 100); - - return 0; -} - /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ void bcmgenet_mii_setup(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; bool status_changed = false; @@ -173,22 +121,6 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev, return 0; } -/* Perform a voluntary PHY software reset, since the EPHY is very finicky about - * not doing it and will start corrupting packets - */ -void bcmgenet_mii_reset(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - - if (GENET_IS_V4(priv)) - return; - - if (priv->phydev) { - phy_init_hw(priv->phydev); - phy_start_aneg(priv->phydev); - } -} - void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -234,14 +166,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) } if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - fixed_phy_set_link_update(priv->phydev, + fixed_phy_set_link_update(priv->dev->phydev, bcmgenet_fixed_phy_link_update); } -int bcmgenet_mii_config(struct net_device *dev) +int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; struct device *kdev = &priv->pdev->dev; const char *phy_name = NULL; u32 id_mode_dis = 0; @@ -288,7 +220,7 @@ int bcmgenet_mii_config(struct net_device *dev) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. */ - if ((priv->phydev->supported & PHY_BASIC_FEATURES) == + if ((dev->phydev->supported & PHY_BASIC_FEATURES) == PHY_BASIC_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_25; else @@ -327,7 +259,8 @@ int bcmgenet_mii_config(struct net_device *dev) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - dev_info_once(kdev, "configuring instance for %s\n", phy_name); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); return 0; } @@ -357,7 +290,7 @@ int bcmgenet_mii_probe(struct net_device *dev) return -ENODEV; } } else { - phydev = priv->phydev; + phydev = dev->phydev; phydev->dev_flags = phy_flags; ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, @@ -368,16 +301,14 @@ int bcmgenet_mii_probe(struct net_device *dev) } } - priv->phydev = phydev; - /* Configure port multiplexer based on what the probed PHY device since * reading the 'max-speed' property determines the maximum supported * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. 
*/ - ret = bcmgenet_mii_config(dev); + ret = bcmgenet_mii_config(dev, true); if (ret) { - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); return ret; } @@ -387,109 +318,126 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->phydev->irq = PHY_IGNORE_INTERRUPT; + dev->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } -/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with - * their internal MDIO management controller making them fail to successfully - * be read from or written to for the first transaction. We insert a dummy - * BMSR read here to make sure that phy_get_device() and get_phy_id() can - * correctly read the PHY MII_PHYSID1/2 registers and successfully register a - * PHY device for this peripheral. - * - * Once the PHY driver is registered, we can workaround subsequent reads from - * there (e.g: during system-wide power management). - * - * bus->reset is invoked before mdiobus_scan during mdiobus_register and is - * therefore the right location to stick that workaround. Since we do not want - * to read from non-existing PHYs, we either use bus->phy_mask or do a manual - * Device Tree scan to limit the search area. - */ -static int bcmgenet_mii_bus_reset(struct mii_bus *bus) +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) { - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *np = priv->mdio_dn; - struct device_node *child = NULL; - u32 read_mask = 0; - int addr = 0; + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; - if (!np) { - read_mask = 1 << priv->phy_addr; - } else { - for_each_available_child_of_node(np, child) { - addr = of_mdio_parse_addr(&dev->dev, child); - if (addr < 0) - continue; + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; - read_mask |= 1 << addr; - } + priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; } - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) { - dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); - mdiobus_read(bus, addr, MII_BMSR); - } + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; } +} +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); return 0; } -static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) { - struct mii_bus *bus; + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. + */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; - if (priv->mii_bus) - return 0; + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("failed to allocate\n"); + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) return -ENOMEM; - } - bus = priv->mii_bus; - bus->priv = priv->dev; - bus->name = "bcmgenet MII bus"; - bus->parent = &priv->pdev->dev; - bus->read = bcmgenet_mii_read; - bus->write = bcmgenet_mii_write; - bus->reset = bcmgenet_mii_bus_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->pdev->name, priv->pdev->id); + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; return 0; +out: + platform_device_put(ppdev); + return ret; } static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; - struct phy_device *phydev = NULL; - char *compat; + struct phy_device *phydev; int phy_mode; int ret; - compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); - if (!compat) - return -ENOMEM; - - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); - kfree(compat); - if (!priv->mdio_dn) { - dev_err(kdev, "unable to find MDIO bus node\n"); - return -ENODEV; - } - - ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); @@ -536,33 +484,23 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; struct bcmgenet_platform_data *pd = kdev->platform_data; - struct mii_bus *mdio = priv->mii_bus; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; struct phy_device *phydev; - int ret; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + /* * Internal or external PHY with MDIO access */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - mdio->phy_mask = ~(1 << pd->phy_address); - else - mdio->phy_mask = 0; - - ret = mdiobus_register(mdio); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdiobus_get_phy(mdio, pd->phy_address); - else - phydev = phy_find_first(mdio); - + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); if (!phydev) { dev_err(kdev, "failed to register PHY device\n"); - mdiobus_unregister(mdio); return -ENODEV; } } else { @@ -589,7 +527,6 @@ static int 
bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) } - priv->phydev = phydev; priv->phy_interface = pd->phy_interface; return 0; @@ -608,10 +545,9 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *dn = priv->pdev->dev.of_node; int ret; - ret = bcmgenet_mii_alloc(priv); + ret = bcmgenet_mii_register(priv); if (ret) return ret; @@ -622,11 +558,7 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); - of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + bcmgenet_mii_exit(dev); return ret; } @@ -638,6 +570,5 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + platform_device_unregister(priv->mii_pdev); } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 16a0f192daec..ecdef42f0ae6 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1367,15 +1367,11 @@ static int sbmac_initctx(struct sbmac_softc *s) static void sbdma_uninitctx(struct sbmacdma *d) { - if (d->sbdma_dscrtable_unaligned) { - kfree(d->sbdma_dscrtable_unaligned); - d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; - } + kfree(d->sbdma_dscrtable_unaligned); + d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; - if (d->sbdma_ctxtable) { - kfree(d->sbdma_ctxtable); - d->sbdma_ctxtable = NULL; - } + kfree(d->sbdma_ctxtable); + d->sbdma_ctxtable = NULL; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d600c41fb1dc..de51c2177d03 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6587,7 +6587,7 @@ static void tg3_tx(struct tg3_napi *tnapi) pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -7829,7 +7829,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, } } - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); *pskb = new_skb; return ret; } @@ -7882,7 +7882,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, } while (segs); tg3_tso_bug_end: - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } @@ -8543,7 +8543,7 @@ static void tg3_free_rings(struct tg3 *tp) tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags - 1); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); } @@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp) } } -static void tg3_timer(unsigned long __opaque) +static void tg3_timer(struct timer_list *t) { - struct tg3 *tp = (struct tg3 *) __opaque; + struct tg3 *tp = from_timer(tp, t, timer); spin_lock(&tp->lock); @@ -11087,9 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp) tp->asf_multiplier = (HZ / tp->timer_offset) * TG3_FW_UPDATE_FREQ_SEC; - init_timer(&tp->timer); - tp->timer.data = (unsigned long) tp; - tp->timer.function = tg3_timer; + timer_setup(&tp->timer, tg3_timer, 0); } static void tg3_timer_start(struct tg3 *tp) @@ -11536,11 +11534,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_napi_enable(tp); for (i = 0; i < tp->irq_cnt; i++) { - struct 
tg3_napi *tnapi = &tp->napi[i]; err = tg3_request_irq(tp, i); if (err) { for (i--; i >= 0; i--) { - tnapi = &tp->napi[i]; + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } goto out_napi_fini; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 3b5e98ecba00..c2d02d02d1e6 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver. *
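The GENET_IO_MACRO hunk above stops using __raw_readl()/__raw_writel() unconditionally: on MIPS systems strapped for big-endian the peripheral registers already appear in CPU-native byte order, so the raw accessors are kept there, while every other configuration gets the byte-order-correcting readl_relaxed()/writel_relaxed(). A minimal stand-alone sketch of that compile-time selection — my_readl/my_writel and the base pointer are illustrative names, not part of the patch:

```c
#include <linux/io.h>
#include <linux/kconfig.h>
#include <linux/types.h>

static inline u32 my_readl(void __iomem *addr)
{
	/* MIPS strapped for BE presents registers CPU-native: no swap needed */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(addr);	/* raw access, no byte swap */
	else
		return readl_relaxed(addr);	/* LE register, no barrier */
}

static inline void my_writel(u32 val, void __iomem *addr)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(val, addr);
	else
		writel_relaxed(val, addr);
}
```

Because IS_ENABLED() expands to a compile-time constant, the branch folds away entirely and each helper costs exactly one raw or relaxed MMIO access.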
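bcmgenet_mii_register() above replaces the driver's private MDIO read/write routines with a child platform device bound to the shared mdio-bcm-unimac driver. The allocate/add-resources/add-data/add sequence it uses is a generic pattern for spawning a sub-block device; a trimmed sketch under assumed names (an "example-mdio" driver, caller-supplied resource and platform data):

```c
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int register_child(struct platform_device *parent,
			  struct resource *res, void *pdata, size_t len)
{
	struct platform_device *child;
	int ret;

	child = platform_device_alloc("example-mdio", parent->id);
	if (!child)
		return -ENOMEM;

	child->dev.parent = &parent->dev;

	ret = platform_device_add_resources(child, res, 1);
	if (ret)
		goto out_put;

	ret = platform_device_add_data(child, pdata, len);
	if (ret)
		goto out_put;

	ret = platform_device_add(child);
	if (ret)
		goto out_put;

	return 0;

out_put:
	/* put, not unregister: the device was allocated but never added */
	platform_device_put(child);
	return ret;
}
```

Note the error path: platform_device_put() is correct for a device that never made it through platform_device_add(), while a successfully added device must later be removed with platform_device_unregister() — which is exactly what the reworked bcmgenet_mii_exit() does with priv->mii_pdev.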

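The Tx reclaim hunks above also change lock flavor: spin_lock_irqsave() becomes spin_lock_bh() in process context and a plain spin_lock() in the NAPI poll and xmit paths, which already run with bottom halves disabled, so saving and restoring IRQ flags buys nothing. A schematic of the split, using a hypothetical ring structure (spin_lock_init() assumed to have run at setup):

```c
#include <linux/spinlock.h>

struct ring {
	spinlock_t lock;
	/* ... descriptor bookkeeping ... */
};

/* Process context (e.g. ifdown/reclaim helpers): disable bottom
 * halves so the NAPI poller cannot be entered while we hold the lock.
 */
static void ring_reclaim(struct ring *r)
{
	spin_lock_bh(&r->lock);
	/* ... free completed descriptors ... */
	spin_unlock_bh(&r->lock);
}

/* Softirq context (NAPI poll, xmit under HARD_TX_LOCK): BHs are
 * already disabled here, so a plain spin_lock() is sufficient.
 */
static void ring_poll(struct ring *r)
{
	spin_lock(&r->lock);
	/* ... free completed descriptors, wake the queue ... */
	spin_unlock(&r->lock);
}
```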

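Finally, the tg3_timer() conversion above follows the kernel-wide timer API migration: timer_setup() replaces init_timer() plus the manual .data/.function assignments, and the callback now receives the timer_list itself and recovers its containing structure with from_timer(). A self-contained sketch with placeholder names (my_dev, my_timer_fn):

```c
#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;
	/* ... driver state ... */
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer() is container_of(): t -> enclosing struct my_dev */
	struct my_dev *bp = from_timer(bp, t, timer);

	/* ... periodic work ... */
	mod_timer(&bp->timer, jiffies + HZ);
}

static void my_dev_start_timer(struct my_dev *bp)
{
	/* replaces init_timer() + .data/.function assignment */
	timer_setup(&bp->timer, my_timer_fn, 0);
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}
```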