Diffstat (limited to 'drivers/net/ethernet')
119 files changed, 2334 insertions, 546 deletions
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index 48bc7fa0258c..3044a6f35f04 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -1046,6 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = phy; + /* fall through */ case SIOCGMIIREG: /* Read the specified MII register. */ { int saved_window; diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index eae9827035dc..bcad4a7fac9f 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1107,6 +1107,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; + /* fall through */ case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 5417e4da64ca..1c1ddd891ca3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -517,7 +517,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) rc = ena_alloc_rx_page(rx_ring, rx_info, - __GFP_COLD | GFP_ATOMIC | __GFP_COMP); + GFP_ATOMIC | __GFP_COMP); if (unlikely(rc < 0)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "failed to alloc buffer for rx queue %d\n", @@ -2579,6 +2579,7 @@ static int ena_restore_device(struct ena_adapter *adapter) bool wd_state; int rc; + set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "Can not initialize device\n"); @@ -2592,6 +2593,11 @@ static int ena_restore_device(struct ena_adapter *adapter) goto err_device_destroy; } + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); + /* Make sure we don't have a race with AENQ Links state handler */ + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc) { @@ -2618,7 +2624,7 @@ err_device_destroy: ena_com_admin_destroy(ena_dev); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); - + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); dev_err(&pdev->dev, "Reset attempt failed. 
Can not reset the device\n"); @@ -3495,7 +3501,8 @@ static void ena_update_on_link_change(void *adapter_data, if (status) { netdev_dbg(adapter->netdev, "%s\n", __func__); set_bit(ENA_FLAG_LINK_UP, &adapter->flags); - netif_carrier_on(adapter->netdev); + if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) + netif_carrier_on(adapter->netdev); } else { clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); netif_carrier_off(adapter->netdev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index ed8bd0a579c4..3bbc003871de 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -272,7 +272,8 @@ enum ena_flags_t { ENA_FLAG_DEV_UP, ENA_FLAG_LINK_UP, ENA_FLAG_MSIX_ENABLED, - ENA_FLAG_TRIGGER_RESET + ENA_FLAG_TRIGGER_RESET, + ENA_FLAG_ONGOING_RESET }; /* adapter specific private data structure */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 45d92304068e..cc1e4f820e64 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -295,7 +295,7 @@ again: order = alloc_order; /* Try to obtain pages, decreasing order if necessary */ - gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN; + gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; while (order >= 0) { pages = alloc_pages_node(node, gfp, order); if (pages) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3d53153ce751..a74a8fbad53a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2206,7 +2206,7 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, struct tc_mqprio_qopt *mqprio = type_data; u8 tc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig index cdf78e069a39..7d623e90dc19 100644 --- a/drivers/net/ethernet/aquantia/Kconfig +++ b/drivers/net/ethernet/aquantia/Kconfig @@ -9,7 +9,7 @@ config NET_VENDOR_AQUANTIA Set this to y if you have an Ethernet network cards that uses the aQuantia AQC107/AQC108 chipset. - This option does not build any drivers; it casues the aQuantia + This option does not build any drivers; it causes the aQuantia drivers that can be built to appear in the list of Ethernet drivers. diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0654e0c76bc2..519ca6534b85 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -304,8 +304,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff->flags = 0U; buff->len = AQ_CFG_RX_FRAME_MAX; - buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD | - __GFP_COMP, pages_order); + buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order); if (!buff->page) { err = -ENOMEM; goto err_exit; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index d937083db9a4..894eda5b13cf 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -131,6 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev) switch (bgmac->net_dev->phydev->speed) { default: netdev_err(net_dev, "Unsupported speed. 
Defaulting to 1000Mb\n"); + /* fall through */ case SPEED_1000: val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT; break; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 48d672b204a4..1d96cd594ade 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -15,6 +15,7 @@ #include <linux/bcm47xx_nvram.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <net/dsa.h> #include "bgmac.h" static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, @@ -127,6 +128,8 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, dma_desc->ctl1 = cpu_to_le32(ctl1); } +#define ENET_BRCM_TAG_LEN 4 + static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) @@ -139,6 +142,18 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, u32 flags; int i; + /* The Ethernet switch we are interfaced with needs packets to be at + * least 64 bytes (including FCS) otherwise they will be discarded when + * they enter the switch port logic. When Broadcom tags are enabled, we + * need to make sure that packets are at least 68 bytes + * (including FCS and tag) because the length verification is done after + * the Broadcom tag is stripped off the ingress packet. + */ + if (netdev_uses_dsa(net_dev)) { + if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) + goto err_stats; + } + if (skb->len > BGMAC_DESC_CTL1_LEN) { netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; @@ -225,6 +240,7 @@ err_dma_head: err_drop: dev_kfree_skb(skb); +err_stats: net_dev->stats.tx_dropped++; net_dev->stats.tx_errors++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1216c1f1e052..4c739d5355d2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4289,7 +4289,7 @@ int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 54d1571384a0..be9fd7d184d0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9332,7 +9332,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); - else + else if (bp->slowpath) bnx2x_set_storm_rx_mode(bp); /* Cleanup multicast configuration */ @@ -10271,8 +10271,15 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) smp_mb(); bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); - bnx2x_nic_load(bp, LOAD_NORMAL); - + /* When ret value shows failure of allocation failure, + * the nic is rebooted again. If open still fails, a error + * message to notify the user. 
+ */ + if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + if (bnx2x_nic_load(bp, LOAD_NORMAL)) + BNX2X_ERR("Open the NIC fails again!\n"); + } rtnl_unlock(); return; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 96416f5d97f3..33c49ad697e4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4915,16 +4915,14 @@ hwrm_ver_get_exit: int bnxt_hwrm_fw_set_time(struct bnxt *bp) { -#if IS_ENABLED(CONFIG_RTC_LIB) struct hwrm_fw_set_time_input req = {0}; - struct rtc_time tm; - struct timeval tv; + struct tm tm; + time64_t now = ktime_get_real_seconds(); if (bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; - do_gettimeofday(&tv); - rtc_time_to_tm(tv.tv_sec, &tm); + time64_to_tm(now, 0, &tm); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); req.year = cpu_to_le16(1900 + tm.tm_year); req.month = 1 + tm.tm_mon; @@ -4933,9 +4931,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) req.minute = tm.tm_min; req.second = tm.tm_sec; return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -#else - return -EOPNOTSUPP; -#endif } static int bnxt_hwrm_port_qstats(struct bnxt *bp) @@ -7388,7 +7383,7 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return bnxt_setup_tc_block(dev, type_data); - case TC_SETUP_MQPRIO: { + case TC_SETUP_QDISC_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b6aa7db99705..69186d188c43 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -148,7 +148,6 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep, vf_rep); - return 0; case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5dafcde67e45..72a67f74b97b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,6 +611,9 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdiobus: + of_node_put(bp->phy_node); + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); err_out: return err; @@ -3550,6 +3553,9 @@ static int macb_probe(struct platform_device *pdev) err_out_unregister_mdio: phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); + of_node_put(bp->phy_node); + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); /* Shutdown the PHY if there is a GPIO reset */ @@ -3572,6 +3578,7 @@ static int macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; + struct device_node *np = pdev->dev.of_node; dev = platform_get_drvdata(pdev); @@ -3580,6 +3587,8 @@ static int macb_remove(struct platform_device *pdev) if (dev->phydev) phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); dev->phydev = NULL; mdiobus_free(bp->mii_bus); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 
f05045a69dcc..6aa0eee88ea5 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -4038,7 +4038,8 @@ static int liquidio_init_nic_module(struct octeon_device *oct) */ if (!oct->octeon_id && oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { - if (lio_vf_rep_modinit()) { + retval = lio_vf_rep_modinit(); + if (retval) { liquidio_stop_nic_module(oct); goto octnet_init_failure; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 433f3619de8f..f2d1a076a038 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -198,7 +198,7 @@ static inline void struct sk_buff *skb; struct octeon_skb_page_info *skb_pg_info; - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = alloc_page(GFP_ATOMIC); if (unlikely(!page)) return NULL; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 988c06a28e5e..8f1dd55b3e08 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -361,17 +361,8 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) } } -static void nic_free_lmacmem(struct nicpf *nic) +static void nic_get_hw_info(struct nicpf *nic) { - kfree(nic->vf_lmac_map); - kfree(nic->link); - kfree(nic->duplex); - kfree(nic->speed); -} - -static int nic_get_hw_info(struct nicpf *nic) -{ - u8 max_lmac; u16 sdevid; struct hw_info *hw = nic->hw; @@ -419,41 +410,16 @@ static int nic_get_hw_info(struct nicpf *nic) break; } hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev); - - /* Allocate memory for LMAC tracking elements */ - max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX; - nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->vf_lmac_map) - goto error; - nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->link) - goto error; - nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->duplex) - goto error; - nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL); - if (!nic->speed) - goto error; - return 0; - -error: - nic_free_lmacmem(nic); - return -ENOMEM; } #define BGX0_BLOCK 8 #define BGX1_BLOCK 9 -static int nic_init_hw(struct nicpf *nic) +static void nic_init_hw(struct nicpf *nic) { - int i, err; + int i; u64 cqm_cfg; - /* Get HW capability info */ - err = nic_get_hw_info(nic); - if (err) - return err; - /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -498,8 +464,6 @@ static int nic_init_hw(struct nicpf *nic) cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); - - return 0; } /* Channel parse index configuration */ @@ -1269,6 +1233,7 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct nicpf *nic; + u8 max_lmac; int err; BUILD_BUG_ON(sizeof(union nic_mbx) > 16); @@ -1278,10 +1243,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL); - if (!nic->hw) { - devm_kfree(dev, nic); + if (!nic->hw) return -ENOMEM; - } pci_set_drvdata(pdev, nic); @@ -1322,11 +1285,33 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->node = nic_get_node_id(pdev); - /* Initialize hardware */ - err = 
nic_init_hw(nic); - if (err) + /* Get HW capability info */ + nic_get_hw_info(nic); + + /* Allocate memory for LMAC tracking elements */ + err = -ENOMEM; + max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX; + + nic->vf_lmac_map = devm_kmalloc_array(dev, max_lmac, sizeof(u8), + GFP_KERNEL); + if (!nic->vf_lmac_map) + goto err_release_regions; + + nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->link) goto err_release_regions; + nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->duplex) + goto err_release_regions; + + nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); + if (!nic->speed) + goto err_release_regions; + + /* Initialize hardware */ + nic_init_hw(nic); + nic_set_lmac_vf_mapping(nic); /* Register interrupts */ @@ -1360,9 +1345,6 @@ err_unregister_interrupts: err_release_regions: pci_release_regions(pdev); err_disable_device: - nic_free_lmacmem(nic); - devm_kfree(dev, nic->hw); - devm_kfree(dev, nic); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; @@ -1384,10 +1366,6 @@ static void nic_remove(struct pci_dev *pdev) nic_unregister_interrupts(nic); pci_release_regions(pdev); - nic_free_lmacmem(nic); - devm_kfree(&pdev->dev, nic->hw); - devm_kfree(&pdev->dev, nic); - pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h index 705713b56636..3c3e6cf6aca6 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h +++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h @@ -60,7 +60,7 @@ struct t3cdev { int (*ctl)(struct t3cdev *dev, unsigned int req, void *data); void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh); void *priv; /* driver private data */ - void *l2opt; /* optional layer 2 data */ + void __rcu *l2opt; /* optional layer 2 data */ void *l3opt; /* optional layer 3 data */ void *l4opt; /* optional layer 4 data */ void *ulp; /* ulp stuff */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 239c43084e77..605689957496 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -145,6 +145,14 @@ struct cudbg_tid_info_region_rev1 { u32 reserved[16]; }; +#define CUDBG_MAX_FL_QIDS 1024 + +struct cudbg_ch_cntxt { + u32 cntxt_type; + u32 cntxt_id; + u32 data[SGE_CTXT_SIZE / 4]; +}; + #define CUDBG_MAX_RPLC_SIZE 128 struct cudbg_mps_tcam { @@ -166,6 +174,12 @@ struct cudbg_mps_tcam { u8 reserved[2]; }; +#define CUDBG_VPD_PF_SIZE 0x800 +#define CUDBG_SCFG_VER_ADDR 0x06 +#define CUDBG_SCFG_VER_LEN 4 +#define CUDBG_VPD_VER_ADDR 0x18c7 +#define CUDBG_VPD_VER_LEN 2 + struct cudbg_vpd_data { u8 sn[SERNUM_LEN + 1]; u8 bn[PN_LEN + 1]; @@ -179,6 +193,36 @@ struct cudbg_vpd_data { u32 vpd_vers; }; +#define CUDBG_MAX_TCAM_TID 0x800 + +enum cudbg_le_entry_types { + LE_ET_UNKNOWN = 0, + LE_ET_TCAM_CON = 1, + LE_ET_TCAM_SERVER = 2, + LE_ET_TCAM_FILTER = 3, + LE_ET_TCAM_CLIP = 4, + LE_ET_TCAM_ROUTING = 5, + LE_ET_HASH_CON = 6, + LE_ET_INVALID_TID = 8, +}; + +struct cudbg_tcam { + u32 filter_start; + u32 server_start; + u32 clip_start; + u32 routing_start; + u32 tid_hash_base; + u32 max_tid; +}; + +struct cudbg_tid_data { + u32 tid; + u32 dbig_cmd; + u32 dbig_conf; + u32 dbig_rsp_stat; + u32 data[NUM_LE_DB_DBGI_RSP_DATA_INSTANCES]; +}; + #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h 
b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index e484c514e9ae..e10ff1ee62c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -63,8 +63,10 @@ enum cudbg_dbg_entity_type { CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_TID_INFO = 54, + CUDBG_DUMP_CONTEXT = 56, CUDBG_MPS_TCAM = 57, CUDBG_VPD_DATA = 58, + CUDBG_LE_TCAM = 59, CUDBG_CCTRL = 60, CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index fe3a9ef0ec3f..d699bf88d18f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -68,6 +68,22 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i) (sizeof(struct cudbg_entity_hdr) * (i - 1))); } +static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len, + void *dest) +{ + int vaddr, rc; + + vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE); + if (vaddr < 0) + return vaddr; + + rc = pci_read_vpd(padap->pdev, vaddr, len, dest); + if (rc < 0) + return rc; + + return 0; +} + int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -1099,6 +1115,84 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, return rc; } +int cudbg_dump_context_size(struct adapter *padap) +{ + u32 value, size; + u8 flq; + + value = t4_read_reg(padap, SGE_FLM_CFG_A); + + /* Get number of data freelist queues */ + flq = HDRSTARTFLQ_G(value); + size = CUDBG_MAX_FL_QIDS >> flq; + + /* Add extra space for congestion manager contexts. + * The number of CONM contexts are same as number of freelist + * queues. + */ + size += size; + return size * sizeof(struct cudbg_ch_cntxt); +} + +static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, + enum ctxt_type ctype, u32 *data) +{ + struct adapter *padap = pdbg_init->adap; + int rc = -1; + + /* Under heavy traffic, the SGE Queue contexts registers will be + * frequently accessed by firmware. + * + * To avoid conflicts with firmware, always ask firmware to fetch + * the SGE Queue contexts via mailbox. On failure, fallback to + * accessing hardware registers directly. 
+ */ + if (is_fw_attached(pdbg_init)) + rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data); + if (rc) + t4_sge_ctxt_rd_bd(padap, cid, ctype, data); +} + +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_ch_cntxt *buff; + u32 size, i = 0; + int rc; + + rc = cudbg_dump_context_size(padap); + if (rc <= 0) + return CUDBG_STATUS_ENTITY_NOT_FOUND; + + size = rc; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + buff = (struct cudbg_ch_cntxt *)temp_buff.data; + while (size > 0) { + buff->cntxt_type = CTXT_FLM; + buff->cntxt_id = i; + cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data); + buff++; + size -= sizeof(struct cudbg_ch_cntxt); + + buff->cntxt_type = CTXT_CNM; + buff->cntxt_id = i; + cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data); + buff++; + size -= sizeof(struct cudbg_ch_cntxt); + + i++; + } + + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) { *mask = x | y; @@ -1289,8 +1383,47 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + char vpd_str[CUDBG_VPD_VER_LEN + 1]; + u32 scfg_vers, vpd_vers, fw_vers; struct cudbg_vpd_data *vpd_data; - int rc; + struct vpd_params vpd = { 0 }; + int rc, ret; + + rc = t4_get_raw_vpd_params(padap, &vpd); + if (rc) + return rc; + + rc = t4_get_fw_version(padap, &fw_vers); + if (rc) + return rc; + + /* Serial Configuration Version is located beyond the PF's vpd size. + * Temporarily give access to entire EEPROM to get it. 
+ */ + rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE); + if (rc < 0) + return rc; + + ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN, + &scfg_vers); + + /* Restore back to original PF's vpd size */ + rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE); + if (rc < 0) + return rc; + + if (ret) + return ret; + + rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN, + vpd_str); + if (rc) + return rc; + + vpd_str[CUDBG_VPD_VER_LEN] = '\0'; + rc = kstrtouint(vpd_str, 0, &vpd_vers); + if (rc) + return rc; rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data), &temp_buff); @@ -1298,16 +1431,191 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, return rc; vpd_data = (struct cudbg_vpd_data *)temp_buff.data; - memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1); - memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1); - memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1); - memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1); - vpd_data->scfg_vers = padap->params.scfg_vers; - vpd_data->vpd_vers = padap->params.vpd_vers; - vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers); - vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers); - vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers); - vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers); + memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1); + memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1); + memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1); + memcpy(vpd_data->mn, vpd.id, ID_LEN + 1); + vpd_data->scfg_vers = scfg_vers; + vpd_data->vpd_vers = vpd_vers; + vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers); + vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); + vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); + vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, + struct cudbg_tid_data *tid_data) +{ + struct adapter *padap = pdbg_init->adap; + int i, cmd_retry = 8; + u32 val; + + /* Fill REQ_DATA regs with 0's */ + for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++) + t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0); + + /* Write DBIG command */ + val = DBGICMD_V(4) | DBGITID_V(tid); + t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val); + tid_data->dbig_cmd = val; + + val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */ + t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val); + tid_data->dbig_conf = val; + + /* Poll the DBGICMDBUSY bit */ + val = 1; + while (val) { + val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A); + val = val & DBGICMDBUSY_F; + cmd_retry--; + if (!cmd_retry) + return CUDBG_SYSTEM_ERROR; + } + + /* Check RESP status */ + val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A); + tid_data->dbig_rsp_stat = val; + if (!(val & 1)) + return CUDBG_SYSTEM_ERROR; + + /* Read RESP data */ + for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++) + tid_data->data[i] = t4_read_reg(padap, + LE_DB_DBGI_RSP_DATA_A + + (i << 2)); + tid_data->tid = tid; + return 0; +} + +static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region) +{ + int type = LE_ET_UNKNOWN; + + if (tid < tcam_region.server_start) + type = LE_ET_TCAM_CON; + else if (tid < tcam_region.filter_start) + type = LE_ET_TCAM_SERVER; + else if (tid < tcam_region.clip_start) + type = LE_ET_TCAM_FILTER; + else if (tid < tcam_region.routing_start) + type = LE_ET_TCAM_CLIP; + else if (tid < tcam_region.tid_hash_base) + type = 
LE_ET_TCAM_ROUTING; + else if (tid < tcam_region.max_tid) + type = LE_ET_HASH_CON; + else + type = LE_ET_INVALID_TID; + + return type; +} + +static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data, + struct cudbg_tcam tcam_region) +{ + int ipv6 = 0; + int le_type; + + le_type = cudbg_get_le_type(tid_data->tid, tcam_region); + if (tid_data->tid & 1) + return 0; + + if (le_type == LE_ET_HASH_CON) { + ipv6 = tid_data->data[16] & 0x8000; + } else if (le_type == LE_ET_TCAM_CON) { + ipv6 = tid_data->data[16] & 0x8000; + if (ipv6) + ipv6 = tid_data->data[9] == 0x00C00000; + } else { + ipv6 = 0; + } + return ipv6; +} + +void cudbg_fill_le_tcam_info(struct adapter *padap, + struct cudbg_tcam *tcam_region) +{ + u32 value; + + /* Get the LE regions */ + value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */ + tcam_region->tid_hash_base = value; + + /* Get routing table index */ + value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); + tcam_region->routing_start = value; + + /*Get clip table index */ + value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); + tcam_region->clip_start = value; + + /* Get filter table index */ + value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A); + tcam_region->filter_start = value; + + /* Get server table index */ + value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A); + tcam_region->server_start = value; + + /* Check whether hash is enabled and calculate the max tids */ + value = t4_read_reg(padap, LE_DB_CONFIG_A); + if ((value >> HASHEN_S) & 1) { + value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A); + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + tcam_region->max_tid = (value & 0xFFFFF) + + tcam_region->tid_hash_base; + } else { + value = HASHTIDSIZE_G(value); + value = 1 << value; + tcam_region->max_tid = value + + tcam_region->tid_hash_base; + } + } else { /* hash not enabled */ + tcam_region->max_tid = CUDBG_MAX_TCAM_TID; + } +} + +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_tcam tcam_region = { 0 }; + struct cudbg_tid_data *tid_data; + u32 bytes = 0; + int rc, size; + u32 i; + + cudbg_fill_le_tcam_info(padap, &tcam_region); + + size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + size += sizeof(struct cudbg_tcam); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); + bytes = sizeof(struct cudbg_tcam); + tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes); + /* read all tid */ + for (i = 0; i < tcam_region.max_tid; ) { + rc = cudbg_read_tid(pdbg_init, i, tid_data); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + + /* ipv6 takes two tids */ + cudbg_is_ipv6_entry(tid_data, tcam_region) ? 
i += 2 : i++; + + tid_data++; + bytes += sizeof(struct cudbg_tid_data); + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); return rc; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 230ba88a6a81..caeee8e33e86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -123,12 +123,18 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_tid(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -155,4 +161,9 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); u32 cudbg_cim_obq_size(struct adapter *padap, int qid); +int cudbg_dump_context_size(struct adapter *padap); + +struct cudbg_tcam; +void cudbg_fill_le_tcam_info(struct adapter *padap, + struct cudbg_tcam *tcam_region); #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0c83ceb5a1a6..6f9fa6e3c42a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1459,6 +1459,7 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); +int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, bool enable); int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); @@ -1669,6 +1670,10 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]); void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, unsigned int *ipg, bool sleep_ok); +int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, + enum ctxt_type ctype, u32 *data); +int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, + enum ctxt_type ctype, u32 *data); int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 7373617da635..29cc625e9833 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -60,8 +60,10 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_TID_INFO, cudbg_collect_tid 
}, + { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context }, { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, { CUDBG_VPD_DATA, cudbg_collect_vpd_data }, + { CUDBG_LE_TCAM, cudbg_collect_le_tcam }, { CUDBG_CCTRL, cudbg_collect_cctrl }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, @@ -72,6 +74,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) { + struct cudbg_tcam tcam_region = { 0 }; u32 value, n = 0, len = 0; switch (entity) { @@ -216,6 +219,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_TID_INFO: len = sizeof(struct cudbg_tid_info_region_rev1); break; + case CUDBG_DUMP_CONTEXT: + len = cudbg_dump_context_size(adap); + break; case CUDBG_MPS_TCAM: len = sizeof(struct cudbg_mps_tcam) * adap->params.arch.mps_tcam_size; @@ -223,6 +229,11 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_VPD_DATA: len = sizeof(struct cudbg_vpd_data); break; + case CUDBG_LE_TCAM: + cudbg_fill_le_tcam_info(adap, &tcam_region); + len = sizeof(struct cudbg_tcam) + + sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + break; case CUDBG_CCTRL: len = sizeof(u16) * NMTUS * NCCTRL_WIN; break; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 1b7f6b9ccc8b..eb338212f5af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1064,40 +1064,11 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -/** - * eeprom_ptov - translate a physical EEPROM address to virtual - * @phys_addr: the physical EEPROM address - * @fn: the PCI function number - * @sz: size of function-specific area - * - * Translate a physical EEPROM address to virtual. The first 1K is - * accessed through virtual addresses starting at 31K, the rest is - * accessed through virtual addresses starting at 0. - * - * The mapping is as follows: - * [0..1K) -> [31K..32K) - * [1K..1K+A) -> [31K-A..31K) - * [1K+A..ES) -> [0..ES-A-1K) - * - * where A = @fn * @sz, and ES = EEPROM size. - */ -static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) -{ - fn *= sz; - if (phys_addr < 1024) - return phys_addr + (31 << 10); - if (phys_addr < 1024 + fn) - return 31744 - fn + phys_addr - 1024; - if (phys_addr < EEPROMSIZE) - return phys_addr - 1024 - fn; - return -EINVAL; -} - /* The next two routines implement eeprom read/write from physical addresses. 
*/ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) { - int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); @@ -1106,7 +1077,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) { - int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 486b01fe23bd..922f2f937789 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; } @@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) @@ -1537,7 +1537,13 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) */ static inline int is_ofld_imm(const struct sk_buff *skb) { - return skb->len <= MAX_IMM_TX_PKT_LEN; + struct work_request_hdr *req = (struct work_request_hdr *)skb->data; + unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); + + if (opcode == FW_CRYPTO_LOOKASIDE_WR) + return skb->len <= SGE_MAX_WR_LEN; + else + return skb->len <= MAX_IMM_TX_PKT_LEN; } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index efe9d3a20135..f63210f15579 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2639,6 +2639,35 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) #define CHELSIO_VPD_UNIQUE_ID 0x82 /** + * t4_eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. 
+ */ +int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return 31744 - fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + return -EINVAL; +} + +/** * t4_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter * @enable: whether to enable or disable write protection @@ -9618,6 +9647,68 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, } } +/* t4_sge_ctxt_rd - read an SGE context through FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @cid: the context id + * @ctype: the context type + * @data: where to store the context data + * + * Issues a FW command through the given mailbox to read an SGE context. + */ +int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, + enum ctxt_type ctype, u32 *data) +{ + struct fw_ldst_cmd c; + int ret; + + if (ctype == CTXT_FLM) + ret = FW_LDST_ADDRSPC_SGE_FLMC; + else + ret = FW_LDST_ADDRSPC_SGE_CONMC; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(ret)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.idctxt.physid = cpu_to_be32(cid); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); + data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); + data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); + data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); + data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); + data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); + } + return ret; +} + +/** + * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW + * @adap: the adapter + * @cid: the context id + * @ctype: the context type + * @data: where to store the context data + * + * Reads an SGE context directly, bypassing FW. This is only for + * debugging when FW is unavailable. 
+ */ +int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, + enum ctxt_type ctype, u32 *data) +{ + int i, ret; + + t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype)); + ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1); + if (!ret) + for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4) + *data++ = t4_read_reg(adap, i); + return ret; +} + int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 7c6af14905c2..a964ed184356 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -68,6 +68,12 @@ enum { ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */ }; +/* SGE context types */ +enum ctxt_type { + CTXT_FLM = 2, + CTXT_CNM, +}; + enum { SF_PAGE_SIZE = 256, /* serial flash page size */ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ @@ -79,6 +85,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ enum { SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_CTXT_SIZE = 24, /* size of SGE context */ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ SGE_MAX_IQ_SIZE = 65520, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 623f453bd327..a7cfece72828 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -65,6 +65,9 @@ #define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17 +#define NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17 + #define SGE_PF_KDOORBELL_A 0x0 #define QID_S 15 @@ -150,6 +153,23 @@ #define T6_DBVFIFO_SIZE_M 0x1fffU #define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M) +#define SGE_CTXT_CMD_A 0x11fc + +#define BUSY_S 31 +#define BUSY_V(x) ((x) << BUSY_S) +#define BUSY_F BUSY_V(1U) + +#define CTXTTYPE_S 24 +#define CTXTTYPE_M 0x3U +#define CTXTTYPE_V(x) ((x) << CTXTTYPE_S) + +#define CTXTQID_S 0 +#define CTXTQID_M 0x1ffffU +#define CTXTQID_V(x) ((x) << CTXTQID_S) + +#define SGE_CTXT_DATA0_A 0x1200 +#define SGE_CTXT_DATA5_A 0x1214 + #define GLOBALENABLE_S 0 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S) #define GLOBALENABLE_F GLOBALENABLE_V(1U) @@ -319,6 +339,16 @@ #define SGE_IMSG_CTXT_BADDR_A 0x1088 #define SGE_FLM_CACHE_BADDR_A 0x108c +#define SGE_FLM_CFG_A 0x1090 + +#define NOHDR_S 18 +#define NOHDR_V(x) ((x) << NOHDR_S) +#define NOHDR_F NOHDR_V(1U) + +#define HDRSTARTFLQ_S 11 +#define HDRSTARTFLQ_M 0x7U +#define HDRSTARTFLQ_G(x) (((x) >> HDRSTARTFLQ_S) & HDRSTARTFLQ_M) + #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0 #define THRESHOLD_0_S 24 @@ -2273,6 +2303,35 @@ #define CHNENABLE_V(x) ((x) << CHNENABLE_S) #define CHNENABLE_F CHNENABLE_V(1U) +#define LE_DB_DBGI_CONFIG_A 0x19cf0 + +#define DBGICMDBUSY_S 3 +#define DBGICMDBUSY_V(x) ((x) << DBGICMDBUSY_S) +#define DBGICMDBUSY_F DBGICMDBUSY_V(1U) + +#define DBGICMDSTRT_S 2 +#define DBGICMDSTRT_V(x) ((x) << DBGICMDSTRT_S) +#define DBGICMDSTRT_F DBGICMDSTRT_V(1U) + +#define DBGICMDMODE_S 0 +#define DBGICMDMODE_M 0x3U +#define DBGICMDMODE_V(x) ((x) << DBGICMDMODE_S) + +#define LE_DB_DBGI_REQ_TCAM_CMD_A 0x19cf4 + +#define DBGICMD_S 20 +#define DBGICMD_M 0xfU +#define DBGICMD_V(x) ((x) << DBGICMD_S) + +#define DBGITID_S 0 
+#define DBGITID_M 0xfffffU +#define DBGITID_V(x) ((x) << DBGITID_S) + +#define LE_DB_DBGI_REQ_DATA_A 0x19d00 +#define LE_DB_DBGI_RSP_STATUS_A 0x19d94 + +#define LE_DB_DBGI_RSP_DATA_A 0x19da0 + #define PRTENABLE_S 29 #define PRTENABLE_V(x) ((x) << PRTENABLE_S) #define PRTENABLE_F PRTENABLE_V(1U) @@ -2882,11 +2941,20 @@ #define T6_LIPMISS_F T6_LIPMISS_V(1U) #define LE_DB_CONFIG_A 0x19c04 +#define LE_DB_ROUTING_TABLE_INDEX_A 0x19c10 #define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10 +#define LE_DB_FILTER_TABLE_INDEX_A 0x19c14 #define LE_DB_SERVER_INDEX_A 0x19c18 #define LE_DB_SRVR_START_INDEX_A 0x19c18 +#define LE_DB_CLIP_TABLE_INDEX_A 0x19c1c #define LE_DB_ACT_CNT_IPV4_A 0x19c20 #define LE_DB_ACT_CNT_IPV6_A 0x19c24 +#define LE_DB_HASH_CONFIG_A 0x19c28 + +#define HASHTIDSIZE_S 16 +#define HASHTIDSIZE_M 0x3fU +#define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M) + #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index f2d623a7aee0..123e2c1b65f5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -37,7 +37,7 @@ #define T4FW_VERSION_MAJOR 0x01 #define T4FW_VERSION_MINOR 0x10 -#define T4FW_VERSION_MICRO 0x2D +#define T4FW_VERSION_MICRO 0x3F #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -46,7 +46,7 @@ #define T5FW_VERSION_MAJOR 0x01 #define T5FW_VERSION_MINOR 0x10 -#define T5FW_VERSION_MICRO 0x2D +#define T5FW_VERSION_MICRO 0x3F #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -55,7 +55,7 @@ #define T6FW_VERSION_MAJOR 0x01 #define T6FW_VERSION_MINOR 0x10 -#define T6FW_VERSION_MICRO 0x2D +#define T6FW_VERSION_MICRO 0x3F #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..c6e859a27ee6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) if (wrapped) newacc += 65536; - ACCESS_ONCE(*acc) = newacc; + WRITE_ONCE(*acc, newacc); } static void populate_erx_stats(struct be_adapter *adapter, diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 23053919ebf5..ae55da60ed0e 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1628,7 +1628,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. 
*/ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ebc55b6a6349..7caa8da48421 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -351,7 +351,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, u8 num_tc; int i; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; @@ -2728,11 +2728,11 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* bp init */ for (i = 0; i < DPAA_BPS_NUM; i++) { - int err; - dpaa_bps[i] = dpaa_bp_alloc(dev); - if (IS_ERR(dpaa_bps[i])) + if (IS_ERR(dpaa_bps[i])) { + err = PTR_ERR(dpaa_bps[i]); goto free_dpaa_bps; + } /* the raw size of the buffers used for reception */ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); /* avoid runtime computations by keeping the usable size here */ diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 44720f83af27..5385074b3b7d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -583,12 +583,11 @@ struct fec_enet_private { u64 ethtool_stats[0]; }; -void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); -uint fec_ptp_check_pps_event(struct fec_enet_private *fep); /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dc2d771a222..610573855213 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1602,10 +1602,6 @@ fec_enet_interrupt(int irq, void *dev_id) ret = IRQ_HANDLED; complete(&fep->mdio_done); } - - if (fep->ptp_clock) - if (fec_ptp_check_pps_event(fep)) - ret = IRQ_HANDLED; return ret; } @@ -3312,6 +3308,19 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) } +static int fec_enet_get_irq_cnt(struct platform_device *pdev) +{ + int irq_cnt = platform_irq_count(pdev); + + if (irq_cnt > FEC_IRQ_NUM) + irq_cnt = FEC_IRQ_NUM; /* last for pps */ + else if (irq_cnt == 2) + irq_cnt = 1; /* last for pps */ + else if (irq_cnt <= 0) + irq_cnt = 1; /* At least 1 irq is needed */ + return irq_cnt; +} + static int fec_probe(struct platform_device *pdev) { @@ -3325,6 +3334,8 @@ fec_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node, *phy_node; int num_tx_qs; int num_rx_qs; + char irq_name[8]; + int irq_cnt; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -3465,18 +3476,20 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_reset; + irq_cnt = fec_enet_get_irq_cnt(pdev); if (fep->bufdesc_ex) - fec_ptp_init(pdev); + fec_ptp_init(pdev, irq_cnt); ret = fec_enet_init(ndev); if (ret) goto failed_init; - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); + for (i = 0; i < irq_cnt; i++) { + sprintf(irq_name, "int%d", i); + irq = platform_get_irq_byname(pdev, irq_name); + if (irq < 0) + 
irq = platform_get_irq(pdev, i); if (irq < 0) { - if (i) - break; ret = irq; goto failed_irq; } diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 6ebad3fac81d..f81439796ac7 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -549,6 +549,37 @@ static void fec_time_keep(struct work_struct *work) schedule_delayed_work(&fep->time_keep, HZ); } +/* This function checks the pps event and reloads the timer compare counter. */ +static irqreturn_t fec_pps_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + u32 val; + u8 channel = fep->pps_channel; + struct ptp_clock_event event; + + val = readl(fep->hwp + FEC_TCSR(channel)); + if (val & FEC_T_TF_MASK) { + /* Write the next next compare(not the next according the spec) + * value to the register + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); + do { + writel(val, fep->hwp + FEC_TCSR(channel)); + } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); + + /* Update the counter; */ + fep->next_counter = (fep->next_counter + fep->reload_period) & + fep->cc.mask; + + event.type = PTP_CLOCK_PPS; + ptp_clock_event(fep->ptp_clock, &event); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + /** * fec_ptp_init * @ndev: The FEC network adapter @@ -558,10 +589,12 @@ static void fec_time_keep(struct work_struct *work) * cyclecounter init routine and exits. */ -void fec_ptp_init(struct platform_device *pdev) +void fec_ptp_init(struct platform_device *pdev, int irq_idx) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + int irq; + int ret; fep->ptp_caps.owner = THIS_MODULE; snprintf(fep->ptp_caps.name, 16, "fec ptp"); @@ -587,6 +620,20 @@ void fec_ptp_init(struct platform_device *pdev) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + irq = platform_get_irq_byname(pdev, "pps"); + if (irq < 0) + irq = platform_get_irq(pdev, irq_idx); + /* Failure to get an irq is not fatal, + * only the PTP_CLOCK_PPS clock events should stop + */ + if (irq >= 0) { + ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt, + 0, pdev->name, ndev); + if (ret < 0) + dev_warn(&pdev->dev, "request for pps irq failed(%d)\n", + ret); + } + fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); if (IS_ERR(fep->ptp_clock)) { fep->ptp_clock = NULL; @@ -605,36 +652,3 @@ void fec_ptp_stop(struct platform_device *pdev) if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } - -/** - * fec_ptp_check_pps_event - * @fep: the fec_enet_private structure handle - * - * This function check the pps event and reload the timer compare counter. 
- */ -uint fec_ptp_check_pps_event(struct fec_enet_private *fep) -{ - u32 val; - u8 channel = fep->pps_channel; - struct ptp_clock_event event; - - val = readl(fep->hwp + FEC_TCSR(channel)); - if (val & FEC_T_TF_MASK) { - /* Write the next next compare(not the next according the spec) - * value to the register - */ - writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); - do { - writel(val, fep->hwp + FEC_TCSR(channel)); - } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); - - /* Update the counter; */ - fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; - - event.type = PTP_CLOCK_PPS; - ptp_clock_event(fep->ptp_clock, &event); - return 1; - } - - return 0; -} diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 1789b206be58..6552d68ea6e1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1339,8 +1339,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) switch (port->port_type) { case FMAN_PORT_TYPE_RX: set_rx_dflt_cfg(port, params); + /* fall through */ case FMAN_PORT_TYPE_TX: set_tx_dflt_cfg(port, params, &port->dts_params); + /* fall through */ default: set_dflt_cfg(port, params); } diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 1d6da1ea7bfb..88c0a0636b44 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -615,7 +615,6 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL); if (!mac_dev) { err = -ENOMEM; - dev_err(dev, "devm_kzalloc() = %d\n", err); goto _return; } priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -709,12 +708,8 @@ static int mac_probe(struct platform_device *_of_dev) } if (!of_device_is_available(mac_node)) { - devm_iounmap(dev, priv->vaddr); - __devm_release_region(dev, fman_get_mem_region(priv->fman), - res.start, res.end + 1 - res.start); - devm_kfree(dev, mac_dev); - dev_set_drvdata(dev, NULL); - return -ENODEV; + err = -ENODEV; + goto _return_of_get_parent; } /* Get the cell-index */ @@ -825,6 +820,7 @@ static int mac_probe(struct platform_device *_of_dev) phy = of_phy_find_device(mac_dev->phy_node); if (!phy) { err = -EINVAL; + of_node_put(mac_dev->phy_node); goto _return_of_get_parent; } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 0cec06bec63e..340e28211135 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) unsigned int count; smp_rmb(); - count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); + count = tx_count(READ_ONCE(priv->tx_head), tx_tail); if (count == 0) goto out; @@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_addr_t phys; smp_rmb(); - count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); + count = tx_count(tx_head, READ_ONCE(priv->tx_tail)); if (count == (TX_DESC_NUM - 1)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 844c83ea549e..ce5ed8845042 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -390,7 +390,7 @@ 
struct hclge_pf_res_cmd { #define HCLGE_CFG_TQP_DESC_N_S 16 #define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16) #define HCLGE_CFG_PHY_ADDR_S 0 -#define HCLGE_CFG_PHY_ADDR_M GENMASK(4, 0) +#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0) #define HCLGE_CFG_MEDIA_TP_S 8 #define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8) #define HCLGE_CFG_RX_BUF_LEN_S 16 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c6ba89089ef3..59ed806a52c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -891,14 +891,14 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; if (hnae3_dev_roce_supported(hdev)) { - hdev->num_roce_msix = + hdev->num_roce_msi = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. */ - hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; + hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), @@ -1950,7 +1950,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) struct hnae3_handle *roce = &vport->roce; struct hnae3_handle *nic = &vport->nic; - roce->rinfo.num_vectors = vport->back->num_roce_msix; + roce->rinfo.num_vectors = vport->back->num_roce_msi; if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || vport->back->num_msi_left == 0) @@ -1968,67 +1968,47 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) return 0; } -static int hclge_init_msix(struct hclge_dev *hdev) +static int hclge_init_msi(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - int ret, i; - - hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!hdev->msix_entries) - return -ENOMEM; - - hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, - sizeof(u16), GFP_KERNEL); - if (!hdev->vector_status) - return -ENOMEM; + int vectors; + int i; - for (i = 0; i < hdev->num_msi; i++) { - hdev->msix_entries[i].entry = i; - hdev->vector_status[i] = HCLGE_INVALID_VPORT; + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); - hdev->num_msi_left = hdev->num_msi; - hdev->base_msi_vector = hdev->pdev->irq; + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + HCLGE_ROCE_VECTOR_OFFSET; - ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries, - hdev->num_msi, hdev->num_msi); - if (ret < 0) { - dev_info(&hdev->pdev->dev, - "MSI-X vector alloc failed: %d\n", ret); - return ret; - } - - return 0; -} - -static int hclge_init_msi(struct hclge_dev *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int vectors; - int i; - hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(u16), GFP_KERNEL); - if (!hdev->vector_status) + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); return -ENOMEM; + } for (i = 0; i < hdev->num_msi; i++) hdev->vector_status[i] = 
HCLGE_INVALID_VPORT; - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); - if (vectors < 0) { - dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); - return -EINVAL; + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; } - hdev->num_msi = vectors; - hdev->num_msi_left = vectors; - hdev->base_msi_vector = pdev->irq; - hdev->roce_base_vector = hdev->base_msi_vector + - HCLGE_ROCE_VECTOR_OFFSET; return 0; } @@ -2325,18 +2305,7 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) /* get the speed and duplex as autoneg'result from mac cmd when phy * doesn't exit. */ - if (mac.phydev) - return 0; - - /* update mac->antoneg. */ - ret = hclge_query_autoneg_result(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "autoneg result query failed %d\n", ret); - return ret; - } - - if (!mac.autoneg) + if (mac.phydev || !mac.autoneg) return 0; ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); @@ -2715,6 +2684,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, vport->vport_id * HCLGE_VECTOR_VF_OFFSET; hdev->vector_status[i] = vport->vport_id; + hdev->vector_irq[i] = vector->vector; vector++; alloc++; @@ -2733,15 +2703,10 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) { int i; - for (i = 0; i < hdev->num_msi; i++) { - if (hdev->msix_entries) { - if (vector == hdev->msix_entries[i].vector) - return i; - } else { - if (vector == (hdev->base_msi_vector + i)) - return i; - } - } + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + return -EINVAL; } @@ -4675,14 +4640,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - if (hdev->flag & HCLGE_FLAG_USE_MSIX) { - pci_disable_msix(pdev); - devm_kfree(&pdev->dev, hdev->msix_entries); - hdev->msix_entries = NULL; - } else { - pci_disable_msi(pdev); - } - + pci_free_irq_vectors(pdev); pci_clear_master(pdev); pci_release_mem_regions(pdev); pci_disable_device(pdev); @@ -4700,7 +4658,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_hclge_dev; } - hdev->flag |= HCLGE_FLAG_USE_MSIX; hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; @@ -4737,12 +4694,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - if (hdev->flag & HCLGE_FLAG_USE_MSIX) - ret = hclge_init_msix(hdev); - else - ret = hclge_init_msi(hdev); + ret = hclge_init_msi(hdev); if (ret) { - dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret); + dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 742e6ee9efaf..7027814ea5d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -425,9 +425,6 @@ struct hclge_dev { u16 num_tqps; /* Num task queue pairs of this PF */ u16 num_req_vfs; /* Num VFs requested for this PF */ - u16 num_roce_msix; /* Num of roce vectors for this PF */ - int roce_base_vector; - /* Base task tqp physical id of this PF */ u16 base_tqp_pid; u16 alloc_rss_size; /* Allocated RSS task queue */ @@ -457,8 +454,10 @@ struct hclge_dev { u16 num_msi_left; u16 num_msi_used; u32 base_msi_vector; - struct msix_entry *msix_entries; u16 *vector_status; + int *vector_irq; + u16 num_roce_msi; /* Num of roce 
vectors for this PF */ + int roce_base_vector; u16 pending_udp_bitmap; @@ -482,12 +481,10 @@ struct hclge_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; -#define HCLGE_FLAG_USE_MSI 0x00000001 -#define HCLGE_FLAG_USE_MSIX 0x00000002 -#define HCLGE_FLAG_MAIN 0x00000004 -#define HCLGE_FLAG_DCB_CAPABLE 0x00000008 -#define HCLGE_FLAG_DCB_ENABLE 0x00000010 -#define HCLGE_FLAG_MQPRIO_ENABLE 0x00000020 +#define HCLGE_FLAG_MAIN BIT(0) +#define HCLGE_FLAG_DCB_CAPABLE BIT(1) +#define HCLGE_FLAG_DCB_ENABLE BIT(2) +#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3) u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 2a0af11c9b59..59415090ff0f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1252,7 +1252,7 @@ out: static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; return hns3_setup_tc(dev, type_data); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d0cff2807d0b..1dc4aef37d3a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -75,6 +75,7 @@ #include <asm/firmware.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> +#include <linux/utsname.h> #include "ibmvnic.h" @@ -573,6 +574,15 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) return 0; } +static void release_vpd_data(struct ibmvnic_adapter *adapter) +{ + if (!adapter->vpd) + return; + + kfree(adapter->vpd->buff); + kfree(adapter->vpd); +} + static void release_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@ -753,6 +763,8 @@ static void release_resources(struct ibmvnic_adapter *adapter) { int i; + release_vpd_data(adapter); + release_tx_pools(adapter); release_rx_pools(adapter); @@ -833,6 +845,56 @@ static int set_real_num_queues(struct net_device *netdev) return rc; } +static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + int len = 0; + + if (adapter->vpd->buff) + len = adapter->vpd->len; + + reinit_completion(&adapter->fw_done); + crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; + crq.get_vpd_size.cmd = GET_VPD_SIZE; + ibmvnic_send_crq(adapter, &crq); + wait_for_completion(&adapter->fw_done); + + if (!adapter->vpd->len) + return -ENODATA; + + if (!adapter->vpd->buff) + adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); + else if (adapter->vpd->len != len) + adapter->vpd->buff = + krealloc(adapter->vpd->buff, + adapter->vpd->len, GFP_KERNEL); + + if (!adapter->vpd->buff) { + dev_err(dev, "Could not allocate VPD buffer\n"); + return -ENOMEM; + } + + adapter->vpd->dma_addr = + dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { + dev_err(dev, "Could not map VPD buffer\n"); + kfree(adapter->vpd->buff); + return -ENOMEM; + } + + reinit_completion(&adapter->fw_done); + crq.get_vpd.first = IBMVNIC_CRQ_CMD; + crq.get_vpd.cmd = GET_VPD; + crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); + crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); + ibmvnic_send_crq(adapter, &crq); + wait_for_completion(&adapter->fw_done); + + return 0; +} + static int init_resources(struct ibmvnic_adapter *adapter) 
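The new ibmvnic_get_vpd() above is a textbook synchronous-over-asynchronous mailbox: each CRQ command is posted and the caller then sleeps on adapter->fw_done until the matching response handler completes it, first for GET_VPD_SIZE and then for GET_VPD into a DMA-mapped buffer of the reported size. A minimal sketch of that completion pairing, with a hypothetical fw_ctx/send_cmd() standing in for the driver's adapter state and CRQ helpers:

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical device context; only the completion handshake matters here. */
struct fw_ctx {
	struct completion done;
	u32 rsp_len;			/* filled in by the response handler */
};

static void send_cmd(struct fw_ctx *ctx);	/* hypothetical: posts one async command */

/* Caller side: issue a command, then sleep until the response arrives. */
static int fw_query_len(struct fw_ctx *ctx)
{
	reinit_completion(&ctx->done);	/* re-arm before sending to avoid a stale wakeup */
	send_cmd(ctx);
	wait_for_completion(&ctx->done);
	return ctx->rsp_len ? 0 : -ENODATA;
}

/* Response side (runs from the CRQ/interrupt path): publish, then wake. */
static void fw_rsp_handler(struct fw_ctx *ctx, u32 len)
{
	ctx->rsp_len = len;
	complete(&ctx->done);
}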
{ struct net_device *netdev = adapter->netdev; @@ -850,6 +912,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) if (rc) return rc; + adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); + if (!adapter->vpd) + return -ENOMEM; + adapter->map_id = 1; adapter->napi = kcalloc(adapter->req_rx_queues, sizeof(struct napi_struct), GFP_KERNEL); @@ -923,7 +989,7 @@ static int __ibmvnic_open(struct net_device *netdev) static int ibmvnic_open(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int rc; + int rc, vpd; mutex_lock(&adapter->reset_lock); @@ -950,6 +1016,12 @@ static int ibmvnic_open(struct net_device *netdev) rc = __ibmvnic_open(netdev); netif_carrier_on(netdev); + + /* Vital Product Data (VPD) */ + vpd = ibmvnic_get_vpd(adapter); + if (vpd) + netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); + mutex_unlock(&adapter->reset_lock); return rc; @@ -1878,11 +1950,15 @@ static int ibmvnic_get_link_ksettings(struct net_device *netdev, return 0; } -static void ibmvnic_get_drvinfo(struct net_device *dev, +static void ibmvnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, adapter->fw_version, + sizeof(info->fw_version)); } static u32 ibmvnic_get_msglevel(struct net_device *netdev) @@ -2813,6 +2889,55 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) return ibmvnic_send_crq(adapter, &crq); } +struct vnic_login_client_data { + u8 type; + __be16 len; + char name; +} __packed; + +static int vnic_client_data_len(struct ibmvnic_adapter *adapter) +{ + int len; + + /* Calculate the amount of buffer space needed for the + * vnic client data in the login buffer. There are four entries, + * OS name, LPAR name, device name, and a null last entry. 
+ */ + len = 4 * sizeof(struct vnic_login_client_data); + len += 6; /* "Linux" plus NULL */ + len += strlen(utsname()->nodename) + 1; + len += strlen(adapter->netdev->name) + 1; + + return len; +} + +static void vnic_add_client_data(struct ibmvnic_adapter *adapter, + struct vnic_login_client_data *vlcd) +{ + const char *os_name = "Linux"; + int len; + + /* Type 1 - LPAR OS */ + vlcd->type = 1; + len = strlen(os_name) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, os_name, len); + vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + + /* Type 2 - LPAR name */ + vlcd->type = 2; + len = strlen(utsname()->nodename) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, utsname()->nodename, len); + vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + + /* Type 3 - device name */ + vlcd->type = 3; + len = strlen(adapter->netdev->name) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, adapter->netdev->name, len); +} + static void send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; @@ -2825,13 +2950,18 @@ static void send_login(struct ibmvnic_adapter *adapter) size_t buffer_size; __be64 *tx_list_p; __be64 *rx_list_p; + int client_data_len; + struct vnic_login_client_data *vlcd; int i; + client_data_len = vnic_client_data_len(adapter); + buffer_size = sizeof(struct ibmvnic_login_buffer) + - sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues); + sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + + client_data_len; - login_buffer = kmalloc(buffer_size, GFP_ATOMIC); + login_buffer = kzalloc(buffer_size, GFP_ATOMIC); if (!login_buffer) goto buf_alloc_failed; @@ -2898,6 +3028,15 @@ static void send_login(struct ibmvnic_adapter *adapter) } } + /* Insert vNIC login client data */ + vlcd = (struct vnic_login_client_data *) + ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); + login_buffer->client_data_offset = + cpu_to_be32((char *)vlcd - (char *)login_buffer); + login_buffer->client_data_len = cpu_to_be32(client_data_len); + + vnic_add_client_data(adapter, vlcd); + netdev_dbg(adapter->netdev, "Login Buffer:\n"); for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { netdev_dbg(adapter->netdev, "%016lx\n", @@ -3076,6 +3215,73 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) ibmvnic_send_crq(adapter, &crq); } +static void handle_vpd_size_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + + if (crq->get_vpd_size_rsp.rc.code) { + dev_err(dev, "Error retrieving VPD size, rc=%x\n", + crq->get_vpd_size_rsp.rc.code); + complete(&adapter->fw_done); + return; + } + + adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); + complete(&adapter->fw_done); +} + +static void handle_vpd_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + unsigned char *substr = NULL, *ptr = NULL; + u8 fw_level_len = 0; + + memset(adapter->fw_version, 0, 32); + + dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, + DMA_FROM_DEVICE); + + if (crq->get_vpd_rsp.rc.code) { + dev_err(dev, "Error retrieving VPD from device, rc=%x\n", + crq->get_vpd_rsp.rc.code); + goto complete; + } + + /* get the position of the firmware version info + * located after the ASCII 'RM' substring in the buffer + */ + substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); + if (!substr) { + dev_info(dev, "No FW level provided by VPD\n"); + goto complete; + 
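The VPD parse in handle_vpd_rsp() works keyword-wise: locate the ASCII "RM" keyword with strnstr(), read the one-byte length stored right after it, then copy that many bytes as the firmware level, with every step bounds-checked against the end of the buffer. A standalone sketch of the same bounds-checked extraction, assuming only that layout (keyword, length byte, value) rather than the full VPD format:

#include <stddef.h>
#include <string.h>

/* Copy the value of the "RM" keyword out of a raw VPD image laid out as
 * ... 'R' 'M' <len> <len bytes of value> ...; returns value length or -1.
 */
static int vpd_fw_level(const unsigned char *buf, size_t len,
			char *out, size_t out_sz)
{
	size_t i, n;

	for (i = 0; i + 2 < len; i++) {
		if (buf[i] != 'R' || buf[i + 1] != 'M')
			continue;
		n = buf[i + 2];			/* length byte after the keyword */
		if (i + 3 + n > len || n + 1 > out_sz)
			return -1;		/* value would overrun a buffer */
		memcpy(out, &buf[i + 3], n);
		out[n] = '\0';
		return (int)n;
	}
	return -1;				/* no "RM" keyword found */
}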
} + + /* get length of firmware level ASCII substring */ + if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { + fw_level_len = *(substr + 2); + } else { + dev_info(dev, "Length of FW substr extrapolated VPD buff\n"); + goto complete; + } + + /* copy firmware version string from vpd into adapter */ + if ((substr + 3 + fw_level_len) < + (adapter->vpd->buff + adapter->vpd->len)) { + ptr = strncpy((char *)adapter->fw_version, + substr + 3, fw_level_len); + + if (!ptr) + dev_err(dev, "Failed to isolate FW level string\n"); + } else { + dev_info(dev, "FW substr extrapolated VPD buff\n"); + } + +complete: + complete(&adapter->fw_done); +} + static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; @@ -3807,6 +4013,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, netdev_dbg(netdev, "Got Collect firmware trace Response\n"); complete(&adapter->fw_done); break; + case GET_VPD_SIZE_RSP: + handle_vpd_size_rsp(crq, adapter); + break; + case GET_VPD_RSP: + handle_vpd_rsp(crq, adapter); + break; default: netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", gen_crq->cmd); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 4670af80d612..4487f1e2c266 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -57,6 +57,8 @@ struct ibmvnic_login_buffer { __be32 off_rxcomp_subcrqs; __be32 login_rsp_ioba; __be32 login_rsp_len; + __be32 client_data_offset; + __be32 client_data_len; } __packed __aligned(8); struct ibmvnic_login_rsp_buffer { @@ -558,6 +560,12 @@ struct ibmvnic_multicast_ctrl { struct ibmvnic_rc rc; } __packed __aligned(8); +struct ibmvnic_get_vpd_size { + u8 first; + u8 cmd; + u8 reserved[14]; +} __packed __aligned(8); + struct ibmvnic_get_vpd_size_rsp { u8 first; u8 cmd; @@ -575,6 +583,13 @@ struct ibmvnic_get_vpd { u8 reserved[4]; } __packed __aligned(8); +struct ibmvnic_get_vpd_rsp { + u8 first; + u8 cmd; + u8 reserved[10]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + struct ibmvnic_acl_change_indication { u8 first; u8 cmd; @@ -700,10 +715,10 @@ union ibmvnic_crq { struct ibmvnic_change_mac_addr change_mac_addr_rsp; struct ibmvnic_multicast_ctrl multicast_ctrl; struct ibmvnic_multicast_ctrl multicast_ctrl_rsp; - struct ibmvnic_generic_crq get_vpd_size; + struct ibmvnic_get_vpd_size get_vpd_size; struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp; struct ibmvnic_get_vpd get_vpd; - struct ibmvnic_generic_crq get_vpd_rsp; + struct ibmvnic_get_vpd_rsp get_vpd_rsp; struct ibmvnic_acl_change_indication acl_change_indication; struct ibmvnic_acl_query acl_query; struct ibmvnic_generic_crq acl_query_rsp; @@ -937,6 +952,12 @@ struct ibmvnic_error_buff { __be32 error_id; }; +struct ibmvnic_vpd { + unsigned char *buff; + dma_addr_t dma_addr; + u64 len; +}; + enum vnic_state {VNIC_PROBING = 1, VNIC_PROBED, VNIC_OPENING, @@ -978,6 +999,10 @@ struct ibmvnic_adapter { dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; + /* Vital Product Data (VPD) */ + struct ibmvnic_vpd *vpd; + char fw_version[32]; + /* Statistics */ struct ibmvnic_statistics stats; dma_addr_t stats_token; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 81e4425f0529..adc62fb38c49 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1389,7 +1389,7 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt 
*mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 2b8bbc84e34f..4c3b4243cf65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); if (!rx_ring) continue; @@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index dc9b8dcf4a1e..5f6cf7212d4f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1722,7 +1722,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_lock(); for (j = 0; j < vsi->num_queue_pairs; j++) { - tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + tx_ring = READ_ONCE(vsi->tx_rings[j]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 05b94d87a6c3..4a964d6e4a9e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -464,7 +464,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; i40e_get_netdev_stats_struct_tx(tx_ring, stats); @@ -814,7 +814,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ - p = ACCESS_ONCE(vsi->tx_rings[q]); + p = READ_ONCE(vsi->tx_rings[q]); do { start = u64_stats_fetch_begin_irq(&p->syncp); @@ -7550,7 +7550,7 @@ static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: return i40e_setup_tc_block(netdev, type_data); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); /* Force any pending update before accessing. */ - adj = ACCESS_ONCE(pf->ptp_base_adj); + adj = READ_ONCE(pf->ptp_base_adj); freq = adj; freq *= ppb; @@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - ACCESS_ONCE(pf->ptp_base_adj) = incval; + WRITE_ONCE(pf->ptp_base_adj, incval); smp_mb(); /* Force the above update. 
*/ } diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 8eee081d395f..568c96842f28 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); /* write operations, indexed using DWORDS */ #define wr32(reg, val) \ do { \ - u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ if (!E1000_REMOVED(hw_addr)) \ writel((val), &hw_addr[(reg)]); \ } while (0) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e22bce7cdacd..e94d3c256667 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -762,7 +762,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) u32 igb_rd32(struct e1000_hw *hw, u32 reg) { struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); - u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; if (E1000_REMOVED(hw_addr)) @@ -2488,7 +2488,7 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, struct igb_adapter *adapter = netdev_priv(dev); switch (type) { - case TC_SETUP_CBS: + case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; @@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e5dcb25be398..ca06c3cc2ca8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) */ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (ixgbe_removed(reg_addr)) @@ -8760,7 +8760,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); u64 bytes, packets; unsigned int start; @@ -8776,12 +8776,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, } for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); ixgbe_get_ring_stats64(stats, ring); } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); 
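The many ACCESS_ONCE() conversions in these Intel hunks are mechanical: READ_ONCE()/WRITE_ONCE() give the same single, non-torn access to a shared location, but also work on non-scalar types and make the direction of the access explicit, which is why ACCESS_ONCE() was being removed tree-wide in this cycle. A minimal illustrative sketch of the intended producer/consumer pairing, not taken from any of these drivers:

#include <linux/compiler.h>

static unsigned long shared_cursor;	/* written by one context, read by another */

static void publish(unsigned long v)
{
	WRITE_ONCE(shared_cursor, v);	/* exactly one store; cannot be torn or re-fused */
}

static unsigned long snapshot(void)
{
	/* Without READ_ONCE() the compiler may re-load or cache this value
	 * across the surrounding code; with it, exactly one load is emitted.
	 */
	return READ_ONCE(shared_cursor);
}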
ixgbe_get_ring_stats64(stats, ring); } @@ -9431,7 +9431,7 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return ixgbe_setup_tc_block(dev, type_data); - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); - incval = ACCESS_ONCE(adapter->base_incval); + incval = READ_ONCE(adapter->base_incval); freq = incval; freq *= ppb; @@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) } /* update the base incval used to calculate frequency adjustment */ - ACCESS_ONCE(adapter->base_incval) = incval; + WRITE_ONCE(adapter->base_incval, incval); smp_mb(); /* need lock to prevent incorrect read while modifying cyclecounter */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 12d3601b1d57..feed11bc9ddf 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (IXGBE_REMOVED(reg_addr)) diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -182,7 +182,7 @@ struct ixgbevf_info { static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (IXGBE_REMOVED(reg_addr)) return; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 64a04975bcf8..bc93b69cfd1e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, { u32 val; - /* Only 255 descriptors can be added at once ; Assume caller - * process TX desriptors in quanta less than 256 - */ - val = pend_desc + txq->pending; - mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc += txq->pending; + + /* Only 255 Tx descriptors can be added at once */ + do { + val = min(pend_desc, 255); + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc -= val; + } while (pend_desc > 0); txq->pending = 0; } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 965b6a829a5d..6c20e811f973 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -799,6 +799,42 @@ enum mvpp2_bm_type { MVPP2_BM_SWF_SHORT }; +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc 
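The mvneta_txq_pend_desc_add() fix above handles a register whose pending-descriptor field can only express up to 255: rather than assuming callers stay below that, the new code posts the count in clamped chunks. The same clamp-and-loop idiom, sketched with a hypothetical reg_write() and register offset in place of mvreg_write()/MVNETA_TXQ_UPDATE_REG:

#include <linux/types.h>

static void reg_write(u32 reg, u32 val);	/* hypothetical MMIO write helper */
#define TXQ_UPDATE_REG	0x0			/* hypothetical register offset */

/* Post 'pending' new descriptors when the hardware field holds at most 255. */
static void post_pending(u32 pending)
{
	while (pending) {
		u32 chunk = pending > 255 ? 255 : pending;

		reg_write(TXQ_UPDATE_REG, chunk);
		pending -= chunk;
	}
}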
+#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) + /* Definitions */ /* Shared Packet Processor resources */ @@ -826,6 +862,7 @@ struct mvpp2 { struct clk *axi_clk; /* List of pointers to port structures */ + int port_count; struct mvpp2_port **port_list; /* Aggregated TXQs */ @@ -847,6 +884,10 @@ struct mvpp2 { /* Maximum number of RXQs per port */ unsigned int max_port_rxqs; + + /* Workqueue to gather hardware statistics */ + char queue_name[30]; + struct workqueue_struct *stats_queue; }; struct mvpp2_pcpu_stats { @@ -891,6 +932,7 @@ struct mvpp2_port { /* Per-port registers' base address */ void __iomem *base; + void __iomem *stats_base; struct mvpp2_rx_queue **rxqs; unsigned int nrxqs; @@ -909,6 +951,11 @@ struct mvpp2_port { u16 tx_ring_size; u16 rx_ring_size; struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; + + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; phy_interface_t phy_interface; struct device_node *phy_node; @@ -4778,9 +4825,131 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); } +struct mvpp2_ethtool_counter { + unsigned int offset; + const char string[ETH_GSTRING_LEN]; + bool reg_is_64b; +}; + +static u64 mvpp2_read_count(struct mvpp2_port *port, + const struct mvpp2_ethtool_counter *counter) +{ + u64 val; + + val = readl(port->stats_base + counter->offset); + if (counter->reg_is_64b) + val += (u64)readl(port->stats_base + counter->offset + 4) << 32; + + return val; +} + +/* Due to the fact that software statistics and hardware statistics are, by + * design, incremented at different moments in the chain of packet processing, + * it is very likely that incoming packets could have been dropped after being + * counted by hardware but before reaching software statistics (most probably + * multicast packets), and in the opposite way, during transmission, FCS bytes + * are added in between as well as TSO skb will be split and header bytes added. + * Hence, statistics gathered from userspace with ifconfig (software) and + * ethtool (hardware) cannot be compared. 
+ */ +static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { + { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, + { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, + { MVPP2_MIB_FC_SENT, "fc_sent" }, + { MVPP2_MIB_FC_RCVD, "fc_received" }, + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, + { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, + { MVPP2_MIB_COLLISION, "collision" }, + { MVPP2_MIB_LATE_COLLISION, "late_collision" }, +}; + +static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + memcpy(data + i * ETH_GSTRING_LEN, + &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + } +} + +static void mvpp2_gather_hw_statistics(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, + stats_work); + u64 *pstats; + int i; + + mutex_lock(&port->gather_stats_lock); + + pstats = port->ethtool_stats; + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); + + /* No need to read again the counters right after this function if it + * was called asynchronously by the user (ie. use of ethtool). + */ + cancel_delayed_work(&port->stats_work); + queue_delayed_work(port->priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + mutex_unlock(&port->gather_stats_lock); +} + +static void mvpp2_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Update statistics for the given port, then take the lock to avoid + * concurrent accesses on the ethtool_stats structure during its copy. 
+ */ + mvpp2_gather_hw_statistics(&port->stats_work.work); + + mutex_lock(&port->gather_stats_lock); + memcpy(data, port->ethtool_stats, + sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); + mutex_unlock(&port->gather_stats_lock); +} + +static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvpp2_ethtool_regs); + + return -EOPNOTSUPP; +} + static void mvpp2_port_reset(struct mvpp2_port *port) { u32 val; + unsigned int i; + + /* Read the GOP statistics to reset the hardware counters */ + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & ~MVPP2_GMAC_PORT_RESET_MASK; @@ -6769,6 +6938,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port) for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); if (err) goto err; @@ -6798,6 +6970,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) struct mvpp2_queue_vector *qv = port->qvecs + i; irq_set_affinity_hint(qv->irq, NULL); + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); free_irq(qv->irq, qv); } } @@ -6912,6 +7085,10 @@ static int mvpp2_open(struct net_device *dev) if (priv->hw_version == MVPP22) mvpp22_init_rss(port); + /* Start hardware statistics gathering */ + queue_delayed_work(priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + return 0; err_free_link_irq: @@ -6956,6 +7133,8 @@ static int mvpp2_stop(struct net_device *dev) mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); + cancel_delayed_work_sync(&port->stats_work); + return 0; } @@ -7267,6 +7446,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; @@ -7670,6 +7852,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = PTR_ERR(port->base); goto err_free_irq; } + + port->stats_base = port->priv->lms_base + + MVPP21_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; } else { if (of_property_read_u32(port_node, "gop-port-id", &port->gop_id)) { @@ -7679,15 +7865,29 @@ static int mvpp2_port_probe(struct platform_device *pdev, } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); + port->stats_base = port->priv->iface_base + + MVPP22_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; } - /* Alloc per-cpu stats */ + /* Alloc per-cpu and ethtool stats */ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; goto err_free_irq; } + port->ethtool_stats = devm_kcalloc(&pdev->dev, + ARRAY_SIZE(mvpp2_ethtool_regs), + sizeof(u64), GFP_KERNEL); + if (!port->ethtool_stats) { + err = -ENOMEM; + goto err_free_stats; + } + + mutex_init(&port->gather_stats_lock); + INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); + mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from); port->tx_ring_size = MVPP2_MAX_TXD; @@ -8010,7 +8210,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct 
resource *res; void __iomem *base; - int port_count, i; + int i; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -8125,14 +8325,14 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_mg_clk; } - port_count = of_get_available_child_count(dn); - if (port_count == 0) { + priv->port_count = of_get_available_child_count(dn); + if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; goto err_mg_clk; } - priv->port_list = devm_kcalloc(&pdev->dev, port_count, + priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count, sizeof(*priv->port_list), GFP_KERNEL); if (!priv->port_list) { @@ -8149,6 +8349,21 @@ static int mvpp2_probe(struct platform_device *pdev) i++; } + /* Statistics must be gathered regularly because some of them (like + * packets counters) are 32-bit registers and could overflow quite + * quickly. For instance, a 10Gb link used at full bandwidth with the + * smallest packets (64B) will overflow a 32-bit counter in less than + * 30 seconds. Then, use a workqueue to fill 64-bit counters. + */ + snprintf(priv->queue_name, sizeof(priv->queue_name), + "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), + priv->port_count > 1 ? "+" : ""); + priv->stats_queue = create_singlethread_workqueue(priv->queue_name); + if (!priv->stats_queue) { + err = -ENOMEM; + goto err_mg_clk; + } + platform_set_drvdata(pdev, priv); return 0; @@ -8170,9 +8385,14 @@ static int mvpp2_remove(struct platform_device *pdev) struct device_node *port_node; int i = 0; + flush_workqueue(priv->stats_queue); + destroy_workqueue(priv->stats_queue); + for_each_available_child_of_node(dn, port_node) { - if (priv->port_list[i]) + if (priv->port_list[i]) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); mvpp2_port_remove(priv->port_list[i]); + } i++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 22b1cc012bc9..36054e6fb9d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -38,3 +38,11 @@ config MLX4_DEBUG mlx4_core driver. The output can be turned on via the debug_level module parameter (which can also be set after the driver is loaded through sysfs). + +config MLX4_CORE_GEN2 + bool "Support for old gen2 Mellanox PCI IDs" if (MLX4_CORE) + depends on MLX4_CORE + default y + ---help--- + Say Y here if you want to use old gen2 Mellanox devices in the + driver. 
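A back-of-envelope check of the mvpp2 counter-overflow estimate above: at 10 Gb/s a minimum-size frame occupies 64 + 20 bytes on the wire (preamble, SFD and inter-frame gap included), i.e. 672 bits, giving about 10e9 / 672 ≈ 14.9 million frames per second. A 32-bit frame counter (2^32 ≈ 4.3e9) then wraps in roughly 4.3e9 / 14.9e6 ≈ 290 seconds, and the good-octets counters wrap even faster, in only a few seconds at line rate, so the quoted sub-30-second figure is the right order of magnitude for the worst counters. Polling every second (MVPP2_MIB_COUNTERS_STATS_DELAY is 1 * HZ) therefore leaves ample margin for accumulating the hardware values into the 64-bit ethtool_stats array before any wrap is lost.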
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index de0f9e5e42ec..e2b6b0cac1ac 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -231,10 +231,10 @@ static void dump_err_buf(struct mlx4_dev *dev) i, swab32(readl(priv->catas_err.map + i))); } -static void poll_catas(unsigned long dev_ptr) +static void poll_catas(struct timer_list *t) { - struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; - struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer); + struct mlx4_dev *dev = &priv->dev; u32 slave_read; if (mlx4_is_slave(dev)) { @@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) phys_addr_t addr; INIT_LIST_HEAD(&priv->catas_err.list); - setup_timer(&priv->catas_err.timer, poll_catas, (unsigned long)dev); + timer_setup(&priv->catas_err.timer, poll_catas, 0); priv->catas_err.map = NULL; if (!mlx4_is_slave(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 736a6ccaf05e..99051a294fa6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -135,7 +135,7 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 92aec17f4b4d..85e28efcda33 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -193,7 +193,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size, - GFP_KERNEL | __GFP_COLD)) { + GFP_KERNEL)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate enough rx buffers\n"); return -ENOMEM; @@ -551,8 +551,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, do { if (mlx4_en_prepare_rx_desc(priv, ring, ring->prod & ring->size_mask, - GFP_ATOMIC | __GFP_COLD | - __GFP_MEMALLOC)) + GFP_ATOMIC | __GFP_MEMALLOC)) break; ring->prod++; } while (likely(--missing)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 596445a4a241..6b6853773848 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, index = cons_index & size_mask; cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; - last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); - ring_cons = ACCESS_ONCE(ring->cons); + last_nr_txbb = READ_ONCE(ring->last_nr_txbb); + ring_cons = READ_ONCE(ring->cons); ring_index = ring_cons & size_mask; stamp_index = ring_index; @@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, wmb(); /* we want to dirty this cache line once */ - ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; - ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); + WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); if (cq->type == TX_XDP) return done < budget; @@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; /* fetch ring->cons far ahead before needing it to avoid 
stall */ - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); @@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_rmb(); - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e61c99ef741d..4d84cab77105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4066,6 +4066,7 @@ int mlx4_restart_one(struct pci_dev *pdev) #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } static const struct pci_device_id mlx4_pci_table[] = { +#ifdef CONFIG_MLX4_CORE_GEN2 /* MT25408 "Hermon" */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */ @@ -4085,6 +4086,7 @@ static const struct pci_device_id mlx4_pci_table[] = { MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2), /* MT25400 Family [ConnectX-2] */ MLX_VF(0x1002), /* Virtual Function */ +#endif /* CONFIG_MLX4_CORE_GEN2 */ /* MT27500 Family [ConnectX-3] */ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3), MLX_VF(0x1004), /* Virtual Function */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index fc281712869b..17b723218b0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx, list_splice_init(&priv->waiting_events_list, &temp); if (!dev_ctx->context) goto out; - list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) + list_for_each_entry_safe(de, n, &temp, list) dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 751f62cae969..c0872b3284cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -68,7 +68,7 @@ #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 @@ -655,12 +655,14 @@ struct mlx5e_tc_table { struct mlx5e_vlan_table { struct mlx5e_flow_table ft; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; struct mlx5_flow_handle *any_cvlan_rule; struct mlx5_flow_handle *any_svlan_rule; - bool filter_disabled; + bool cvlan_filter_disabled; }; struct mlx5e_l2_table { @@ -887,8 +889,8 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); +void 
mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_timestamp_set(struct mlx5e_priv *priv); struct mlx5e_redirect_rqt_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f0d11ad05ed2..def513484845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) + for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) list_size++; max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); @@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) { + for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; @@ -154,7 +154,8 @@ enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, - MLX5E_VLAN_RULE_TYPE_MATCH_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, }; static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, @@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + /* cvlan_tag enabled in match criteria and + * disabled in match value means both S & C tags + * don't exist (untagged of both) + */ rule_p = &priv->fs.vlan.untagged_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); @@ -190,8 +195,18 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; - default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - rule_p = &priv->fs.vlan.active_vlans_rule[vid]; + case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: + rule_p = &priv->fs.vlan.active_svlans_rule[vid]; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, + vid); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ + rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); @@ -223,7 +238,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, if (!spec) return -ENOMEM; - if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) + if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID) mlx5e_vport_context_update_vlans(priv); err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); @@ -255,11 +270,17 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.any_svlan_rule = NULL; } break; - case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: + if (priv->fs.vlan.active_svlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]); + priv->fs.vlan.active_svlans_rule[vid] = NULL; + } + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: mlx5e_vport_context_update_vlans(priv); - if (priv->fs.vlan.active_vlans_rule[vid]) { - 
mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]); - priv->fs.vlan.active_vlans_rule[vid] = NULL; + if (priv->fs.vlan.active_cvlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); + priv->fs.vlan.active_cvlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); break; @@ -283,46 +304,83 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); } -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) { - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) return; - priv->fs.vlan.filter_disabled = false; + priv->fs.vlan.cvlan_filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) { - if (priv->fs.vlan.filter_disabled) + if (priv->fs.vlan.cvlan_filter_disabled) return; - priv->fs.vlan.filter_disabled = true; + priv->fs.vlan.cvlan_filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } -int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) +static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) { - struct mlx5e_priv *priv = netdev_priv(dev); + int err; + + set_bit(vid, priv->fs.vlan.active_cvlans); - set_bit(vid, priv->fs.vlan.active_vlans); + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + if (err) + clear_bit(vid, priv->fs.vlan.active_cvlans); - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + return err; } -int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) +static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) +{ + struct net_device *netdev = priv->netdev; + int err; + + set_bit(vid, priv->fs.vlan.active_svlans); + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + if (err) { + clear_bit(vid, priv->fs.vlan.active_svlans); + return err; + } + + /* Need to fix some features.. 
*/ + netdev_update_features(netdev); + return err; +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); - clear_bit(vid, priv->fs.vlan.active_vlans); + if (be16_to_cpu(proto) == ETH_P_8021Q) + return mlx5e_vlan_rx_add_cvid(priv, vid); + else if (be16_to_cpu(proto) == ETH_P_8021AD) + return mlx5e_vlan_rx_add_svid(priv, vid); + + return -EOPNOTSUPP; +} - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (be16_to_cpu(proto) == ETH_P_8021Q) { + clear_bit(vid, priv->fs.vlan.active_cvlans); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + } else if (be16_to_cpu(proto) == ETH_P_8021AD) { + clear_bit(vid, priv->fs.vlan.active_svlans); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(dev); + } return 0; } @@ -333,11 +391,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) { - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); + for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - if (priv->fs.vlan.filter_disabled && + for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_add_any_vid_rules(priv); } @@ -348,11 +409,14 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) { - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); + for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - if (priv->fs.vlan.filter_disabled && + for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_del_any_vid_rules(priv); } @@ -365,21 +429,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, struct mlx5e_l2_hash_node *hn) { u8 action = hn->action; + u8 mac_addr[ETH_ALEN]; int l2_err = 0; + ether_addr_copy(mac_addr, hn->ai.addr); + switch (action) { case MLX5E_ACTION_ADD: mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); - if (!is_multicast_ether_addr(hn->ai.addr)) { - l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); + if (!is_multicast_ether_addr(mac_addr)) { + l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); hn->mpfs = !l2_err; } hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: - if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) - l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); + if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) + l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); mlx5e_del_l2_flow_rule(priv, &hn->ai); mlx5e_del_l2_from_hash(hn); break; @@ -387,7 +454,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, if (l2_err) netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", - action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err); + action == MLX5E_ACTION_ADD ? 
"add" : "del", mac_addr, l2_err); } static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) @@ -545,8 +612,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; if (enable_promisc) { + if (!priv->channels.params.vlan_strip_disable) + netdev_warn_once(ndev, + "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) @@ -561,7 +631,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (disable_allmulti) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } @@ -1265,13 +1335,15 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 3 +#define MLX5E_NUM_VLAN_GROUPS 4 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) -#define MLX5E_VLAN_GROUP1_SIZE BIT(1) -#define MLX5E_VLAN_GROUP2_SIZE BIT(0) +#define MLX5E_VLAN_GROUP1_SIZE BIT(12) +#define MLX5E_VLAN_GROUP2_SIZE BIT(1) +#define MLX5E_VLAN_GROUP3_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ MLX5E_VLAN_GROUP1_SIZE +\ - MLX5E_VLAN_GROUP2_SIZE) + MLX5E_VLAN_GROUP2_SIZE +\ + MLX5E_VLAN_GROUP3_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -1294,7 +1366,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1305,7 +1378,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP2_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1314,6 +1387,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f877f2f5f2a5..d2b057a3e512 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += 
rq_stats->lro_bytes; + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; @@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_tso_bytes += sq_stats->tso_bytes; s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_added_vlan_packets += sq_stats->added_vlan_packets; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; @@ -3146,7 +3148,7 @@ int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_BLOCK: return mlx5e_setup_tc_block(dev, type_data); #endif - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return mlx5e_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; @@ -3260,14 +3262,14 @@ out: return err; } -static int set_feature_vlan_filter(struct net_device *netdev, bool enable) +static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); if (enable) - mlx5e_enable_vlan_filter(priv); + mlx5e_enable_cvlan_filter(priv); else - mlx5e_disable_vlan_filter(priv); + mlx5e_disable_cvlan_filter(priv); return 0; } @@ -3378,7 +3380,7 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_lro); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_FILTER, - set_feature_vlan_filter); + set_feature_cvlan_filter); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, set_feature_tc_num_filters); err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, @@ -3395,6 +3397,25 @@ static int mlx5e_set_features(struct net_device *netdev, return err ? -EINVAL : 0; } +static netdev_features_t mlx5e_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mutex_lock(&priv->state_lock); + if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { + /* HW strips the outer C-tag header, this is a problem + * for S-tag traffic. 
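+ * Clearing NETIF_F_HW_VLAN_CTAG_RX below keeps S-tagged frames intact whenever any S-tag VLAN is configured.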
+ */ + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if (!priv->channels.params.vlan_strip_disable) + netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); + } + mutex_unlock(&priv->state_lock); + + return features; +} + static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3872,6 +3893,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, + .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, @@ -4174,6 +4196,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_features |= NETIF_F_GSO_PARTIAL; @@ -4231,6 +4254,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) } netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; netdev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 6d7df4750e0f..5b499c7a698f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -216,22 +216,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - struct page *page; - if (mlx5e_rx_cache_get(rq, dma_info)) return 0; - page = dev_alloc_pages(rq->buff.page_order); - if (unlikely(!page)) + dma_info->page = dev_alloc_pages(rq->buff.page_order); + if (unlikely(!dma_info->page)) return -ENOMEM; - dma_info->addr = dma_map_page(rq->pdev, page, 0, + dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, RQ_PAGE_SIZE(rq), rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { - put_page(page); + put_page(dma_info->page); + dma_info->page = NULL; return -ENOMEM; } - dma_info->page = page; return 0; } @@ -563,7 +561,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); - skb->mac_len = ETH_HLEN; proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); tot_len = cqe_bcnt - network_depth; @@ -610,10 +607,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_first_ethertype_ip(struct sk_buff *skb) +static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) { __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + ethertype = __vlan_get_protocol(skb, ethertype, network_depth); return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); } @@ -623,6 +621,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, struct sk_buff *skb, bool lro) { + int network_depth = 0; + if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -632,9 +632,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (is_first_ethertype_ip(skb)) { + if 
(is_last_ethertype_ip(skb, &network_depth)) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + if (network_depth > ETH_HLEN) + /* CQE csum is calculated from the IP header and does + * not cover VLAN headers (if present). This will add + * the checksum manually. + */ + skb->csum = csum_partial(skb->data + ETH_HLEN, + network_depth - ETH_HLEN, + skb->csum); rq->stats.csum_complete++; return; } @@ -664,6 +672,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct net_device *netdev = rq->netdev; int lro_num_seg; + skb->mac_len = ETH_HLEN; lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); @@ -685,9 +694,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (likely(netdev->features & NETIF_F_RXHASH)) mlx5e_skb_set_hash(cqe, skb); - if (cqe_has_vlan(cqe)) + if (cqe_has_vlan(cqe)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); + rq->stats.removed_vlan_packets++; + } skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 8bc30484ecc1..b74ddc7984bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -42,8 +42,10 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, @@ -733,6 +735,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, @@ -755,6 +758,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index d094663edd9b..d679e21f686e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -59,8 +59,10 @@ struct mlx5e_sw_stats { u64 tx_tso_bytes; u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; + u64 
tx_added_vlan_packets; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; u64 rx_csum_complete; @@ -153,6 +155,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_tx; u64 xdp_tx_full; @@ -180,6 +183,7 @@ struct mlx5e_sq_stats { u64 tso_inner_bytes; u64 csum_partial; u64 csum_partial_inner; + u64 added_vlan_packets; u64 nop; /* less likely accessed in data path */ u64 csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index de651de35c9b..569b42a01026 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -361,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); ihs += VLAN_HLEN; + sq->stats.added_vlan_packets++; } else { memcpy(eseg->inline_hdr.start, skb_data, ihs); mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); @@ -369,7 +370,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); + if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) + eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); + sq->stats.added_vlan_packets++; } headlen = skb_len - skb->data_len; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index e906b754415c..ab92298eafc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); bool busy = false; - int work_done; + int work_done = 0; int i; for (i = 0; i < c->num_tc; i++) @@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); - work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); - busy |= work_done == budget; + if (likely(budget)) { /* budget=0 means: don't poll rx rings */ + work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); + busy |= work_done == budget; + } busy |= c->rq.post_wqes(&c->rq); if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) return budget; - if (work_done == budget) + if (budget && work_done == budget) work_done--; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ecbe9fad22d8..5f323442cc5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1486,9 +1486,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) return -EAGAIN; } + /* Panic tear down fw command will stop the PCI bus communication + * with the HCA, so the health poll is no longer needed. 
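+ * (If the teardown command fails below, the poll is restarted before returning the error.)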
+ */ + mlx5_drain_health_wq(dev); + mlx5_stop_health_poll(dev); + ret = mlx5_cmd_force_teardown_hca(dev); if (ret) { mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); + mlx5_start_health_poll(dev); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 80f4efd3e82f..9463c3fa254f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -19,7 +19,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ spectrum_ipip.o spectrum_acl_flex_actions.o \ - spectrum_mr.o spectrum_mr_tcam.o + spectrum_mr.o spectrum_mr_tcam.o \ + spectrum_qdisc.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5066553dd0b6..6c4e08b8058a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1758,6 +1758,191 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* CWTP - Congestion WRED ECN TClass Profile + * ---------------------------------------- + * Configures the profiles for queues of egress port and traffic class + */ +#define MLXSW_REG_CWTP_ID 0x2802 +#define MLXSW_REG_CWTP_BASE_LEN 0x28 +#define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08 +#define MLXSW_REG_CWTP_LEN 0x40 + +MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN); + +/* reg_cwtp_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8); + +/* reg_cwtp_traffic_class + * Traffic Class to configure + * Access: Index + */ +MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8); + +/* reg_cwtp_profile_min + * Minimum Average Queue Size of the profile in cells. 
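+ * (Values are rounded up to multiples of MLXSW_REG_CWTP_MIN_VALUE, i.e. 64 cells, by the caller; see the roundup() calls in mlxsw_sp_tclass_congestion_enable() in spectrum_qdisc.c below.)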
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN, + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false); + +/* reg_cwtp_profile_percent + * Percentage of WRED and ECN marking for maximum Average Queue size + * Range is 0 to 100, units of integer percentage + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN, + 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); + +/* reg_cwtp_profile_max + * Maximum Average Queue size of the profile in cells + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN, + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); + +#define MLXSW_REG_CWTP_MIN_VALUE 64 +#define MLXSW_REG_CWTP_MAX_PROFILE 2 +#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1 + +static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port, + u8 traffic_class) +{ + int i; + + MLXSW_REG_ZERO(cwtp, payload); + mlxsw_reg_cwtp_local_port_set(payload, local_port); + mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class); + + for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) { + mlxsw_reg_cwtp_profile_min_set(payload, i, + MLXSW_REG_CWTP_MIN_VALUE); + mlxsw_reg_cwtp_profile_max_set(payload, i, + MLXSW_REG_CWTP_MIN_VALUE); + } +} + +#define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) (profile - 1) + +static inline void +mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max, + u32 probability) +{ + u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile); + + mlxsw_reg_cwtp_profile_min_set(payload, index, min); + mlxsw_reg_cwtp_profile_max_set(payload, index, max); + mlxsw_reg_cwtp_profile_percent_set(payload, index, probability); +} + +/* CWTPM - Congestion WRED ECN TClass and Pool Mapping + * --------------------------------------------------- + * The CWTPM register maps each egress port and traffic class to profile num. + */ +#define MLXSW_REG_CWTPM_ID 0x2803 +#define MLXSW_REG_CWTPM_LEN 0x44 + +MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN); + +/* reg_cwtpm_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8); + +/* reg_cwtpm_traffic_class + * Traffic Class to configure + * Access: Index + */ +MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8); + +/* reg_cwtpm_ew + * Control enablement of WRED for traffic class: + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1); + +/* reg_cwtpm_ee + * Control enablement of ECN for traffic class: + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1); + +/* reg_cwtpm_tcp_g + * TCP Green Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2); + +/* reg_cwtpm_tcp_y + * TCP Yellow Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2); + +/* reg_cwtpm_tcp_r + * TCP Red Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2); + +/* reg_cwtpm_ntcp_g + * Non-TCP Green Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. 
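+ * (Same profile-index encoding as the tcp_g/tcp_y/tcp_r fields above.)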
+ * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2); + +/* reg_cwtpm_ntcp_y + * Non-TCP Yellow Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2); + +/* reg_cwtpm_ntcp_r + * Non-TCP Red Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2); + +#define MLXSW_REG_CWTPM_RESET_PROFILE 0 + +static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, + u8 traffic_class, u8 profile, + bool wred, bool ecn) +{ + MLXSW_REG_ZERO(cwtpm, payload); + mlxsw_reg_cwtpm_local_port_set(payload, local_port); + mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class); + mlxsw_reg_cwtpm_ew_set(payload, wred); + mlxsw_reg_cwtpm_ee_set(payload, ecn); + mlxsw_reg_cwtpm_tcp_g_set(payload, profile); + mlxsw_reg_cwtpm_tcp_y_set(payload, profile); + mlxsw_reg_cwtpm_tcp_r_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_g_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_y_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); +} + /* PPBT - Policy-Engine Port Binding Table * --------------------------------------- * This register is used for configuration of the Port Binding Table. @@ -3156,8 +3341,10 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_EXT_CNT = 0x5, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, + MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, }; /* reg_ppcnt_grp @@ -3173,6 +3360,7 @@ enum mlxsw_reg_ppcnt_grp { * 0x10: Per Priority Counters * 0x11: Per Traffic Class Counters * 0x12: Physical Layer Counters + * 0x13: Per Traffic Class Congestion Counters * Access: Index */ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6); @@ -3311,6 +3499,14 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet Extended Counter Group Counters */ + +/* reg_ppcnt_ecn_marked + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ecn_marked, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + /* Ethernet Per Priority Group Counters */ /* reg_ppcnt_rx_octets @@ -3386,6 +3582,14 @@ MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); +/* Ethernet Per Traffic Class Congestion Group Counters */ + +/* reg_ppcnt_wred_discard + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, wred_discard, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@ -7405,6 +7609,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(cwtp), + MLXSW_REG(cwtpm), MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 63e50877796b..2d46ec84ebdf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -74,8 +74,8 @@ #include "../mlxfw/mlxfw.h" #define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1420 -#define MLXSW_FWREV_SUBMINOR 122 +#define MLXSW_FWREV_MINOR 1530 +#define MLXSW_FWREV_SUBMINOR 152 static 
const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = { .major = MLXSW_FWREV_MAJOR, @@ -1324,6 +1324,38 @@ out: return err; } +static void +mlxsw_sp_port_get_hw_xstats(struct net_device *dev, + struct mlxsw_sp_port_xstats *xstats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int err, i; + + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, + ppcnt_pl); + if (!err) + xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); + + for (i = 0; i < TC_MAX_QUEUE; i++) { + err = mlxsw_sp_port_get_stats_raw(dev, + MLXSW_REG_PPCNT_TC_CONG_TC, + i, ppcnt_pl); + if (!err) + xstats->wred_drop[i] = + mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, + i, ppcnt_pl); + if (err) + continue; + + xstats->backlog[i] = + mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); + xstats->tail_drop[i] = + mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); + } +} + static void update_stats_cache(struct work_struct *work) { struct mlxsw_sp_port *mlxsw_sp_port = @@ -1335,6 +1367,8 @@ static void update_stats_cache(struct work_struct *work) mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, &mlxsw_sp_port->periodic_hw_stats.stats); + mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, + &mlxsw_sp_port->periodic_hw_stats.xstats); out: mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, @@ -1797,6 +1831,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); + case TC_SETUP_QDISC_RED: + return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); default: return -EOPNOTSUPP; } @@ -3007,6 +3043,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); + err = PTR_ERR(mlxsw_sp_port_vlan); goto err_port_vlan_get; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 47dd7e06fd29..58cf222fb985 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -48,6 +48,7 @@ #include <linux/notifier.h> #include <net/psample.h> #include <net/pkt_cls.h> +#include <net/red.h> #include "port.h" #include "core.h" @@ -203,6 +204,37 @@ struct mlxsw_sp_port_vlan { struct list_head bridge_vlan_node; }; +enum mlxsw_sp_qdisc_type { + MLXSW_SP_QDISC_NO_QDISC, + MLXSW_SP_QDISC_RED, +}; + +struct mlxsw_sp_qdisc { + u32 handle; + enum mlxsw_sp_qdisc_type type; + struct red_stats xstats_base; + union { + struct { + u64 tail_drop_base; + u64 ecn_base; + u64 wred_drop_base; + } red; + } xstats; + + u64 tx_bytes; + u64 tx_packets; + u64 drops; + u64 overlimits; +}; + +/* No internal lock needed; at worst we miss a single periodic iteration */ +struct mlxsw_sp_port_xstats { + u64 ecn; + u64 wred_drop[TC_MAX_QUEUE]; + u64 tail_drop[TC_MAX_QUEUE]; + u64 backlog[TC_MAX_QUEUE]; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -232,10 +264,12 @@ struct mlxsw_sp_port { struct { #define MLXSW_HW_STATS_UPDATE_TIME HZ struct rtnl_link_stats64 stats; + struct mlxsw_sp_port_xstats xstats; struct delayed_work update_dw; } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; + struct mlxsw_sp_qdisc root_qdisc; };
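A minimal sketch (illustrative, not part of the patch; the helper name is invented) of how the periodic xstats snapshot and the per-qdisc base values are meant to combine: the hardware counters are free-running, so offloaded qdisc statistics are reported as deltas against a base recorded when the qdisc was installed.

	/* sketch: RED probabilistic drops since install, using the structs above */
	static u64 wred_drops_since_install(const struct mlxsw_sp_port_xstats *xstats,
					    const struct mlxsw_sp_qdisc *qdisc,
					    int tclass)
	{
		/* u64 subtraction is wrap-safe for free-running counters */
		return xstats->wred_drop[tclass] - qdisc->xstats_base.prob_drop;
	}

static inline bool @@ -546,6 +580,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port 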
*mlxsw_sp_port, bool ingress, int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); +/* spectrum_qdisc.c */ +int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p); + /* spectrum_fid.c */ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type, u8 local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c new file mode 100644 index 000000000000..c33beac5def0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -0,0 +1,276 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <net/pkt_cls.h> +#include <net/red.h> + +#include "spectrum.h" +#include "reg.h" + +static int +mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, + int tclass_num, u32 min, u32 max, + u32 probability, bool is_ecn) +{ + char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err; + + mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num); + mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE, + roundup(min, MLXSW_REG_CWTP_MIN_VALUE), + roundup(max, MLXSW_REG_CWTP_MIN_VALUE), + probability); + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd); + if (err) + return err; + + mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, + MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); +} + +static int +mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, + int tclass_num) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; + + mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num, + MLXSW_REG_CWTPM_RESET_PROFILE, false, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); +} + +static void +mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num) +{ + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + + mlxsw_sp_qdisc->tx_packets = stats->tx_packets; + mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes; + + switch (mlxsw_sp_qdisc->type) { + case MLXSW_SP_QDISC_RED: + xstats_base->prob_mark = xstats->ecn; + xstats_base->prob_drop = xstats->wred_drop[tclass_num]; + xstats_base->pdrop = xstats->tail_drop[tclass_num]; + + mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop + + xstats_base->prob_mark; + mlxsw_sp_qdisc->drops = xstats_base->prob_drop + + xstats_base->pdrop; + break; + default: + break; + } +} + +static int +mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num) +{ + int err; + + if (mlxsw_sp_qdisc->handle != handle) + return 0; + + err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC; + + return err; +} + +static int +mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, + struct tc_red_qopt_offload_params *p) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 min, max; + u64 prob; + int err = 0; + + if (p->min > p->max) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: min %u is bigger than max %u\n", p->min, + p->max); + goto err_bad_param; + } + if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: max value %u is too big\n", p->max); + goto err_bad_param; + } + if (p->min == 0 || p->max == 0) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: 0 value is illegal for min and max\n"); + goto err_bad_param; + } + + /* Calculate probability in percentage: p->probability is a + * fixed-point fraction scaled by 2^32 (100% == 1 << 32), so this + * computes roughly ceil(prob * 100 / 2^32), done as two divisions + * by 1 << 16. + */
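+ /* Worked example (added for illustration): p->probability == 1U << 31, + * i.e. 50%, gives prob * 100 == 100 << 31; the first DIV_ROUND_UP + * yields 100 << 15 == 3276800 and the second yields 50. + */ + prob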
= p->probability; + prob *= 100; + prob = DIV_ROUND_UP(prob, 1 << 16); + prob = DIV_ROUND_UP(prob, 1 << 16); + min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); + max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); + err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, + max, prob, p->is_ecn); + if (err) + goto err_config; + + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; + if (mlxsw_sp_qdisc->handle != handle) + mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port, + mlxsw_sp_qdisc, + tclass_num); + + mlxsw_sp_qdisc->handle = handle; + return 0; + +err_bad_param: + err = -EINVAL; +err_config: + mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle, + mlxsw_sp_qdisc, tclass_num); + return err; +} + +static int +mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, struct red_stats *res) +{ + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct mlxsw_sp_port_xstats *xstats; + + if (mlxsw_sp_qdisc->handle != handle || + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) + return -EOPNOTSUPP; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + + res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + res->prob_mark = xstats->ecn - xstats_base->prob_mark; + res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + return 0; +} + +static int +mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, + struct tc_red_qopt_offload_stats *res) +{ + u64 tx_bytes, tx_packets, overlimits, drops; + struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; + + if (mlxsw_sp_qdisc->handle != handle || + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) + return -EOPNOTSUPP; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + + tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes; + tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets; + overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - + mlxsw_sp_qdisc->overlimits; + drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + mlxsw_sp_qdisc->drops; + + _bstats_update(res->bstats, tx_bytes, tx_packets); + res->qstats->overlimits += overlimits; + res->qstats->drops += drops; + res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + xstats->backlog[tclass_num]); + + mlxsw_sp_qdisc->drops += drops; + mlxsw_sp_qdisc->overlimits += overlimits; + mlxsw_sp_qdisc->tx_bytes += tx_bytes; + mlxsw_sp_qdisc->tx_packets += tx_packets; + return 0; +} + +#define MLXSW_SP_PORT_DEFAULT_TCLASS 0 + +int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p) +{ + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + int tclass_num; + + if (p->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc; + tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + + switch (p->command) { + case TC_RED_REPLACE: + return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num, + &p->set); + case TC_RED_DESTROY: + return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num); + case TC_RED_XSTATS: + return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num, + p->xstats); + case TC_RED_STATS: + return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num, + &p->stats); + default: + return -EOPNOTSUPP; 
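+ /* Only the port's root qdisc, bound to the default traffic class + * (MLXSW_SP_PORT_DEFAULT_TCLASS), is offloaded here; anything else + * falls back to software via -EOPNOTSUPP. + */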
+ } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e9187841d82a..632c7b229054 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2416,16 +2416,25 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router->neigh_ht); } +static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif *rif) +{ + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + + mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, + rif->rif_index, rif->addr); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; + mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, - rif_list_node) { - mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); + rif_list_node) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); - } } enum mlxsw_sp_nexthop_type { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5d5b9855e24e..b2299f2b2155 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1122,7 +1122,6 @@ static void vxge_set_multicast(struct net_device *dev) struct netdev_hw_addr *ha; struct vxgedev *vdev; int i, mcast_cnt = 0; - struct __vxge_hw_device *hldev; struct vxge_vpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info; @@ -1136,7 +1135,6 @@ static void vxge_set_multicast(struct net_device *dev) "%s:%d", __func__, __LINE__); vdev = netdev_priv(dev); - hldev = vdev->devh; if (unlikely(!is_vxge_card_up(vdev))) return; @@ -1283,7 +1281,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct vxgedev *vdev; - struct __vxge_hw_device *hldev; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info_new, mac_info_old; int vpath_idx = 0; @@ -1291,7 +1288,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); vdev = netdev_priv(dev); - hldev = vdev->devh; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; @@ -1534,7 +1530,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_reset failed for" "vpath:%d", vp_id); - return status; + return status; } } else return VXGE_HW_FAIL; @@ -1954,19 +1950,19 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) * for all VPATHs. The h/w only uses the lowest numbered VPATH * when steering frames. 
*/ - for (index = 0; index < vdev->no_of_vpath; index++) { + for (index = 0; index < vdev->no_of_vpath; index++) { status = vxge_hw_vpath_rts_rth_set( vdev->vpaths[index].handle, vdev->config.rth_algorithm, &hash_types, vdev->config.rth_bkt_sz); - if (status != VXGE_HW_OK) { + if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "RTH configuration failed for vpath:%d", vdev->vpaths[index].device_id); return status; - } - } + } + } return status; } @@ -1995,7 +1991,7 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_reset failed for " "vpath:%d", i); - return status; + return status; } } } @@ -2177,7 +2173,6 @@ static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) */ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) { - struct net_device *dev; struct __vxge_hw_device *hldev; u64 reason; enum vxge_hw_status status; @@ -2185,7 +2180,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); - dev = vdev->ndev; hldev = pci_get_drvdata(vdev->pdev); if (pci_channel_offline(vdev->pdev)) @@ -2480,32 +2474,31 @@ static int vxge_add_isr(struct vxgedev *vdev) switch (msix_idx) { case 0: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, - "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", + "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", vdev->ndev->name, vdev->entries[intr_cnt].entry, pci_fun, vp_idx); ret = request_irq( - vdev->entries[intr_cnt].vector, + vdev->entries[intr_cnt].vector, vxge_tx_msix_handle, 0, vdev->desc[intr_cnt], &vdev->vpaths[vp_idx].fifo); - vdev->vxge_entries[intr_cnt].arg = + vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx].fifo; irq_req = 1; break; case 1: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, - "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", + "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", vdev->ndev->name, vdev->entries[intr_cnt].entry, pci_fun, vp_idx); ret = request_irq( - vdev->entries[intr_cnt].vector, - vxge_rx_msix_napi_handle, - 0, + vdev->entries[intr_cnt].vector, + vxge_rx_msix_napi_handle, 0, vdev->desc[intr_cnt], &vdev->vpaths[vp_idx].ring); - vdev->vxge_entries[intr_cnt].arg = + vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx].ring; irq_req = 1; break; @@ -2518,9 +2511,9 @@ static int vxge_add_isr(struct vxgedev *vdev) vxge_rem_msix_isr(vdev); vdev->config.intr_type = INTA; vxge_debug_init(VXGE_ERR, - "%s: Defaulting to INTA" - , vdev->ndev->name); - goto INTA_MODE; + "%s: Defaulting to INTA", + vdev->ndev->name); + goto INTA_MODE; } if (irq_req) { @@ -2629,7 +2622,7 @@ static void vxge_poll_vp_lockup(struct timer_list *t) ring = &vdev->vpaths[i].ring; /* Truncated to machine word size number of frames */ - rx_frms = ACCESS_ONCE(ring->stats.rx_frms); + rx_frms = READ_ONCE(ring->stats.rx_frms); /* Did this vpath received any packets */ if (ring->stats.prev_rx_frms == rx_frms) { @@ -2713,14 +2706,13 @@ static int vxge_open(struct net_device *dev) struct vxge_vpath *vpath; int ret = 0; int i; - u64 val64, function_mode; + u64 val64; vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", dev->name, __func__, __LINE__); vdev = netdev_priv(dev); hldev = pci_get_drvdata(vdev->pdev); - function_mode = vdev->config.device_hw_info.function_mode; /* make sure you have link off by default every time Nic is * initialized */ @@ -4512,8 +4504,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "Failed to initialize device (%d)", status); - ret = -EINVAL; - goto _exit3; + 
ret = -EINVAL; + goto _exit3; } if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index de64cedf8b26..c1c595f8bb87 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -58,7 +58,6 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, const struct tc_action *action) { size_t act_size = sizeof(struct nfp_fl_push_vlan); - struct tcf_vlan *vlan = to_vlan(action); u16 tmp_push_vlan_tci; push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; @@ -67,8 +66,8 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->vlan_tpid = tcf_vlan_push_proto(action); tmp_push_vlan_tci = - FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) | + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | NFP_FL_PUSH_VLAN_CFI; push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e0283bb24f06..8fcc90c0d2d3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) return nfp_flower_cmsg_portmod(repr, false); } +static int +nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) +{ + return tc_setup_cb_egdev_register(netdev, + nfp_flower_setup_tc_egress_cb, + netdev_priv(netdev)); +} + +static void +nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) +{ + tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, + netdev_priv(netdev)); +} + static void nfp_flower_sriov_disable(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -452,6 +467,9 @@ const struct nfp_app_type app_flower = { .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, + .repr_init = nfp_flower_repr_netdev_init, + .repr_clean = nfp_flower_repr_netdev_clean, + .repr_open = nfp_flower_repr_netdev_open, .repr_stop = nfp_flower_repr_netdev_stop, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c90e72b7ff5a..e6b26c5ae6e0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -52,8 +52,7 @@ struct nfp_app; #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 -#define NFP_FL_META_FLAG_NEW_MASK 128 -#define NFP_FL_META_FLAG_LAST_MASK 1 +#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7) #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 @@ -197,5 +196,7 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); +int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 193520ef23f0..db977cf8e933 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ 
b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len, id = nfp_add_mask_table(app, mask_data, mask_len); if (id < 0) return false; - *meta_flags |= NFP_FL_META_FLAG_NEW_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } *mask_id = id; @@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, if (!mask_entry) return false; + if (meta_flags) + *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + *mask_id = mask_entry->mask_id; mask_entry->ref_cnt--; if (!mask_entry->ref_cnt) { @@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, nfp_release_mask_id(app, *mask_id); kfree(mask_entry); if (meta_flags) - *meta_flags |= NFP_FL_META_FLAG_LAST_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } return true; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index cdbb5464b790..f5d73b83dcc2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -131,7 +131,8 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f) static int nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, - struct tc_cls_flower_offload *flow) + struct tc_cls_flower_offload *flow, + bool egress) { struct flow_dissector_key_basic *mask_basic = NULL; struct flow_dissector_key_basic *key_basic = NULL; @@ -167,6 +168,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, flow->key); + if (!egress) + return -EOPNOTSUPP; + if (mask_enc_ctl->addr_type != 0xffff || enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) return -EOPNOTSUPP; @@ -194,6 +198,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, key_layer |= NFP_FLOWER_LAYER_VXLAN; key_size += sizeof(struct nfp_flower_vxlan); + } else if (egress) { + /* Reject non tunnel matches offloaded to egress repr. 
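+ * (The egdev callback registered in nfp_flower_repr_netdev_init() calls + * in with egress=true and is only meant to see tunnel-encapsulated flows.)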
*/ + return -EOPNOTSUPP; } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -315,7 +322,7 @@ err_free_flow: */ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow) + struct tc_cls_flower_offload *flow, bool egress) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; @@ -326,7 +333,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(key_layer, flow); + err = nfp_flower_calculate_key_layers(key_layer, flow, egress); if (err) goto err_free_key_ls; @@ -447,7 +454,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow) static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flower) + struct tc_cls_flower_offload *flower, bool egress) { if (!eth_proto_is_802_3(flower->common.protocol) || flower->common.chain_index) @@ -455,7 +462,7 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, switch (flower->command) { case TC_CLSFLOWER_REPLACE: - return nfp_flower_add_offload(app, netdev, flower); + return nfp_flower_add_offload(app, netdev, flower, egress); case TC_CLSFLOWER_DESTROY: return nfp_flower_del_offload(app, netdev, flower); case TC_CLSFLOWER_STATS: @@ -465,6 +472,23 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } +int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct nfp_repr *repr = cb_priv; + + if (!tc_can_offload(repr->netdev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nfp_flower_repr_offload(repr->app, repr->netdev, + type_data, true); + default: + return -EOPNOTSUPP; + } +} + static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { @@ -476,7 +500,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data); + type_data, false); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 54b67c9b8d5b..0e5e0305ad1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -76,6 +76,8 @@ extern const struct nfp_app_type app_flower; * @vnic_free: free up app's vNIC state * @vnic_init: vNIC netdev was registered * @vnic_clean: vNIC netdev about to be unregistered + * @repr_init: representor about to be registered + * @repr_clean: representor about to be unregistered * @repr_open: representor netdev open callback * @repr_stop: representor netdev stop callback * @start: start application logic @@ -109,6 +111,9 @@ struct nfp_app_type { int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn); void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn); + int (*repr_init)(struct nfp_app *app, struct net_device *netdev); + void (*repr_clean)(struct nfp_app *app, struct net_device *netdev); + int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); @@ -212,6 +217,21 @@ static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr) return app->type->repr_stop(app, repr); } +static inline int +nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev) +{ + if 
(!app->type->repr_init) + return 0; + return app->type->repr_init(app, netdev); +} + +static inline void +nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev) +{ + if (app->type->repr_clean) + app->type->repr_clean(app, netdev); +} + static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) { app->ctrl = ctrl; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 232044b1b7aa..1a603fdd9e80 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1185,7 +1185,7 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) } else { struct page *page; - page = alloc_page(GFP_KERNEL | __GFP_COLD); + page = alloc_page(GFP_KERNEL); frag = page ? page_address(page) : NULL; } if (!frag) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 60c8d733a37d..2801ecd09eab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -104,7 +104,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { { "rx_frame_too_long_errors", NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, }, { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, }, - { "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, }, + { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, }, { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, }, { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, }, { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, }, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 1bce8c131bb9..924a05e05da0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -258,6 +258,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { static void nfp_repr_clean(struct nfp_repr *repr) { unregister_netdev(repr->netdev); + nfp_app_repr_clean(repr->app, repr->netdev); dst_release((struct dst_entry *)repr->dst); nfp_port_free(repr->port); } @@ -297,6 +298,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->netdev_ops = &nfp_repr_netdev_ops; netdev->ethtool_ops = &nfp_port_ethtool_ops; + netdev->max_mtu = pf_netdev->max_mtu; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { @@ -304,12 +307,18 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->hw_features |= NETIF_F_HW_TC; } - err = register_netdev(netdev); + err = nfp_app_repr_init(app, netdev); if (err) goto err_clean; + err = register_netdev(netdev); + if (err) + goto err_repr_clean; + return 0; +err_repr_clean: + nfp_app_repr_clean(app, netdev); err_clean: dst_release((struct dst_entry *)repr->dst); return err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 51dcb9c603ee..21bd4aa32646 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -157,7 +157,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port); /* unused 0x008 */ #define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010) #define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018) -#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020) +#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK 
(NFP_MAC_STATS_BASE + 0x020) #define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028) #define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030) #define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 31a943860f32..ac8439ceea10 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2226,8 +2226,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* setup the header buffer */ do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, @@ -2262,8 +2260,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) offset = 0; do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; if (!start_tx_ctx) start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; @@ -2304,6 +2300,16 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) } while (frag_size); } + if (unlikely(put_tx == np->first_tx.orig)) + prev_tx = np->last_tx.orig; + else + prev_tx = put_tx - 1; + + if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + prev_tx_ctx = np->last_tx_ctx; + else + prev_tx_ctx = np->put_tx_ctx - 1; + /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); @@ -2377,8 +2383,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* setup the header buffer */ do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, @@ -2414,8 +2418,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, offset = 0; do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : frag_size; if (!start_tx_ctx) start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; @@ -2456,6 +2458,16 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, } while (frag_size); } + if (unlikely(put_tx == np->first_tx.ex)) + prev_tx = np->last_tx.ex; + else + prev_tx = put_tx - 1; + + if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + prev_tx_ctx = np->last_tx_ctx; + else + prev_tx_ctx = np->put_tx_ctx - 1; + /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6e15d3c10ebf..fe7c1f230028 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1277,11 +1277,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, { struct qed_dcbx_get *dcbx_info; - dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC); + dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC); if (!dcbx_info) return NULL; - memset(dcbx_info, 0, sizeof(*dcbx_info)); if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { kfree(dcbx_info); return NULL; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 9feec7009443..7b97a9969046 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1092,8 +1092,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, { if (!rx_ring->pg_chunk.page) { u64 map; - rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | - GFP_ATOMIC, + rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC, qdev->lbq_buf_order); if (unlikely(!rx_ring->pg_chunk.page)) { netif_err(qdev, drv, qdev->ndev, @@ -4725,9 +4724,9 @@ static const struct net_device_ops qlge_netdev_ops = { .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, }; -static void ql_timer(unsigned long data) +static void ql_timer(struct timer_list *t) { - struct ql_adapter *qdev = (struct ql_adapter *)data; + struct ql_adapter *qdev = from_timer(qdev, t, timer); u32 var = 0; var = ql_read32(qdev, STS); @@ -4806,11 +4805,8 @@ static int qlge_probe(struct pci_dev *pdev, /* Start up the timer to trigger EEH if * the bus goes dead */ - init_timer_deferrable(&qdev->timer); - qdev->timer.data = (unsigned long)qdev; - qdev->timer.function = ql_timer; - qdev->timer.expires = jiffies + (5*HZ); - add_timer(&qdev->timer); + timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); + mod_timer(&qdev->timer, jiffies + (5*HZ)); ql_link_off(qdev); ql_display_dev_info(ndev); atomic_set(&qdev->lb_count, 0); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c index 384c8bc874f3..4be65d6761b3 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c @@ -213,7 +213,6 @@ static int ql_idc_req_aen(struct ql_adapter *qdev) /* Get the status data and start up a thread to * handle the request. 
*/ - mbcp = &qdev->idc_mbc; mbcp->out_count = 4; status = ql_get_mb_sts(qdev, mbcp); if (status) { diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fd218fd9ef3c..2cb3622c4acc 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1982,8 +1982,6 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, rtl_writephy(tp, MII_ADVERTISE, auto_nego); rtl_writephy(tp, MII_CTRL1000, giga_ctrl); } else { - giga_ctrl = 0; - if (speed == SPEED_10) bmcr = 0; else if (speed == SPEED_100) @@ -3791,27 +3789,32 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); /* EEE setting */ - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000); rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000); rtl_writephy(tp, 0x1f, 0x0002); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy(tp, 0x0d, 0x0007); rtl_writephy(tp, 0x0e, 0x003c); rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0e, 0x0006); rtl_writephy(tp, 0x0d, 0x0000); /* Green feature */ rtl_writephy(tp, 0x1f, 0x0003); - rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000); + rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); + /* soft-reset phy */ + rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); /* Broken BIOS workaround: feed GigaMAC registers with MAC address. 
*/ rtl_rar_exgmac_set(tp, tp->dev->dev_addr); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 19a91881fbf9..e566dbb3343d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2077,7 +2077,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); - if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { + if (likely(READ_ONCE(efx->irq_soft_enabled))) { /* Note test interrupts */ if (context->index == efx->irq_level) efx->last_irq_cpu = raw_smp_processor_id(); @@ -2092,7 +2092,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) { struct efx_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); struct efx_channel *channel; efx_dword_t reg; u32 queues; @@ -3299,7 +3299,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, bool rx_cont; u16 flags = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; /* Basic packet information */ @@ -3436,7 +3436,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) unsigned int tx_ev_q_label; int tx_descs = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) @@ -5324,7 +5324,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) int i; for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { - if (ACCESS_ONCE(table->entry[i].spec) & + if (READ_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { rc = efx_ef10_filter_remove_internal(efx, 1U << EFX_FILTER_PRI_AUTO, i, true); @@ -5734,7 +5734,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); - } else { + } else if (rc) { efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, sizeof(inbuf), NULL, 0, rc); } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 6668e371405c..e3c492fcaff0 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2810,7 +2810,7 @@ static void efx_reset_work(struct work_struct *data) unsigned long pending; enum reset_type method; - pending = ACCESS_ONCE(efx->reset_pending); + pending = READ_ONCE(efx->reset_pending); method = fls(pending) - 1; if (method == RESET_TYPE_MC_BIST) @@ -2875,7 +2875,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) /* If we're not READY then just leave the flags set as the cue * to abort probing or reschedule the reset later. 
*/ - if (ACCESS_ONCE(efx->state) != STATE_READY) + if (READ_ONCE(efx->state) != STATE_READY) return; /* efx_process_channel() will no longer read events once a diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 6685a66ee1a3..3d6c91e96589 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2543,7 +2543,7 @@ static void ef4_reset_work(struct work_struct *data) unsigned long pending; enum reset_type method; - pending = ACCESS_ONCE(efx->reset_pending); + pending = READ_ONCE(efx->reset_pending); method = fls(pending) - 1; if ((method == RESET_TYPE_RECOVER_OR_DISABLE || @@ -2603,7 +2603,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) /* If we're not READY then just leave the flags set as the cue * to abort probing or reschedule the reset later. */ - if (ACCESS_ONCE(efx->state) != STATE_READY) + if (READ_ONCE(efx->state) != STATE_READY) return; queue_work(reset_workqueue, &efx->reset_work); diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index ccda017b6794..6520d7bc8d21 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Check to see if we have a serious error condition */ @@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx) ef4_oword_t reg; int link_speed, isolate; - isolate = !!ACCESS_ONCE(efx->reset_pending); + isolate = !!READ_ONCE(efx->reset_pending); switch (link_state->speed) { case 10000: link_speed = 3; break; diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 05916c710d8c..494884f6af4a 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c @@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) struct ef4_nic *efx = channel->efx; int tx_packets = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { @@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) struct ef4_rx_queue *rx_queue; struct ef4_nic *efx = channel->efx; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return; rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); @@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) { struct ef4_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); ef4_oword_t *int_ker = efx->irq_status.addr; irqreturn_t result = IRQ_NONE; struct ef4_channel *channel; @@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id) "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Handle non-event-queue sources */ diff --git 
a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index e2e3c008d073..07c62dc552cb 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_ static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, unsigned int write_count) { - unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); if (empty_read_count == 0) return false; @@ -466,11 +466,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx); static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) { - return ACCESS_ONCE(channel->event_test_cpu); + return READ_ONCE(channel->event_test_cpu); } static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) { - return ACCESS_ONCE(efx->last_irq_cpu); + return READ_ONCE(efx->last_irq_cpu); } /* Global Resources */ diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 382019b302db..02456ed13a7d 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -163,7 +163,7 @@ static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic) do { page = ef4_reuse_page(rx_queue); if (page == NULL) { - page = alloc_pages(__GFP_COLD | __GFP_COMP | + page = alloc_pages(__GFP_COMP | (atomic ? GFP_ATOMIC : GFP_KERNEL), efx->rx_buffer_order); if (unlikely(page == NULL)) diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..3409bbf5b19f 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1) */ netif_tx_stop_queue(txq1->core_txq); smp_mb(); - txq1->old_read_count = ACCESS_ONCE(txq1->read_count); - txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + txq1->old_read_count = READ_ONCE(txq1->read_count); + txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = max(txq1->insert_count - txq1->old_read_count, txq2->insert_count - txq2->old_read_count); @@ -435,7 +435,7 @@ int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, unsigned tc, num_tc; int rc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; num_tc = mqprio->num_tc; @@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { - tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 6608dfe455b1..5334dc83d926 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) struct efx_nic *efx = channel->efx; int tx_packets = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { @@ -983,7 +983,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) struct efx_rx_queue *rx_queue; struct 
efx_nic *efx = channel->efx; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return; rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); @@ -1524,7 +1524,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) { struct efx_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); efx_oword_t *int_ker = efx->irq_status.addr; irqreturn_t result = IRQ_NONE; struct efx_channel *channel; @@ -1616,7 +1616,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Handle non-event-queue sources */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4d7fb8af880d..7b51b6371724 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) { - unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); if (empty_read_count == 0) return false; @@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { - return ACCESS_ONCE(channel->event_test_cpu); + return READ_ONCE(channel->event_test_cpu); } static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) { - return ACCESS_ONCE(efx->last_irq_cpu); + return READ_ONCE(efx->last_irq_cpu); } /* Global Resources */ diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..caa89bf7603e 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -648,17 +648,15 @@ static void efx_ptp_send_times(struct efx_nic *efx, struct pps_event_time now; struct timespec64 limit; struct efx_ptp_data *ptp = efx->ptp_data; - struct timespec64 start; int *mc_running = ptp->start.addr; pps_get_ts(&now); - start = now.ts_real; limit = now.ts_real; timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); /* Write host time for specified period or until MC is done */ while ((timespec64_compare(&now.ts_real, &limit) < 0) && - ACCESS_ONCE(*mc_running)) { + READ_ONCE(*mc_running)) { struct timespec64 update_time; unsigned int host_time; @@ -668,7 +666,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, do { pps_get_ts(&now); } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && - ACCESS_ONCE(*mc_running)); + READ_ONCE(*mc_running)); /* Synchronise NIC with single word of time only */ host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | @@ -832,14 +830,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) ptp->start.dma_addr); /* Clear flag that signals MC ready */ - ACCESS_ONCE(*start) = 0; + WRITE_ONCE(*start, 0); rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, MC_CMD_PTP_IN_SYNCHRONIZE_LEN); EFX_WARN_ON_ONCE_PARANOID(rc); /* Wait for start from MCDI (or timeout) */ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); - while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { + while (!READ_ONCE(*start) 
&& (time_before(jiffies, timeout))) { udelay(20); /* Usually start MCDI execution quickly */ loops++; } @@ -849,7 +847,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) if (!time_before(jiffies, timeout)) ++ptp->sync_timeouts; - if (ACCESS_ONCE(*start)) + if (READ_ONCE(*start)) efx_ptp_send_times(efx, &last_time); /* Collect results */ diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8cb60513dca2..cfe76aad79ee 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -163,7 +163,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) do { page = efx_reuse_page(rx_queue); if (page == NULL) { - page = alloc_pages(__GFP_COLD | __GFP_COMP | + page = alloc_pages(__GFP_COMP | (atomic ? GFP_ATOMIC : GFP_KERNEL), efx->rx_buffer_order); if (unlikely(page == NULL)) diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..0ea7e16f2e6e 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) */ netif_tx_stop_queue(txq1->core_txq); smp_mb(); - txq1->old_read_count = ACCESS_ONCE(txq1->read_count); - txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + txq1->old_read_count = READ_ONCE(txq1->read_count); + txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = max(txq1->insert_count - txq1->old_read_count, txq2->insert_count - txq2->old_read_count); @@ -663,7 +663,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, unsigned tc, num_tc; int rc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; num_tc = mqprio->num_tc; @@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { - tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index aeda3ab2d761..789dad8a07b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -98,7 +98,7 @@ #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ GMAC_INT_PCS_ANE) -#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) enum dwmac4_irq_status { time_stamp_irq = 0x00001000, @@ -106,6 +106,7 @@ enum dwmac4_irq_status { mmc_tx_irq = 0x00000400, mmc_rx_irq = 0x00000200, mmc_irq = 0x00000100, + lpi_irq = 0x00000020, pmt_irq = 0x00000010, }; @@ -132,6 +133,10 @@ enum power_event { #define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ #define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ #define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 2f7d7ec59962..f3ed8f7853eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -580,6 +580,25 @@ static int dwmac4_irq_status(struct mac_device_info *hw, x->irq_receive_pmt_irq_n++; } + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & lpi_irq) { + /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ + u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); if (intr_status & PCS_RGSMIIIS_IRQ) dwmac4_phystatus(ioaddr, x); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index ab502ee35fb2..06001bacbe0f 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6243,7 +6243,7 @@ static void niu_get_rx_stats(struct niu *np, pkts = dropped = errors = bytes = 0; - rx_rings = ACCESS_ONCE(np->rx_rings); + rx_rings = READ_ONCE(np->rx_rings); if (!rx_rings) goto no_rings; @@ -6274,7 +6274,7 @@ static void niu_get_tx_stats(struct niu *np, pkts = errors = bytes = 0; - tx_rings = ACCESS_ONCE(np->tx_rings); + tx_rings = READ_ONCE(np->tx_rings); if (!tx_rings) goto no_rings; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c index e9672b1f9968..031cf9c3435a 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c @@ -335,7 +335,7 @@ static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata, dma_addr_t pages_dma; /* Try to obtain pages, decreasing order if necessary */ - gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; + gfp |= __GFP_COMP | __GFP_NOWARN; while (order >= 0) { pages = alloc_pages(gfp, order); if (pages) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index db8a4bcfc6c7..a73600dceb8b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -119,8 +119,8 @@ do { \ #define CPDMA_RXCP 0x60 #define CPSW_POLL_WEIGHT 64 -#define CPSW_MIN_PACKET_SIZE 60 -#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4) +#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN) +#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 437d36289786..ed58c746e4af 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -906,7 +906,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) sw_data[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ - page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); + page = alloc_page(GFP_ATOMIC | GFP_DMA); if (unlikely(!page)) { dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail; @@ -1887,7 +1887,7 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (type != TC_SETUP_MQPRIO) + if (type != 
TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index c00102b8145a..b3e5816a4678 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -40,7 +40,7 @@ #include <linux/tcp.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#include <linux/tick.h> +#include <linux/sched/isolation.h> #include <asm/checksum.h> #include <asm/homecache.h> @@ -2270,8 +2270,8 @@ static int __init tile_net_init_module(void) tile_net_dev_init(name, mac); if (!network_cpus_init()) - cpumask_and(&network_cpus_map, housekeeping_cpumask(), - cpu_online_mask); + cpumask_and(&network_cpus_map, + housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask); return 0; } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 49ccee4b9aec..56d06282fbde 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -608,9 +608,9 @@ static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) * ISSUE: Maybe instead track number of expected completions, and free * only that many, resetting to zero if "pending" is ever false. */ -static void tile_net_handle_egress_timer(unsigned long arg) +static void tile_net_handle_egress_timer(struct timer_list *t) { - struct tile_net_cpu *info = (struct tile_net_cpu *)arg; + struct tile_net_cpu *info = from_timer(info, t, egress_timer); struct net_device *dev = info->napi.dev; /* The timer is no longer scheduled. */ @@ -1004,9 +1004,8 @@ static void tile_net_register(void *dev_ptr) BUG(); /* Initialize the egress timer. */ - init_timer_pinned(&info->egress_timer); - info->egress_timer.data = (long)info; - info->egress_timer.function = tile_net_handle_egress_timer; + timer_setup(&info->egress_timer, tile_net_handle_egress_timer, + TIMER_PINNED); u64_stats_init(&info->stats.syncp);
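
A note on the nfp_app_repr_init()/nfp_app_repr_clean() wrappers added in nfp_app.h above: they follow the kernel's usual optional-ops idiom, where a NULL callback means "not implemented" and the int-returning hook reports success in that case. A standalone sketch of the idiom, assuming nothing beyond what the hunk shows (names are illustrative, not from the patch):

    #include <stdio.h>

    struct app;

    struct app_ops {
            int  (*init)(struct app *app);   /* optional hook */
            void (*clean)(struct app *app);  /* optional hook */
    };

    struct app {
            const struct app_ops *ops;
    };

    static int app_init(struct app *app)
    {
            if (!app->ops->init)
                    return 0;               /* absent hook counts as success */
            return app->ops->init(app);
    }

    static void app_clean(struct app *app)
    {
            if (app->ops->clean)
                    app->ops->clean(app);
    }

    int main(void)
    {
            static const struct app_ops no_hooks = { 0 };
            struct app a = { .ops = &no_hooks };

            printf("init -> %d\n", app_init(&a)); /* 0: hook absent, succeeds */
            app_clean(&a);                        /* quietly a no-op */
            return 0;
    }

This is why nfp_repr_init() can call the wrappers unconditionally: apps that do not implement the representor hooks pay nothing.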
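
The forcedeth hunks replace per-iteration bookkeeping of prev_tx/prev_tx_ctx with a single computation after the fill loops: the last descriptor written is one before the current put pointer, wrapping to the ring's last entry when the put pointer sits at the first. The arithmetic in isolation (ring size and names are mine, not the driver's):

    #include <stdio.h>

    #define RING_SIZE 8

    struct tx_desc {
            unsigned int flaglen;
    };

    static struct tx_desc ring[RING_SIZE];

    /* put points at the next free slot; the most recently filled descriptor
     * is put - 1, wrapping to the last entry when put sits at the first. */
    static struct tx_desc *prev_entry(struct tx_desc *put)
    {
            if (put == &ring[0])
                    return &ring[RING_SIZE - 1];
            return put - 1;
    }

    int main(void)
    {
            printf("%td\n", prev_entry(&ring[3]) - ring); /* 2 */
            printf("%td\n", prev_entry(&ring[0]) - ring); /* 7: wrapped */
            return 0;
    }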
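
The qed_dcbx.c change is a common cleanup: kzalloc() returns zeroed memory, so a kmalloc() followed by memset(..., 0, ...) collapses into one call. Roughly, in kernel context (struct name is illustrative):

    #include <linux/slab.h>

    struct dcbx_info {
            int field_a;
            int field_b;
    };

    static struct dcbx_info *get_info(void)
    {
            /* before: p = kmalloc(sizeof(*p), GFP_ATOMIC);
             *         memset(p, 0, sizeof(*p));
             * kzalloc() allocates and zeroes in one step. */
            return kzalloc(sizeof(struct dcbx_info), GFP_ATOMIC);
    }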
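
The bulk of the sfc and niu churn is the tree-wide ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion. The intent is unchanged: force a single, untorn access that the compiler may not cache, duplicate, or tear, which matters for the lockless readers in the interrupt and reset paths touched here. A minimal before/after sketch (kernel context):

    #include <linux/compiler.h>

    static unsigned long reset_pending;

    static void reset_poll(void)
    {
            /* before: pending = ACCESS_ONCE(reset_pending);
             *         ACCESS_ONCE(reset_pending) = 0;        */
            unsigned long pending = READ_ONCE(reset_pending); /* one untorn load */

            if (pending)
                    WRITE_ONCE(reset_pending, 0);             /* one untorn store */
    }

Unlike ACCESS_ONCE(), the READ_ONCE()/WRITE_ONCE() pair also works on non-scalar types, which is what motivated the tree-wide switch.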
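
Finally, the qlge and tilepro hunks are instances of the timer-API conversion: the callback takes a struct timer_list * instead of an unsigned long, from_timer() (a container_of() wrapper) recovers the enclosing structure, and timer_setup() replaces the init_timer_*() call plus the .data/.function assignments. The shape of the conversion, with made-up names (kernel context, not buildable standalone):

    #include <linux/timer.h>

    struct my_dev {
            struct timer_list timer;    /* embedded, as qdev->timer above */
            unsigned long ticks;
    };

    static void my_timer_fn(struct timer_list *t)
    {
            /* from_timer() is container_of() keyed on the embedded member */
            struct my_dev *dev = from_timer(dev, t, timer);

            dev->ticks++;
            mod_timer(&dev->timer, jiffies + 5 * HZ);   /* re-arm */
    }

    static void my_dev_start_timer(struct my_dev *dev)
    {
            /* replaces init_timer_deferrable() plus the .data/.function
             * assignments; tilepro does the same with TIMER_PINNED */
            timer_setup(&dev->timer, my_timer_fn, TIMER_DEFERRABLE);
            mod_timer(&dev->timer, jiffies + 5 * HZ);
    }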