Diffstat (limited to 'drivers/net/ethernet')
145 files changed, 8873 insertions, 6233 deletions
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 8cfce95c82fc..39cd3a27fe77 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -107,7 +107,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) return 0; } -int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) { int err = 0; u32 h = 0U; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 9ffc4a8c5fc7..3853296d78c1 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -384,7 +384,7 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, struct bnx2 *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - if (ops == NULL) + if (!ops) return -EINVAL; if (cp->drv_state & CNIC_DRV_STATE_REGD) @@ -755,13 +755,13 @@ bnx2_alloc_tx_mem(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) return -ENOMEM; txr->tx_desc_ring = dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, &txr->tx_desc_mapping, GFP_KERNEL); - if (txr->tx_desc_ring == NULL) + if (!txr->tx_desc_ring) return -ENOMEM; } return 0; @@ -779,7 +779,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) rxr->rx_buf_ring = vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return -ENOMEM; for (j = 0; j < bp->rx_max_ring; j++) { @@ -788,7 +788,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_desc_ring[j] == NULL) + if (!rxr->rx_desc_ring[j]) return -ENOMEM; } @@ -796,7 +796,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) if (bp->rx_pg_ring_size) { rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * bp->rx_max_pg_ring); - if (rxr->rx_pg_ring == NULL) + if (!rxr->rx_pg_ring) return -ENOMEM; } @@ -807,7 +807,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_pg_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_pg_desc_ring[j] == NULL) + if (!rxr->rx_pg_desc_ring[j]) return -ENOMEM; } @@ -845,7 +845,7 @@ bnx2_alloc_stats_blk(struct net_device *dev) sizeof(struct statistics_block); status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, &bp->status_blk_mapping, GFP_KERNEL); - if (status_blk == NULL) + if (!status_blk) return -ENOMEM; bp->status_blk = status_blk; @@ -914,7 +914,7 @@ bnx2_alloc_mem(struct bnx2 *bp) BNX2_PAGE_SIZE, &bp->ctx_blk_mapping[i], GFP_KERNEL); - if (bp->ctx_blk[i] == NULL) + if (!bp->ctx_blk[i]) goto alloc_mem_err; } } @@ -2667,7 +2667,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) u32 val; good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); - if (good_mbuf == NULL) + if (!good_mbuf) return -ENOMEM; BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, @@ -3225,7 +3225,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (len <= bp->rx_copy_thresh) { skb = netdev_alloc_skb(bp->dev, len + 6); - if (skb == NULL) { + if (!skb) { bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); goto next_rx; @@ -3285,7 +3285,7 @@ next_rx: sw_cons = BNX2_NEXT_RX_BD(sw_cons); sw_prod = BNX2_NEXT_RX_BD(sw_prod); - if ((rx_pkt == budget)) + if (rx_pkt == budget) break; /* Refresh hw_cons to see if there is new work */ @@ -4561,7 +4561,7 @@ 
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (align_start || align_end) { align_buf = kmalloc(len32, GFP_KERNEL); - if (align_buf == NULL) + if (!align_buf) return -ENOMEM; if (align_start) { memcpy(align_buf, start, 4); @@ -4575,7 +4575,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); - if (flash_buffer == NULL) { + if (!flash_buffer) { rc = -ENOMEM; goto nvram_write_end; } @@ -5440,7 +5440,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; int j; - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) continue; for (j = 0; j < BNX2_TX_DESC_CNT; ) { @@ -5448,7 +5448,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct sk_buff *skb = tx_buf->skb; int k, last; - if (skb == NULL) { + if (!skb) { j = BNX2_NEXT_TX_BD(j); continue; } @@ -5485,14 +5485,14 @@ bnx2_free_rx_skbs(struct bnx2 *bp) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; int j; - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return; for (j = 0; j < bp->rx_max_ring_idx; j++) { struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; u8 *data = rx_buf->data; - if (data == NULL) + if (!data) continue; dma_unmap_single(&bp->pdev->dev, @@ -6826,7 +6826,7 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); - if (bp->stats_blk == NULL) + if (!bp->stats_blk) return; net_stats->rx_packets = @@ -7217,7 +7217,7 @@ bnx2_get_eeprom_len(struct net_device *dev) { struct bnx2 *bp = netdev_priv(dev); - if (bp->flash_info == NULL) + if (!bp->flash_info) return 0; return (int) bp->flash_size; @@ -7678,7 +7678,7 @@ bnx2_get_ethtool_stats(struct net_device *dev, u32 *temp_stats = (u32 *) bp->temp_stats_blk; u8 *stats_len_arr = NULL; - if (hw_stats == NULL) { + if (!hw_stats) { memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); return; } @@ -8121,7 +8121,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->temp_stats_blk = kzalloc(sizeof(struct statistics_block), GFP_KERNEL); - if (bp->temp_stats_blk == NULL) { + if (!bp->temp_stats_blk) { rc = -ENOMEM; goto err_out; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 95871576ab92..8cd73ff5debc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4962,8 +4962,13 @@ void bnx2x_tx_timeout(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); -#ifdef BNX2X_STOP_ON_ERROR + /* We want the information of the dump logged, + * but calling bnx2x_panic() would kill all chances of recovery. + */ if (!bp->panic) +#ifndef BNX2X_STOP_ON_ERROR + bnx2x_panic_dump(bp, false); +#else bnx2x_panic(); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e500528ad751..8a815bb57177 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1766,7 +1766,7 @@ static int load_firmware(struct octeon_device *oct) ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); if (ret) { - dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", + dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n", fw_name); release_firmware(fw); return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e988caa797cb..20b6e1b3f5e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size; void *s = NULL; - void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -633,7 +633,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, } *(void **)metadata = s; } - memset(p, 0, len); return p; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 276f22357f81..7a271feec5e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size + stat_size; void *s = NULL; - void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -708,7 +708,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, } if (metadata) *(void **)metadata = s; - memset(p, 0, len); return p; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 39da7e3c804b..974a868a4824 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3941,6 +3941,7 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(FORCE_PAUSE); CAP16_TO_CAP32(MDIAUTO); CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); @@ -3982,6 +3983,7 @@ static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) CAP32_TO_CAP16(802_3_PAUSE); CAP32_TO_CAP16(802_3_ASM_DIR); CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(FORCE_PAUSE); CAP32_TO_CAP16(MDIAUTO); CAP32_TO_CAP16(MDISTRAIGHT); CAP32_TO_CAP16(FEC_RS); @@ -4014,6 +4016,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) fw_pause |= FW_PORT_CAP32_FC_RX; if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_FC_TX; + if (!(cc_pause & PAUSE_AUTONEG)) + fw_pause |= FW_PORT_CAP32_FORCE_PAUSE; return fw_pause; } @@ -4101,7 +4105,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } - if (rcap & ~lc->pcaps) { + /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so + * we need to exclude this from this check in order to maintain + * compatibility ... 
+ */ + if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) { dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", rcap, lc->pcaps); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 2d91480a5a0e..f1967cf6d43c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2475,7 +2475,7 @@ enum fw_port_cap { FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, - FW_PORT_CAP_FEC_RESERVED = 0x2000, + FW_PORT_CAP_FORCE_PAUSE = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; @@ -2522,7 +2522,8 @@ enum fw_port_mdi { #define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL #define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL #define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL -#define FW_PORT_CAP32_RESERVED2 0xf0000000UL +#define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL +#define FW_PORT_CAP32_RESERVED2 0xe0000000UL #define FW_PORT_CAP32_SPEED_S 0 #define FW_PORT_CAP32_SPEED_M 0xfff diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index dfce5df7538e..3007e1ac1e61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, * Allocate the hardware ring and PCI DMA bus address space for said. */ size_t hwlen = nelem * hwsize + stat_size; - void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); if (!hwring) return NULL; @@ -776,11 +776,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, *(void **)swringp = swring; } - /* - * Zero out the hardware ring and return its address as our function - * value. 
- */ - memset(hwring, 0, hwlen); return hwring; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index bd3f6e4d1341..ff9eb45f67f8 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -539,7 +539,7 @@ static int gmac_setup_txqs(struct net_device *netdev) } if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "TX queue base it not aligned\n"); + dev_warn(geth->dev, "TX queue base is not aligned\n"); kfree(skb_tab); return -ENOMEM; } @@ -680,7 +680,7 @@ static int gmac_setup_rxq(struct net_device *netdev) if (!port->rxq_ring) return -ENOMEM; if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { - dev_warn(geth->dev, "RX queue base it not aligned\n"); + dev_warn(geth->dev, "RX queue base is not aligned\n"); return -ENOMEM; } @@ -905,7 +905,7 @@ static int geth_setup_freeq(struct gemini_ethernet *geth) if (!geth->freeq_ring) return -ENOMEM; if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "queue ring base it not aligned\n"); + dev_warn(geth->dev, "queue ring base is not aligned\n"); goto err_freeq; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c697e79e491e..8f755009ff38 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter) if ((val & POST_STAGE_FAT_LOG_START) != POST_STAGE_FAT_LOG_START && (val & POST_STAGE_ARMFW_UE) - != POST_STAGE_ARMFW_UE) + != POST_STAGE_ARMFW_UE && + (val & POST_STAGE_RECOVERABLE_ERR) + != POST_STAGE_RECOVERABLE_ERR) return; } diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index ed8ad0fefbda..0914a3ea4405 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o -obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ab7521c04eb2..c729665107f5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3514,7 +3514,7 @@ fec_probe(struct platform_device *pdev) goto failed_init; for (i = 0; i < irq_cnt; i++) { - sprintf(irq_name, "int%d", i); + snprintf(irq_name, sizeof(irq_name), "int%d", i); irq = platform_get_irq_byname(pdev, irq_name); if (irq < 0) irq = platform_get_irq(pdev, i); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 5aa814799d70..8e42c0246611 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1372,7 +1372,4 @@ struct filer_table { struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; }; -/* The gianfar_ptp module will set this variable */ -extern int gfar_phc_index; - #endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index a93e0199c369..8cb98cae0a6f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -41,6 +41,8 @@ #include <linux/phy.h> #include <linux/sort.h> #include 
<linux/if_vlan.h> +#include <linux/of_platform.h> +#include <linux/fsl/ptp_qoriq.h> #include "gianfar.h" @@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -int gfar_phc_index = -1; -EXPORT_SYMBOL(gfar_phc_index); - static int gfar_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); + struct platform_device *ptp_dev; + struct device_node *ptp_node; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } + + ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); + if (ptp_node) { + ptp_dev = of_find_device_by_node(ptp_node); + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + } + + if (ptp) + info->phc_index = ptp->phc_index; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = gfar_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c deleted file mode 100644 index 9f8d4f8e57e3..000000000000 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * PTP 1588 clock using the eTSEC - * - * Copyright (C) 2010 OMICRON electronics GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/device.h> -#include <linux/hrtimer.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <linux/timex.h> -#include <linux/io.h> - -#include <linux/ptp_clock_kernel.h> - -#include "gianfar.h" - -/* - * gianfar ptp registers - * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 - */ -struct gianfar_ptp_registers { - u32 tmr_ctrl; /* Timer control register */ - u32 tmr_tevent; /* Timestamp event register */ - u32 tmr_temask; /* Timer event mask register */ - u32 tmr_pevent; /* Timestamp event register */ - u32 tmr_pemask; /* Timer event mask register */ - u32 tmr_stat; /* Timestamp status register */ - u32 tmr_cnt_h; /* Timer counter high register */ - u32 tmr_cnt_l; /* Timer counter low register */ - u32 tmr_add; /* Timer drift compensation addend register */ - u32 tmr_acc; /* Timer accumulator register */ - u32 tmr_prsc; /* Timer prescale */ - u8 res1[4]; - u32 tmroff_h; /* Timer offset high */ - u32 tmroff_l; /* Timer offset low */ - u8 res2[8]; - u32 tmr_alarm1_h; /* Timer alarm 1 high register */ - u32 tmr_alarm1_l; /* Timer alarm 1 high register */ - u32 tmr_alarm2_h; /* Timer alarm 2 high register */ - u32 tmr_alarm2_l; /* Timer alarm 2 high register */ - u8 res3[48]; - u32 tmr_fiper1; /* Timer fixed period interval */ - u32 tmr_fiper2; /* Timer fixed period interval */ - u32 tmr_fiper3; /* Timer fixed period interval */ - u8 res4[20]; - u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ -}; - -/* Bit definitions for the TMR_CTRL register */ -#define ALM1P (1<<31) /* Alarm1 output polarity */ -#define ALM2P (1<<30) /* Alarm2 output polarity */ -#define FIPERST (1<<28) /* FIPER start indication */ -#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ -#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ -#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ -#define TCLK_PERIOD_MASK (0x3ff) -#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ -#define FRD (1<<14) /* FIPER Realignment Disable */ -#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ -#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ -#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ -#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ -#define COPH (1<<7) /* Generated clock output phase. */ -#define CIPH (1<<6) /* External oscillator input clock phase */ -#define TMSR (1<<5) /* Timer soft reset. */ -#define BYP (1<<3) /* Bypass drift compensated clock */ -#define TE (1<<2) /* 1588 timer enable. 
*/ -#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ -#define CKSEL_MASK (0x3) - -/* Bit definitions for the TMR_TEVENT register */ -#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ -#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ -#define ALM2 (1<<17) /* Current time = alarm time register 2 */ -#define ALM1 (1<<16) /* Current time = alarm time register 1 */ -#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ -#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ -#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ - -/* Bit definitions for the TMR_TEMASK register */ -#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ -#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ -#define ALM2EN (1<<17) /* Timer ALM2 event enable */ -#define ALM1EN (1<<16) /* Timer ALM1 event enable */ -#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ -#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ - -/* Bit definitions for the TMR_PEVENT register */ -#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ -#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ -#define RXP (1<<0) /* PTP frame has been received */ - -/* Bit definitions for the TMR_PEMASK register */ -#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ -#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ -#define RXPEN (1<<0) /* Receive PTP packet event enable */ - -/* Bit definitions for the TMR_STAT register */ -#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ -#define STAT_VEC_MASK (0x3f) - -/* Bit definitions for the TMR_PRSC register */ -#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ -#define PRSC_OCK_MASK (0xffff) - - -#define DRIVER "gianfar_ptp" -#define DEFAULT_CKSEL 1 -#define N_EXT_TS 2 -#define REG_SIZE sizeof(struct gianfar_ptp_registers) - -struct etsects { - struct gianfar_ptp_registers __iomem *regs; - spinlock_t lock; /* protects regs */ - struct ptp_clock *clock; - struct ptp_clock_info caps; - struct resource *rsrc; - int irq; - u64 alarm_interval; /* for periodic alarm */ - u64 alarm_value; - u32 tclk_period; /* nanoseconds */ - u32 tmr_prsc; - u32 tmr_add; - u32 cksel; - u32 tmr_fiper1; - u32 tmr_fiper2; -}; - -/* - * Register access functions - */ - -/* Caller must hold etsects->lock. */ -static u64 tmr_cnt_read(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - lo = gfar_read(&etsects->regs->tmr_cnt_l); - hi = gfar_read(&etsects->regs->tmr_cnt_h); - ns = ((u64) hi) << 32; - ns |= lo; - return ns; -} - -/* Caller must hold etsects->lock. */ -static void tmr_cnt_write(struct etsects *etsects, u64 ns) -{ - u32 hi = ns >> 32; - u32 lo = ns & 0xffffffff; - - gfar_write(&etsects->regs->tmr_cnt_l, lo); - gfar_write(&etsects->regs->tmr_cnt_h, hi); -} - -/* Caller must hold etsects->lock. */ -static void set_alarm(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - ns = tmr_cnt_read(etsects) + 1500000000ULL; - ns = div_u64(ns, 1000000000UL) * 1000000000ULL; - ns -= etsects->tclk_period; - hi = ns >> 32; - lo = ns & 0xffffffff; - gfar_write(&etsects->regs->tmr_alarm1_l, lo); - gfar_write(&etsects->regs->tmr_alarm1_h, hi); -} - -/* Caller must hold etsects->lock. 
*/ -static void set_fipers(struct etsects *etsects) -{ - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); -} - -/* - * Interrupt service routine - */ - -static irqreturn_t isr(int irq, void *priv) -{ - struct etsects *etsects = priv; - struct ptp_clock_event event; - u64 ns; - u32 ack = 0, lo, hi, mask, val; - - val = gfar_read(&etsects->regs->tmr_tevent); - - if (val & ETS1) { - ack |= ETS1; - hi = gfar_read(&etsects->regs->tmr_etts1_h); - lo = gfar_read(&etsects->regs->tmr_etts1_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ETS2) { - ack |= ETS2; - hi = gfar_read(&etsects->regs->tmr_etts2_h); - lo = gfar_read(&etsects->regs->tmr_etts2_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ALM2) { - ack |= ALM2; - if (etsects->alarm_value) { - event.type = PTP_CLOCK_ALARM; - event.index = 0; - event.timestamp = etsects->alarm_value; - ptp_clock_event(etsects->clock, &event); - } - if (etsects->alarm_interval) { - ns = etsects->alarm_value + etsects->alarm_interval; - hi = ns >> 32; - lo = ns & 0xffffffff; - spin_lock(&etsects->lock); - gfar_write(&etsects->regs->tmr_alarm2_l, lo); - gfar_write(&etsects->regs->tmr_alarm2_h, hi); - spin_unlock(&etsects->lock); - etsects->alarm_value = ns; - } else { - gfar_write(&etsects->regs->tmr_tevent, ALM2); - spin_lock(&etsects->lock); - mask = gfar_read(&etsects->regs->tmr_temask); - mask &= ~ALM2EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock(&etsects->lock); - etsects->alarm_value = 0; - etsects->alarm_interval = 0; - } - } - - if (val & PP1) { - ack |= PP1; - event.type = PTP_CLOCK_PPS; - ptp_clock_event(etsects->clock, &event); - } - - if (ack) { - gfar_write(&etsects->regs->tmr_tevent, ack); - return IRQ_HANDLED; - } else - return IRQ_NONE; -} - -/* - * PTP clock operations - */ - -static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) -{ - u64 adj, diff; - u32 tmr_add; - int neg_adj = 0; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - if (scaled_ppm < 0) { - neg_adj = 1; - scaled_ppm = -scaled_ppm; - } - tmr_add = etsects->tmr_add; - adj = tmr_add; - - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer - */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); - - tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - - gfar_write(&etsects->regs->tmr_add, tmr_add); - - return 0; -} - -static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - s64 now; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - now = tmr_cnt_read(etsects); - now += delta; - tmr_cnt_write(etsects, now); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - ns = tmr_cnt_read(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -static int ptp_gianfar_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - ns = timespec64_to_ns(ts); - - spin_lock_irqsave(&etsects->lock, flags); - - tmr_cnt_write(etsects, ns); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - struct etsects *etsects = container_of(ptp, struct etsects, caps); - unsigned long flags; - u32 bit, mask; - - switch (rq->type) { - case PTP_CLK_REQ_EXTTS: - switch (rq->extts.index) { - case 0: - bit = ETS1EN; - break; - case 1: - bit = ETS2EN; - break; - default: - return -EINVAL; - } - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= bit; - else - mask &= ~bit; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - case PTP_CLK_REQ_PPS: - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= PP1EN; - else - mask &= ~PP1EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - default: - break; - } - - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info ptp_gianfar_caps = { - .owner = THIS_MODULE, - .name = "gianfar clock", - .max_adj = 512000, - .n_alarm = 0, - .n_ext_ts = N_EXT_TS, - .n_per_out = 0, - .n_pins = 0, - .pps = 1, - .adjfine = ptp_gianfar_adjfine, - .adjtime = ptp_gianfar_adjtime, - .gettime64 = ptp_gianfar_gettime, - .settime64 = ptp_gianfar_settime, - .enable = ptp_gianfar_enable, -}; - -static int gianfar_ptp_probe(struct platform_device *dev) -{ - struct device_node *node = dev->dev.of_node; - struct etsects *etsects; - struct timespec64 now; - int err = -ENOMEM; - u32 tmr_ctrl; - unsigned long flags; - - etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); - if (!etsects) - goto no_memory; - - err = -ENODEV; - - etsects->caps = ptp_gianfar_caps; - - if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel)) - etsects->cksel = DEFAULT_CKSEL; - - if (of_property_read_u32(node, - "fsl,tclk-period", &etsects->tclk_period) || - of_property_read_u32(node, - "fsl,tmr-prsc", &etsects->tmr_prsc) || - of_property_read_u32(node, - "fsl,tmr-add", &etsects->tmr_add) || - of_property_read_u32(node, - "fsl,tmr-fiper1", &etsects->tmr_fiper1) || - of_property_read_u32(node, - "fsl,tmr-fiper2", &etsects->tmr_fiper2) || - of_property_read_u32(node, - "fsl,max-adj", &etsects->caps.max_adj)) { - 
pr_err("device tree node missing required elements\n"); - goto no_node; - } - - etsects->irq = platform_get_irq(dev, 0); - - if (etsects->irq < 0) { - pr_err("irq not in device tree\n"); - goto no_node; - } - if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { - pr_err("request_irq failed\n"); - goto no_node; - } - - etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!etsects->rsrc) { - pr_err("no resource\n"); - goto no_resource; - } - if (request_resource(&iomem_resource, etsects->rsrc)) { - pr_err("resource busy\n"); - goto no_resource; - } - - spin_lock_init(&etsects->lock); - - etsects->regs = ioremap(etsects->rsrc->start, - resource_size(etsects->rsrc)); - if (!etsects->regs) { - pr_err("ioremap ptp registers failed\n"); - goto no_ioremap; - } - getnstimeofday64(&now); - ptp_gianfar_settime(&etsects->caps, &now); - - tmr_ctrl = - (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | - (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; - - spin_lock_irqsave(&etsects->lock, flags); - - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); - gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); - gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); - - spin_unlock_irqrestore(&etsects->lock, flags); - - etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); - if (IS_ERR(etsects->clock)) { - err = PTR_ERR(etsects->clock); - goto no_clock; - } - gfar_phc_index = ptp_clock_index(etsects->clock); - - platform_set_drvdata(dev, etsects); - - return 0; - -no_clock: - iounmap(etsects->regs); -no_ioremap: - release_resource(etsects->rsrc); -no_resource: - free_irq(etsects->irq, etsects); -no_node: - kfree(etsects); -no_memory: - return err; -} - -static int gianfar_ptp_remove(struct platform_device *dev) -{ - struct etsects *etsects = platform_get_drvdata(dev); - - gfar_write(&etsects->regs->tmr_temask, 0); - gfar_write(&etsects->regs->tmr_ctrl, 0); - - gfar_phc_index = -1; - ptp_clock_unregister(etsects->clock); - iounmap(etsects->regs); - release_resource(etsects->rsrc); - free_irq(etsects->irq, etsects); - kfree(etsects); - - return 0; -} - -static const struct of_device_id match_table[] = { - { .compatible = "fsl,etsec-ptp" }, - {}, -}; -MODULE_DEVICE_TABLE(of, match_table); - -static struct platform_driver gianfar_ptp_driver = { - .driver = { - .name = "gianfar_ptp", - .of_match_table = match_table, - }, - .probe = gianfar_ptp_probe, - .remove = gianfar_ptp_remove, -}; - -module_platform_driver(gianfar_ptp_driver); - -MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); -MODULE_DESCRIPTION("PTP clock using the eTSEC"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index e0bc79ea3d88..85e1d14514fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1648,6 +1648,15 @@ int hns_dsaf_rm_mac_addr( mac_entry->addr); } +static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev, + u8 port_num, u8 *mask, u8 *addr) +{ + if (MAC_IS_BROADCAST(addr)) + memset(mask, 0xff, ETH_ALEN); + else + memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN); +} + static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) { u16 *a = (u16 *)dst; @@ -1676,7 +1685,6 @@ int 
hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key tmp_mac_key; struct dsaf_tbl_tcam_data tcam_data; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; int mskid; /*chechk mac addr */ @@ -1687,9 +1695,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, } ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ @@ -1844,7 +1855,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; if (!(void *)mac_entry) { dev_err(dsaf_dev->dev, @@ -1861,14 +1871,17 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* always mask vlan_id field */ ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ - hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr); + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); mask_key.high.val = le32_to_cpu(mask_key.high.val); mask_key.low.val = le32_to_cpu(mask_key.low.val); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 519e2bd6aa60..be9dc08ccf67 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 63d7dbfb90bf..9d79dad2c6aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } +static void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) +{ + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_UNIC: + hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_ROCE: + hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + break; + default: + break; + } +} + +static int hnae3_get_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + int inited = 0; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_KNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_UNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_UNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_ROCE: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); + 
break; + default: + break; + } + + return inited; +} + static int hnae3_match_n_instantiate(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, bool is_reg) { @@ -56,14 +99,14 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, return ret; } - hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1); + hnae3_set_client_init_flag(client, ae_dev, 1); return 0; } - if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) { + if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0); + hnae3_set_client_init_flag(client, ae_dev, 0); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 45c571eea2ae..8acb1d116a02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -54,7 +54,9 @@ #define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_DCB_B 0x2 -#define HNAE3_CLIENT_INITED_B 0x3 +#define HNAE3_KNIC_CLIENT_INITED_B 0x3 +#define HNAE3_UNIC_CLIENT_INITED_B 0x4 +#define HNAE3_ROCE_CLIENT_INITED_B 0x5 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -314,7 +316,8 @@ struct hnae3_ae_ops { int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); - void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, @@ -350,6 +353,7 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); + int (*update_mta_status)(struct hnae3_handle *handle); void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index cac51954f2cf..f2b31d278bc9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -25,6 +25,9 @@ #include "hnae3.h" #include "hns3_enet.h" +static void hns3_clear_all_ring(struct hnae3_handle *h); +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); + static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; static const char hns3_driver_string[] = @@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev) int i, j; int ret; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + /* get irq resource for all vectors */ ret = hns3_nic_init_irq(priv); if (ret) { @@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev) if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) ops->stop(priv->ae_handle); - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - /* free irq resources */ hns3_nic_uninit_irq(priv); + + hns3_clear_all_ring(priv->ae_handle); } static int hns3_nic_net_stop(struct net_device *netdev) @@ -406,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) if 
(h->ae_algo->ops->set_promisc_mode) { if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, 1); + h->ae_algo->ops->set_promisc_mode(h, true, true); + else if (netdev->flags & IFF_ALLMULTI) + h->ae_algo->ops->set_promisc_mode(h, false, true); else - h->ae_algo->ops->set_promisc_mode(h, 0); + h->ae_algo->ops->set_promisc_mode(h, false, false); } if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) + if (netdev->flags & IFF_MULTICAST) { if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) netdev_err(netdev, "sync mc address fail\n"); + + if (h->ae_algo->ops->update_mta_status) + h->ae_algo->ops->update_mta_status(h); + } } static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, @@ -644,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, } } +/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL + * and it is udp packet, which has a dest port as the IANA assigned. + * the hardware is expected to do the checksum offload, but the + * hardware will not do the checksum offload when udp dest port is + * 4789. + */ +static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +{ +#define IANA_VXLAN_PORT 4789 + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + + l4.hdr = skb_transport_header(skb); + + if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) + return false; + + skb_checksum_help(skb); + + return true; +} + static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) @@ -732,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L4T_TCP); break; case IPPROTO_UDP: + if (hns3_tunnel_csum_bug(skb)) + break; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, @@ -1121,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) return -EADDRNOTAVAIL; + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + mac_addr->sa_data); + return 0; + } + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); @@ -1819,6 +1869,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) @@ -1826,6 +1877,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc_cb[i].reuse_flag = 0; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, @@ -2066,6 +2118,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } +static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info) +{ + struct pci_dev *pdev = ring->tqp->handle->pdev; + u16 vlan_tag; + + if (pdev->revision == 0x20) { + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = 
le16_to_cpu(desc->rx.vlan_tag); + + return vlan_tag; + } + +#define HNS3_STRP_OUTER_VLAN 0x1 +#define HNS3_STRP_INNER_VLAN 0x2 + + switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { + case HNS3_STRP_OUTER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + break; + case HNS3_STRP_INNER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + break; + default: + vlan_tag = 0; + break; + } + + return vlan_tag; +} + static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb, int *out_bnum) { @@ -2085,9 +2170,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetch(desc); - length = le16_to_cpu(desc->rx.pkt_len); + length = le16_to_cpu(desc->rx.size); bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); /* Check valid BD */ if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) @@ -2121,22 +2205,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetchw(skb->data); - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. - */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - bnum = 1; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -2173,6 +2241,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, *out_bnum = bnum; + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. 
+ */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); @@ -2905,8 +2989,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) goto out_when_alloc_ring_memory; } - hns3_init_ring_hw(priv->ring_data[i].ring); - u64_stats_init(&priv->ring_data[i].ring->syncp); } @@ -2958,6 +3040,15 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } +static void hns3_uninit_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); +} + static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -3068,6 +3159,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_force_clear_all_rx_ring(handle); + ret = hns3_nic_uninit_vector_data(priv); if (ret) netdev_err(netdev, "uninit vector error\n"); @@ -3084,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + free_netdev(netdev); } @@ -3183,23 +3278,51 @@ static void hns3_recover_hw_addr(struct net_device *ndev) static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { - if (!HNAE3_IS_TX_RING(ring)) - return; - while (ring->next_to_clean != ring->next_to_use) { + ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); } } -static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) +static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) { - if (HNAE3_IS_TX_RING(ring)) - return; + struct hns3_desc_cb res_cbs; + int ret; while (ring->next_to_use != ring->next_to_clean) { /* When a buffer is not reused, it's memory has been * freed in hns3_handle_rx_bd or will be freed by + * stack, so we need to replace the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + /* if alloc new buffer fail, exit directly + * and reclear in up flow. + */ + netdev_warn(ring->tqp->handle->kinfo.netdev, + "reserve buffer map failed, ret = %d\n", + ret); + return ret; + } + hns3_replace_buffer(ring, ring->next_to_use, + &res_cbs); + } + ring_ptr_move_fw(ring, next_to_use); + } + + return 0; +} + +static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) +{ + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, it's memory has been + * freed in hns3_handle_rx_bd or will be freed by * stack, so only need to unmap the buffer here. 
*/ if (!ring->desc_cb[ring->next_to_use].reuse_flag) { @@ -3212,6 +3335,19 @@ static void hns3_clear_rx_ring(struct hns3_enet_ring *ring) } } +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *ring; + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_force_clear_rx_ring(ring); + } +} + static void hns3_clear_all_ring(struct hnae3_handle *h) { struct net_device *ndev = h->kinfo.netdev; @@ -3229,10 +3365,51 @@ static void hns3_clear_all_ring(struct hnae3_handle *h) netdev_tx_reset_queue(dev_queue); ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + /* Continue to clear other rings even if clearing some + * rings failed. + */ hns3_clear_rx_ring(ring); } } +int hns3_nic_reset_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *rx_ring; + int i, j; + int ret; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->ae_algo->ops->reset_queue(h, i); + hns3_init_ring_hw(priv->ring_data[i].ring); + + /* We need to clear tx ring here because self test will + * use the ring and will not run down before up + */ + hns3_clear_tx_ring(priv->ring_data[i].ring); + priv->ring_data[i].ring->next_to_clean = 0; + priv->ring_data[i].ring->next_to_use = 0; + + rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_init_ring_hw(rx_ring); + ret = hns3_clear_rx_ring(rx_ring); + if (ret) + return ret; + + /* We can not know the hardware head and tail when this + * function is called in reset flow, so we reuse all desc. + */ + for (j = 0; j < rx_ring->desc_num; j++) + hns3_reuse_buffer(rx_ring, j); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + } + + return 0; +} + static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -3302,7 +3479,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_clear_all_ring(handle); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); if (ret) { @@ -3318,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + return ret; } @@ -3438,8 +3617,6 @@ int hns3_set_channels(struct net_device *netdev, if (if_running) hns3_nic_net_stop(netdev); - hns3_clear_all_ring(h); - ret = hns3_nic_uninit_vector_data(priv); if (ret) { dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5b40f5a53761..3b083d5ae9ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -104,6 +104,9 @@ enum hns3_nic_state { #define HNS3_RXD_L4ID_S 8 #define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) #define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_STRP_TAGP_S 13 +#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) + #define HNS3_RXD_L2E_B 16 #define HNS3_RXD_L3E_B 17 #define HNS3_RXD_L4E_B 18 @@ -622,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev, bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t 
hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c16bb6cb0564..40c0425b4023 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -95,7 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - h->ae_algo->ops->set_promisc_mode(h, en); + h->ae_algo->ops->set_promisc_mode(h, en, en); return ret; } @@ -108,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) if (!h->ae_algo->ops->start) return -EOPNOTSUPP; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + ret = h->ae_algo->ops->start(h); if (ret) { netdev_err(ndev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ee3cbac6dfaa..d9aaa76c76eb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -115,7 +115,6 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314, /* MACSEC command */ /* PFC/Pause CMD*/ @@ -484,6 +483,8 @@ struct hclge_promisc_param { u8 enable; }; +#define HCLGE_PROMISC_TX_EN_B BIT(4) +#define HCLGE_PROMISC_RX_EN_B BIT(5) #define HCLGE_PROMISC_EN_B 1 #define HCLGE_PROMISC_EN_ALL 0x7 #define HCLGE_PROMISC_EN_UC 0x1 @@ -704,11 +705,14 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; -#define HCLGE_ACCEPT_TAG_B 0 -#define HCLGE_ACCEPT_UNTAG_B 1 +#define HCLGE_ACCEPT_TAG1_B 0 +#define HCLGE_ACCEPT_UNTAG1_B 1 #define HCLGE_PORT_INS_TAG1_EN_B 2 #define HCLGE_PORT_INS_TAG2_EN_B 3 #define HCLGE_CFG_NIC_ROCE_SEL_B 4 +#define HCLGE_ACCEPT_TAG2_B 5 +#define HCLGE_ACCEPT_UNTAG2_B 6 + struct hclge_vport_vtag_tx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; @@ -813,21 +817,13 @@ struct hclge_reset_cmd { #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 -#define HCLGE_LED_PORT_SPEED_STATE_S 0 -#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0) -#define HCLGE_LED_ACTIVITY_STATE_S 0 -#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0) -#define HCLGE_LED_LINK_STATE_S 0 -#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0) #define HCLGE_LED_LOCATE_STATE_S 0 #define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) struct hclge_set_led_state_cmd { - u8 port_speed_led_config; - u8 link_led_config; - u8 activity_led_config; + u8 rsv1[3]; u8 locate_led_config; - u8 rsv[20]; + u8 rsv2[20]; }; int hclge_cmd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2f0bbb6708b9..2a801344eafb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); -static int hclge_update_led_status(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -504,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) return 0; } -static int 
hclge_mac_get_traffic_stats(struct hclge_dev *hdev) -{ - struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats; - struct hclge_desc desc; - __le64 *desc_data; - int ret; - - /* for fiber port, need to query the total rx/tx packets statstics, - * used for data transferring checking. - */ - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) - return 0; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get MAC total pkt stats fail, ret = %d\n", ret); - - return ret; - } - - desc_data = (__le64 *)(&desc.data[0]); - mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++); - mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data); - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -2321,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev) struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct hclge_vport *vport; int mtu; int ret; + int i; ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); if (ret) { @@ -2334,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; /* Initialize the MTA table work mode */ - hdev->accept_mta_mc = true; hdev->enable_mta = true; hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; @@ -2347,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } - ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->accept_mta_mc = false; + + memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); + ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", ret); + return ret; + } } ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); @@ -2613,16 +2587,18 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * mbx messages reported by this interrupt. 
*/ hclge_mbx_task_schedule(hdev); - + break; default: - dev_dbg(&hdev->pdev->dev, - "received unknown or unhandled event of vector0\n"); + dev_warn(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); break; } - /* we should clear the source of interrupt */ - hclge_clear_event_cause(hdev, event_cause, clearval); - hclge_enable_vector(&hdev->misc_vector, true); + /* clear the source of interrupt if it is not cause by reset */ + if (event_cause != HCLGE_VECTOR0_EVENT_RST) { + hclge_clear_event_cause(hdev, event_cause, clearval); + hclge_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -2810,6 +2786,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, return rst_level; } +static void hclge_clear_reset_cause(struct hclge_dev *hdev) +{ + u32 clearval = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + break; + case HNAE3_GLOBAL_RESET: + clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + break; + case HNAE3_CORE_RESET: + clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + break; + default: + dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", + hdev->reset_type); + break; + } + + if (!clearval) + return; + + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); + hclge_enable_vector(&hdev->misc_vector, true); +} + static void hclge_reset(struct hclge_dev *hdev) { /* perform reset of the stack & ae device for a client */ @@ -2822,6 +2825,8 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); rtnl_unlock(); + + hclge_clear_reset_cause(hdev); } else { /* schedule again to check pending resets later */ set_bit(hdev->reset_type, &hdev->reset_pending); @@ -2916,20 +2921,13 @@ static void hclge_service_task(struct work_struct *work) struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); - /* The total rx/tx packets statstics are wanted to be updated - * per second. Both hclge_update_stats_for_all() and - * hclge_mac_get_traffic_stats() can do it. - */ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { hclge_update_stats_for_all(hdev); hdev->hw_stats.stats_timer = 0; - } else { - hclge_mac_get_traffic_stats(hdev); } hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); - hclge_update_led_status(hdev); hclge_service_complete(hdev); } @@ -3586,7 +3584,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, req = (struct hclge_promisc_cfg_cmd *)desc.data; req->vf_id = param->vf_id; - req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on + * pdev revision(0x20), new revision support them. The + * value of this two fields will not return error when driver + * send command to fireware in revision(0x20). 
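The assignment that follows ORs HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B into req->flag unconditionally; revision 0x20 firmware simply ignores those bits. As a minimal sketch of the resulting flag layout, assuming the bit definitions from the hclge_cmd.h hunk above and that the unshown HCLGE_PROMISC_EN_MC/HCLGE_PROMISC_EN_BC values are 0x2/0x4 (implied by HCLGE_PROMISC_EN_ALL == 0x7); build_promisc_flag() is a hypothetical helper, not driver code:

/* Illustrative only: how the promisc flag byte is assembled. */
static u8 build_promisc_flag(bool en_uc, bool en_mc, bool en_bc)
{
	u8 enable = 0;

	if (en_uc)
		enable |= HCLGE_PROMISC_EN_UC;	/* 0x1 */
	if (en_mc)
		enable |= 0x2;			/* assumed HCLGE_PROMISC_EN_MC */
	if (en_bc)
		enable |= 0x4;			/* assumed HCLGE_PROMISC_EN_BC */

	/* bits 1-3: uc/mc/bc promisc enable; bits 4-5: TX/RX promisc
	 * enable, ignored by revision 0x20 firmware.
	 */
	return (enable << HCLGE_PROMISC_EN_B) |
	       HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
}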
+ */ + req->flag = (param->enable << HCLGE_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3613,13 +3618,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } -static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, + vport->vport_id); hclge_cmd_set_promisc_mode(hdev, ¶m); } @@ -3761,9 +3768,6 @@ static int hclge_ae_start(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - return 0; - ret = hclge_mac_start_phy(hdev); if (ret) return ret; @@ -3779,9 +3783,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle) del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_mac_stop_phy(hdev); return; + } for (i = 0; i < vport->alloc_tqps; i++) hclge_tqp_enable(hdev, i, 0, false); @@ -4005,9 +4012,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, return ret; } + if (enable) + set_bit(idx, vport->mta_shadow); + else + clear_bit(idx, vport->mta_shadow); + return 0; } +static int hclge_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct net_device *netdev = handle->kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; + + memset(mta_status, 0, sizeof(mta_status)); + + /* update mta_status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); + set_bit(tbl_idx, mta_status); + } + + return hclge_update_mta_status_common(vport, mta_status, + 0, HCLGE_MTA_TBL_SIZE, true); +} + +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter) +{ + struct hclge_dev *hdev = vport->back; + u16 update_max = idx + count; + u16 check_max; + int ret = 0; + bool used; + u16 i; + + /* setup mta check range */ + if (update_filter) { + i = 0; + check_max = HCLGE_MTA_TBL_SIZE; + } else { + i = idx; + check_max = update_max; + } + + used = false; + /* check and update all mta item */ + for (; i < check_max; i++) { + /* ignore unused item */ + if (!test_bit(i, vport->mta_shadow)) + continue; + + /* if i in update range then update it */ + if (i >= idx && i < update_max) + if (!test_bit(i - idx, status)) + hclge_set_mta_table_item(vport, i, false); + + if (!used && test_bit(i, vport->mta_shadow)) + used = true; + } + + /* no longer use mta, disable it */ + if (vport->accept_mta_mc && update_filter && !used) { + ret = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + false); + if (ret) + dev_err(&hdev->pdev->dev, + "disable func mta filter fail ret=%d\n", + ret); + else + vport->accept_mta_mc = false; + } + + return ret; +} + static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4275,9 +4361,25 @@ int 
hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, true); + /* If mc mac vlan table is full, use MTA table */ + if (status == -ENOSPC) { + if (!vport->accept_mta_mc) { + status = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + true); + if (status) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", + status); + return status; + } + vport->accept_mta_mc = true; + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + } return status; } @@ -4297,7 +4399,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; struct hclge_desc desc[3]; - u16 tbl_idx; /* mac addr check */ if (!is_multicast_ether_addr(addr)) { @@ -4326,17 +4427,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } else { - /* This mac addr do not exist, can't delete it */ - dev_err(&hdev->pdev->dev, - "Rm multicast mac addr failed, ret = %d.\n", - status); - return -EIO; + /* Maybe this mac address is in mta table, but it cannot be + * deleted here because an entry of mta represents an address + * range rather than a specific address. the delete action to + * all entries will take effect in update_mta_status called by + * hns3_nic_set_rx_mode. + */ + status = 0; } - /* Set MTB table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, false); - return status; } @@ -4558,9 +4657,16 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, } if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 if (!req0->resp_code || req0->resp_code == 1) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + dev_err(&hdev->pdev->dev, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4687,10 +4793,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, - vcfg->accept_tag ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, - vcfg->accept_untag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, vcfg->insert_tag1_en ? 
1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, @@ -4814,8 +4924,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag = true; - vport->txvlan_cfg.accept_untag = true; + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them. The + * value of this two fields will not return error when driver + * send command to fireware in revision(0x20). + * This two fields can not configured by user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag1_en = false; vport->txvlan_cfg.insert_tag2_en = false; vport->txvlan_cfg.default_tag1 = 0; @@ -5670,9 +5790,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - /* Enable MISC vector(vector0) */ - hclge_enable_vector(&hdev->misc_vector, true); - dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -6010,9 +6127,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, "Get 64 bit register failed, ret = %d.\n", ret); } -static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, - u8 act_led_status, u8 link_led_status, - u8 locate_led_status) +static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) { struct hclge_set_led_state_cmd *req; struct hclge_desc desc; @@ -6021,12 +6136,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M, - HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status); - hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M, - HCLGE_LED_ACTIVITY_STATE_S, act_led_status); - hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M, - HCLGE_LED_LINK_STATE_S, link_led_status); hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, HCLGE_LED_LOCATE_STATE_S, locate_led_status); @@ -6047,105 +6156,17 @@ enum hclge_led_status { static int hclge_set_led_id(struct hnae3_handle *handle, enum ethtool_phys_id_state status) { -#define BLINK_FREQUENCY 2 struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - int ret = 0; - - if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return -EOPNOTSUPP; switch (status) { case ETHTOOL_ID_ACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_ON); - break; + return hclge_set_led_status(hdev, HCLGE_LED_ON); case ETHTOOL_ID_INACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_OFF); - break; + return hclge_set_led_status(hdev, HCLGE_LED_OFF); default: - ret = -EINVAL; - break; - } - - return ret; -} - -enum hclge_led_port_speed { - HCLGE_SPEED_LED_FOR_1G, - HCLGE_SPEED_LED_FOR_10G, - HCLGE_SPEED_LED_FOR_25G, - HCLGE_SPEED_LED_FOR_40G, - HCLGE_SPEED_LED_FOR_50G, - HCLGE_SPEED_LED_FOR_100G, -}; - -static u8 hclge_led_get_speed_status(u32 speed) -{ - u8 speed_led; - - switch (speed) { - case HCLGE_MAC_SPEED_1G: - speed_led = HCLGE_SPEED_LED_FOR_1G; - break; - case 
HCLGE_MAC_SPEED_10G: - speed_led = HCLGE_SPEED_LED_FOR_10G; - break; - case HCLGE_MAC_SPEED_25G: - speed_led = HCLGE_SPEED_LED_FOR_25G; - break; - case HCLGE_MAC_SPEED_40G: - speed_led = HCLGE_SPEED_LED_FOR_40G; - break; - case HCLGE_MAC_SPEED_50G: - speed_led = HCLGE_SPEED_LED_FOR_50G; - break; - case HCLGE_MAC_SPEED_100G: - speed_led = HCLGE_SPEED_LED_FOR_100G; - break; - default: - speed_led = HCLGE_LED_NO_CHANGE; + return -EINVAL; } - - return speed_led; -} - -static int hclge_update_led_status(struct hclge_dev *hdev) -{ - u8 port_speed_status, link_status, activity_status; - u64 rx_pkts, tx_pkts; - - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed); - - rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num; - tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num; - if (rx_pkts != hdev->rx_pkts_for_led || - tx_pkts != hdev->tx_pkts_for_led) - activity_status = HCLGE_LED_ON; - else - activity_status = HCLGE_LED_OFF; - hdev->rx_pkts_for_led = rx_pkts; - hdev->tx_pkts_for_led = tx_pkts; - - if (hdev->hw.mac.link) - link_status = HCLGE_LED_ON; - else - link_status = HCLGE_LED_OFF; - - return hclge_set_led_status_sfp(hdev, port_speed_status, - activity_status, link_status, - HCLGE_LED_NO_CHANGE); } static void hclge_get_link_mode(struct hnae3_handle *handle, @@ -6215,6 +6236,7 @@ static const struct hnae3_ae_ops hclge_ops = { .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, + .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 93177d91eea4..7488534528cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -61,6 +61,8 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 +#define HCLGE_MTA_TBL_SIZE 4096 + #define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_PHY_PAGE_MDIX 0 @@ -559,19 +561,18 @@ struct hclge_dev { enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ - bool accept_mta_mc; /* Whether accept mta filter multicast */ struct hclge_vlan_type_cfg vlan_type_cfg; - u64 rx_pkts_for_led; - u64 tx_pkts_for_led; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; }; /* VPort level vlan tag configuration for TX direction */ struct hclge_tx_vtag_cfg { - bool accept_tag; /* Whether accept tagged packet from host */ - bool accept_untag; /* Whether accept untagged packet from host */ + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; bool insert_tag1_en; /* Whether insert inner vlan tag */ bool insert_tag2_en; /* Whether insert outer vlan tag */ u16 default_tag1; /* The default inner vlan tag to insert */ @@ -620,6 +621,9 @@ struct hclge_vport { struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; + + bool accept_mta_mc; /* whether to accept mta filter multicast */ + unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -637,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, int 
hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable); +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter); + struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index b6ae26ba0a46..7541cb9b71ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en = req->msg[1] ? true : false; + bool en_uc = req->msg[1] ? true : false; + bool en_mc = req->msg[2] ? true : false; struct hclge_promisc_param param; /* always enable broadcast promisc bit */ - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc, en_mc, true, vport->vport_id); return hclge_cmd_set_promisc_mode(vport->back, ¶m); } @@ -230,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } +static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, + u8 *msg, u8 idx, bool is_end) +{ +#define HCLGE_MTA_STATUS_MSG_SIZE 13 +#define HCLGE_MTA_STATUS_MSG_BITS \ + (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGE_MTA_STATUS_MSG_END_BITS \ + (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) + unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_ofs; + u8 msg_bit; + + tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : + HCLGE_MTA_STATUS_MSG_BITS; + + /* set msg field */ + msg_ofs = 0; + msg_bit = 0; + memset(status, 0, sizeof(status)); + for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { + if (msg[msg_ofs] & BIT(msg_bit)) + set_bit(tbl_idx, status); + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + return hclge_update_mta_status_common(vport, + status, idx * HCLGE_MTA_STATUS_MSG_BITS, + tbl_cnt, is_end); +} + static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); struct hclge_dev *hdev = vport->back; + u8 resp_len = 0; + u8 resp_data; int status; if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { @@ -247,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, bool enable = mbx_req->msg[2]; status = hclge_cfg_func_mta_filter(hdev, func_id, enable); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { + resp_data = hdev->mta_mac_sel_type; + resp_len = sizeof(u8); + gen_resp = true; + status = 0; + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { + /* mta status update msg format + * msg[2.6 : 2.0] msg index + * msg[2.7] msg is end + * msg[15 : 3] mta status bits[103 : 0] + */ + bool is_end = (mbx_req->msg[2] & 0x80) ? 
true : false; + + status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], + mbx_req->msg[2] & 0x7F, + is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -255,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, } if (gen_resp) - hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + hclge_gen_resp_to_vf(vport, mbx_req, status, + &resp_data, resp_len); return 0; } @@ -382,6 +439,13 @@ static void hclge_reset_vf(struct hclge_vport *vport, hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); } +static bool hclge_cmd_crq_empty(struct hclge_hw *hw) +{ + u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); + + return tail == hw->cmq.crq.next_to_use; +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -390,12 +454,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_desc *desc; int ret, flag; - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); /* handle all the mailbox requests in the queue */ - while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { + while (!hclge_cmd_crq_empty(&hdev->hw)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %d\n", + req->msg[0]); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + vport = &hdev->vport[req->mbx_src_vfid]; switch (req->msg[0]) { @@ -470,7 +545,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev) } crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2b0e3295989f..bc8a5760d959 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) return 0; } -static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, + bool en_uc_pmc, bool en_mc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; @@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en; + req->msg[1] = en_uc_pmc ? 1 : 0; + req->msg[2] = en_mc_pmc ? 
1 : 0; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) return status; } -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, + bool en_uc_pmc, bool en_mc_pmc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - hclgevf_cmd_set_promisc_mode(hdev, en); + hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, @@ -725,15 +728,124 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) } } -static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) +static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) { + u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, + NULL, 0, true, &resp_msg, sizeof(u8)); + + if (ret) { + dev_err(&hdev->pdev->dev, + "Read mta type fail, ret=%d.\n", ret); + return ret; + } + + if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { + dev_err(&hdev->pdev->dev, + "Read mta type invalid, resp=%d.\n", resp_msg); + return -EINVAL; + } + + hdev->mta_mac_sel_type = resp_msg; + + return 0; +} + +static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, + const u8 *addr) +{ + u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; + u16 high_val = addr[1] | (addr[0] << 8); + + return (high_val >> rsh) & 0xfff; +} + +static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, + unsigned long *status) +{ +#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 +#define HCLGEVF_MTA_STATUS_MSG_BITS \ + (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ + (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_cnt; + u8 msg_idx; + int ret; + + msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, + HCLGEVF_MTA_STATUS_MSG_BITS); + tbl_idx = 0; + msg_idx = 0; + while (msg_cnt--) { + u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; + u8 *p = &msg[1]; + u8 msg_ofs; + u8 msg_bit; + + memset(msg, 0, sizeof(msg)); + + /* set index field */ + msg[0] = 0x7F & msg_idx; + + /* set end flag field */ + if (msg_cnt == 0) { + msg[0] |= 0x80; + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; + } else { + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; + } + + /* set status field */ + msg_ofs = 0; + msg_bit = 0; + while (tbl_cnt--) { + if (test_bit(tbl_idx, status)) + p[msg_ofs] |= BIT(msg_bit); + + tbl_idx++; + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, + msg, sizeof(msg), false, NULL, 0); + if (ret) + break; + + msg_idx++; + } + + return ret; +} + +static int hclgevf_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg[2] = {0}; + struct net_device *netdev = hdev->nic.kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; - msg[0] = en; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, - msg, 1, false, NULL, 0); + /* clear status */ + memset(mta_status, 0, sizeof(mta_status)); + + /* update status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); + set_bit(tbl_idx, 
mta_status); + } + + return hclgevf_do_update_mta_status(hdev, mta_status); } static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) @@ -1334,6 +1446,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) hclgevf_reset_tqp_stats(handle); del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); hclgevf_update_link_status(hdev, 0); } @@ -1500,10 +1613,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return ret; break; case HNAE3_CLIENT_ROCE: - hdev->roce_client = client; - hdev->roce.client = client; + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + hdev->roce.client = client; + } - if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + if (hdev->roce_client && hdev->nic_client) { ret = hclgevf_init_roce_base_info(hdev); if (ret) return ret; @@ -1663,12 +1778,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* Initialize VF's MTA */ - hdev->accept_mta_mc = true; - ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); + /* Initialize mta type for this VF */ + ret = hclgevf_cfg_func_mta_type(hdev); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to set mta filter mode\n", ret); + "failed(%d) to initialize MTA type\n", ret); goto err_config; } @@ -1823,6 +1937,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, + .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 9763e742e6fb..0656e8e5c5f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -48,6 +48,9 @@ #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) +#define HCLGEVF_MTA_TBL_SIZE 4096 +#define HCLGEVF_MTA_TYPE_SEL_MAX 4 + /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ @@ -152,6 +155,7 @@ struct hclgevf_dev { int *vector_irq; bool accept_mta_mc; /* whether to accept mta filter multicast */ + u8 mta_mac_sel_type; bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d3fef7fefea8..acf1e8b52b8e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3527,15 +3527,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) } break; case e1000_pch_spt: - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { - /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHZ; - incvalue = INCVALUE_24MHZ; - shift = INCVALUE_SHIFT_24MHZ; - adapter->cc.shift = shift; - break; - } - return -EINVAL; + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + break; case e1000_pch_cnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 713995d04783..8ffb7454e67c 100644 --- 
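As a concrete illustration of the MTA hashing in hclgevf_get_mac_addr_to_mta_index() above: the top 16 bits of the multicast MAC are shifted right by (HCLGEVF_MTA_TYPE_SEL_MAX - mta_mac_sel_type) and masked to 12 bits, selecting one bit of the 4096-entry table. A hedged worked example, assuming the default HCLGE_MAC_ADDR_47_36 selector corresponds to sel_type 0 (the enum values are not shown in this patch); mta_index_example() is illustrative only:

static u16 mta_index_example(void)
{
	/* IPv4 mDNS group 224.0.0.251 maps to 01:00:5e:00:00:fb */
	const u8 addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u16 high_val = addr[1] | (addr[0] << 8);	/* 0x0100 */

	/* rsh = HCLGEVF_MTA_TYPE_SEL_MAX - sel_type = 4 - 0 = 4 */
	return (high_val >> 4) & 0xfff;			/* 0x010, i.e. bit 16 */
}

Every address in the netdev multicast list is folded this way into the mta_shadow/status bitmaps that hclge_update_mta_status_common() and hclgevf_do_update_mta_status() then reconcile with the hardware table.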
a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2032,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, #if L1_CACHE_BYTES < 128 prefetch(xdp->data + L1_CACHE_BYTES); #endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags <dev> legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via i40e_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For i40e_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then lets also + * add xdp->data_meta handling here. + */ /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, @@ -2083,19 +2098,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, struct xdp_buff *xdp) { - unsigned int size = xdp->data_end - xdp->data; + unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); + SKB_DATA_ALIGN(I40E_SKB_PAD + + (xdp->data_end - + xdp->data_hard_start)); #endif struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(xdp->data); + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points exactly as xdp->data, otherwise we + * likely have a consumer accessing first few bytes of meta + * data, and then actual data. + */ + prefetch(xdp->data_meta); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data + L1_CACHE_BYTES); + prefetch(xdp->data_meta + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ skb = build_skb(xdp->data_hard_start, truesize); @@ -2103,8 +2124,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); - __skb_put(skb, size); + skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start)); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) @@ -2341,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; - xdp_set_data_meta_invalid(&xdp); + xdp.data_meta = xdp.data; xdp.data_hard_start = xdp.data - i40e_rx_offset(rx_ring); xdp.data_end = xdp.data + size; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 78574c06635b..c33821d2afb3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2058,6 +2058,7 @@ int igb_up(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. */ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); @@ -3865,6 +3866,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) napi_enable(&(adapter->q_vector[i]->napi)); /* Clear any pending interrupts. 
*/ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); @@ -4053,11 +4055,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; - /* disable the queue */ - wr32(E1000_TXDCTL(reg_idx), 0); - wrfl(); - mdelay(10); - wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -4088,8 +4085,16 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, **/ static void igb_configure_tx(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; int i; + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + for (i = 0; i < adapter->num_tx_queues; i++) igb_configure_tx_ring(adapter, adapter->tx_ring[i]); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 55fe8114fe99..50dfb02fa34c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -10,15 +10,9 @@ static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; -/** - * ixgbe_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) +static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos, + char *dbg_buf) { struct ixgbe_adapter *adapter = filp->private_data; char *buf; @@ -29,8 +23,7 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, return 0; buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_reg_ops_buf); + adapter->netdev->name, dbg_buf); if (!buf) return -ENOMEM; @@ -46,6 +39,20 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, } /** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_reg_ops_buf); +} + +/** * ixgbe_dbg_reg_ops_write - write into reg_ops datum * @filp: the opened file * @buffer: where to find the user's data @@ -121,33 +128,11 @@ static char ixgbe_dbg_netdev_ops_buf[256] = ""; * @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - struct ixgbe_adapter *adapter = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_netdev_ops_buf); } /** diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 99b170f1efd1..344a1f213a5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -445,6 +445,89 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, } /** + * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters + * @xs: pointer to transformer state struct + **/ +static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 mfval, manc, reg; + int num_filters = 4; + bool manc_ipv4; + u32 bmcipval; + int i, j; + +#define MANC_EN_IPV4_FILTER BIT(24) +#define MFVAL_IPV4_FILTER_SHIFT 16 +#define MFVAL_IPV6_FILTER_SHIFT 24 +#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4)) + +#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4)) +#define IXGBE_BMCIPVAL 0x5060 +#define BMCIP_V4 0x2 +#define BMCIP_V6 0x3 +#define BMCIP_MASK 0x3 + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); + mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); + bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL); + + if (xs->props.family == AF_INET) { + /* are there any IPv4 filters to check? */ + if (manc_ipv4) { + /* the 4 ipv4 filters are all in MIPAF(3, i) */ + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i))) + continue; + + reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i)); + if (reg == xs->id.daddr.a4) + return 1; + } + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V4) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3)); + if (reg == xs->id.daddr.a4) + return 1; + } + + } else { + /* if there are ipv4 filters, they are in the last ipv6 slot */ + if (manc_ipv4) + num_filters = 3; + + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i))) + continue; + + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? */ + return 1; + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V6) { + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? 
*/ + return 1; + } + } + + return 0; +} + +/** * ixgbe_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct **/ @@ -465,6 +548,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (ixgbe_ipsec_check_mgmt_ip(xs)) { + netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); + return -EINVAL; + } + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa rsa; @@ -575,7 +663,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - (__force u64)rsa.xs->id.spi); + (__force u32)rsa.xs->id.spi); } else { struct tx_sa tsa; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ef1afb3a8a97..38b4e4899490 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7621,17 +7621,19 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) + test_bit(__IXGBE_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; - rtnl_lock(); ixgbe_reinit_locked(adapter); rtnl_unlock(); } @@ -9049,7 +9051,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, { const struct tc_action *a; LIST_HEAD(actions); - int err; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -9070,11 +9071,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, if (!dev) return -EINVAL; - err = handle_redirect_action(adapter, dev->ifindex, queue, - action); - if (err == 0) - return err; + return handle_redirect_action(adapter, dev->ifindex, + queue, action); } + + return -EINVAL; } return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 70c75681495f..56a1031dcc07 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -76,6 +76,7 @@ enum ixgbevf_ring_state_t { __IXGBEVF_TX_DETECT_HANG, __IXGBEVF_HANG_CHECK_ARMED, __IXGBEVF_TX_XDP_RING, + __IXGBEVF_TX_XDP_RING_PRIMED, }; #define ring_is_xdp(ring) \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 083041129539..59416eddd840 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -991,24 +991,45 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_CONSUMED; /* record the location of the first descriptor for this packet */ - tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; - i = ring->next_to_use; - tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_buffer = &ring->tx_buffer_info[i]; dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); tx_buffer->data = xdp->data; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + /* Populate minimal context descriptor that will provide for the + * fact that we 
are expected to process Ethernet frames. + */ + if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { + struct ixgbe_adv_tx_context_desc *context_desc; + + set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); + + context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); + context_desc->vlan_macip_lens = + cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = + cpu_to_le32(IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + context_desc->mss_l4len_idx = 0; + + i = 1; + } /* put descriptor type bits */ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; cmd_type |= len | IXGBE_TXD_CMD; + + tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | @@ -1688,6 +1709,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, sizeof(struct ixgbevf_tx_buffer) * ring->count); clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); + clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); @@ -3119,15 +3141,17 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_REMOVING, &adapter->state) || - test_bit(__IXGBEVF_RESETTING, &adapter->state)) + test_bit(__IXGBEVF_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } adapter->tx_timeout_count++; - rtnl_lock(); ixgbevf_reinit_locked(adapter); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 9498ed26dbe5..55d4d10aa7d3 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o obj-$(CONFIG_MVNETA) += mvneta.o -obj-$(CONFIG_MVPP2) += mvpp2.o +obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile new file mode 100644 index 000000000000..4d11dd9e3246 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Marvell PPv2 driver. +# +obj-$(CONFIG_MVPP2) := mvpp2.o + +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h new file mode 100644 index 000000000000..def00dc3eb4e --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -0,0 +1,1046 @@ +/* + * Definitions for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ +#ifndef _MVPP2_H_ +#define _MVPP2_H_ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/phylink.h> + +/* Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) +#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Top Registers */ +#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) + +/* RSS Registers */ +#define MVPP22_RSS_INDEX 0x1500 +#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) +#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 +#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_WIDTH 0x150c + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) +#define 
MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Descriptor Manager Top Registers */ +#define MVPP2_RXQ_NUM_REG 0x2040 +#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 +#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 +#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) +#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 +#define MVPP2_RXQ_NUM_NEW_OFFSET 16 +#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) +#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff +#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 +#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 +#define MVPP2_RXQ_THRESH_REG 0x204c +#define MVPP2_OCCUPIED_THRESH_OFFSET 0 +#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff +#define MVPP2_RXQ_INDEX_REG 0x2050 +#define MVPP2_TXQ_NUM_REG 0x2080 +#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 +#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 +#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff +#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 +#define MVPP2_TXQ_INDEX_REG 0x2098 +#define MVPP2_TXQ_PREF_BUF_REG 0x209c +#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) +#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) +#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) +#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) +#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) +#define MVPP2_TXQ_PENDING_REG 0x20a0 +#define MVPP2_TXQ_PENDING_MASK 0x3fff +#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 +#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) +#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 +#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 +#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 +#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 +#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 +#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff +#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 +#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 +#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 +#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff +#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) + +/* MBUS bridge registers */ +#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) +#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) +#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) +#define MVPP2_BASE_ADDR_ENABLE 0x4060 + +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + +/* Interrupt 
Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + +#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 + +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 + +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + +#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) +#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) +#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) +#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 +#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) +#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) +#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) +#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) +#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) +#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) +#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc +#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 +#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define MVPP2_BM_VIRT_ALLOC_REG 
0x6440 +#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define 
MVPP2_GMAC_STATUS0 0x10 +#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) +#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) +#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) +#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_INT_STAT 0x20 +#define MVPP22_GMAC_INT_STAT_LINK BIT(1) +#define MVPP22_GMAC_INT_MASK 0x24 +#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) +#define MVPP22_GMAC_CTRL_4_REG 0x90 +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_RX_FC_EN BIT(3) +#define MVPP22_CTRL4_TX_FC_EN BIT(4) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_GMAC_INT_SUM_MASK 0xa4 +#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) + +/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. + */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff +#define MVPP22_XLG_STATUS 0x10c +#define MVPP22_XLG_STATUS_LINK_UP BIT(0) +#define MVPP22_XLG_INT_STAT 0x114 +#define MVPP22_XLG_INT_STAT_LINK BIT(1) +#define MVPP22_XLG_INT_MASK 0x118 +#define MVPP22_XLG_INT_MASK_LINK BIT(1) +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_EXT_INT_MASK 0x15c +#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) +#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) + +/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ +#define MVPP22_SMI_MISC_CFG_REG 0x1204 +#define MVPP22_SMI_POLLING_EN BIT(10) + +#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) + +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVPP2_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* System controller registers. Accessed through a regmap. 
*/ +#define GENCONF_SOFT_RESET1 0x1108 +#define GENCONF_SOFT_RESET1_GOP BIT(6) +#define GENCONF_PORT_CTRL0 0x1110 +#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) +#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) +#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) +#define GENCONF_PORT_CTRL1 0x1114 +#define GENCONF_PORT_CTRL1_EN(p) BIT(p) +#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) +#define GENCONF_CTRL0 0x1120 +#define GENCONF_CTRL0_PORT0_RGMII BIT(0) +#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) +#define GENCONF_CTRL0_PORT1_RGMII BIT(2) + +/* Various constants */ + +/* Coalescing */ +#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 +#define MVPP2_RX_COAL_PKTS 32 +#define MVPP2_RX_COAL_USEC 64 + +/* The two-byte Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is automatically filled with zeroes on + * the RX side. Since those two bytes sit at the front of the Ethernet + * header, they keep the IP header aligned on a 4-byte + * boundary automatically: the hardware skips those two bytes on its + * own. + */ +#define MVPP2_MH_SIZE 2 +#define MVPP2_ETH_TYPE_LEN 2 +#define MVPP2_PPPOE_HDR_SIZE 8 +#define MVPP2_VLAN_TAG_LEN 4 +#define MVPP2_VLAN_TAG_EDSA_LEN 8 + +/* Lbtd 802.3 type */ +#define MVPP2_IP_LBDT_TYPE 0xfffa + +#define MVPP2_TX_CSUM_MAX_SIZE 9800 + +/* Timeout constants */ +#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 + +#define MVPP2_TX_MTU_MAX 0x7ffff + +/* Maximum number of T-CONTs of PON port */ +#define MVPP2_MAX_TCONT 16 + +/* Maximum number of supported ports */ +#define MVPP2_MAX_PORTS 4 + +/* Maximum number of TXQs used by a single port */ +#define MVPP2_MAX_TXQ 8 + +/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO + * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data), + * multiply this value by two to count the maximum number of skb descs needed. 
+ */ +#define MVPP2_MAX_TSO_SEGS 300 +#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* Default number of RXQs in use */ +#define MVPP2_DEFAULT_RXQ 4 + +/* Max number of Rx descriptors */ +#define MVPP2_MAX_RXD_MAX 1024 +#define MVPP2_MAX_RXD_DFLT 128 + +/* Max number of Tx descriptors */ +#define MVPP2_MAX_TXD_MAX 2048 +#define MVPP2_MAX_TXD_DFLT 1024 + +/* Number of Tx descriptors that can be reserved at once by a CPU */ +#define MVPP2_CPU_DESC_CHUNK 64 + +/* Max number of Tx descriptors in each aggregated queue */ +#define MVPP2_AGGR_TXQ_SIZE 256 + +/* Descriptor aligned size */ +#define MVPP2_DESC_ALIGNED_SIZE 32 + +/* Descriptor alignment mask */ +#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) + +/* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* TX FIFO constants */ +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) + +/* RX buffer constants */ +#define MVPP2_SKB_SHINFO_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +#define MVPP2_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, cache_line_size()) + +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) +#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ + ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) + +/* IPv6 max L3 address size */ +#define MVPP2_MAX_L3_ADDR_SIZE 16 + +/* Port flags */ +#define MVPP2_F_LOOPBACK BIT(0) + +/* Marvell tag types */ +enum mvpp2_tag_type { + MVPP2_TAG_TYPE_NONE = 0, + MVPP2_TAG_TYPE_MH = 1, + MVPP2_TAG_TYPE_DSA = 2, + MVPP2_TAG_TYPE_EDSA = 3, + MVPP2_TAG_TYPE_VLAN = 4, + MVPP2_TAG_TYPE_LAST = 5 +}; + +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; + +/* BM constants */ +#define MVPP2_BM_JUMBO_BUF_NUM 512 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +#define MVPP2_BM_SHORT_FRAME_SIZE 512 +#define MVPP2_BM_LONG_FRAME_SIZE 2048 +#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 +/* BM short pool packet size + * These values ensure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) + +#define MVPP21_ADDR_SPACE_SZ 0 +#define MVPP22_ADDR_SPACE_SZ SZ_64K + +#define MVPP2_MAX_THREADS 8 +#define 
MVPP2_MAX_QVECS MVPP2_MAX_THREADS + +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc +#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) + +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + +/* Definitions */ + +/* Shared Packet Processor resources */ +struct mvpp2 { + /* Shared registers' base addresses */ + void __iomem *lms_base; + void __iomem *iface_base; + + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. + */ + void __iomem *swth_base[MVPP2_MAX_THREADS]; + + /* On PPv2.2, some port control registers are located into the system + * controller space. These registers are accessible through a regmap. 
+ */ + struct regmap *sysctrl_base; + + /* Common clocks */ + struct clk *pp_clk; + struct clk *gop_clk; + struct clk *mg_clk; + struct clk *mg_core_clk; + struct clk *axi_clk; + + /* List of pointers to port structures */ + int port_count; + struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + + /* Aggregated TXQs */ + struct mvpp2_tx_queue *aggr_txqs; + + /* BM pools */ + struct mvpp2_bm_pool *bm_pools; + + /* PRS shadow table */ + struct mvpp2_prs_shadow *prs_shadow; + /* PRS auxiliary table for double vlan entries control */ + bool *prs_double_vlans; + + /* Tclk value */ + u32 tclk; + + /* HW version */ + enum { MVPP21, MVPP22 } hw_version; + + /* Maximum number of RXQs per port */ + unsigned int max_port_rxqs; + + /* Workqueue to gather hardware statistics */ + char queue_name[30]; + struct workqueue_struct *stats_queue; +}; + +struct mvpp2_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + +struct mvpp2_port { + u8 id; + + /* Index of the port from the "group of ports" complex point + * of view + */ + int gop_id; + + int link_irq; + + struct mvpp2 *priv; + + /* Firmware node associated to the port */ + struct fwnode_handle *fwnode; + + /* Is a PHY always connected to the port */ + bool has_phy; + + /* Per-port registers' base address */ + void __iomem *base; + void __iomem *stats_base; + + struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; + struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; + struct net_device *dev; + + int pkt_size; + + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + + /* Flags */ + unsigned long flags; + + u16 tx_ring_size; + u16 rx_ring_size; + struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; + + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + + struct device_node *of_node; + + phy_interface_t phy_interface; + struct phylink *phylink; + struct phy *comphy; + + struct mvpp2_bm_pool *pool_long; + struct mvpp2_bm_pool *pool_short; + + /* Index of first port's physical RXQ */ + u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; +}; + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define 
MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u8 packet_offset; /* the offset from the buffer beginning */ + u8 phys_txq; /* destination queue ID */ + u16 data_size; /* data size of transmitted packet in bytes */ + u32 buf_dma_addr; /* physical addr of transmitted buffer */ + u32 buf_cookie; /* cookie for access to TX buffer in tx path */ + u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + u32 reserved2; /* reserved (for future use) */ +}; + +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { + u32 status; /* info about received packet */ + u16 reserved1; /* parser_info (for future use, PnC) */ + u16 data_size; /* size of received packet in bytes */ + u32 buf_dma_addr; /* physical address of the buffer */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved2; /* gem_port_id (for future use, PON) */ + u16 reserved3; /* csum_l4 (for future use, PnC) */ + u8 reserved4; /* bm_qset (for future use, BM) */ + u8 reserved5; + u16 reserved6; /* classify_info (for future use, PnC) */ + u32 reserved7; /* flow_id (for future use, PnC) */ + u32 reserved8; +}; + +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + u32 command; + u8 packet_offset; + u8 phys_txq; + u16 data_size; + u64 reserved1; + u64 buf_dma_addr_ptp; + u64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + u32 status; + u16 reserved1; + u16 data_size; + u32 reserved2; + u32 reserved3; + u64 buf_dma_addr_key_hash; + u64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + +struct mvpp2_txq_pcpu_buf { + /* Transmitted SKB */ + struct sk_buff *skb; + + /* Physical address of transmitted buffer */ + dma_addr_t dma; + + /* Size transmitted */ + size_t size; +}; + +/* Per-CPU Tx queue control */ +struct mvpp2_txq_pcpu { + int cpu; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the + * descriptor ring + */ + int count; + + int wake_threshold; + int stop_threshold; + + /* Number of Tx DMA descriptors reserved for each CPU */ + int reserved_num; + + /* Infos about transmitted buffers */ + struct mvpp2_txq_pcpu_buf *buffs; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + +struct mvpp2_tx_queue { + /* Physical number of this Tx queue */ + u8 id; + + /* Logical number of this Tx queue */ + u8 log_id; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the descriptor ring */ + int count; + + /* Per-CPU control of physical Tx queues */ + struct mvpp2_txq_pcpu __percpu *pcpu; + + u32 done_pkts_coal; + + /* Virtual address of 
the Tx DMA descriptors array */ + struct mvpp2_tx_desc *descs; + + /* DMA address of the Tx DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last Tx DMA descriptor */ + int last_desc; + + /* Index of the next Tx DMA descriptor to process */ + int next_desc_to_proc; +}; + +struct mvpp2_rx_queue { + /* RX queue number, in the range 0-31 for physical RXQs */ + u8 id; + + /* Num of rx descriptors in the rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvpp2_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; + + /* ID of port to which physical RXQ is mapped */ + int port; + + /* Port's logic RXQ number to which physical RXQ is mapped */ + int logic_rxq; +}; + +struct mvpp2_bm_pool { + /* Pool number in the range 0-7 */ + int id; + + /* Buffer Pointers Pool External (BPPE) size */ + int size; + /* BPPE size in bytes */ + int size_bytes; + /* Number of buffers for this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + int frag_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; + + /* Ports using BM pool */ + u32 port_map; +}; + +#define IS_TSO_HEADER(txq_pcpu, addr) \ + ((addr) >= (txq_pcpu)->tso_headers_dma && \ + (addr) < (txq_pcpu)->tso_headers_dma + \ + (txq_pcpu)->size * TSO_HEADER_SIZE) + +#define MVPP2_DRIVER_NAME "mvpp2" +#define MVPP2_DRIVER_VERSION "1.0" + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); +u32 mvpp2_read(struct mvpp2 *priv, u32 offset); + +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset); + +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); + +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, + u32 data); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c new file mode 100644 index 000000000000..8581d5b17dd5 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -0,0 +1,141 @@ +/* + * RSS and Classifier helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include "mvpp2.h" +#include "mvpp2_cls.h" + +/* Update classification flow table registers */ +static void mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +/* Update classification lookup table register */ +static void mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Classifier default initialization */ +void mvpp2_cls_init(struct mvpp2 *priv) +{ + struct mvpp2_cls_lookup_entry le; + struct mvpp2_cls_flow_entry fe; + int index; + + /* Enable classifier */ + mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); + + /* Clear classifier flow table */ + memset(&fe.data, 0, sizeof(fe.data)); + for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { + fe.index = index; + mvpp2_cls_flow_write(priv, &fe); + } + + /* Clear classifier lookup table */ + le.data = 0; + for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { + le.lkpid = index; + le.way = 0; + mvpp2_cls_lookup_write(priv, &le); + + le.way = 1; + mvpp2_cls_lookup_write(priv, &le); + } +} + +void mvpp2_cls_port_config(struct mvpp2_port *port) +{ + struct mvpp2_cls_lookup_entry le; + u32 val; + + /* Set way for the port */ + val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); + val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); + + /* Pick the entry to be accessed in lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); +} + +/* Set CPU queue number for oversize packets */ +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +} + +void mvpp22_init_rss(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int i; + + /* Set the table width: replace the whole classifier Rx queue number + * with the ones configured in RSS table entries. + */ + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); + + /* Loop through the classifier Rx Queues and map them to a RSS table. + * Map them all to the first table (0) by default. + */ + for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); + mvpp2_write(priv, MVPP22_RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(0)); + } + + /* Configure the first table to evenly distribute the packets across + * real Rx Queues. 
The table entries map a hash to a port Rx Queue. + */ + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(0) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); + } + +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h new file mode 100644 index 000000000000..8e1d7f9ffa0b --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -0,0 +1,44 @@ +/* + * RSS and Classifier definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVPP2_CLS_H_ +#define _MVPP2_CLS_H_ + +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 +#define MVPP2_CLS_RX_QUEUES 256 + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 + +struct mvpp2_cls_flow_entry { + u32 index; + u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + u32 lkpid; + u32 way; + u32 data; +}; + +void mvpp22_init_rss(struct mvpp2_port *port); + +void mvpp2_cls_init(struct mvpp2 *priv); + +void mvpp2_cls_port_config(struct mvpp2_port *port); + +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6847cd431aa0..0319ed9ef8b8 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -40,846 +40,9 @@ #include <net/ipv6.h> #include <net/tso.h> -/* Fifo Registers */ -#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) -#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) -#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 -#define MVPP2_RX_FIFO_INIT_REG 0x64 -#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) -#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) - -/* RX DMA Top Registers */ -#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) -#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) -#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) -#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) -#define MVPP2_POOL_BUF_SIZE_OFFSET 5 -#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) -#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff -#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) -#define MVPP2_RXQ_POOL_SHORT_OFFS 20 -#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 -#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 -#define MVPP2_RXQ_POOL_LONG_OFFS 24 -#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 -#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 -#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 -#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 -#define MVPP2_RXQ_DISABLE_MASK BIT(31) - -/* Top Registers */ -#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) -#define MVPP2_DSA_EXTENDED BIT(5) - -/* Parser Registers */ -#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 -#define MVPP2_PRS_PORT_LU_MAX 0xf -#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) -#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) -#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) -#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) -#define MVPP2_PRS_INIT_OFF_VAL(port, val) 
((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) -#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_TCAM_IDX_REG 0x1100 -#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) -#define MVPP2_PRS_TCAM_INV_MASK BIT(31) -#define MVPP2_PRS_SRAM_IDX_REG 0x1200 -#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) -#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 -#define MVPP2_PRS_TCAM_EN_MASK BIT(0) - -/* RSS Registers */ -#define MVPP22_RSS_INDEX 0x1500 -#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) -#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) -#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 -#define MVPP22_RSS_TABLE_POINTER(p) (p) -#define MVPP22_RSS_WIDTH 0x150c - -/* Classifier Registers */ -#define MVPP2_CLS_MODE_REG 0x1800 -#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) -#define MVPP2_CLS_PORT_WAY_REG 0x1810 -#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) -#define MVPP2_CLS_LKP_INDEX_REG 0x1814 -#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 -#define MVPP2_CLS_LKP_TBL_REG 0x1818 -#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff -#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) -#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 -#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 -#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 -#define MVPP2_CLS_FLOW_TBL2_REG 0x182c -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 -#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) -#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 -#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) - -/* Descriptor Manager Top Registers */ -#define MVPP2_RXQ_NUM_REG 0x2040 -#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 -#define MVPP22_DESC_ADDR_OFFS 8 -#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 -#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) -#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 -#define MVPP2_RXQ_NUM_NEW_OFFSET 16 -#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) -#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff -#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 -#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 -#define MVPP2_RXQ_THRESH_REG 0x204c -#define MVPP2_OCCUPIED_THRESH_OFFSET 0 -#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff -#define MVPP2_RXQ_INDEX_REG 0x2050 -#define MVPP2_TXQ_NUM_REG 0x2080 -#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 -#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 -#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_TXQ_THRESH_REG 0x2094 -#define MVPP2_TXQ_THRESH_OFFSET 16 -#define MVPP2_TXQ_THRESH_MASK 0x3fff -#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 -#define MVPP2_TXQ_INDEX_REG 0x2098 -#define MVPP2_TXQ_PREF_BUF_REG 0x209c -#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) -#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) -#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) -#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) -#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) -#define MVPP2_TXQ_PENDING_REG 0x20a0 -#define MVPP2_TXQ_PENDING_MASK 0x3fff -#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 -#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) -#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 -#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 -#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 -#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 -#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 -#define MVPP2_TXQ_RSVD_RSLT_MASK 
0x3fff -#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 -#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 -#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) -#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 -#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff -#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) - -/* MBUS bridge registers */ -#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) -#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) -#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) -#define MVPP2_BASE_ADDR_ENABLE 0x4060 - -/* AXI Bridge Registers */ -#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 -#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 -#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 -#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 -#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 -#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c -#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 -#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 -#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 -#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 -#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 -#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 - -/* Values for AXI Bridge registers */ -#define MVPP22_AXI_ATTR_CACHE_OFFS 0 -#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 - -#define MVPP22_AXI_CODE_CACHE_OFFS 0 -#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 - -#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 -#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 -#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb - -#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 -#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 - -/* Interrupt Cause and Mask registers */ -#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) -#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 - -#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) -#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) - -#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 -#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 - -#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 - -#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 -#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 - -#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) -#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) -#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) -#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 -#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) -#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) -#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) -#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) -#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) -#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) -#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc -#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 -#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 - -/* 
Buffer Manager registers */ -#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) -#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 -#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) -#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 -#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) -#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 -#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) -#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 -#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff -#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 -#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) -#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) -#define MVPP2_BM_START_MASK BIT(0) -#define MVPP2_BM_STOP_MASK BIT(1) -#define MVPP2_BM_STATE_MASK BIT(4) -#define MVPP2_BM_LOW_THRESH_OFFS 8 -#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 -#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_LOW_THRESH_OFFS) -#define MVPP2_BM_HIGH_THRESH_OFFS 16 -#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 -#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_HIGH_THRESH_OFFS) -#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) -#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) -#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) -#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) -#define MVPP2_BM_BPPE_FULL_MASK BIT(3) -#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) -#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) -#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 -#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 -#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 -#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) -#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) -#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) -#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) -#define MVPP2_BM_VIRT_RLS_REG 0x64c0 -#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 -#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 - -/* TX Scheduler registers */ -#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 -#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 -#define MVPP2_TXP_SCHED_ENQ_MASK 0xff -#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 -#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 -#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 -#define MVPP2_TXP_SCHED_MTU_REG 0x801c -#define MVPP2_TXP_MTU_MAX 0x7FFFF -#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 -#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 -#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff -#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) -#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff -#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff - -/* TX general registers */ -#define MVPP2_TX_SNOOP_REG 0x8800 -#define MVPP2_TX_PORT_FLUSH_REG 0x8810 -#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) 
- -/* LMS registers */ -#define MVPP2_SRC_ADDR_MIDDLE 0x24 -#define MVPP2_SRC_ADDR_HIGH 0x28 -#define MVPP2_PHY_AN_CFG0_REG 0x34 -#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 - -/* Per-port registers */ -#define MVPP2_GMAC_CTRL_0_REG 0x0 -#define MVPP2_GMAC_PORT_EN_MASK BIT(0) -#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) -#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 -#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc -#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) -#define MVPP2_GMAC_CTRL_1_REG 0x4 -#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) -#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) -#define MVPP2_GMAC_PCS_LB_EN_BIT 6 -#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) -#define MVPP2_GMAC_SA_LOW_OFFS 7 -#define MVPP2_GMAC_CTRL_2_REG 0x8 -#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) -#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) -#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) -#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) -#define MVPP2_GMAC_DISABLE_PADDING BIT(5) -#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) -#define MVPP2_GMAC_AUTONEG_CONFIG 0xc -#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) -#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) -#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) -#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) -#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) -#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) -#define MVPP2_GMAC_AN_SPEED_EN BIT(7) -#define MVPP2_GMAC_FC_ADV_EN BIT(9) -#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) -#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) -#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) -#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) -#define MVPP2_GMAC_STATUS0 0x10 -#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) -#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) -#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) -#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) -#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) -#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) -#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) -#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c -#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ - MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) -#define MVPP22_GMAC_INT_STAT 0x20 -#define MVPP22_GMAC_INT_STAT_LINK BIT(1) -#define MVPP22_GMAC_INT_MASK 0x24 -#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) -#define MVPP22_GMAC_CTRL_4_REG 0x90 -#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) -#define MVPP22_CTRL4_RX_FC_EN BIT(3) -#define MVPP22_CTRL4_TX_FC_EN BIT(4) -#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) -#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) -#define MVPP22_GMAC_INT_SUM_MASK 0xa4 -#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) - -/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, - * relative to port->base. 
- */ -#define MVPP22_XLG_CTRL0_REG 0x100 -#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) -#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) -#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) -#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) -#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) -#define MVPP22_XLG_CTRL1_REG 0x104 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff -#define MVPP22_XLG_STATUS 0x10c -#define MVPP22_XLG_STATUS_LINK_UP BIT(0) -#define MVPP22_XLG_INT_STAT 0x114 -#define MVPP22_XLG_INT_STAT_LINK BIT(1) -#define MVPP22_XLG_INT_MASK 0x118 -#define MVPP22_XLG_INT_MASK_LINK BIT(1) -#define MVPP22_XLG_CTRL3_REG 0x11c -#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) -#define MVPP22_XLG_EXT_INT_MASK 0x15c -#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) -#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) -#define MVPP22_XLG_CTRL4_REG 0x184 -#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) -#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) -#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) -#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) - -/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ -#define MVPP22_SMI_MISC_CFG_REG 0x1204 -#define MVPP22_SMI_POLLING_EN BIT(10) - -#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) - -#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff - -/* Descriptor ring Macros */ -#define MVPP2_QUEUE_NEXT_DESC(q, index) \ - (((index) < (q)->last_desc) ? ((index) + 1) : 0) - -/* XPCS registers. PPv2.2 only */ -#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) -#define MVPP22_MPCS_CTRL 0x14 -#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) -#define MVPP22_MPCS_CLK_RESET 0x14c -#define MAC_CLK_RESET_SD_TX BIT(0) -#define MAC_CLK_RESET_SD_RX BIT(1) -#define MAC_CLK_RESET_MAC BIT(2) -#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) -#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) - -/* XPCS registers. PPv2.2 only */ -#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) -#define MVPP22_XPCS_CFG0 0x0 -#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) -#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) - -/* System controller registers. Accessed through a regmap. */ -#define GENCONF_SOFT_RESET1 0x1108 -#define GENCONF_SOFT_RESET1_GOP BIT(6) -#define GENCONF_PORT_CTRL0 0x1110 -#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) -#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) -#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) -#define GENCONF_PORT_CTRL1 0x1114 -#define GENCONF_PORT_CTRL1_EN(p) BIT(p) -#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) -#define GENCONF_CTRL0 0x1120 -#define GENCONF_CTRL0_PORT0_RGMII BIT(0) -#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) -#define GENCONF_CTRL0_PORT1_RGMII BIT(2) - -/* Various constants */ - -/* Coalescing */ -#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 -#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL -#define MVPP2_TXDONE_COAL_USEC 1000 -#define MVPP2_RX_COAL_PKTS 32 -#define MVPP2_RX_COAL_USEC 64 - -/* The two bytes Marvell header. Either contains a special value used - * by Marvell switches when a specific hardware mode is enabled (not - * supported by this driver) or is filled automatically by zeroes on - * the RX side. Those two bytes being at the front of the Ethernet - * header, they allow to have the IP header aligned on a 4 bytes - * boundary automatically: the hardware skips those two bytes on its - * own. 
- */ -#define MVPP2_MH_SIZE 2 -#define MVPP2_ETH_TYPE_LEN 2 -#define MVPP2_PPPOE_HDR_SIZE 8 -#define MVPP2_VLAN_TAG_LEN 4 -#define MVPP2_VLAN_TAG_EDSA_LEN 8 - -/* Lbtd 802.3 type */ -#define MVPP2_IP_LBDT_TYPE 0xfffa - -#define MVPP2_TX_CSUM_MAX_SIZE 9800 - -/* Timeout constants */ -#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 -#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 - -#define MVPP2_TX_MTU_MAX 0x7ffff - -/* Maximum number of T-CONTs of PON port */ -#define MVPP2_MAX_TCONT 16 - -/* Maximum number of supported ports */ -#define MVPP2_MAX_PORTS 4 - -/* Maximum number of TXQs used by single port */ -#define MVPP2_MAX_TXQ 8 - -/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO - * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data), - * multiply this value by two to count the maximum number of skb descs needed. - */ -#define MVPP2_MAX_TSO_SEGS 300 -#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) - -/* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 - -/* Max number of Rx descriptors */ -#define MVPP2_MAX_RXD_MAX 1024 -#define MVPP2_MAX_RXD_DFLT 128 - -/* Max number of Tx descriptors */ -#define MVPP2_MAX_TXD_MAX 2048 -#define MVPP2_MAX_TXD_DFLT 1024 - -/* Amount of Tx descriptors that can be reserved at once by CPU */ -#define MVPP2_CPU_DESC_CHUNK 64 - -/* Max number of Tx descriptors in each aggregated queue */ -#define MVPP2_AGGR_TXQ_SIZE 256 - -/* Descriptor aligned size */ -#define MVPP2_DESC_ALIGNED_SIZE 32 - -/* Descriptor alignment mask */ -#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) - -/* RX FIFO constants */ -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 -#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 - -/* TX FIFO constants */ -#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa -#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 -#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 -#define MVPP2_TX_FIFO_THRESHOLD_10KB \ - (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) -#define MVPP2_TX_FIFO_THRESHOLD_3KB \ - (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) - -/* RX buffer constants */ -#define MVPP2_SKB_SHINFO_SIZE \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - -#define MVPP2_RX_PKT_SIZE(mtu) \ - ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ - ETH_HLEN + ETH_FCS_LEN, cache_line_size()) - -#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) -#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) -#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ - ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) - -#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) - -/* IPv6 max L3 address size */ -#define MVPP2_MAX_L3_ADDR_SIZE 16 - -/* Port flags */ -#define MVPP2_F_LOOPBACK BIT(0) - -/* Marvell tag types */ -enum mvpp2_tag_type { - MVPP2_TAG_TYPE_NONE = 0, - MVPP2_TAG_TYPE_MH = 1, - MVPP2_TAG_TYPE_DSA = 2, - MVPP2_TAG_TYPE_EDSA = 3, - MVPP2_TAG_TYPE_VLAN = 4, - MVPP2_TAG_TYPE_LAST = 5 -}; - -/* Parser constants */ -#define MVPP2_PRS_TCAM_SRAM_SIZE 256 -#define MVPP2_PRS_TCAM_WORDS 6 -#define MVPP2_PRS_SRAM_WORDS 4 -#define MVPP2_PRS_FLOW_ID_SIZE 64 -#define MVPP2_PRS_FLOW_ID_MASK 0x3f -#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 -#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) -#define MVPP2_PRS_IPV4_HEAD 0x40 -#define 
MVPP2_PRS_IPV4_HEAD_MASK 0xf0 -#define MVPP2_PRS_IPV4_MC 0xe0 -#define MVPP2_PRS_IPV4_MC_MASK 0xf0 -#define MVPP2_PRS_IPV4_BC_MASK 0xff -#define MVPP2_PRS_IPV4_IHL 0x5 -#define MVPP2_PRS_IPV4_IHL_MASK 0xf -#define MVPP2_PRS_IPV6_MC 0xff -#define MVPP2_PRS_IPV6_MC_MASK 0xff -#define MVPP2_PRS_IPV6_HOP_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f -#define MVPP2_PRS_DBL_VLANS_MAX 100 -#define MVPP2_PRS_CAST_MASK BIT(0) -#define MVPP2_PRS_MCAST_VAL BIT(0) -#define MVPP2_PRS_UCAST_VAL 0x0 - -/* Tcam structure: - * - lookup ID - 4 bits - * - port ID - 1 byte - * - additional information - 1 byte - * - header data - 8 bytes - * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). - */ -#define MVPP2_PRS_AI_BITS 8 -#define MVPP2_PRS_PORT_MASK 0xff -#define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 - -#define MVPP2_PRS_VID_TCAM_BYTE 2 - -/* TCAM range for unicast and multicast filtering. We have 25 entries per port, - * with 4 dedicated to UC filtering and the rest to multicast filtering. - * Additionnally we reserve one entry for the broadcast address, and one for - * each port's own address. - */ -#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 -#define MVPP2_PRS_MAC_RANGE_SIZE 80 - -/* Number of entries per port dedicated to UC and MC filtering */ -#define MVPP2_PRS_MAC_UC_FILT_MAX 4 -#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ - MVPP2_PRS_MAC_UC_FILT_MAX) - -/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 - * 10 VLAN ID filter entries per port - * 1 default VLAN filter entry per port - * It is assumed that there are 3 ports for filter, not including loopback port - */ -#define MVPP2_PRS_VLAN_FILT_MAX 11 -#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 - -#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) -#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) - -/* Tcam entries ID */ -#define MVPP2_PE_DROP_ALL 0 -#define MVPP2_PE_FIRST_FREE_TID 1 - -/* MAC filtering range */ -#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) -#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ - MVPP2_PRS_MAC_RANGE_SIZE + 1) -/* VLAN filtering range */ -#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) -#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ - MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) -#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) -#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) -#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) -#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) -#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) -#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) -#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) -#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) -#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) -#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) -#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) -#define 
MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) -#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) -#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) -#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) -#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) -#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) -#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) -#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) -#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) -#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) -#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -/* reserved */ -#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3) -#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) -#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) - -#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ - ((port) * MVPP2_PRS_VLAN_FILT_MAX)) -#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) -/* Index of default vid filter for given port */ -#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) - -/* Sram structure - * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). - */ -#define MVPP2_PRS_SRAM_RI_OFFS 0 -#define MVPP2_PRS_SRAM_RI_WORD 0 -#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 -#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 -#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 -#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 -#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 -#define MVPP2_PRS_SRAM_UDF_OFFS 73 -#define MVPP2_PRS_SRAM_UDF_BITS 8 -#define MVPP2_PRS_SRAM_UDF_MASK 0xff -#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 -#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 -#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 -#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 -#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 -#define MVPP2_PRS_SRAM_AI_OFFS 90 -#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 -#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 -#define MVPP2_PRS_SRAM_AI_MASK 0xff -#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 -#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf -#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 -#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 - -/* Sram result info bits assignment */ -#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 -#define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_VLAN_NONE 0x0 -#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) -#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) -#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 -#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) -#define MVPP2_PRS_RI_L2_UCAST 0x0 -#define MVPP2_PRS_RI_L2_MCAST BIT(9) -#define MVPP2_PRS_RI_L2_BCAST BIT(10) -#define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_UN 0x0 -#define 
MVPP2_PRS_RI_L3_IP4 BIT(12) -#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) -#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) -#define MVPP2_PRS_RI_L3_IP6 BIT(14) -#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) -#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_L3_UCAST 0x0 -#define MVPP2_PRS_RI_L3_MCAST BIT(15) -#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 -#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) -#define MVPP2_PRS_RI_UDF3_MASK 0x300000 -#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) -#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 -#define MVPP2_PRS_RI_L4_TCP BIT(22) -#define MVPP2_PRS_RI_L4_UDP BIT(23) -#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) -#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 -#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) -#define MVPP2_PRS_RI_DROP_MASK 0x80000000 - -/* Sram additional info bits assignment */ -#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) -#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) -#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) -#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) -#define MVPP2_PRS_SINGLE_VLAN_AI 0 -#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) -#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) - -/* DSA/EDSA type */ -#define MVPP2_PRS_TAGGED true -#define MVPP2_PRS_UNTAGGED false -#define MVPP2_PRS_EDSA true -#define MVPP2_PRS_DSA false - -/* MAC entries, shadow udf */ -enum mvpp2_prs_udf { - MVPP2_PRS_UDF_MAC_DEF, - MVPP2_PRS_UDF_MAC_RANGE, - MVPP2_PRS_UDF_L2_DEF, - MVPP2_PRS_UDF_L2_DEF_COPY, - MVPP2_PRS_UDF_L2_USER, -}; - -/* Lookup ID */ -enum mvpp2_prs_lookup { - MVPP2_PRS_LU_MH, - MVPP2_PRS_LU_MAC, - MVPP2_PRS_LU_DSA, - MVPP2_PRS_LU_VLAN, - MVPP2_PRS_LU_VID, - MVPP2_PRS_LU_L2, - MVPP2_PRS_LU_PPPOE, - MVPP2_PRS_LU_IP4, - MVPP2_PRS_LU_IP6, - MVPP2_PRS_LU_FLOWS, - MVPP2_PRS_LU_LAST, -}; - -/* L2 cast enum */ -enum mvpp2_prs_l2_cast { - MVPP2_PRS_L2_UNI_CAST, - MVPP2_PRS_L2_MULTI_CAST, -}; - -/* L3 cast enum */ -enum mvpp2_prs_l3_cast { - MVPP2_PRS_L3_UNI_CAST, - MVPP2_PRS_L3_MULTI_CAST, - MVPP2_PRS_L3_BROAD_CAST -}; - -/* Classifier constants */ -#define MVPP2_CLS_FLOWS_TBL_SIZE 512 -#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 -#define MVPP2_CLS_LKP_TBL_SIZE 64 -#define MVPP2_CLS_RX_QUEUES 256 - -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 - -/* BM constants */ -#define MVPP2_BM_JUMBO_BUF_NUM 512 -#define MVPP2_BM_LONG_BUF_NUM 1024 -#define MVPP2_BM_SHORT_BUF_NUM 2048 -#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) -#define MVPP2_BM_POOL_PTR_ALIGN 128 - -/* BM cookie (32 bits) definition */ -#define MVPP2_BM_COOKIE_POOL_OFFS 8 -#define MVPP2_BM_COOKIE_CPU_OFFS 24 - -#define MVPP2_BM_SHORT_FRAME_SIZE 512 -#define MVPP2_BM_LONG_FRAME_SIZE 2048 -#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 -/* BM short pool packet size - * These value assure that for SWF the total number - * of bytes allocated for each buffer will be 512 - */ -#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) -#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) -#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) - -#define MVPP21_ADDR_SPACE_SZ 0 -#define MVPP22_ADDR_SPACE_SZ SZ_64K - -#define MVPP2_MAX_THREADS 8 -#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" enum mvpp2_bm_pool_log_num { MVPP2_BM_SHORT, @@ 
-893,466 +56,6 @@ static struct { int buf_num; } mvpp2_pools[MVPP2_BM_POOLS_NUM]; -/* GMAC MIB Counters register definitions */ -#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 -#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 -#define MVPP22_MIB_COUNTERS_OFFSET 0x0 -#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 - -#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 -#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 -#define MVPP2_MIB_CRC_ERRORS_SENT 0xc -#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 -#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 -#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c -#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 -#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 -#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 -#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c -#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 -#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 -#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 -#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 -#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 -#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c -#define MVPP2_MIB_FC_SENT 0x54 -#define MVPP2_MIB_FC_RCVD 0x58 -#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c -#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 -#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 -#define MVPP2_MIB_OVERSIZE_RCVD 0x68 -#define MVPP2_MIB_JABBER_RCVD 0x6c -#define MVPP2_MIB_MAC_RCV_ERROR 0x70 -#define MVPP2_MIB_BAD_CRC_EVENT 0x74 -#define MVPP2_MIB_COLLISION 0x78 -#define MVPP2_MIB_LATE_COLLISION 0x7c - -#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) - -#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) - -/* Definitions */ - -/* Shared Packet Processor resources */ -struct mvpp2 { - /* Shared registers' base addresses */ - void __iomem *lms_base; - void __iomem *iface_base; - - /* On PPv2.2, each "software thread" can access the base - * register through a separate address space, each 64 KB apart - * from each other. Typically, such address spaces will be - * used per CPU. - */ - void __iomem *swth_base[MVPP2_MAX_THREADS]; - - /* On PPv2.2, some port control registers are located into the system - * controller space. These registers are accessible through a regmap. 
- */ - struct regmap *sysctrl_base; - - /* Common clocks */ - struct clk *pp_clk; - struct clk *gop_clk; - struct clk *mg_clk; - struct clk *mg_core_clk; - struct clk *axi_clk; - - /* List of pointers to port structures */ - int port_count; - struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; - - /* Aggregated TXQs */ - struct mvpp2_tx_queue *aggr_txqs; - - /* BM pools */ - struct mvpp2_bm_pool *bm_pools; - - /* PRS shadow table */ - struct mvpp2_prs_shadow *prs_shadow; - /* PRS auxiliary table for double vlan entries control */ - bool *prs_double_vlans; - - /* Tclk value */ - u32 tclk; - - /* HW version */ - enum { MVPP21, MVPP22 } hw_version; - - /* Maximum number of RXQs per port */ - unsigned int max_port_rxqs; - - /* Workqueue to gather hardware statistics */ - char queue_name[30]; - struct workqueue_struct *stats_queue; -}; - -struct mvpp2_pcpu_stats { - struct u64_stats_sync syncp; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; -}; - -/* Per-CPU port control */ -struct mvpp2_port_pcpu { - struct hrtimer tx_done_timer; - bool timer_scheduled; - /* Tasklet for egress finalization */ - struct tasklet_struct tx_done_tasklet; -}; - -struct mvpp2_queue_vector { - int irq; - struct napi_struct napi; - enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; - int sw_thread_id; - u16 sw_thread_mask; - int first_rxq; - int nrxqs; - u32 pending_cause_rx; - struct mvpp2_port *port; -}; - -struct mvpp2_port { - u8 id; - - /* Index of the port from the "group of ports" complex point - * of view - */ - int gop_id; - - int link_irq; - - struct mvpp2 *priv; - - /* Firmware node associated to the port */ - struct fwnode_handle *fwnode; - - /* Is a PHY always connected to the port */ - bool has_phy; - - /* Per-port registers' base address */ - void __iomem *base; - void __iomem *stats_base; - - struct mvpp2_rx_queue **rxqs; - unsigned int nrxqs; - struct mvpp2_tx_queue **txqs; - unsigned int ntxqs; - struct net_device *dev; - - int pkt_size; - - /* Per-CPU port control */ - struct mvpp2_port_pcpu __percpu *pcpu; - - /* Flags */ - unsigned long flags; - - u16 tx_ring_size; - u16 rx_ring_size; - struct mvpp2_pcpu_stats __percpu *stats; - u64 *ethtool_stats; - - /* Per-port work and its lock to gather hardware statistics */ - struct mutex gather_stats_lock; - struct delayed_work stats_work; - - struct device_node *of_node; - - phy_interface_t phy_interface; - struct phylink *phylink; - struct phy *comphy; - - struct mvpp2_bm_pool *pool_long; - struct mvpp2_bm_pool *pool_short; - - /* Index of first port's physical RXQ */ - u8 first_rxq; - - struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; - unsigned int nqvecs; - bool has_tx_irqs; - - u32 tx_time_coal; -}; - -/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the - * layout of the transmit and reception DMA descriptors, and their - * layout is therefore defined by the hardware design - */ - -#define MVPP2_TXD_L3_OFF_SHIFT 0 -#define MVPP2_TXD_IP_HLEN_SHIFT 8 -#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) -#define MVPP2_TXD_L4_CSUM_NOT BIT(14) -#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) -#define MVPP2_TXD_PADDING_DISABLE BIT(23) -#define MVPP2_TXD_L4_UDP BIT(24) -#define MVPP2_TXD_L3_IP6 BIT(26) -#define MVPP2_TXD_L_DESC BIT(28) -#define MVPP2_TXD_F_DESC BIT(29) - -#define MVPP2_RXD_ERR_SUMMARY BIT(15) -#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) -#define MVPP2_RXD_ERR_CRC 0x0 -#define MVPP2_RXD_ERR_OVERRUN BIT(13) -#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) -#define MVPP2_RXD_BM_POOL_ID_OFFS 16 -#define 
MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) -#define MVPP2_RXD_HWF_SYNC BIT(21) -#define MVPP2_RXD_L4_CSUM_OK BIT(22) -#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) -#define MVPP2_RXD_L4_TCP BIT(25) -#define MVPP2_RXD_L4_UDP BIT(26) -#define MVPP2_RXD_L3_IP4 BIT(28) -#define MVPP2_RXD_L3_IP6 BIT(30) -#define MVPP2_RXD_BUF_HDR BIT(31) - -/* HW TX descriptor for PPv2.1 */ -struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ - u8 packet_offset; /* the offset from the buffer beginning */ - u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ -}; - -/* HW RX descriptor for PPv2.1 */ -struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ - u8 reserved4; /* bm_qset (for future use, BM) */ - u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; -}; - -/* HW TX descriptor for PPv2.2 */ -struct mvpp22_tx_desc { - u32 command; - u8 packet_offset; - u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; -}; - -/* HW RX descriptor for PPv2.2 */ -struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; -}; - -/* Opaque type used by the driver to manipulate the HW TX and RX - * descriptors - */ -struct mvpp2_tx_desc { - union { - struct mvpp21_tx_desc pp21; - struct mvpp22_tx_desc pp22; - }; -}; - -struct mvpp2_rx_desc { - union { - struct mvpp21_rx_desc pp21; - struct mvpp22_rx_desc pp22; - }; -}; - -struct mvpp2_txq_pcpu_buf { - /* Transmitted SKB */ - struct sk_buff *skb; - - /* Physical address of transmitted buffer */ - dma_addr_t dma; - - /* Size transmitted */ - size_t size; -}; - -/* Per-CPU Tx queue control */ -struct mvpp2_txq_pcpu { - int cpu; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the - * descriptor ring - */ - int count; - - int wake_threshold; - int stop_threshold; - - /* Number of Tx DMA descriptors reserved for each CPU */ - int reserved_num; - - /* Infos about transmitted buffers */ - struct mvpp2_txq_pcpu_buf *buffs; - - /* Index of last TX DMA descriptor that was inserted */ - int txq_put_index; - - /* Index of the TX DMA descriptor to be cleaned up */ - int txq_get_index; - - /* DMA buffer for TSO headers */ - char *tso_headers; - dma_addr_t tso_headers_dma; -}; - -struct mvpp2_tx_queue { - /* Physical number of this Tx queue */ - u8 id; - - /* Logical number of this Tx queue */ - u8 log_id; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the descriptor ring */ - int count; - - /* Per-CPU control of physical Tx queues */ - struct mvpp2_txq_pcpu __percpu *pcpu; - - u32 done_pkts_coal; - - /* Virtual address of 
the Tx DMA descriptors array */ - struct mvpp2_tx_desc *descs; - - /* DMA address of the Tx DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last Tx DMA descriptor */ - int last_desc; - - /* Index of the next Tx DMA descriptor to process */ - int next_desc_to_proc; -}; - -struct mvpp2_rx_queue { - /* RX queue number, in the range 0-31 for physical RXQs */ - u8 id; - - /* Num of rx descriptors in the rx descriptor ring */ - int size; - - u32 pkts_coal; - u32 time_coal; - - /* Virtual address of the RX DMA descriptors array */ - struct mvpp2_rx_desc *descs; - - /* DMA address of the RX DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last RX DMA descriptor */ - int last_desc; - - /* Index of the next RX DMA descriptor to process */ - int next_desc_to_proc; - - /* ID of port to which physical RXQ is mapped */ - int port; - - /* Port's logic RXQ number to which physical RXQ is mapped */ - int logic_rxq; -}; - -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - -struct mvpp2_prs_entry { - u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; -}; - -struct mvpp2_prs_shadow { - bool valid; - bool finish; - - /* Lookup ID */ - int lu; - - /* User defined offset */ - int udf; - - /* Result info */ - u32 ri; - u32 ri_mask; -}; - -struct mvpp2_cls_flow_entry { - u32 index; - u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; -}; - -struct mvpp2_cls_lookup_entry { - u32 lkpid; - u32 way; - u32 data; -}; - -struct mvpp2_bm_pool { - /* Pool number in the range 0-7 */ - int id; - - /* Buffer Pointers Pool External (BPPE) size */ - int size; - /* BPPE size in bytes */ - int size_bytes; - /* Number of buffers for this pool */ - int buf_num; - /* Pool buffer size */ - int buf_size; - /* Packet size */ - int pkt_size; - int frag_size; - - /* BPPE virtual base address */ - u32 *virt_addr; - /* BPPE DMA base address */ - dma_addr_t dma_addr; - - /* Ports using BM pool */ - u32 port_map; -}; - -#define IS_TSO_HEADER(txq_pcpu, addr) \ - ((addr) >= (txq_pcpu)->tso_headers_dma && \ - (addr) < (txq_pcpu)->tso_headers_dma + \ - (txq_pcpu)->size * TSO_HEADER_SIZE) - /* The prototype is added here to be used in start_dev when using ACPI. This * will be removed once phylink is used for all modes (dt+ACPI). 
*/ @@ -1368,22 +71,19 @@ static int queue_mode = MVPP2_QDIST_SINGLE_MODE; module_param(queue_mode, int, 0444); MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); -#define MVPP2_DRIVER_NAME "mvpp2" -#define MVPP2_DRIVER_VERSION "1.0" - /* Utility/helper methods */ -static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { writel(data, priv->swth_base[0] + offset); } -static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) +u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { return readl(priv->swth_base[0] + offset); } -static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } @@ -1423,19 +123,19 @@ static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ -static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data) { writel(data, priv->swth_base[cpu] + offset); } -static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset) { return readl(priv->swth_base[cpu] + offset); } -static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, u32 data) { writel_relaxed(data, priv->swth_base[cpu] + offset); @@ -1596,2551 +296,6 @@ static inline int mvpp2_txq_phys(int port, int txq) return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; } -/* Parser configuration routines */ - -/* Update parser tcam and sram hw entries */ -static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) -{ - int i; - - if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); - - return 0; -} - -/* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) -{ - int i; - - if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - memset(pe, 0, sizeof(*pe)); - pe->index = tid; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, - MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) - return MVPP2_PRS_TCAM_ENTRY_INVALID; - - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); - - return 0; -} - -/* Invalidate tcam hw entry */ -static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) -{ - /* Write index - indirect access */ - 
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), - MVPP2_PRS_TCAM_INV_MASK); -} - -/* Enable shadow table entry and set its lookup ID */ -static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) -{ - priv->prs_shadow[index].valid = true; - priv->prs_shadow[index].lu = lu; -} - -/* Update ri fields in shadow table entry */ -static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, - unsigned int ri, unsigned int ri_mask) -{ - priv->prs_shadow[index].ri_mask = ri_mask; - priv->prs_shadow[index].ri = ri; -} - -/* Update lookup field in tcam sw entry */ -static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; -} - -/* Update mask for single port in tcam sw entry */ -static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, - unsigned int port, bool add) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); - else - pe->tcam.byte[enable_off] |= 1 << port; -} - -/* Update port map in tcam sw entry */ -static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, - unsigned int ports) -{ - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; -} - -/* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; -} - -/* Set byte of data and its enable bits in tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char byte, - unsigned char enable) -{ - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; -} - -/* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) -{ - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; -} - -/* Compare tcam data bytes with a pattern */ -static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, - u16 data) -{ - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); - u16 tcam_data; - - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; -} - -/* Update ai bits in tcam sw entry */ -static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int enable) -{ - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; - - for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { - if (!(enable & BIT(i))) - continue; - - if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; - else - pe->tcam.byte[ai_idx] &= ~(1 << i); - } - - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; -} - -/* Get ai bits from tcam sw entry */ -static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) -{ - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; -} - -/* Set ethertype in tcam sw entry */ -static void 
mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, - unsigned short ethertype) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); -} - -/* Set vid in tcam sw entry */ -static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, - unsigned short vid) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); -} - -/* Set bits in sram sw entry */ -static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); -} - -/* Clear bits in sram sw entry */ -static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); -} - -/* Update ri bits in sram sw entry */ -static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - - for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - - if (!(mask & BIT(i))) - continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); - } -} - -/* Obtain ri bits from sram sw entry */ -static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) -{ - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; -} - -/* Update ai bits in sram sw entry */ -static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; - - for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { - if (!(mask & BIT(i))) - continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); - } -} - -/* Read ai bits from sram sw entry */ -static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) -{ - u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; - - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); - - return bits; -} - -/* In sram sw entry set lookup ID field of the tcam key to be used in the next - * lookup iteration - */ -static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, - unsigned int lu) -{ - int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; - - mvpp2_prs_sram_bits_clear(pe, sram_next_off, - MVPP2_PRS_SRAM_NEXT_LU_MASK); - mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); -} - -/* In the sram sw entry set sign and value of the next lookup offset - * and the offset value generated to the classifier - */ -static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, - unsigned int op) -{ - /* Set sign */ - if (shift < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - shift = 0 - shift; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - } - - /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; - - /* Reset and set operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); - mvpp2_prs_sram_bits_set(pe, 
MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -} - -/* In the sram sw entry set sign and value of the user defined offset - * generated to the classifier - */ -static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, - unsigned int type, int offset, - unsigned int op) -{ - /* Set sign */ - if (offset < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - offset = 0 - offset; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - } - - /* Set value */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, - MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - - /* Set offset type */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, - MVPP2_PRS_SRAM_UDF_TYPE_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); - - /* Set offset operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, - MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -} - -/* Find parser flow entry */ -static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_FLOWS */ - for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { - u8 bits; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - bits = mvpp2_prs_sram_ai_get(&pe); - - /* Sram store classification lookup ID in AI bits [5:0] */ - if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) - return tid; - } - - return -ENOENT; -} - -/* Return first free tcam index, seeking from start to end */ -static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, - unsigned char end) -{ - int tid; - - if (start > end) - swap(start, end); - - if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) - end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; - - for (tid = start; tid <= end; tid++) { - if (!priv->prs_shadow[tid].valid) - return tid; - } - - return -EINVAL; -} - -/* Enable/disable dropping all mac da's */ -static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) -{ - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = MVPP2_PE_DROP_ALL; - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - 
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set port to unicast or multicast promiscuous mode */ -static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) -{ - struct mvpp2_prs_entry pe; - unsigned char cast_match; - unsigned int ri; - int tid; - - if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { - cast_match = MVPP2_PRS_UCAST_VAL; - tid = MVPP2_PE_MAC_UC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_UCAST; - } else { - cast_match = MVPP2_PRS_MCAST_VAL; - tid = MVPP2_PE_MAC_MC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_MCAST; - } - - /* promiscuous mode - Accept unknown unicast or multicast packets */ - if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = tid; - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); - - /* Match UC or MC addresses */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, - MVPP2_PRS_CAST_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set entry for dsa packets */ -static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, - bool tagged, bool extend) -{ - struct mvpp2_prs_entry pe; - int tid, shift; - - if (extend) { - tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; - shift = 8; - } else { - tid = tagged ? 
MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - - /* Set ai bits for next iteration */ - if (extend) - mvpp2_prs_sram_ai_update(&pe, 1, - MVPP2_PRS_SRAM_AI_MASK); - else - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - - /* Set result info bits to 'single vlan' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, - MVPP2_PRS_RI_VLAN_MASK); - /* If packet is tagged continue check vid filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - } else { - /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ - mvpp2_prs_sram_shift_set(&pe, shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set entry for dsa ethertype */ -static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, - bool add, bool tagged, bool extend) -{ - struct mvpp2_prs_entry pe; - int tid, shift, port_mask; - - if (extend) { - tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : - MVPP2_PE_ETYPE_EDSA_UNTAGGED; - port_mask = 0; - shift = 8; - } else { - tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : - MVPP2_PE_ETYPE_DSA_UNTAGGED; - port_mask = MVPP2_PRS_PORT_MASK; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Set ethertype */ - mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); - mvpp2_prs_match_etype(&pe, 2, 0); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, - MVPP2_PRS_RI_DSA_MASK); - /* Shift ethertype + 2 byte reserved + tag*/ - mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, - MVPP2_ETH_TYPE_LEN + 2 + 3, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - /* If packet is tagged continue check vlans */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - } else { - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - /* Mask/unmask all ports, depending on dsa type */ - mvpp2_prs_tcam_port_map_set(&pe, port_mask); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Search for existing single/triple vlan entry */ -static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_bits, ai_bits; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); - if (!match) - continue; - - /* Get vlan type */ - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - - /* Get current ai value from tcam */ - ai_bits = mvpp2_prs_tcam_ai_get(&pe); - /* Clear double vlan bit */ - ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; - - if (ai != ai_bits) - continue; - - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - return tid; - } - - return -ENOENT; -} - -/* Add/update single/triple vlan entry */ -static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, - unsigned int port_map) -{ - struct mvpp2_prs_entry pe; - int tid_aux, tid; - int ret = 0; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_vlan_find(priv, tpid, ai); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - /* Get last double vlan tid */ - for (tid_aux = MVPP2_PE_LAST_FREE_TID; - tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == - MVPP2_PRS_RI_VLAN_DOUBLE) - break; - } - - if (tid <= tid_aux) - return -EINVAL; - - 
memset(&pe, 0, sizeof(pe)); - pe.index = tid; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - mvpp2_prs_match_etype(&pe, 0, tpid); - - /* VLAN tag detected, proceed with VID filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, - MVPP2_PRS_RI_VLAN_MASK); - } else { - ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, - MVPP2_PRS_RI_VLAN_MASK); - } - mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* Get first free double vlan ai number */ -static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) -{ - int i; - - for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { - if (!priv->prs_double_vlans[i]) - return i; - } - - return -EINVAL; -} - -/* Search for existing double vlan entry */ -static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_mask; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); - - if (!match) - continue; - - ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; - if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) - return tid; - } - - return -ENOENT; -} - -/* Add or update double vlan entry */ -static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2, - unsigned int port_map) -{ - int tid_aux, tid, ai, ret = 0; - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - /* Set ai value for new double vlan entry */ - ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; - - /* Get first single/triple vlan tid */ - for (tid_aux = MVPP2_PE_FIRST_FREE_TID; - tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - break; - } - - if (tid >= tid_aux) - return -ERANGE; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = tid; - - priv->prs_double_vlans[ai] = true; - - mvpp2_prs_match_etype(&pe, 0, tpid1); - mvpp2_prs_match_etype(&pe, 4, tpid2); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - /* Shift 4 bytes - skip outer vlan tag */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - 
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* IPv4 header parsing for fragmentation and L4 offset */ -static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_IGMP)) - return -EINVAL; - - /* Not fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK_L); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - - mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, - ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv4 L3 multicast or broadcast */ -static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int mask, tid; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - switch (l3_cast) { - case MVPP2_PRS_L3_MULTI_CAST: - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, - MVPP2_PRS_IPV4_MC_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - case MVPP2_PRS_L3_BROAD_CAST: - mask = MVPP2_PRS_IPV4_BC_MASK; - mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, 
mask); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - default: - return -EINVAL; - } - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for protocols over IPv6 */ -static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 6, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Write HW */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv6 L3 multicast entry */ -static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int tid; - - if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPv6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, - MVPP2_PRS_IPV6_MC_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Parser per-port initialization */ -static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, - int lu_max, int offset) -{ - u32 val; - - /* Set lookup ID */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); - val &= ~MVPP2_PRS_PORT_LU_MASK(port); - val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); - mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); - - /* Set maximum number of loops for packet received from port */ - val = 
mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); - val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); - val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); - mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); - - /* Set initial offset for packet header extraction for the first - * searching loop - */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); - val &= ~MVPP2_PRS_INIT_OFF_MASK(port); - val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); - mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); -} - -/* Default flow entries initialization for all ports */ -static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int port; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Set flow ID*/ - mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_hw_write(priv, &pe); - } -} - -/* Set default entry for Marvell Header field */ -static void mvpp2_prs_mh_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - pe.index = MVPP2_PE_MH_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); - mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set default entries (place holder) for promiscuous, non-promiscuous and - * multicast MAC addresses - */ -static void mvpp2_prs_mac_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - /* Create dummy entries for drop all and promiscuous modes */ - mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); -} - -/* Set default entries for various types of dsa packets */ -static void mvpp2_prs_dsa_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - /* None tagged EDSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_EDSA); - - /* Tagged EDSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* None tagged DSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_DSA); - - /* Tagged DSA entry - place holder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* None tagged EDSA ethertype entry - place holder*/ - 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - - /* Tagged EDSA ethertype entry - place holder*/ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* None tagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - - /* Tagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* Set default entry, in case DSA or EDSA tag not found */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = MVPP2_PE_DSA_DEFAULT; - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - /* Shift 0 bytes */ - mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Clear all sram ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Initialize parser entries for VID filtering */ -static void mvpp2_prs_vid_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 4 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vid entry for extended DSA*/ - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, - MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Match basic ethertypes */ -static int mvpp2_prs_etype_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Ethertype: PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); - - mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: ARP */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: LBTD */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); 
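Every parser entry removed in this block (and moved into mvpp2_prs.c by the patch) is built the same way: reserve a free TCAM index, fill a struct mvpp2_prs_entry with TCAM match data and SRAM actions through the helpers defined above, mirror the entry in the shadow table, then program the hardware. The following is a minimal illustrative sketch of that pattern, not part of the patch itself; the function name and the ethertype argument are hypothetical, while the helpers, constants and struct mvpp2_prs_entry are the ones defined in the removed code above.

/* Illustrative only: install one L2 ethertype match that terminates the
 * lookup chain and generates a flow.  The ethertype value is a stand-in.
 */
static int mvpp2_prs_example_etype_add(struct mvpp2 *priv, u16 ethertype)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Reserve a free TCAM index in the dynamic range */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	/* Match the two ethertype bytes */
	mvpp2_prs_match_etype(&pe, 0, ethertype);

	/* Terminate parsing: generate the flow ID in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Tell the classifier where L3 starts */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match on every port */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Mirror the entry in the shadow table and write it to hardware */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

The real entries in this file differ from the sketch mainly in which result-info bits, header shifts and AI bits they program before the final mvpp2_prs_hw_write().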
- mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD, - MVPP2_PRS_IPV4_HEAD_MASK); - - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv6 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); - - /* Skip DIP of IPV6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + - MVPP2_MAX_L3_ADDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = MVPP2_PE_ETH_TYPE_UN; - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset even it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Configure vlan entries and detect up to 2 successive VLAN tags. 
- * Possible options: - * 0x8100, 0x88A8 - * 0x8100, 0x8100 - * 0x8100 - * 0x88A8 - */ -static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - - /* Double VLAN: 0x8100, 0x88A8 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Double VLAN: 0x8100, 0x8100 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x88a8 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x8100 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Set default double vlan entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_DBL; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear ai for next iterations */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_DBL_VLAN_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vlan none entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_NONE; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for PPPoE ethertype */ -static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* IPv4 over PPPoE with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IP); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv4 over PPPoE without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - /* Clear ri 
before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv6 over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IPv6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* Non-IP over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L3 offset even if it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv4 */ -static int mvpp2_prs_ip4_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - /* Set entries for TCP, UDP and IGMP over IPv4 */ - err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 Broadcast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); - if (err) - return err; - - /* IPv4 Multicast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Default IPv4 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_PROTO_UN; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* 
Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv4 entry for unicast address */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_ADDR_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv6 */ -static int mvpp2_prs_ip6_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid, err; - - /* Set entries for TCP, UDP and ICMP over IPv6 */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, - MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, - MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ - /* Result Info: UDF7=1, DS lite */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, - MVPP2_PRS_RI_UDF7_IP6_LITE, - MVPP2_PRS_RI_UDF7_MASK); - if (err) - return err; - - /* IPv6 multicast */ - err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Entry for checking hop limit */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | - MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_L3_PROTO_MASK | - MVPP2_PRS_RI_DROP_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - /* Set L4 offset relatively to our current place */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown ext protocols */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, - MVPP2_PRS_IPV6_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unicast address */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_ADDR_UN; - - /* Finished: go to IPv6 again */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPV6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Find tcam entry with matched pair <vid,port> */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) -{ - unsigned char byte[2], enable[2]; - struct mvpp2_prs_entry pe; - u16 rvid, rmask; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); - mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); - - rvid = ((byte[0] & 0xf) << 8) + byte[1]; - rmask = ((enable[0] & 0xf) << 8) + enable[1]; - - if (rvid != vid || rmask != mask) - continue; - - return tid; - } - - return -ENOENT; -} - -/* Write parser entry for VID filtering */ -static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) -{ - unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + - port->id * MVPP2_PRS_VLAN_FILT_MAX; - unsigned int mask = 0xfff, reg_val, shift; - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - /* No such entry */ - if (tid < 0) { - - /* Go through all entries from first to last in vlan range */ - tid = mvpp2_prs_tcam_first_free(priv, vid_start, - vid_start + - MVPP2_PRS_VLAN_FILT_MAX_ENTRY); - - /* There isn't room for a new VID filter */ - if (tid < 0) - return 
tid; - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Enable the current port */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set match on VID */ - mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Write parser entry for VID filtering */ -static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) -{ - struct mvpp2 *priv = port->priv; - int tid; - - /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); - - /* No such entry */ - if (tid < 0) - return; - - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; -} - -/* Remove all existing VID filters on this port */ -static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int tid; - - for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); - tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); - } -} - -/* Remove VID filering entry for this port */ -static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - - /* Invalidate the guard entry */ - mvpp2_prs_hw_inv(priv, tid); - - priv->prs_shadow[tid].valid = false; -} - -/* Add guard entry that drops packets when no VID is matched on this port */ -static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - unsigned int reg_val, shift; - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[tid].valid) - return; - - memset(&pe, 0, sizeof(pe)); - - pe.index = tid; - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Drop VLAN packets that don't belong to any VIDs on this port */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Parser default initialization */ -static int mvpp2_prs_default_init(struct platform_device *pdev, - struct mvpp2 *priv) -{ - int err, index, i; - - /* Enable tcam table */ - mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); - - /* Clear 
all tcam and sram entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); - - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); - } - - /* Invalidate all tcam entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) - mvpp2_prs_hw_inv(priv, index); - - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - - /* Always start from lookup = 0 */ - for (index = 0; index < MVPP2_MAX_PORTS; index++) - mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, - MVPP2_PRS_PORT_LU_MAX, 0); - - mvpp2_prs_def_flow_init(priv); - - mvpp2_prs_mh_init(priv); - - mvpp2_prs_mac_init(priv); - - mvpp2_prs_dsa_init(priv); - - mvpp2_prs_vid_init(priv); - - err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; - - return 0; -} - -/* Compare MAC DA with tcam entry data */ -static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, - const u8 *da, unsigned char *mask) -{ - unsigned char tcam_byte, tcam_mask; - int index; - - for (index = 0; index < ETH_ALEN; index++) { - mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); - if (tcam_mask != mask[index]) - return false; - - if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) - return false; - } - - return true; -} - -/* Find tcam entry with matched pair <MAC DA, port> */ -static int -mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, - unsigned char *mask, int udf_type) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entires with MVPP2_PRS_LU_MAC */ - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned int entry_pmap; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != udf_type)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); - - if (mvpp2_prs_mac_range_equals(&pe, da, mask) && - entry_pmap == pmap) - return tid; - } - - return -ENOENT; -} - -/* Update parser's mac da entry */ -static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, - bool add) -{ - unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - struct mvpp2 *priv = port->priv; - unsigned int pmap, len, ri; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ - tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, - MVPP2_PRS_UDF_MAC_DEF); - - /* No such entry */ - if (tid < 0) { - if (!add) - return 0; - - /* Create new TCAM entry */ - /* Go through the all entries from first to last */ - tid = mvpp2_prs_tcam_first_free(priv, - MVPP2_PE_MAC_RANGE_START, - MVPP2_PE_MAC_RANGE_END); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Update 
port mask */ - mvpp2_prs_tcam_port_set(&pe, port->id, add); - - /* Invalidate the entry if no ports are left enabled */ - pmap = mvpp2_prs_tcam_port_map_get(&pe); - if (pmap == 0) { - if (add) - return -EINVAL; - - mvpp2_prs_hw_inv(priv, pe.index); - priv->prs_shadow[pe.index].valid = false; - return 0; - } - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set match on DA */ - len = ETH_ALEN; - while (len--) - mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); - - /* Set result info bits */ - if (is_broadcast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_BCAST; - } else if (is_multicast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_MCAST; - } else { - ri = MVPP2_PRS_RI_L2_UCAST; - - if (ether_addr_equal(da, port->dev->dev_addr)) - ri |= MVPP2_PRS_RI_MAC_ME_MASK; - } - - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table and hw entry */ - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) -{ - struct mvpp2_port *port = netdev_priv(dev); - int err; - - /* Remove old parser entry */ - err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); - if (err) - return err; - - /* Add new parser entry */ - err = mvpp2_prs_mac_da_accept(port, da, true); - if (err) - return err; - - /* Set addr in the device */ - ether_addr_copy(dev->dev_addr, da); - - return 0; -} - -static void mvpp2_prs_mac_del_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - unsigned long pmap; - int index, tid; - - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - pmap = mvpp2_prs_tcam_port_map_get(&pe); - - /* We only want entries active on this port */ - if (!test_bit(port->id, &pmap)) - continue; - - /* Read mac addr from entry */ - for (index = 0; index < ETH_ALEN; index++) - mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], - &da_mask[index]); - - /* Special cases : Don't remove broadcast and port's own - * address - */ - if (is_broadcast_ether_addr(da) || - ether_addr_equal(da, port->dev->dev_addr)) - continue; - - /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); - } -} - -static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) -{ - switch (type) { - case MVPP2_TAG_TYPE_EDSA: - /* Add port to EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - /* Remove port from DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - break; - - case MVPP2_TAG_TYPE_DSA: - /* Add port to DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - 
MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - /* Remove port from EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - case MVPP2_TAG_TYPE_MH: - case MVPP2_TAG_TYPE_NONE: - /* Remove port form EDSA and DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - default: - if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) - return -EINVAL; - } - - return 0; -} - -/* Set prs flow for the port */ -static int mvpp2_prs_def_flow(struct mvpp2_port *port) -{ - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_flow_find(port->priv, port->id); - - /* Such entry not exist */ - if (tid < 0) { - /* Go through the all entires from last to first */ - tid = mvpp2_prs_tcam_first_free(port->priv, - MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Set flow ID*/ - mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table */ - mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); - } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); - mvpp2_prs_hw_write(port->priv, &pe); - - return 0; -} - -/* Classifier configuration routines */ - -/* Update classification flow table registers */ -static void mvpp2_cls_flow_write(struct mvpp2 *priv, - struct mvpp2_cls_flow_entry *fe) -{ - mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); -} - -/* Update classification lookup table register */ -static void mvpp2_cls_lookup_write(struct mvpp2 *priv, - struct mvpp2_cls_lookup_entry *le) -{ - u32 val; - - val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; - mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); - mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); -} - -/* Classifier default initialization */ -static void mvpp2_cls_init(struct mvpp2 *priv) -{ - struct mvpp2_cls_lookup_entry le; - struct mvpp2_cls_flow_entry fe; - int index; - - /* Enable classifier */ - mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); - - /* Clear classifier flow table */ - memset(&fe.data, 0, sizeof(fe.data)); - for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { - fe.index = index; - mvpp2_cls_flow_write(priv, &fe); - } - - /* Clear classifier lookup table */ - le.data = 0; - for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { - le.lkpid = index; - le.way = 0; - mvpp2_cls_lookup_write(priv, &le); - - le.way = 1; - mvpp2_cls_lookup_write(priv, &le); - } -} - -static void mvpp2_cls_port_config(struct mvpp2_port *port) -{ - struct mvpp2_cls_lookup_entry le; - u32 val; - - /* Set way for the port */ - val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); - val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); - - /* Pick the entry to be accessed in lookup ID 
decoding table - * according to the way and lkpid. - */ - le.lkpid = port->id; - le.way = 0; - le.data = 0; - - /* Set initial CPU queue for receiving packets */ - le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; - le.data |= port->first_rxq; - - /* Disable classification engines */ - le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; - - /* Update lookup ID table entry */ - mvpp2_cls_lookup_write(port->priv, &le); -} - -/* Set CPU queue number for oversize packets */ -static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) -{ - u32 val; - - mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), - port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); - - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), - (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); - - val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); -} - static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) { if (likely(pool->frag_size <= PAGE_SIZE)) @@ -7118,39 +3273,6 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } -static void mvpp22_init_rss(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int i; - - /* Set the table width: replace the whole classifier Rx queue number - * with the ones configured in RSS table entries. - */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); - mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. - */ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } - - /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. - */ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); - - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } - -} - static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c new file mode 100644 index 000000000000..6bb69f086794 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -0,0 +1,2467 @@ +/* + * Header Parser helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <uapi/linux/ppp_defs.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +/* Update parser tcam and sram hw entries */ +static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Clear entry invalidation bit */ + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + + return 0; +} + +/* Initialize tcam entry from hw */ +static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) +{ + int i; + + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); + if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + return MVPP2_PRS_TCAM_ENTRY_INVALID; + + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + + return 0; +} + +/* Invalidate tcam hw entry */ +static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) +{ + /* Write index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), + MVPP2_PRS_TCAM_INV_MASK); +} + +/* Enable shadow table entry and set its lookup ID */ +static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) +{ + priv->prs_shadow[index].valid = true; + priv->prs_shadow[index].lu = lu; +} + +/* Update ri fields in shadow table entry */ +static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, + unsigned int ri, unsigned int ri_mask) +{ + priv->prs_shadow[index].ri_mask = ri_mask; + priv->prs_shadow[index].ri = ri; +} + +/* Update lookup field in tcam sw entry */ +static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; + pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; +} + +/* Update mask for single port in tcam sw entry */ +static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, + unsigned int port, bool add) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + if (add) + pe->tcam.byte[enable_off] &= ~(1 << port); + else + pe->tcam.byte[enable_off] |= 1 << port; +} + +/* Update port map in tcam sw entry */ +static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, + unsigned int ports) +{ + unsigned char port_mask = MVPP2_PRS_PORT_MASK; + int enable_off = 
MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; + pe->tcam.byte[enable_off] &= ~port_mask; + pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; +} + +/* Obtain port map from tcam sw entry */ +static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; +} + +/* Set byte of data and its enable bits in tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char byte, + unsigned char enable) +{ + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; +} + +/* Get byte of data and its enable bits from tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) +{ + *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; + *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; +} + +/* Compare tcam data bytes with a pattern */ +static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, + u16 data) +{ + int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); + u16 tcam_data; + + tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; + if (tcam_data != data) + return false; + return true; +} + +/* Update ai bits in tcam sw entry */ +static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int enable) +{ + int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + + for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { + if (!(enable & BIT(i))) + continue; + + if (bits & BIT(i)) + pe->tcam.byte[ai_idx] |= 1 << i; + else + pe->tcam.byte[ai_idx] &= ~(1 << i); + } + + pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; +} + +/* Get ai bits from tcam sw entry */ +static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) +{ + return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; +} + +/* Set ethertype in tcam sw entry */ +static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, + unsigned short ethertype) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); +} + +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + +/* Set bits in sram sw entry */ +static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); +} + +/* Clear bits in sram sw entry */ +static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); +} + +/* Update ri bits in sram sw entry */ +static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + + for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { + int ri_off = MVPP2_PRS_SRAM_RI_OFFS; + + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + else + mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); + } +} + +/* Obtain ri bits from sram sw 
entry */ +static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) +{ + return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; +} + +/* Update ai bits in sram sw entry */ +static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + int ai_off = MVPP2_PRS_SRAM_AI_OFFS; + + for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + else + mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); + } +} + +/* Read ai bits from sram sw entry */ +static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) +{ + u8 bits; + int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); + int ai_en_off = ai_off + 1; + int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + + bits = (pe->sram.byte[ai_off] >> ai_shift) | + (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + + return bits; +} + +/* In sram sw entry set lookup ID field of the tcam key to be used in the next + * lookup interation + */ +static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, + unsigned int lu) +{ + int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; + + mvpp2_prs_sram_bits_clear(pe, sram_next_off, + MVPP2_PRS_SRAM_NEXT_LU_MASK); + mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); +} + +/* In the sram sw entry set sign and value of the next lookup offset + * and the offset value generated to the classifier + */ +static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, + unsigned int op) +{ + /* Set sign */ + if (shift < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + shift = 0 - shift; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + } + + /* Set value */ + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = + (unsigned char)shift; + + /* Reset and set operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* In the sram sw entry set sign and value of the user defined offset + * generated to the classifier + */ +static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, + unsigned int type, int offset, + unsigned int op) +{ + /* Set sign */ + if (offset < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + offset = 0 - offset; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + } + + /* Set value */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, + MVPP2_PRS_SRAM_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] |= + (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + + /* Set offset type */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, + MVPP2_PRS_SRAM_UDF_TYPE_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); + + /* Set offset operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); + + 
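
[Annotation, not part of the patch] mvpp2_prs_sram_offset_set() writes the UDF offset and its operation-select field at bit positions that are not byte aligned, so the statements just above and just below this point mask and re-OR the following SRAM byte by hand to catch the part of each field that spills past a byte boundary. A generic sketch of the same idea, assuming nothing from the driver (set_bit_field is a hypothetical helper, not a mvpp2 function):

/* Write an n-bit value at an arbitrary bit offset into a byte array,
 * LSB-first within each byte, spilling into later bytes as needed. */
#include <stdint.h>

static void set_bit_field(uint8_t *buf, unsigned int bit_off,
			  unsigned int width, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < width; i++) {
		unsigned int byte = (bit_off + i) / 8;
		unsigned int bit  = (bit_off + i) % 8;

		if (val & (1u << i))
			buf[byte] |= (uint8_t)(1u << bit);
		else
			buf[byte] &= (uint8_t)~(1u << bit);
	}
}

With such a helper the spill-over handling would be implicit; the open-coded form in the driver presumably avoids a per-bit loop, at the cost of the explicit mask/OR pairs seen here.
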
pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> + (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= + (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* Find parser flow entry */ +static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ + for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { + u8 bits; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + bits = mvpp2_prs_sram_ai_get(&pe); + + /* Sram store classification lookup ID in AI bits [5:0] */ + if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) + return tid; + } + + return -ENOENT; +} + +/* Return first free tcam index, seeking from start to end */ +static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, + unsigned char end) +{ + int tid; + + if (start > end) + swap(start, end); + + if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) + end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; + + for (tid = start; tid <= end; tid++) { + if (!priv->prs_shadow[tid].valid) + return tid; + } + + return -EINVAL; +} + +/* Enable/disable dropping all mac da's */ +static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) +{ + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = MVPP2_PE_DROP_ALL; + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set port to unicast or multicast promiscuous mode */ +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + struct mvpp2_prs_entry pe; + unsigned char cast_match; + unsigned int ri; + int tid; + + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { + cast_match = MVPP2_PRS_UCAST_VAL; + tid = MVPP2_PE_MAC_UC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_UCAST; + } else { + cast_match = MVPP2_PRS_MCAST_VAL; + tid = MVPP2_PE_MAC_MC_PROMISCUOUS; + ri = MVPP2_PRS_RI_L2_MCAST; + } + + /* promiscuous mode - Accept unknown unicast or multicast packets */ + if (priv->prs_shadow[tid].valid) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = tid; + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set result info bits */ + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); + + /* Match UC or MC addresses */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, + 
MVPP2_PRS_CAST_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa packets */ +static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, + bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift; + + if (extend) { + tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; + shift = 8; + } else { + tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + + /* Set ai bits for next iteration */ + if (extend) + mvpp2_prs_sram_ai_update(&pe, 1, + MVPP2_PRS_SRAM_AI_MASK); + else + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + + /* Set result info bits to 'single vlan' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + /* If packet is tagged continue check vid filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + } else { + /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa ethertype */ +static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, + bool add, bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift, port_mask; + + if (extend) { + tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : + MVPP2_PE_ETYPE_EDSA_UNTAGGED; + port_mask = 0; + shift = 8; + } else { + tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : + MVPP2_PE_ETYPE_DSA_UNTAGGED; + port_mask = MVPP2_PRS_PORT_MASK; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + mvpp2_prs_init_from_hw(priv, &pe, tid); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Set ethertype */ + mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); + mvpp2_prs_match_etype(&pe, 2, 0); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, + MVPP2_PRS_RI_DSA_MASK); + /* Shift ethertype + 2 byte reserved + tag*/ + mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, + MVPP2_ETH_TYPE_LEN + 2 + 3, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + /* If packet is tagged continue check vlans */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + } else { + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + /* Mask/unmask all ports, depending on dsa type */ + mvpp2_prs_tcam_port_map_set(&pe, port_mask); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Search for existing single/triple vlan entry */ +static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_bits, ai_bits; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + if (!match) + continue; + + /* Get vlan type */ + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + + /* Get current ai value from tcam */ + ai_bits = mvpp2_prs_tcam_ai_get(&pe); + /* Clear double vlan bit */ + ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; + + if (ai != ai_bits) + continue; + + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + return tid; + } + + return -ENOENT; +} + +/* Add/update single/triple vlan entry */ +static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, + unsigned int port_map) +{ + struct mvpp2_prs_entry pe; + int tid_aux, tid; + int ret = 0; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_vlan_find(priv, tpid, ai); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + /* Get last double vlan tid */ + for (tid_aux = MVPP2_PE_LAST_FREE_TID; + tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == + MVPP2_PRS_RI_VLAN_DOUBLE) + break; + } + + if (tid <= tid_aux) + return -EINVAL; + + 
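
[Annotation, not part of the patch] The "tid <= tid_aux" test just above is the ordering rule for VLAN entries: a new single/triple-VLAN entry must land at a higher TCAM index than every double-VLAN entry already installed, so that the more specific double-tag entries take precedence when both could hit (the assumption here, consistent with the catch-all entries living at the top of the table, is that the lower index wins on overlapping matches). A toy model of that priority rule, using hypothetical types rather than driver structures:

/* First-match lookup where the lowest index has priority. */
#include <stdbool.h>
#include <stddef.h>

struct toy_entry {
	bool valid;
	bool (*match)(const unsigned char *pkt);
};

static int toy_tcam_lookup(const struct toy_entry *tbl, size_t n,
			   const unsigned char *pkt)
{
	size_t i;

	for (i = 0; i < n; i++)	/* lowest index wins */
		if (tbl[i].valid && tbl[i].match(pkt))
			return (int)i;
	return -1;		/* no entry matched */
}
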
memset(&pe, 0, sizeof(pe)); + pe.index = tid; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + mvpp2_prs_match_etype(&pe, 0, tpid); + + /* VLAN tag detected, proceed with VID filtering */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + } else { + ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, + MVPP2_PRS_RI_VLAN_MASK); + } + mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* Get first free double vlan ai number */ +static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) +{ + int i; + + for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { + if (!priv->prs_double_vlans[i]) + return i; + } + + return -EINVAL; +} + +/* Search for existing double vlan entry */ +static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_mask; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && + mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + + if (!match) + continue; + + ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; + if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) + return tid; + } + + return -ENOENT; +} + +/* Add or update double vlan entry */ +static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2, + unsigned int port_map) +{ + int tid_aux, tid, ai, ret = 0; + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + + if (tid < 0) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + /* Set ai value for new double vlan entry */ + ai = mvpp2_prs_double_vlan_ai_free_get(priv); + if (ai < 0) + return ai; + + /* Get first single/triple vlan tid */ + for (tid_aux = MVPP2_PE_FIRST_FREE_TID; + tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + ri_bits = mvpp2_prs_sram_ri_get(&pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + break; + } + + if (tid >= tid_aux) + return -ERANGE; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = tid; + + priv->prs_double_vlans[ai] = true; + + mvpp2_prs_match_etype(&pe, 0, tpid1); + mvpp2_prs_match_etype(&pe, 4, tpid2); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + /* Shift 4 bytes - skip outer vlan tag */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + 
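
[Annotation, not part of the patch] The double-VLAN entry built here keys on two TPIDs four bytes apart: the outer tag's ethertype at the current parse offset (mvpp2_prs_match_etype at offset 0) and the inner tag's ethertype one full VLAN tag later (offset 4), and the shift just above moves the parse position past the outer tag before the VLAN lookup runs again. A plain-C sketch of that layout check, independent of the driver:

/* 'p' points at the first TPID, i.e. just past the MAC addresses. */
#include <stdbool.h>
#include <stdint.h>

#define VLAN_TAG_LEN 4	/* TPID (2 bytes) + TCI (2 bytes) */

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* True for stacked tags such as 0x88a8/0x8100 or 0x8100/0x8100. */
static bool is_double_tagged(const uint8_t *p, uint16_t tpid1, uint16_t tpid2)
{
	return get_be16(p) == tpid1 &&
	       get_be16(p + VLAN_TAG_LEN) == tpid2;
}
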
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* IPv4 header parsing for fragmentation and L4 offset */ +static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_IGMP)) + return -EINVAL; + + /* Not fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv4 L3 multicast or broadcast */ +static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int mask, tid; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + switch (l3_cast) { + case MVPP2_PRS_L3_MULTI_CAST: + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, + MVPP2_PRS_IPV4_MC_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + case MVPP2_PRS_L3_BROAD_CAST: + mask = MVPP2_PRS_IPV4_BC_MASK; + mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, 
mask); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + default: + return -EINVAL; + } + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for protocols over IPv6 */ +static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 6, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Write HW */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv6 L3 multicast entry */ +static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int tid; + + if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPv6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, + MVPP2_PRS_IPV6_MC_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser per-port initialization */ +static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, + int lu_max, int offset) +{ + u32 val; + + /* Set lookup ID */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); + val &= ~MVPP2_PRS_PORT_LU_MASK(port); + val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); + mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); + + /* Set maximum number of loops for packet received from port */ + val = 
mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); + val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); + val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); + mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); + + /* Set initial offset for packet header extraction for the first + * searching loop + */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); + val &= ~MVPP2_PRS_INIT_OFF_MASK(port); + val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); + mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); +} + +/* Default flow entries initialization for all ports */ +static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_hw_write(priv, &pe); + } +} + +/* Set default entry for Marvell Header field */ +static void mvpp2_prs_mh_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + pe.index = MVPP2_PE_MH_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); + mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set default entires (place holder) for promiscuous, non-promiscuous and + * multicast MAC addresses + */ +static void mvpp2_prs_mac_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + /* Create dummy entries for drop all and promiscuous modes */ + mvpp2_prs_mac_drop_all_set(priv, 0, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); +} + +/* Set default entries for various types of dsa packets */ +static void mvpp2_prs_dsa_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + /* None tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_EDSA); + + /* Tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_DSA); + + /* Tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* None tagged EDSA ethertype entry - place holder*/ + 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + + /* Tagged EDSA ethertype entry - place holder*/ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + + /* Tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Set default entry, in case DSA or EDSA tag not found */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = MVPP2_PE_DSA_DEFAULT; + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + /* Shift 0 bytes */ + mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Clear all sram ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA*/ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Match basic ethertypes */ +static int mvpp2_prs_etype_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Ethertype: PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); + + mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: ARP */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: LBTD */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); 
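
For reference, a minimal user-space sketch (not part of the patch) of the version/IHL match that this "IPv4 without options" entry and the "IPv4 with options" entry just below encode. The constants mirror MVPP2_PRS_IPV4_HEAD/MVPP2_PRS_IPV4_IHL and their masks defined in mvpp2_prs.h further down; tcam_byte_match() is a simplified stand-in for the hardware TCAM byte compare, not a driver function:

#include <stdio.h>

#define IPV4_HEAD	0x40	/* mirrors MVPP2_PRS_IPV4_HEAD */
#define IPV4_HEAD_MASK	0xf0	/* mirrors MVPP2_PRS_IPV4_HEAD_MASK */
#define IPV4_IHL	0x05	/* mirrors MVPP2_PRS_IPV4_IHL */
#define IPV4_IHL_MASK	0x0f	/* mirrors MVPP2_PRS_IPV4_IHL_MASK */

/* Simplified TCAM byte compare: hit when the masked byte equals the masked value */
static int tcam_byte_match(unsigned char byte, unsigned char value,
			   unsigned char mask)
{
	return (byte & mask) == (value & mask);
}

int main(void)
{
	/* First IP header byte: version in the high nibble, IHL in the low one */
	unsigned char samples[] = { 0x45, 0x46, 0x4f, 0x60 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++) {
		unsigned char b = samples[i];
		/* Stricter pattern: version == 4 and IHL == 5 (20-byte header) */
		int plain = tcam_byte_match(b, IPV4_HEAD | IPV4_IHL,
					    IPV4_HEAD_MASK | IPV4_IHL_MASK);
		/* Looser pattern: only the version nibble is checked */
		int opts = !plain && tcam_byte_match(b, IPV4_HEAD,
						     IPV4_HEAD_MASK);

		printf("0x%02x -> %s\n", b,
		       plain ? "IPv4 without options" :
		       opts ? "IPv4 with options" : "not IPv4");
	}
	return 0;
}

Running the sketch prints 0x45 as plain IPv4, 0x46 and 0x4f as IPv4 with options, and 0x60 as not IPv4. The stricter pattern corresponds to the entry above that reports MVPP2_PRS_RI_L3_IP4, while the version-only pattern corresponds to the entry programmed just below, which reports MVPP2_PRS_RI_L3_IP4_OPT.
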
+ mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Clear tcam data before updating */ + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD, + MVPP2_PRS_IPV4_HEAD_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv6 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); + + /* Skip DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = MVPP2_PE_ETH_TYPE_UN; + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset even it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Configure vlan entries and detect up to 2 successive VLAN tags. 
+ * Possible options: + * 0x8100, 0x88A8 + * 0x8100, 0x8100 + * 0x8100 + * 0x88A8 + */ +static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + /* Double VLAN: 0x8100, 0x88A8 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Double VLAN: 0x8100, 0x8100 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x88a8 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x8100 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Set default double vlan entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_DBL; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear ai for next iterations */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_DBL_VLAN_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vlan none entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_NONE; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for PPPoE ethertype */ +static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* IPv4 over PPPoE with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IP); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv4 over PPPoE without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri 
before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IPv6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static int mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* 
Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static int mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relatively to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Find tcam entry with matched pair <vid,port> */ +static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, + u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through the all entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PE_VID_FILT_RANGE_START; + tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <vid,port> already exist */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; + + 
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Remove parser entry for VID filtering */ +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this <vid,port> already exists */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) + mvpp2_prs_vid_entry_remove(port, tid); + } +} + +/* Remove VID filtering entry for this port */ +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Parser default initialization */ +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram entries */ + for (index = 
0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + mvpp2_prs_vid_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(pdev, priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const u8 *da, unsigned char *mask) +{ + unsigned char tcam_byte, tcam_mask; + int index; + + for (index = 0; index < ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return false; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return false; + } + + return true; +} + +/* Find tcam entry with matched pair <MAC DA, port> */ +static int +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, + unsigned char *mask, int udf_type) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through the all entires with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned int entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && + entry_pmap == pmap) + return tid; + } + + return -ENOENT; +} + +/* Update parser's mac da entry */ +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No such entry */ + if (tid < 0) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Go through the all entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, 
add); + + /* Invalidate the entry if no ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (pmap == 0) { + if (add) + return -EINVAL; + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + /* Set result info bits */ + if (is_broadcast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_BCAST; + } else if (is_multicast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_MCAST; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } + + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + /* Remove old parser entry */ + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); + if (err) + return err; + + /* Add new parser entry */ + err = mvpp2_prs_mac_da_accept(port, da, true); + if (err) + return err; + + /* Set addr in the device */ + ether_addr_copy(dev->dev_addr, da); + + return 0; +} + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + /* Special cases : Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); + } +} + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + /* Remove port from EDSA entries */ + 
mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port form EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return -EINVAL; + } + + return 0; +} + +/* Set prs flow for the port */ +int mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); + + /* Such entry not exist */ + if (tid < 0) { + /* Go through the all entires from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); + } else { + mvpp2_prs_init_from_hw(port->priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, &pe); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h new file mode 100644 index 000000000000..22fbbc4c8b28 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -0,0 +1,314 @@ +/* + * Header Parser definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#include <linux/kernel.h> +#include <linux/netdevice.h> + +#include "mvpp2.h" + +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + +/* Parser constants */ +#define MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
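+ * For example, per the byte macros below, header-data bytes are stored in + * pairs: TCAM bytes 4n and 4n+1 hold header bytes 2n and 2n+1, and TCAM + * bytes 4n+2 and 4n+3 hold their enable (mask) bytes - e.g. header bytes + * 2 and 3 sit at TCAM bytes 4 and 5, with enables at 6 and 7. This is the + * mapping MVPP2_PRS_TCAM_DATA_BYTE()/MVPP2_PRS_TCAM_DATA_BYTE_EN() compute.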
+ */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf +#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ + (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) +#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ + (((offs) * 2) - ((offs) % 2) + 2) +#define MVPP2_PRS_TCAM_AI_BYTE 16 +#define MVPP2_PRS_TCAM_PORT_BYTE 17 +#define MVPP2_PRS_TCAM_LU_BYTE 20 +#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) +#define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionnally we reserve one entry for the broadcast address, and one for + * each port's own address. + */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filter, not including loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +/* reserved */ +#define MVPP2_PE_MAC_MC_PROMISCUOUS 
(MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + +/* Sram structure + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). + */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define 
MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) +#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED true +#define MVPP2_PRS_UNTAGGED false +#define MVPP2_PRS_EDSA true +#define MVPP2_PRS_DSA false + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +union mvpp2_prs_tcam_entry { + u32 word[MVPP2_PRS_TCAM_WORDS]; + u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; +}; + +union mvpp2_prs_sram_entry { + u32 word[MVPP2_PRS_SRAM_WORDS]; + u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; +}; + +struct mvpp2_prs_entry { + u32 index; + union mvpp2_prs_tcam_entry tcam; + union mvpp2_prs_sram_entry sram; +}; + +struct mvpp2_prs_shadow { + bool valid; + bool finish; + + /* Lookup ID */ + int lu; + + /* User defined offset */ + int udf; + + /* Result info */ + u32 ri; + u32 ri_mask; +}; + +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); + +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); + +int mvpp2_prs_def_flow(struct mvpp2_port *port); + +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); + +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port); + +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port); + +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add); + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port); + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 685337d58276..5342bd8a3d0b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -43,12 +43,13 @@ #include "fw.h" /* - * We allocate in page size (default 4KB on many archs) chunks to avoid high - * order memory allocations in fragmented/high usage memory situation. + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. Note that the chunks are not necessarily in contiguous + * physical memory. 
*/ enum { - MLX4_ICM_ALLOC_SIZE = PAGE_SIZE, - MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE, + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18, }; static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) @@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, struct mlx4_icm *icm; struct mlx4_icm_chunk *chunk = NULL; int cur_order; + gfp_t mask; int ret; /* We use sg_set_buf for coherent allocs, which assumes low memory */ @@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (1 << cur_order > npages) --cur_order; + mask = gfp_mask; + if (cur_order) + mask &= ~__GFP_DIRECT_RECLAIM; + if (coherent) ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, &chunk->mem[chunk->npages], - cur_order, gfp_mask); + cur_order, mask); else ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], - cur_order, gfp_mask, + cur_order, mask, dev->numa_node); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 3ef3406ff4cb..10fcc22f4590 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -614,9 +614,9 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int index_at_dup_port = -1; for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))) index_at_port = i; - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))) index_at_dup_port = i; } /* check that same vlan is not in the tables at different indices */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index ee6684779d11..2545296a0c08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -91,6 +91,7 @@ config MLX5_EN_TLS bool "TLS cryptography-offload accelaration" depends on MLX5_CORE_EN depends on TLS_DEVICE + depends on TLS=y || MLX5_CORE=m depends on MLX5_ACCEL default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c5c7a6d687ff..eb9eb7aa953a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -101,18 +101,22 @@ struct page_pool; (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) +#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) +#define MLX5E_LOG_MAX_RX_WQE_BULK \ + (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) + #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) +#define MLX5E_RX_MAX_HEAD (256) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 @@ -183,11 +187,16 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct 
mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; }; -struct mlx5e_rx_wqe { +struct mlx5e_rx_wqe_ll { struct mlx5_wqe_srq_next_seg next; - struct mlx5_wqe_data_seg data; + struct mlx5_wqe_data_seg data[0]; +}; + +struct mlx5e_rx_wqe_cyc { + struct mlx5_wqe_data_seg data[0]; }; struct mlx5e_umr_wqe { @@ -313,7 +322,7 @@ struct mlx5e_cq { /* control */ struct mlx5_core_dev *mdev; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; struct mlx5e_tx_wqe_info { @@ -357,7 +366,6 @@ struct mlx5e_txqsq { /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; u32 dma_fifo_pc; - struct mlx5e_sq_stats stats; struct mlx5e_cq cq; @@ -370,11 +378,11 @@ struct mlx5e_txqsq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; + struct mlx5e_sq_stats *stats; void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; u8 min_inline_mode; - u16 edge; struct device *pdev; __be32 mkey_be; unsigned long state; @@ -439,7 +447,6 @@ struct mlx5e_icosq { struct mlx5_wq_cyc wq; void __iomem *uar_map; u32 sqn; - u16 edge; unsigned long state; /* control path */ @@ -450,7 +457,7 @@ struct mlx5e_icosq { static inline bool mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) { - return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc)); + return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); } struct mlx5e_dma_info { @@ -459,8 +466,9 @@ struct mlx5e_dma_info { }; struct mlx5e_wqe_frag_info { - struct mlx5e_dma_info di; + struct mlx5e_dma_info *di; u32 offset; + bool last_in_page; }; struct mlx5e_umr_dma_info { @@ -473,6 +481,8 @@ struct mlx5e_mpw_info { DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; +#define MLX5E_MAX_RX_FRAGS 4 + /* a single cache unit is capable to serve one napi call (for non-striding rq) * or a MPWQE (for striding rq). 
*/ @@ -490,6 +500,9 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); typedef struct sk_buff * (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +typedef struct sk_buff * +(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); @@ -497,19 +510,30 @@ enum mlx5e_rq_flag { MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), }; +struct mlx5e_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct mlx5e_rq_frags_info { + struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; +}; + struct mlx5e_rq { /* data path */ - struct mlx5_wq_ll wq; - union { struct { - struct mlx5e_wqe_frag_info *frag_info; - u32 frag_sz; /* max possible skb frag_sz */ - union { - bool page_reuse; - }; + struct mlx5_wq_cyc wq; + struct mlx5e_wqe_frag_info *frags; + struct mlx5e_dma_info *di; + struct mlx5e_rq_frags_info info; + mlx5e_fp_skb_from_cqe skb_from_cqe; } wqe; struct { + struct mlx5_wq_ll wq; struct mlx5e_umr_wqe umr_wqe; struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; @@ -520,14 +544,13 @@ struct mlx5e_rq { }; struct { u16 headroom; - u8 page_order; u8 map_dir; /* dma map direction */ } buff; struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; - struct mlx5e_rq_stats stats; + struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; @@ -575,7 +598,7 @@ struct mlx5e_channel { /* data path - accessed per napi poll */ struct irq_desc *irq_desc; - struct mlx5e_ch_stats stats; + struct mlx5e_ch_stats *stats; /* control */ struct mlx5e_priv *priv; @@ -591,6 +614,12 @@ struct mlx5e_channels { struct mlx5e_params params; }; +struct mlx5e_channel_stats { + struct mlx5e_ch_stats ch; + struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_rq_stats rq; +} ____cacheline_aligned_in_smp; + enum mlx5e_traffic_types { MLX5E_TT_IPV4_TCP, MLX5E_TT_IPV6_TCP, @@ -792,6 +821,8 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; + struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + u8 max_opened_tc; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; @@ -868,6 +899,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); void mlx5e_update_stats(struct mlx5e_priv *priv); @@ -956,10 +993,9 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe **wqe, u16 *pi) { - struct mlx5_wq_cyc *wq; + struct mlx5_wq_cyc *wq = &sq->wq; - wq = &sq->wq; - *pi = sq->pc & wq->sz_m1; + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); memset(*wqe, 0, sizeof(**wqe)); } @@ -967,7 +1003,7 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, static inline struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, 
u16 *pc) { - u16 pi = *pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; @@ -1096,6 +1132,10 @@ void mlx5e_update_stats_work(struct work_struct *work); int mlx5e_bits_invert(unsigned long a, int size); +typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb); + /* ethtool helpers */ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index ad2790fb5966..15aef71d1957 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -174,7 +174,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, int headln; int i; - sq->stats.tls_ooo++; + sq->stats->tls_ooo++; if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { /* We might get here if a retransmission reaches the driver @@ -220,7 +220,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, skb_shinfo(nskb)->nr_frags = info.nr_frags; nskb->data_len = info.sync_len; nskb->len += info.sync_len; - sq->stats.tls_resync_bytes += nskb->len; + sq->stats->tls_resync_bytes += nskb->len; mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, cpu_to_be64(info.rcd_sn)); mlx5e_sq_xmit(sq, nskb, *wqe, *pi); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index f64b5e78519b..75e4308ba786 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -213,7 +213,7 @@ out: } #define MLX5E_ARFS_NUM_GROUPS 2 -#define MLX5E_ARFS_GROUP1_SIZE BIT(12) +#define MLX5E_ARFS_GROUP1_SIZE (BIT(16) - 1) #define MLX5E_ARFS_GROUP2_SIZE BIT(0) #define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\ MLX5E_ARFS_GROUP2_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 42bd256e680d..fffe514ba855 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1515,6 +1515,9 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EOPNOTSUPP; if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) return -EINVAL; + } else if (priv->channels.params.lro_en) { + netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n"); + return -EINVAL; } new_channels.params = priv->channels.params; @@ -1589,6 +1592,10 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) out: mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. 
*/ + netdev_update_features(netdev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cee44c21766c..89c96a0f708e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -51,6 +51,7 @@ struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; + struct mlx5e_rq_frags_info frags_info; }; struct mlx5e_sq_param { @@ -93,7 +94,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) return true; } -static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) { if (!params->xdp_prog) { u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); @@ -107,19 +108,27 @@ static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) { - u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params); return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); } +static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); + + return !params->lro_en && frag_sz <= PAGE_SIZE; +} + static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); s8 signed_log_num_strides_param; u8 log_num_strides; - if (params->lro_en || frag_sz > PAGE_SIZE) + if (!mlx5e_rx_is_linear_skb(mdev, params)) return false; if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) @@ -145,7 +154,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); return MLX5E_MPWQE_STRIDE_SZ(mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); @@ -163,16 +172,15 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, { u16 linear_rq_headroom = params->xdp_prog ? XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + bool is_linear_skb; linear_rq_headroom += NET_IP_ALIGN; - if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) - return linear_rq_headroom; - - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return linear_rq_headroom; + is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ? + mlx5e_rx_is_linear_skb(mdev, params) : + mlx5e_rx_mpwqe_is_linear_skb(mdev, params); - return 0; + return is_linear_skb ? linear_rq_headroom : 0; } void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, @@ -182,14 +190,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, params->log_rq_mtu_frames = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - /* Extra room needed for build_skb */ - params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - } mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, @@ -213,7 +213,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; + MLX5_WQ_TYPE_CYCLIC; } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -327,10 +327,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_size(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_size(&rq->wqe.wq); + } +} + +static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return rq->mpwqe.wq.cur_sz; + default: + return rq->wqe.wq.cur_sz; + } +} + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); @@ -378,7 +398,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq)); + u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } @@ -388,6 +408,61 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; } +static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) +{ + struct mlx5e_wqe_frag_info next_frag, *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = true; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = true; +} + +static int mlx5e_init_di_list(struct mlx5e_rq *rq, + struct mlx5e_params *params, + int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(len * sizeof(*rq->wqe.di), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + mlx5e_init_frags_partition(rq); + + return 0; +} + +static void mlx5e_free_di_list(struct mlx5e_rq *rq) +{ + kvfree(rq->wqe.di); +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, 
struct mlx5e_rq_param *rqp, @@ -397,23 +472,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 byte_count, pool_size; - int npages; + u32 pool_size; int wq_sz; int err; int i; rqp->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, - &rq->wq_ctrl); - if (err) - return err; - - rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; - - wq_sz = mlx5_wq_ll_get_size(&rq->wq); - rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -423,6 +488,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->ix = c->ix; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -441,8 +507,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params); + rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -467,8 +542,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); - byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_wq_destroy; @@ -476,16 +549,31 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5e_rq_alloc_mpwqe_info(rq, c); if (err) - goto err_destroy_umr_mkey; + goto err_free; break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - rq->wqe.frag_info = - kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, cpu_to_node(c->cpu)); - if (!rq->wqe.frag_info) { + default: /* MLX5_WQ_TYPE_CYCLIC */ + err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); + + rq->wqe.info = rqp->frags_info; + rq->wqe.frags = + kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) * + sizeof(*rq->wqe.frags), + GFP_KERNEL, cpu_to_node(c->cpu)); + if (!rq->wqe.frags) { err = -ENOMEM; - goto err_rq_wq_destroy; + goto err_free; } + + err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu); + if (err) + goto err_free; rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; @@ -496,32 +584,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, #endif rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; if (!rq->handle_rx_cqe) { - kfree(rq->wqe.frag_info); err = -EINVAL; netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err); - goto err_rq_wq_destroy; + goto err_free; } - byte_count = params->lro_en ? 
- params->lro_wqe_sz : - MLX5E_SW2HW_MTU(params, params->sw_mtu); -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; -#endif - rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en; - - /* calc the required page order */ - rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count); - npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE); - rq->buff.page_order = order_base_2(npages); - - byte_count |= MLX5_HW_START_PADDING; + rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ? + mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; rq->mkey_be = c->mkey_be; } /* Create a page_pool and register it with rxq */ - pp_params.order = rq->buff.page_order; + pp_params.order = 0; pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ pp_params.pool_size = pool_size; pp_params.nid = cpu_to_node(c->cpu); @@ -535,28 +610,45 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, */ rq->page_pool = page_pool_create(&pp_params); if (IS_ERR(rq->page_pool)) { - if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - kfree(rq->wqe.frag_info); err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; - goto err_rq_wq_destroy; + goto err_free; } err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); if (err) - goto err_rq_wq_destroy; + goto err_free; for (i = 0; i < wq_sz; i++) { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5e_rx_wqe_ll *wqe = + mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); + u32 byte_count = + rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); - wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); - } + wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); + wqe->data[0].byte_count = cpu_to_be32(byte_count); + wqe->data[0].lkey = rq->mkey_be; + } else { + struct mlx5e_rx_wqe_cyc *wqe = + mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++) { + u32 frag_size = rq->wqe.info.arr[f].frag_size | + MLX5_HW_START_PADDING; - wqe->data.byte_count = cpu_to_be32(byte_count); - wqe->data.lkey = rq->mkey_be; + wqe->data[f].byte_count = cpu_to_be32(frag_size); + wqe->data[f].lkey = rq->mkey_be; + } + /* check if num_frags is not a pow of two */ + if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { + wqe->data[f].byte_count = 0; + wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + wqe->data[f].addr = 0; + } + } } INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); @@ -575,8 +667,16 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, return 0; -err_destroy_umr_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); +err_free: + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + kfree(rq->mpwqe.info); + mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); + } err_rq_wq_destroy: if (rq->xdp_prog) @@ -605,8 +705,9 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - kfree(rq->wqe.frag_info); + default: /* MLX5_WQ_TYPE_CYCLIC */ + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); } for (i = rq->page_cache.head; i != rq->page_cache.tail; @@ -646,8 +747,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); - 
mlx5_fill_page_array(&rq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); @@ -753,52 +854,53 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); struct mlx5e_channel *c = rq->channel; - struct mlx5_wq_ll *wq = &rq->wq; - u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq)); + u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); do { - if (wq->cur_sz >= min_wqes) + if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes) return 0; msleep(20); } while (time_before(jiffies, exp_time)); netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - c->ix, rq->rqn, wq->cur_sz, min_wqes); + c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); return -ETIMEDOUT; } static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe; __be16 wqe_ix_be; u16 wqe_ix; - /* UMR WQE (if in progress) is always at wq->head */ - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - rq->mpwqe.umr_in_progress) - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); - - while (!mlx5_wq_ll_is_empty(wq)) { - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); - rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, - &wqe->next.next_wqe_index); - } + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) { - /* Clean outstanding pages on handled WQEs that decided to do page-reuse, - * but yet to be re-posted. 
- */ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + /* UMR WQE (if in progress) is always at wq->head */ + if (rq->mpwqe.umr_in_progress) + mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); + + while (!mlx5_wq_ll_is_empty(wq)) { + struct mlx5e_rx_wqe_ll *wqe; - for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } + } else { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + + while (!mlx5_wq_cyc_is_empty(wq)) { + wqe_ix = mlx5_wq_cyc_get_tail(wq); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_cyc_pop(wq); + } } + } static int mlx5e_open_rq(struct mlx5e_channel *c, @@ -836,13 +938,15 @@ err_free_rq: static void mlx5e_activate_rq(struct mlx5e_rq *rq) { struct mlx5e_icosq *sq = &rq->channel->icosq; - u16 pi = sq->pc & sq->wq.sz_m1; + struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *nopwqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -885,6 +989,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -894,10 +999,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->min_inline_mode = params->tx_min_inline_mode; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -940,23 +1045,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS; - return 0; err_sq_wq_destroy: @@ -1001,10 +1105,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -1015,6 +1121,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, 
&sq->state); @@ -1022,10 +1129,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_TLS, &sq->state); param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -1034,8 +1141,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); sq->dim.mode = params->tx_cq_moderation.cq_period_mode; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; - return 0; err_sq_wq_destroy: @@ -1095,7 +1200,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); - mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&csp->wq_ctrl->buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_sq(mdev, in, inlen, sqn); @@ -1174,13 +1280,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; int err; - err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq); + err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc); if (err) return err; @@ -1238,6 +1345,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5e_channel *c = sq->channel; + struct mlx5_wq_cyc *wq = &sq->wq; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ @@ -1246,12 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) netif_tx_disable_queue(sq->txq); /* last doorbell out, godspeed .. 
*/ - if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) { + if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *nop; - sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL; - nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl); + sq->db.wqe_info[pi].skb = NULL; + nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); } } @@ -1366,7 +1475,7 @@ static void mlx5e_sq_recover(struct work_struct *work) return; mlx5e_reset_txqsq_cc_pc(sq); - sq->stats.recover++; + sq->stats->recover++; recover->last_recover = jiffies; mlx5e_activate_txqsq(sq); } @@ -1535,7 +1644,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c, static void mlx5e_free_cq(struct mlx5e_cq *cq) { - mlx5_cqwq_destroy(&cq->wq_ctrl); + mlx5_wq_destroy(&cq->wq_ctrl); } static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) @@ -1551,7 +1660,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * cq->wq_ctrl.frag_buf.npages; + sizeof(u64) * cq->wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1560,7 +1669,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) memcpy(cqc, param->cqc, sizeof(param->cqc)); - mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); @@ -1568,7 +1677,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1661,14 +1770,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - int err; - int tc; + struct mlx5e_priv *priv = c->priv; + int err, tc, max_nch = priv->profile->max_nch(priv->mdev); for (tc = 0; tc < params->num_tc; tc++) { - int txq_ix = c->ix + tc * params->num_channels; + int txq_ix = c->ix + tc * max_nch; err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, - params, &cparam->sq, &c->sq[tc]); + params, &cparam->sq, &c->sq[tc], tc); if (err) goto err_close_sqs; } @@ -1798,6 +1907,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; + c->stats = &priv->channel_stats[ix].ch; mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); c->irq_desc = irq_to_desc(irq); @@ -1911,6 +2021,76 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) kfree(c); } +#define DEFAULT_FRAG_SIZE (2048) + +static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_frags_info *info) +{ + u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); + int frag_size_max = DEFAULT_FRAG_SIZE; + u32 buf_size = 0; + int i; + +#ifdef CONFIG_MLX5_EN_IPSEC + if (MLX5_IPSEC_DEV(mdev)) + byte_count += MLX5E_METADATA_ETHER_LEN; +#endif + + if (mlx5e_rx_is_linear_skb(mdev, params)) { + int frag_stride; + + frag_stride = 
mlx5e_rx_get_linear_frag_sz(params); + frag_stride = roundup_pow_of_two(frag_stride); + + info->arr[0].frag_size = byte_count; + info->arr[0].frag_stride = frag_stride; + info->num_frags = 1; + info->wqe_bulk = PAGE_SIZE / frag_stride; + goto out; + } + + if (byte_count > PAGE_SIZE + + (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max) + frag_size_max = PAGE_SIZE; + + i = 0; + while (buf_size < byte_count) { + int frag_size = byte_count - buf_size; + + if (i < MLX5E_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + info->arr[i].frag_size = frag_size; + info->arr[i].frag_stride = roundup_pow_of_two(frag_size); + + buf_size += frag_size; + i++; + } + info->num_frags = i; + /* number of different wqes sharing a page */ + info->wqe_bulk = 1 + (info->num_frags % 2); + +out: + info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); + info->log_num_frags = order_base_2(info->num_frags); +} + +static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) +{ + int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; + + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + sz += sizeof(struct mlx5e_rx_wqe_ll); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + sz += sizeof(struct mlx5e_rx_wqe_cyc); + } + + return order_base_2(sz); +} + static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) @@ -1918,6 +2098,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + int ndsegs = 1; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -1927,16 +2108,18 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wqe_stride_size, mlx5e_mpwqe_get_log_stride_size(mdev, params) - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); + mlx5e_build_rq_frags_info(mdev, params, ¶m->frags_info); + ndsegs = param->frags_info.num_frags; } + MLX5_SET(wq, wq, wq_type, params->rq_wq_type); MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); @@ -1952,8 +2135,9 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); @@ -2004,7 +2188,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + mlx5e_mpwqe_get_log_num_strides(mdev, params); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ + default: /* MLX5_WQ_TYPE_CYCLIC */ log_cq_size = params->log_rq_mtu_frames; } @@ 
-2624,15 +2808,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } -static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) +static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) { - struct mlx5e_channel *c; - struct mlx5e_txqsq *sq; + int max_nch = priv->profile->max_nch(priv->mdev); int i, tc; - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (tc = 0; tc < priv->profile->max_tc; tc++) - priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num; + priv->channel_tc2txq[i][tc] = i + tc * max_nch; +} + +static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) +{ + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + int i, tc; for (i = 0; i < priv->channels.num; i++) { c = priv->channels.c[i]; @@ -2652,7 +2842,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->channels.num); - mlx5e_build_channels_tx_maps(priv); + mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); @@ -2804,8 +2994,8 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, param->wq.db_numa_node = param->wq.buf_numa_node; - err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, - &rq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); if (err) return err; @@ -3129,6 +3319,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev, if (err) goto out; + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + new_channels.params.num_tc); mlx5e_switch_priv_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); @@ -3219,6 +3411,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); } else { + mlx5e_grp_sw_update_stats(priv); stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -3293,12 +3486,18 @@ static int set_feature_lro(struct net_device *netdev, bool enable) mutex_lock(&priv->state_lock); old_params = &priv->channels.params; + if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + netdev_warn(netdev, "can't set LRO with legacy RQ\n"); + err = -EINVAL; + goto out; + } + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); new_channels.params = *old_params; new_channels.params.lro_en = enable; - if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) reset = false; @@ -3462,22 +3661,31 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params *params; mutex_lock(&priv->state_lock); + params = &priv->channels.params; if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. 
*/ features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (!priv->channels.params.vlan_strip_disable) + if (!params->vlan_strip_disable) netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } + if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + features &= ~NETIF_F_LRO; + if (params->lro_en) + netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + } + mutex_unlock(&priv->state_lock); return features; } -static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; @@ -3495,7 +3703,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; - if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); @@ -3504,7 +3712,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (!reset) { params->sw_mtu = new_mtu; - mlx5e_set_dev_port_mtu(priv); + set_mtu_cb(priv); netdev->mtu = params->sw_mtu; goto out; } @@ -3513,7 +3721,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); netdev->mtu = new_channels.params.sw_mtu; out: @@ -3521,6 +3729,11 @@ out: return err; } +static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); +} + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) { struct hwtstamp_config config; @@ -3815,7 +4028,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, return false; netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn); - sq->channel->stats.eq_rearm++; + sq->channel->stats->eq_rearm++; return true; } @@ -4015,7 +4228,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, - .ndo_change_mtu = mlx5e_change_mtu, + .ndo_change_mtu = mlx5e_change_nic_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, @@ -4194,9 +4407,16 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ - if (mlx5e_striding_rq_possible(mdev, params)) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, - !slow_pci_heuristic(mdev)); + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
+ */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || + !mlx5e_rx_is_linear_skb(mdev, params))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); @@ -4239,6 +4459,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; + priv->max_opened_tc = 1; mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev), netdev->mtu); @@ -4303,7 +4524,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; - if (!!MLX5_CAP_ETH(mdev, lro_cap)) + if (!!MLX5_CAP_ETH(mdev, lro_cap) && + mlx5e_check_fragmented_striding_rq_cap(mdev)) netdev->vlan_features |= NETIF_F_LRO; netdev->hw_features = netdev->vlan_features; @@ -4426,6 +4648,7 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); + mlx5e_build_tc2txq_maps(priv); mlx5e_vxlan_init(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c3034f58aa33..57987f6546e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -134,13 +134,13 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - rq_stats = &c->rq.stats; + rq_stats = c->rq.stats; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + sq_stats = c->sq[j].stats; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -148,12 +148,6 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) } } -static void mlx5e_rep_update_stats(struct mlx5e_priv *priv) -{ - mlx5e_rep_update_sw_counters(priv); - mlx5e_rep_update_hw_counters(priv); -} - static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -871,6 +865,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_sw_stats *sstats = &priv->stats.sw; + mlx5e_rep_update_sw_counters(priv); + stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -904,6 +900,11 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, NULL); +} + static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_open = mlx5e_rep_open, .ndo_stop = mlx5e_rep_close, @@ -913,6 +914,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, + .ndo_change_mtu = mlx5e_change_rep_mtu, }; static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, @@ -925,7 +927,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; - 
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); @@ -939,6 +941,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, static void mlx5e_build_rep_netdev(struct net_device *netdev) { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; + netdev->netdev_ops = &mlx5e_netdev_ops_rep; netdev->watchdog_timeo = 15 * HZ; @@ -951,6 +957,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_TC; eth_hw_addr_random(netdev); + + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); } static void mlx5e_init_rep(struct mlx5_core_dev *mdev, @@ -1046,7 +1056,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .cleanup_rx = mlx5e_cleanup_rep_rx, .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, - .update_stats = mlx5e_rep_update_stats, + .update_stats = mlx5e_rep_update_hw_counters, .max_nch = mlx5e_get_rep_max_num_channels, .update_carrier = NULL, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a6a92c4f5fbb..d3a1dd20e41d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -54,7 +54,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, void *data) { - u32 ci = cqcc & cq->wq.fbc.sz_m1; + u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc); memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); } @@ -65,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, mlx5e_read_cqe_slot(cq, cqcc, &cq->title); cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); - rq->stats.cqe_compress_blks++; + rq->stats->cqe_compress_blks++; } static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) @@ -76,10 +76,11 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) { - struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc; - u8 op_own = (cqcc >> fbc->log_sz) & 1; - u32 wq_sz = 1 << fbc->log_sz; - u32 ci = cqcc & fbc->sz_m1; + struct mlx5_cqwq *wq = &cq->wq; + + u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; + u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); + u32 wq_sz = mlx5_cqwq_get_size(wq); u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { @@ -112,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, mpwrq_get_cqe_consumed_strides(&cq->title); else cq->decmprs_wqe_counter = - (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1; + mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -145,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); cq->wq.cc = cqcc; cq->decmprs_left -= cqe_count; - rq->stats.cqe_compress_pkts += cqe_count; + rq->stats->cqe_compress_pkts += cqe_count; return cqe_count; } @@ -163,8 +164,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq 
*rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) - static inline bool mlx5e_page_is_reserved(struct page *page) { return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); @@ -175,14 +174,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, { struct mlx5e_page_cache *cache = &rq->page_cache; u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1); + struct mlx5e_rq_stats *stats = rq->stats; if (tail_next == cache->head) { - rq->stats.cache_full++; + stats->cache_full++; return false; } if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { - rq->stats.cache_waive++; + stats->cache_waive++; return false; } @@ -195,23 +195,24 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { struct mlx5e_page_cache *cache = &rq->page_cache; + struct mlx5e_rq_stats *stats = rq->stats; if (unlikely(cache->head == cache->tail)) { - rq->stats.cache_empty++; + stats->cache_empty++; return false; } if (page_ref_count(cache->page_cache[cache->head].page) != 1) { - rq->stats.cache_busy++; + stats->cache_busy++; return false; } *dma_info = cache->page_cache[cache->head]; cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); - rq->stats.cache_reuse++; + stats->cache_reuse++; dma_sync_single_for_device(rq->pdev, dma_info->addr, - RQ_PAGE_SIZE(rq), + PAGE_SIZE, DMA_FROM_DEVICE); return true; } @@ -227,7 +228,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return -ENOMEM; dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - RQ_PAGE_SIZE(rq), rq->buff.map_dir); + PAGE_SIZE, rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { put_page(dma_info->page); dma_info->page = NULL; @@ -240,8 +241,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), - rq->buff.map_dir); + dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); } void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, @@ -259,62 +259,103 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, } } -static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) { - return rq->wqe.page_reuse && wi->di.page && - (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) && - !mlx5e_page_is_reserved(wi->di.page); + int err = 0; + + if (!frag->offset) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. 
+ */ + err = mlx5e_page_alloc_mapped(rq, frag->di); + + return err; } -static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + if (frag->last_in_page) + mlx5e_page_release(rq, frag->di, true); +} + +static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, + u16 ix) +{ + struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix); + int err; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = mlx5e_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; - /* check if page exists, hence can be reused */ - if (!wi->di.page) { - if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di))) - return -ENOMEM; - wi->offset = 0; + wqe->data[i].addr = cpu_to_be64(frag->di->addr + + frag->offset + rq->buff.headroom); } - wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); return 0; + +free_frags: + while (--i >= 0) + mlx5e_put_rx_frag(rq, --frag); + + return err; } static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi) { - mlx5e_page_release(rq, &wi->di, true); - wi->di.page = NULL; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + mlx5e_put_rx_frag(rq, wi); } -static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { - if (mlx5e_page_reuse(rq, wi)) { - rq->stats.page_reuse++; - return; - } + struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); mlx5e_free_rx_wqe(rq, wi); } -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) +static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + int err; + int i; - if (wi->di.page) - mlx5e_free_rx_wqe(rq, wi); + for (i = 0; i < wqe_bulk; i++) { + struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); + + err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i); + if (unlikely(err)) + goto free_wqes; + } + + return 0; + +free_wqes: + while (--i >= 0) + mlx5e_dealloc_rx_wqe(rq, ix + i); + + return err; } -static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, - struct sk_buff *skb, - struct mlx5e_dma_info *di, - u32 frag_offset, u32 len) +static inline void +mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5e_dma_info *di, u32 frag_offset, u32 len, + unsigned int truesize) { - unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); - dma_sync_single_for_cpu(rq->pdev, di->addr + frag_offset, len, DMA_FROM_DEVICE); @@ -324,29 +365,33 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, } static inline void +mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, + struct mlx5e_dma_info *dma_info, + int offset_from, int offset_to, u32 headlen) +{ + const void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, offset_to, from, len); +} + +static inline void mlx5e_copy_skb_header_mpwqe(struct device *pdev, struct 
sk_buff *skb, struct mlx5e_dma_info *dma_info, u32 offset, u32 headlen) { u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - unsigned int len; - /* Aligning len to sizeof(long) optimizes memcpy performance */ - len = ALIGN(headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len); + mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg); if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; - headlen_pg = len; - len = ALIGN(headlen - headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, headlen_pg, - page_address(dma_info->page), - len); + mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg, + headlen - headlen_pg); } } @@ -364,8 +409,8 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); rq->mpwqe.umr_in_progress = false; @@ -382,6 +427,22 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } +static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) +{ + struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; + + edge_wi = wi + nnops; + + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->opcode = MLX5_OPCODE_NOP; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } +} + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -390,14 +451,16 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); + u16 pi, frag_pi; int err; - u16 pi; int i; - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + + if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); } umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -433,7 +496,7 @@ err_unmap: dma_info--; mlx5e_page_release(rq, dma_info, true); } - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return err; } @@ -447,31 +510,34 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk; int err; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - if (mlx5_wq_ll_is_full(wq)) + wqe_bulk = rq->wqe.info.wqe_bulk; + + if (mlx5_wq_cyc_missing(wq) < wqe_bulk) return false; do { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + u16 head = mlx5_wq_cyc_get_head(wq); - err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head); + err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); if (unlikely(err)) { - rq->stats.buff_alloc_err++; + 
rq->stats->buff_alloc_err++; break; } - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - } while (!mlx5_wq_ll_is_full(wq)); + mlx5_wq_cyc_push_n(wq, wqe_bulk); + } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); - mlx5_wq_ll_update_db_record(wq); + mlx5_wq_cyc_update_db_record(wq); return !!err; } @@ -482,7 +548,7 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; mlx5_cqwq_pop(&cq->wq); @@ -523,7 +589,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; @@ -672,6 +738,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, struct sk_buff *skb, bool lro) { + struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) @@ -679,7 +746,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } @@ -697,7 +764,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (unlikely(netdev->features & NETIF_F_RXFCS)) skb->csum = csum_add(skb->csum, (__force __wsum)mlx5e_get_fcs(skb)); - rq->stats.csum_complete++; + stats->csum_complete++; return; } @@ -707,15 +774,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; skb->encapsulation = 1; - rq->stats.csum_unnecessary_inner++; + stats->csum_unnecessary_inner++; return; } - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } csum_none: skb->ip_summed = CHECKSUM_NONE; - rq->stats.csum_none++; + stats->csum_none++; } static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, @@ -724,6 +791,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + struct mlx5e_rq_stats *stats = rq->stats; struct net_device *netdev = rq->netdev; skb->mac_len = ETH_HLEN; @@ -733,9 +801,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, /* Subtract one since we already counted this as one * "regular" packet in mlx5e_complete_rx_cqe() */ - rq->stats.packets += lro_num_seg - 1; - rq->stats.lro_packets++; - rq->stats.lro_bytes += cqe_bcnt; + stats->packets += lro_num_seg - 1; + stats->lro_packets++; + stats->lro_bytes += cqe_bcnt; } if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) @@ -750,7 +818,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (cqe_has_vlan(cqe)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); - rq->stats.removed_vlan_packets++; + stats->removed_vlan_packets++; } skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; @@ -764,8 +832,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } @@ -773,7 +843,7 @@ static 
inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *wqe; - u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */ + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -786,7 +856,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, { struct mlx5e_xdpsq *sq = &rq->xdpsq; struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; @@ -797,10 +867,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, dma_addr_t dma_addr = di->addr + data_offset; unsigned int dma_len = xdp->data_end - xdp->data; + struct mlx5e_rq_stats *stats = rq->stats; + prefetchw(wqe); if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { - rq->stats.xdp_drop++; + stats->xdp_drop++; return false; } @@ -810,7 +882,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, mlx5e_xmit_xdp_doorbell(sq); sq->db.doorbell = false; } - rq->stats.xdp_tx_full++; + stats->xdp_tx_full++; return false; } @@ -844,7 +916,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.doorbell = true; - rq->stats.xdp_tx++; + stats->xdp_tx++; return true; } @@ -891,7 +963,7 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, case XDP_ABORTED: trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: - rq->stats.xdp_drop++; + rq->stats->xdp_drop++; return true; } } @@ -904,7 +976,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, struct sk_buff *skb = build_skb(va, frag_size); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -914,11 +986,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, return skb; } -static inline -struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { - struct mlx5e_dma_info *di = &wi->di; + struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb; void *va, *data; @@ -933,10 +1005,9 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, frag_size, DMA_FROM_DEVICE); prefetchw(va); /* xdp_frame data area */ prefetch(data); - wi->offset += frag_size; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; return NULL; } @@ -956,41 +1027,87 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return skb; } +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +{ + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *head_wi = wi; + u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats->wqe_err++; + return NULL; + } + + /* XDP is not supported in this configuration, as incoming packets + * might spread among multiple pages. 
+ */ + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + while (byte_cnt) { + u16 frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + frag_headlen = 0; + frag_info++; + wi++; + } + + /* copy header */ + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, + 0, headlen); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + return skb; +} + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + /* do not return page to cache, + * it will be returned on XDP_TX completion. + */ + goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #ifdef CONFIG_MLX5_ESWITCH @@ -1000,29 +1117,26 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; - __be16 wqe_counter_be; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { + /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + /* do not return page to cache, + * it will be returned on XDP_TX completion. 
+ */ + goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); @@ -1032,10 +1146,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif @@ -1043,7 +1157,7 @@ struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) { - u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u32 frag_offset = head_offset + headlen; u32 byte_cnt = cqe_bcnt - headlen; @@ -1051,9 +1165,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct sk_buff *skb; skb = napi_alloc_skb(rq->cq.napi, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -1067,9 +1181,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w while (byte_cnt) { u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + unsigned int truesize = + ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset, - pg_consumed_bytes); + mlx5e_add_skb_frag(rq, skb, di, frag_offset, + pg_consumed_bytes, truesize); byte_cnt -= pg_consumed_bytes; frag_offset = 0; di++; @@ -1132,19 +1248,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; - struct mlx5e_rx_wqe *wqe; + struct mlx5e_rx_wqe_ll *wqe; + struct mlx5_wq_ll *wq; struct sk_buff *skb; u16 cqe_bcnt; wi->consumed_strides += cstrides; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; goto mpwrq_cqe_out; } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats.mpwqe_filler++; + rq->stats->mpwqe_filler++; goto mpwrq_cqe_out; } @@ -1162,9 +1279,10 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5e_free_rx_mpwqe(rq, wi); - mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) @@ -1256,7 +1374,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); di = &sq->db.di[ci]; sqcc++; @@ -1281,7 +1399,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) u16 ci; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); di = &sq->db.di[ci]; sq->cc++; @@ -1299,6 +1417,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { + struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; struct net_device *netdev; struct mlx5e_priv *priv; @@ 
-1360,27 +1479,24 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->dev = netdev; - rq->stats.csum_complete++; - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + stats->csum_complete++; + stats->packets++; + stats->bytes += cqe_bcnt; } void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) goto wq_free_wqe; @@ -1392,9 +1508,8 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: - mlx5e_free_rx_wqe_reuse(rq, wi); - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5e_free_rx_wqe(rq, wi); + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_CORE_IPOIB */ @@ -1403,38 +1518,34 @@ wq_free_wqe: void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); if (unlikely(!skb)) { mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_EN_IPSEC */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 027f54ac1ca2..4d316cc9b008 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -100,7 +100,7 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv) #ifdef CONFIG_INET /* loopback test */ -#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN) +#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; #define MLX5E_TEST_MAGIC 0x5AEED15C001ULL diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e17919c0af08..1646859974ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -64,11 +64,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -81,7 +81,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) @@ -109,20 +108,19 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } -static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) { struct mlx5e_sw_stats temp, *s = &temp; - struct mlx5e_rq_stats *rq_stats; - struct mlx5e_sq_stats *sq_stats; - struct mlx5e_ch_stats *ch_stats; - int i, j; + int i; memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_channel *c = priv->channels.c[i]; - rq_stats = &c->rq.stats; - ch_stats = &c->stats; + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats = + &priv->channel_stats[i]; + struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; + struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; + int j; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; @@ -149,8 +147,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_waive += rq_stats->cache_waive; s->ch_eq_rearm += ch_stats->eq_rearm; - for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -175,9 +173,6 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) } } - s->link_down_events_phy = MLX5_GET(ppcnt_reg, - priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events); memcpy(&priv->stats.sw, s, sizeof(*s)); } @@ -580,12 +575,13 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = { { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; -#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) { + /* "1" for link_down_events special counter */ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? 
- NUM_PPORT_PHY_COUNTERS : 0; + NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1; } static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -593,10 +589,14 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_stats_desc[i].format); + strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); return idx; } @@ -604,11 +604,17 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_stats_desc, i); + /* link_down_events_phy has special handling since it is not stored in __be64 format */ + data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); return idx; } @@ -1131,11 +1137,11 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { @@ -1148,30 +1154,30 @@ static const struct counter_desc ch_stats_desc[] = { static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { - return (NUM_RQ_STATS * priv->channels.num) + - (NUM_CH_STATS * priv->channels.num) + - (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc); + int max_nch = priv->profile->max_nch(priv->mdev); + + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * priv->max_opened_tc); } static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) { + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, ch_stats_desc[j].format, i); - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < priv->channels.num; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + 
for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, @@ -1183,29 +1189,26 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { - struct mlx5e_channels *channels = &priv->channels; + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, ch_stats_desc, j); - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < channels->num; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); return idx; @@ -1217,7 +1220,6 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_sw_get_num_stats, .fill_strings = mlx5e_grp_sw_fill_strings, .fill_stats = mlx5e_grp_sw_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, .update_stats = mlx5e_grp_sw_update_stats, }, { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a36e6a87066b..643153bb3607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -75,11 +75,11 @@ struct mlx5e_sw_stats { u64 tx_csum_partial; u64 tx_csum_partial_inner; u64 tx_queue_stopped; - u64 tx_queue_wake; u64 tx_queue_dropped; u64 tx_xmit_more; - u64 tx_cqe_err; u64 tx_recover; + u64 tx_queue_wake; + u64 tx_cqe_err; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -97,9 +97,6 @@ struct mlx5e_sw_stats { u64 tx_tls_ooo; u64 tx_tls_resync_bytes; #endif - - /* Special handling counters */ - u64 link_down_events_phy; }; struct mlx5e_qcounter_stats { @@ -206,10 +203,11 @@ struct mlx5e_sq_stats { /* less likely accessed in data path */ u64 csum_none; u64 stopped; - u64 wake; u64 dropped; - u64 cqe_err; u64 recover; + /* dirtied @completion */ + u64 wake ____cacheline_aligned_in_smp; + u64 cqe_err; }; struct mlx5e_ch_stats { @@ -242,4 +240,6 @@ struct mlx5e_stats_grp { extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv); + #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a9c96fe8e4fe..0edf4751a8ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -75,12 +75,14 @@ enum { MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), }; +#define MLX5E_TC_MAX_SPLITS 1 + struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; u8 flags; - struct mlx5_flow_handle *rule; + struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; struct list_head encap; /* flows sharing the same encap ID */ struct 
list_head mod_hdr; /* flows sharing the same mod hdr ID */ struct list_head hairpin; /* flows sharing the same hairpin */ @@ -794,8 +796,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); + mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) { @@ -844,8 +846,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; - attr->out_mdev = out_priv->mdev; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } err = mlx5_eswitch_add_vlan_action(esw, attr); @@ -870,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); if (IS_ERR(rule)) goto err_add_rule; + + if (attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr); + if (IS_ERR(flow->rule[1])) + goto err_fwd_rule; + } } return rule; +err_fwd_rule: + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + rule = flow->rule[1]; err_add_rule: if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); @@ -893,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } mlx5_eswitch_del_vlan_action(esw, attr); @@ -929,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { esw_attr = flow->esw_attr; esw_attr->encap_id = e->encap_id; - flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); continue; } + + if (esw_attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr); + if (IS_ERR(flow->rule[1])) { + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr); + err = PTR_ERR(flow->rule[1]); + mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n", + err); + continue; + } + } + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; } } @@ -948,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } } @@ -984,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) continue; list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - counter = 
mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { neigh_used = true; @@ -2537,6 +2566,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + attr->mirror_count = attr->out_count; continue; } @@ -2548,12 +2578,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EOPNOTSUPP; } - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *out_dev; + if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) { struct mlx5e_priv *out_priv; + struct net_device *out_dev; out_dev = tcf_mirred_dev(a); + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + pr_err("can't support more than %d output ports, can't offload forwarding\n", + attr->out_count); + return -EOPNOTSUPP; + } + if (switchdev_port_same_parent_id(priv->netdev, out_dev) || is_merged_eswitch_dev(priv, out_dev)) { @@ -2561,8 +2597,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; - attr->out_mdev = out_priv->mdev; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } else if (encap) { parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; @@ -2585,6 +2621,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, encap = true; else return -EOPNOTSUPP; + attr->mirror_count = attr->out_count; continue; } @@ -2606,6 +2643,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } + attr->mirror_count = attr->out_count; continue; } @@ -2621,6 +2659,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; + if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); + return -EOPNOTSUPP; + } + return 0; } @@ -2700,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); } else { err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); } - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); if (err != -EAGAIN) goto err_free; } @@ -2782,7 +2825,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) return 0; - counter = mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); if (!counter) return 0; @@ -2833,3 +2876,10 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) { rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); } + +int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +{ + struct rhashtable *tc_ht = get_tc_ht(priv); + + return atomic_read(&tc_ht->nelems); +} 
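[Note on the en_tc.c changes above: the single flow->rule pointer becomes a small array of MLX5E_TC_MAX_SPLITS + 1 slots so a mirrored flow can carry both the primary offloaded rule and the extra forwarding rule; the second rule is installed only when attr->mirror_count is set and is torn down first on the delete path. The sketch below is a minimal, self-contained illustration of that ownership pattern under simplified assumptions; tc_flow, add_primary_rule, add_fwd_rule and del_rule are hypothetical stand-ins, not the mlx5 driver's API.]

/* Sketch: one rule slot per "split" (primary + optional mirror/forward rule),
 * allocated in order and released in reverse order. Helper names are
 * illustrative placeholders only.
 */
#include <stdbool.h>
#include <stdlib.h>

#define MAX_SPLITS 1                        /* mirrors MLX5E_TC_MAX_SPLITS */

struct tc_flow {
	void *rule[MAX_SPLITS + 1];         /* [0] = primary, [1] = mirror/fwd */
	int mirror_count;                   /* nonzero when the flow mirrors */
};

/* trivial stand-ins so the sketch compiles and runs */
static void *add_primary_rule(struct tc_flow *flow) { (void)flow; return malloc(1); }
static void *add_fwd_rule(struct tc_flow *flow)     { (void)flow; return malloc(1); }
static void del_rule(void *rule)                    { free(rule); }

static bool offload_flow(struct tc_flow *flow)
{
	flow->rule[0] = add_primary_rule(flow);
	if (!flow->rule[0])
		return false;

	/* only mirrored flows need the second (forwarding) rule */
	if (flow->mirror_count) {
		flow->rule[1] = add_fwd_rule(flow);
		if (!flow->rule[1]) {
			del_rule(flow->rule[0]);    /* unwind on failure */
			return false;
		}
	}
	return true;
}

static void unoffload_flow(struct tc_flow *flow)
{
	/* tear down in reverse order of creation */
	if (flow->mirror_count)
		del_rule(flow->rule[1]);
	del_rule(flow->rule[0]);
}

int main(void)
{
	struct tc_flow flow = { .mirror_count = 1 };

	if (offload_flow(&flow))
		unoffload_flow(&flow);
	return 0;
}

[The same add-then-unwind / delete-in-reverse discipline shows up again in the eswitch_offloads.c hunks further down, where the fast FDB table is destroyed if creating the forwarding table fails.]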
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 59e52b845beb..49436bf3b80a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -68,10 +68,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) -{ - return atomic_read(&priv->fs.tc.ht.nelems); -} +int mlx5e_tc_num_filters(struct mlx5e_priv *priv); #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2d3f17da5f5c..f29deb44bf3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -188,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, return min_t(u16, hlen, skb_headlen(skb)); } -static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, - unsigned int *skb_len, - unsigned int len) -{ - *skb_len -= len; - *skb_data += len; -} - -static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, - unsigned char **skb_data, - unsigned int *skb_len) +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, *skb_data, cpy1_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz); + memcpy(vhdr, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); + memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); } static inline void @@ -220,34 +208,31 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct if (skb->encapsulation) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM; - sq->stats.csum_partial_inner++; + sq->stats->csum_partial_inner++; } else { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; - sq->stats.csum_partial++; + sq->stats->csum_partial++; } } else - sq->stats.csum_none++; + sq->stats->csum_none++; } static inline u16 -mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes) +mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) { + struct mlx5e_sq_stats *stats = sq->stats; u16 ihs; - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); - if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - sq->stats.tso_inner_packets++; - sq->stats.tso_inner_bytes += skb->len - ihs; + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; } else { ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - sq->stats.tso_packets++; - sq->stats.tso_bytes += skb->len - ihs; + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; } - *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; return ihs; } @@ -300,17 +285,34 @@ dma_unmap_wqe_err: return -ENOMEM; } +static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) +{ + struct mlx5e_tx_wqe_info *edge_wi, *wi = 
&sq->db.wqe_info[pi]; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; + + edge_wi = wi + nnops; + + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->skb = NULL; + wi->num_wqebbs = 1; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + sq->stats->nop += nnops; +} + static inline void mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, - u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi; wi->num_bytes = num_bytes; wi->num_dma = num_dma; - wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + wi->num_wqebbs = num_wqebbs; wi->skb = skb; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); @@ -324,84 +326,108 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->pc += wi->num_wqebbs; if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); - sq->stats.stopped++; + sq->stats->stopped++; } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); - - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.wqe_info[pi].skb = NULL; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); - sq->stats.nop++; - } } +#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) + netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe *wqe, u16 pi) { - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; + struct mlx5e_sq_stats *stats = sq->stats; + u16 ds_cnt, ds_cnt_inl = 0; + u16 headlen, ihs, frag_pi; + u8 num_wqebbs, opcode; + u32 num_bytes; int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + __be16 mss; + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); - sq->stats.packets += skb_shinfo(skb)->gso_segs; + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; + + headlen = skb->len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + if (ihs) { + ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN; + + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + 
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); + mlx5e_sq_fetch_wqe(sq, &wqe, &pi); + } + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; + + if (ihs) { + eseg->inline_hdr.sz = cpu_to_be16(ihs); if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); - ihs += VLAN_HLEN; - sq->stats.added_vlan_packets++; + ihs -= VLAN_HLEN; + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs); + stats->added_vlan_packets++; } else { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); } - eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); - sq->stats.added_vlan_packets++; + stats->added_vlan_packets++; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, cseg); return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -485,7 +511,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats.cqe_err++; + sq->stats->cqe_err++; } do { @@ -496,7 +522,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -545,7 +571,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats.wake++; + sq->stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); @@ -559,7 +585,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) int i; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -581,18 +607,6 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } #ifdef CONFIG_MLX5_CORE_IPOIB - -struct mlx5_wqe_eth_pad { - u8 rsvd0[16]; -}; - -struct mlx5i_tx_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_datagram_seg datagram; - struct mlx5_wqe_eth_pad pad; - struct mlx5_wqe_eth_seg eth; -}; - static inline void mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, struct mlx5_wqe_datagram_seg *dseg) @@ -605,67 +619,92 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey) { - struct 
mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; - struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; - int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - memset(wqe, 0, sizeof(*wqe)); + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5i_tx_wqe *wqe; - mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + struct mlx5_wqe_datagram_seg *datagram; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + struct mlx5e_sq_stats *stats = sq->stats; + u16 headlen, ihs, pi, frag_pi; + u16 ds_cnt, ds_cnt_inl = 0; + u8 num_wqebbs, opcode; + u32 num_bytes; + int num_dma; + __be16 mss; + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); - sq->stats.packets += skb_shinfo(skb)->gso_segs; + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; + + headlen = skb->len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + + if (ihs) { + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); + } + + mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + datagram = &wqe->datagram; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (ihs) { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, 
cseg); return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 5d6f9ce2bf80..1b17f682693b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -46,24 +46,26 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) { + struct mlx5e_sq_stats *stats = sq->stats; struct net_dim_sample dim_sample; if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) return; - net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes, + net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); net_dim(&sq->dim, dim_sample); } static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) { + struct mlx5e_rq_stats *stats = rq->stats; struct net_dim_sample dim_sample; if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) return; - net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes, + net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); net_dim(&rq->dim, dim_sample); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1814f803bd2c..406c23862f5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -144,6 +144,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_GPIO_EVENT"; case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; case MLX5_EVENT_TYPE_REMOTE_CONFIG: return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; case MLX5_EVENT_TYPE_DB_BF_CONGESTION: @@ -162,6 +164,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; case MLX5_EVENT_TYPE_FPGA_ERROR: return "MLX5_EVENT_TYPE_FPGA_ERROR"; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; case MLX5_EVENT_TYPE_GENERAL_EVENT: return "MLX5_EVENT_TYPE_GENERAL_EVENT"; default: @@ -396,6 +400,20 @@ static void general_event_handler(struct mlx5_core_dev *dev, } } +static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, + struct mlx5_eqe *eqe) +{ + u64 value_lsb; + u64 value_msb; + + value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); + + mlx5_core_warn(dev, + "High temperature on sensors with bit set %llx %llx", + value_msb, value_lsb); +} + /* caller must eventually call mlx5_cq_put on the returned cq */ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) { @@ -547,9 +565,14 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_FPGA_ERROR: + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); break; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + mlx5_temp_warning_event(dev, eqe); + break; + case MLX5_EVENT_TYPE_GENERAL_EVENT: general_event_handler(dev, eqe); break; @@ -822,10 +845,13 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); if (MLX5_CAP_GEN(dev, fpga)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR); + async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) | + (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR); if (MLX5_CAP_GEN_MAX(dev, 
dct)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED); + if (MLX5_CAP_GEN(dev, temp_warn_event)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 09f0e11c6ffc..6cab1dd66d1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, spec->match_criteria_enable = match_header; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = - mlx5_add_flow_rules(esw->fdb_table.fdb, spec, + mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, @@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create FDB Table err %d\n", err); goto out; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.legacy.fdb = fdb; /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -343,9 +343,9 @@ out: mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); esw->fdb_table.legacy.addr_grp = NULL; } - if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) { + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; } } @@ -355,15 +355,15 @@ out: static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return; esw_debug(esw->dev, "Destroy FDB Table\n"); mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; @@ -396,7 +396,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ - if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) + if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", @@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); @@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index f47a14e31b7d..b174da2884c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -55,6 +55,9 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) +#define mlx5_esw_has_fwd_fdb(dev) \ + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -117,16 +120,18 @@ struct mlx5_vport { }; struct mlx5_eswitch_fdb { - void *fdb; union { struct legacy_fdb { + struct mlx5_flow_table *fdb; struct mlx5_flow_group *addr_grp; struct mlx5_flow_group *allmulti_grp; struct mlx5_flow_group *promisc_grp; } legacy; struct offloads_fdb { - struct mlx5_flow_table *fdb; + struct mlx5_flow_table *fast_fdb; + struct mlx5_flow_table *fwd_fdb; + struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; struct mlx5_flow_handle *miss_rule_uni; @@ -214,6 +219,10 @@ struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr); void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, @@ -234,12 +243,18 @@ enum mlx5_flow_match_level { MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP, }; +/* current maximum for flow based vport multicasting */ +#define MLX5_MAX_FLOW_FWD_VPORTS 2 + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; - struct mlx5_eswitch_rep *out_rep; - struct mlx5_core_dev *out_mdev; + struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_core_dev *in_mdev; + int mirror_count; + int out_count; + int action; __be16 vlan_proto; u16 vlan_vid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b9ea464bcfa9..cecd201f0b73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -48,16 +48,22 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { - struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_table *ft = NULL; struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; + int j, i = 0; void *misc; - int i = 0; if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); + if (attr->mirror_count) + ft = esw->fdb_table.offloads.fwd_fdb; + else + ft = esw->fdb_table.offloads.fast_fdb; + flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) @@ -70,14 +76,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = attr->out_rep->vport; - if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + for (j = attr->mirror_count; j < attr->out_count; j++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[j]->vport; dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev, vhca_id); - dest[i].vport.vhca_id_valid = 1; + MLX5_CAP_GEN(attr->out_mdev[j], 
vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + i++; } - i++; } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw->dev, true); @@ -119,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; - rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, - spec, &flow_act, dest, i); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); if (IS_ERR(rule)) goto err_add_rule; else @@ -134,6 +139,57 @@ err_counter_alloc: return rule; } +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *rule; + void *misc; + int i; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + for (i = 0; i < attr->mirror_count; i++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[i]->vport; + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + } + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = esw->fdb_table.offloads.fwd_fdb, + i++; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + if (attr->match_level == MLX5_MATCH_NONE) + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + else + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; + + rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); + + if (!IS_ERR(rule)) + esw->offloads.num_flows++; + + return rule; +} + void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, @@ -173,7 +229,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push) vport = in_rep; @@ -194,7 +250,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, goto out_notsupp; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push && in_rep->vport == FDB_UPLINK_VPORT) goto out_notsupp; @@ -245,7 +301,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) { + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -305,7 +361,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) 
vport->vlan_refcount--; return 0; @@ -363,7 +419,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn dest.vport.num = vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); @@ -407,7 +463,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dest.vport.num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -422,7 +478,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16); dmac_v[0] = 0x01; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -454,7 +510,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); err = -EOPNOTSUPP; - goto out; + goto out_namespace; } esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", @@ -464,6 +520,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + if (mlx5_esw_has_fwd_fdb(dev)) + esw_size >>= 1; + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; @@ -474,17 +533,37 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); - goto out; + goto out_namespace; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.offloads.fast_fdb = fdb; + + if (!mlx5_esw_has_fwd_fdb(dev)) + goto out_namespace; + + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, + esw_size, + ESW_OFFLOADS_NUM_GROUPS, 1, + flags); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create fwd table err %d\n", err); + goto out_ft; + } + esw->fdb_table.offloads.fwd_fdb = fdb; -out: + return err; + +out_ft: + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); +out_namespace: return err; } static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); + if (mlx5_esw_has_fwd_fdb(esw->dev)) + mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); } #define MAX_PF_SQ 256 @@ -530,7 +609,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); goto slow_fdb_err; } - esw->fdb_table.offloads.fdb = fdb; + esw->fdb_table.offloads.slow_fdb = fdb; /* create send-to-vport group */ memset(flow_group_in, 0, inlen); @@ -586,9 +665,9 @@ miss_rule_err: miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + 
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: - mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw_destroy_offloads_fast_fdb_table(esw); fast_fdb_err: ns_err: kvfree(flow_group_in); @@ -597,7 +676,7 @@ ns_err: static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.offloads.fast_fdb) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); @@ -606,7 +685,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); esw_destroy_offloads_fast_fdb_table(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index d05233c9b4f6..eb8b0fe0b4e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h @@ -35,6 +35,13 @@ #include <linux/mlx5/driver.h> +enum mlx5_fpga_device_id { + MLX5_FPGA_DEVICE_UNKNOWN = 0, + MLX5_FPGA_DEVICE_KU040 = 1, + MLX5_FPGA_DEVICE_KU060 = 2, + MLX5_FPGA_DEVICE_KU060_2 = 3, +}; + enum mlx5_fpga_image { MLX5_FPGA_IMAGE_USER = 0, MLX5_FPGA_IMAGE_FACTORY, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index de7fe087d6fe..4138a770ed57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -181,6 +181,7 @@ int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn, if (!conn->qp.active) return -ENOTCONN; + buf->dma_dir = DMA_TO_DEVICE; err = mlx5_fpga_conn_map_buf(conn, buf); if (err) return err; @@ -255,8 +256,6 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); buf = conn->qp.rq.bufs[ix]; conn->qp.rq.bufs[ix] = NULL; - if (!status) - buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); conn->qp.rq.cc++; if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) @@ -274,6 +273,7 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, return; } + buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n", buf->sg[0].size); conn->recv_cb(conn->cb_arg, buf); @@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) } inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages; + sizeof(u64) * conn->cq.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; @@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); - MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); - mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas); + mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen); kvfree(in); @@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn 
*conn, int cq_size) goto out; err_cqwq: - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); out: return err; } @@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn) tasklet_disable(&conn->cq.tasklet); tasklet_kill(&conn->cq.tasklet); mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); } static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc) @@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); - mlx5_fill_page_array(&conn->qp.wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); + mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h index 44bd9eccc711..634ae10e287b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h @@ -54,7 +54,7 @@ struct mlx5_fpga_conn { /* CQ */ struct { struct mlx5_cqwq wq; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; struct mlx5_core_cq mcq; struct tasklet_struct tasklet; } cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index dc8970346521..436a8136f26f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -50,6 +50,11 @@ static const char *const mlx5_fpga_error_strings[] = { "Temperature Critical", }; +static const char * const mlx5_fpga_qp_error_strings[] = { + "Null Syndrome", + "Retry Counter Expired", + "RNR Expired", +}; static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void) { struct mlx5_fpga_device *fdev = NULL; @@ -75,6 +80,21 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image) } } +static const char *mlx5_fpga_device_name(u32 device) +{ + switch (device) { + case MLX5_FPGA_DEVICE_KU040: + return "ku040"; + case MLX5_FPGA_DEVICE_KU060: + return "ku060"; + case MLX5_FPGA_DEVICE_KU060_2: + return "ku060_2"; + case MLX5_FPGA_DEVICE_UNKNOWN: + default: + return "unknown"; + } +} + static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) { struct mlx5_fpga_query query; @@ -128,8 +148,9 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned long flags; unsigned int max_num_qps; + unsigned long flags; + u32 fpga_device_id; int err; if (!fdev) @@ -143,12 +164,23 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) if (err) goto out; - mlx5_fpga_info(fdev, "device %u; %s image, version %u\n", - MLX5_CAP_FPGA(fdev->mdev, fpga_device), + fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device); + mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n", + mlx5_fpga_device_name(fpga_device_id), + fpga_device_id, mlx5_fpga_image_name(fdev->last_oper_image), - MLX5_CAP_FPGA(fdev->mdev, image_version)); + MLX5_CAP_FPGA(fdev->mdev, image_version), + MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version)); max_num_qps = MLX5_CAP_FPGA(mdev, 
shell_caps.max_num_qps); + if (!max_num_qps) { + mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n"); + err = -ENOTSUPP; + goto out; + } + err = mlx5_core_reserve_gids(mdev, max_num_qps); if (err) goto out; @@ -244,23 +276,38 @@ static const char *mlx5_fpga_syndrome_to_string(u8 syndrome) return "Unknown"; } +static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome) +{ + if (syndrome < ARRAY_SIZE(mlx5_fpga_qp_error_strings)) + return mlx5_fpga_qp_error_strings[syndrome]; + return "Unknown"; +} + void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) { struct mlx5_fpga_device *fdev = mdev->fpga; const char *event_name; bool teardown = false; unsigned long flags; + u32 fpga_qpn; u8 syndrome; - if (event != MLX5_EVENT_TYPE_FPGA_ERROR) { + switch (event) { + case MLX5_EVENT_TYPE_FPGA_ERROR: + syndrome = MLX5_GET(fpga_error_event, data, syndrome); + event_name = mlx5_fpga_syndrome_to_string(syndrome); + break; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); + event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); + fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); + break; + default: mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", event); return; } - syndrome = MLX5_GET(fpga_error_event, data, syndrome); - event_name = mlx5_fpga_syndrome_to_string(syndrome); - spin_lock_irqsave(&fdev->state_lock, flags); switch (fdev->state) { case MLX5_FPGA_STATUS_SUCCESS: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index a0573cc2fc9b..656f96be6e20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -44,8 +44,14 @@ #define SBU_QP_QUEUE_SIZE 8 #define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000) +/** + * enum mlx5_fpga_access_type - Enumerated the different methods possible for + * accessing the device memory address space + */ enum mlx5_fpga_access_type { + /** Use the slow CX-FPGA I2C bus */ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, + /** Use the fastest available method */ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 21048013826c..c9736238604a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -196,8 +196,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } -void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, - u32 swid, gfp_t flags) +static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + void *flow, u32 swid, gfp_t flags) { struct mlx5_teardown_stream_context *ctx; struct mlx5_fpga_dma_buf *buf; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 806e95523f9e..f9c2c03083eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2495,7 +2495,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; - prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1); + prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2); if (IS_ERR(prio)) goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 6d9053bcbe95..08eac92fc26c 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -33,6 +33,8 @@ #ifndef __MLX5E_IPOB_H__ #define __MLX5E_IPOB_H__ +#ifdef CONFIG_MLX5_CORE_IPOIB + #include <linux/mlx5/fs.h> #include "en.h" @@ -93,8 +95,32 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void); /* Extract mlx5e_priv from IPoIB netdev */ #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) +struct mlx5_wqe_eth_pad { + u8 rsvd0[16]; +}; + +struct mlx5i_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_datagram_seg datagram; + struct mlx5_wqe_eth_pad pad; + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; +}; + +static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, + struct mlx5i_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(*wqe, 0, sizeof(**wqe)); +} + netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +#endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index ea66448ba365..b97bb72b4db4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -36,7 +36,12 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) { - return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; +} + +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->fbc.frag_sz_m1 + 1; } u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) @@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) { - return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; } static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) { - return mlx5_wq_cyc_get_size(wq) << wq->log_stride; + return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride; } static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) @@ -67,17 +72,20 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) { - return mlx5_wq_ll_get_size(wq) << wq->log_stride; + return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride; } int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; int err; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); + wq->sz = wq->fbc.sz_m1 + 1; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -85,14 +93,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + fbc->frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ 
-105,17 +113,35 @@ err_db_free: return err; } +static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, + struct mlx5_wq_qp *qp) +{ + struct mlx5_frag_buf *rqb, *sqb; + + rqb = &qp->rq.fbc.frag_buf; + *rqb = *buf; + rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); + rqb->npages = 1 << get_order(rqb->size); + + sqb = &qp->sq.fbc.frag_buf; + *sqb = *buf; + sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); + sqb->npages = 1 << get_order(sqb->size); + sqb->frags += rqb->npages; /* first part is for the rq */ +} + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { int err; - wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4; - wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1; - - wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB); - wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1; + mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, + MLX5_GET(qpc, qpc, log_rq_size), + &wq->rq.fbc); + mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), + MLX5_GET(qpc, qpc, log_sq_size), + &wq->sq.fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -123,15 +149,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->rq.buf = wq_ctrl->buf.frags->buf; - wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq); + mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); + wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -147,7 +173,7 @@ err_db_free: int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl) + struct mlx5_wq_ctrl *wq_ctrl) { int err; @@ -160,7 +186,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, } err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), - &wq_ctrl->frag_buf, + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", @@ -168,7 +194,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->fbc.frag_buf = wq_ctrl->frag_buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -185,12 +211,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; struct mlx5_wqe_srq_next_seg *next_seg; int err; int i; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -198,17 +226,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if 
(err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; - for (i = 0; i < wq->sz_m1; i++) { + for (i = 0; i < fbc->sz_m1; i++) { next_seg = mlx5_wq_ll_get_wqe(wq, i); next_seg->next_wqe_index = cpu_to_be16(i + 1); } @@ -227,12 +255,7 @@ err_db_free: void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) { - mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); } -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl) -{ - mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf); - mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index f3dfa0ca3c5d..0b47126815b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -48,17 +48,12 @@ struct mlx5_wq_ctrl { struct mlx5_db db; }; -struct mlx5_frag_wq_ctrl { - struct mlx5_core_dev *mdev; - struct mlx5_frag_buf frag_buf; - struct mlx5_db db; -}; - struct mlx5_wq_cyc { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; - u16 sz_m1; - u8 log_stride; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; }; struct mlx5_wq_qp { @@ -73,20 +68,19 @@ struct mlx5_cqwq { }; struct mlx5_wq_ll { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; __be16 *tail_next; - u16 sz_m1; u16 head; u16 wqe_ctr; u16 cur_sz; - u8 log_stride; }; int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, @@ -94,7 +88,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl); + struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, @@ -103,16 +97,67 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl); + +static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) +{ + return wq->cur_sz == wq->sz; +} + +static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { - return ctr & wq->sz_m1; + return ctr & wq->fbc.sz_m1; +} + +static inline u16 
mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.frag_sz_m1; +} + +static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); } static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) @@ -123,9 +168,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) return !equal && !smaller; } +static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) { - return wq->cc & wq->fbc.sz_m1; + return mlx5_cqwq_ctr2ix(wq, wq->cc); } static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) @@ -133,9 +183,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } +static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) { - return wq->cc >> wq->fbc.log_sz; + return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc); } static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) @@ -166,7 +221,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) { - return wq->cur_sz == wq->sz_m1; + return wq->cur_sz == wq->fbc.sz_m1; } static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) @@ -176,7 +231,7 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 8da91b023b13..2bc48054b685 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -58,7 +58,7 @@ static inline void mlxsw_cmd_mbox_zero(char *mbox) struct mlxsw_core; int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size); @@ -67,7 +67,7 @@ static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode, size_t in_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - in_mbox, in_mbox_size, NULL, 0); + false, in_mbox, in_mbox_size, NULL, 0); } static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, @@ -76,7 +76,7 @@ static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, char *out_mbox, size_t out_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, - out_mbox_direct, NULL, 0, + out_mbox_direct, false, NULL, 0, out_mbox, out_mbox_size); } @@ -84,7 +84,7 @@ static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - NULL, 0, NULL, 0); + false, NULL, 0, NULL, 0); } enum mlxsw_cmd_opcode { @@ -179,6 +179,8 @@ enum 
mlxsw_cmd_status { MLXSW_CMD_STATUS_BAD_INDEX = 0x0A, /* NVMEM checksum/CRC failed. */ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B, + /* Device is currently running reset */ + MLXSW_CMD_STATUS_RUNNING_RESET = 0x26, /* Bad management packet (silently discarded). */ MLXSW_CMD_STATUS_BAD_PKT = 0x30, }; @@ -208,6 +210,8 @@ static inline const char *mlxsw_cmd_status_str(u8 status) return "BAD_INDEX"; case MLXSW_CMD_STATUS_BAD_NVMEM: return "BAD_NVMEM"; + case MLXSW_CMD_STATUS_RUNNING_RESET: + return "RUNNING_RESET"; case MLXSW_CMD_STATUS_BAD_PKT: return "BAD_PKT"; default: @@ -869,10 +873,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8); */ static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core, + bool reset_ok, char *in_mbox, char *out_mbox) { return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG, - 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE, + 0, 0, false, reset_ok, + in_mbox, MLXSW_CMD_MBOX_SIZE, out_mbox, MLXSW_CMD_MBOX_SIZE); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a38faec45b30..f9c724752a32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -770,27 +770,35 @@ static void mlxsw_core_driver_put(const char *kind) static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; - return mlxsw_core->driver->port_split(mlxsw_core, port_index, count); + return mlxsw_core->driver->port_split(mlxsw_core, port_index, count, + extack); } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, - unsigned int port_index) + unsigned int port_index, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; - return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); + return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index, + extack); } static int @@ -963,17 +971,16 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, pool_type, p_cur, p_max); } -static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink) +static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus; int err; - if (!mlxsw_bus->reset) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) return -EOPNOTSUPP; mlxsw_core_bus_device_unregister(mlxsw_core, true); - mlxsw_bus->reset(mlxsw_core->bus_priv); err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, mlxsw_core->bus, mlxsw_core->bus_priv, true, @@ -1480,6 +1487,7 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, { enum mlxsw_emad_op_tlv_status status; int err, n_retry; + bool reset_ok; char *in_mbox, *out_mbox, *tmp; dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", @@ -1501,9 +1509,16 @@ static int 
mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); mlxsw_emad_pack_reg_tlv(tmp, reg, payload); + /* There is a special treatment needed for MRSR (reset) register. + * The command interface will return error after the command + * is executed, so tell the lower layer to expect it + * and cope accordingly. + */ + reset_ok = reg->id == MLXSW_REG_MRSR_ID; + n_retry = 0; retry: - err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); + err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox); if (!err) { err = mlxsw_emad_process_status(out_mbox, &status); if (err) { @@ -1793,7 +1808,7 @@ static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, } int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size) { @@ -1816,7 +1831,15 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mbox, in_mbox_size, out_mbox, out_mbox_size, &status); - if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { + if (!err && out_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); + } + + if (reset_ok && err == -EIO && + status == MLXSW_CMD_STATUS_RUNNING_RESET) { + err = 0; + } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod, status, mlxsw_cmd_status_str(status)); @@ -1826,10 +1849,6 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mod); } - if (!err && out_mbox) { - dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); - mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); - } return err; } EXPORT_SYMBOL(mlxsw_cmd_exec); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 4eac7fbd07d5..552cfa29c2f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -274,8 +274,9 @@ struct mlxsw_driver { int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, enum devlink_port_type new_type); int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count); - int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); + unsigned int count, struct netlink_ext_ack *extack); + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); @@ -337,6 +338,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id) #define MLXSW_BUS_F_TXRX BIT(0) +#define MLXSW_BUS_F_RESET BIT(1) struct mlxsw_bus { const char *kind; @@ -344,7 +346,6 @@ struct mlxsw_bus { const struct mlxsw_config_profile *profile, struct mlxsw_res *res); void (*fini)(void *bus_priv); - void (*reset)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index db794a1a3a7e..fc4557245ff4 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1371,6 +1371,51 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + unsigned long end; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + int err; + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; + } + + /* We must wait for the HW to become responsive once again. */ + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); + + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + do { + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); + + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) + break; + cond_resched(); + } while (time_before(jiffies, end)); + return 0; +} + +static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) + dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); + return err; +} + +static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + pci_free_irq_vectors(mlxsw_pci->pdev); +} + static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, struct mlxsw_res *res) @@ -1398,6 +1443,16 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_out_mbox_alloc; + err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); + if (err) + goto err_sw_reset; + + err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); + if (err < 0) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_alloc_irq; + } + err = mlxsw_cmd_query_fw(mlxsw_core, mbox); if (err) goto err_query_fw; @@ -1481,6 +1536,9 @@ err_fw_area_init: err_doorbell_page_bar: err_iface_rev: err_query_fw: + mlxsw_pci_free_irq_vectors(mlxsw_pci); +err_alloc_irq: +err_sw_reset: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); err_out_mbox_alloc: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); @@ -1496,6 +1554,7 @@ static void mlxsw_pci_fini(void *bus_priv) free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); + mlxsw_pci_free_irq_vectors(mlxsw_pci); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } @@ -1677,58 +1736,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, return err; } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) -{ - unsigned long end; - - mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); - if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { - msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - return 0; - } - - /* Reset needs to be written before we read control register, and - * we must wait for the HW to become responsive once again - */ - wmb(); - msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); - - end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - - if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) - break; - cond_resched(); - } while (time_before(jiffies, end)); - return 0; -} - -static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - 
pci_free_irq_vectors(mlxsw_pci->pdev); -} - -static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - int err; - - err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); - if (err < 0) - dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); - return err; -} - -static void mlxsw_pci_reset(void *bus_priv) -{ - struct mlxsw_pci *mlxsw_pci = bus_priv; - - mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); - mlxsw_pci_alloc_irq_vectors(mlxsw_pci); -} - static const struct mlxsw_bus mlxsw_pci_bus = { .kind = "pci", .init = mlxsw_pci_init, @@ -1736,8 +1743,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, .skb_transmit = mlxsw_pci_skb_transmit, .cmd_exec = mlxsw_pci_cmd_exec, - .features = MLXSW_BUS_F_TXRX, - .reset = mlxsw_pci_reset, + .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1795,18 +1801,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); - err = mlxsw_pci_sw_reset(mlxsw_pci, id); - if (err) { - dev_err(&pdev->dev, "Software reset failed\n"); - goto err_sw_reset; - } - - err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); - if (err < 0) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1823,9 +1817,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - mlxsw_pci_free_irq_vectors(mlxsw_pci); -err_msix_init: -err_sw_reset: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1843,7 +1834,6 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); - mlxsw_pci_free_irq_vectors(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 3f4d7e22cece..1877d9f8a11a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -7034,6 +7034,30 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, mlxsw_reg_mpar_pa_id_set(payload, pa_id); } +/* MRSR - Management Reset and Shutdown Register + * --------------------------------------------- + * MRSR register is used to reset or shutdown the switch or + * the entire system (when applicable). + */ +#define MLXSW_REG_MRSR_ID 0x9023 +#define MLXSW_REG_MRSR_LEN 0x08 + +MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN); + +/* reg_mrsr_command + * Reset/shutdown command + * 0 - do nothing + * 1 - software reset + * Access: WO + */ +MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4); + +static inline void mlxsw_reg_mrsr_pack(char *payload) +{ + MLXSW_REG_ZERO(mrsr, payload); + mlxsw_reg_mrsr_command_set(payload, 1); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. 
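Editorial aside: the MRSR hunk above adds a register definition plus the reset_ok plumbing in mlxsw_cmd_exec, so a software reset issued through the ordinary register interface no longer fails when the firmware answers RUNNING_RESET. A minimal sketch of a caller, assuming the mlxsw core/reg headers from this patch set (the function name is hypothetical; the pack/write helpers are the ones shown in the diff):

/* Hypothetical caller: trigger the switch software reset via MRSR.
 * mlxsw_core_reg_access_cmd() flags MRSR accesses as reset_ok, so the
 * RUNNING_RESET status returned while the ASIC resets is not reported
 * as -EIO to this caller.
 */
static int example_trigger_sw_reset(struct mlxsw_core *core)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl);	/* command = 1, software reset */
	return mlxsw_reg_write(core, MLXSW_REG(mrsr), mrsr_pl);
}

After the write, the PCI bus code shown above still has to wait MLXSW_PCI_SW_RESET_WAIT_MSECS and poll FW_READY for the magic value before talking to the device again.
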
@@ -7898,6 +7922,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), + MLXSW_REG(mrsr), MLXSW_REG(mlcr), MLXSW_REG(mpsc), MLXSW_REG(mcqi), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bb252b36994d..968b88af2ef5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3092,7 +3092,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3104,6 +3105,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } @@ -3112,11 +3114,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (count != 2 && count != 4) { netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); + NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); return -EINVAL; } if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); + NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); return -EINVAL; } @@ -3125,6 +3129,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, base_port = local_port; if (mlxsw_sp->ports[base_port + 1]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } else { @@ -3132,6 +3137,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (mlxsw_sp->ports[base_port + 1] || mlxsw_sp->ports[base_port + 3]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } @@ -3153,7 +3159,8 @@ err_port_split_create: return err; } -static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3165,11 +3172,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); + netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); + NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } @@ -4422,6 +4431,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); return -EINVAL; } + if (is_vlan_dev(upper_dev) && + vlan_dev_vlan_id(upper_dev) == 1) { + NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); + return -EINVAL; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index da3f7f527360..3d187d88cc7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -191,7 +191,9 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, if (br_vlan_get_info(edev, vid, &vinfo)) return NULL; - if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)) + if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED) + *p_vid = 0; + else *p_vid = vid; return edev; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8c9cf8ee9398..e97652c40d13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1144,6 +1144,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -1694,7 +1697,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); - if (switchdev_trans_ph_commit(trans)) { + if (switchdev_trans_ph_prepare(trans)) { /* The event is emitted before the changes are actually * applied to the bridge. Therefore schedule the respin * call for later, so that the respin logic sees the @@ -1741,6 +1744,9 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_port)) return -EINVAL; @@ -1850,7 +1856,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, break; } - mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); return err; } diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 7ed08486ae23..c805dcbebd02 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev) for (i = 0; i < SONIC_NUM_RRS; i++) { dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { while(i > 0) { /* free any that were mapped successfully */ i--; dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index e40f6f06417b..b157ccd8c80f 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -35,24 +35,299 @@ #include <linux/kernel.h> #include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nffw.h" #include "../nfp_app.h" +#include "../nfp_abi.h" #include "../nfp_main.h" #include "../nfp_net.h" #include "main.h" +#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u" +#define NFP_QLVL_STRIDE 16 +#define NFP_QLVL_BLOG_BYTES 0 +#define NFP_QLVL_BLOG_PKTS 4 +#define NFP_QLVL_THRS 8 + +#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats" +#define NFP_QMSTAT_STRIDE 32 +#define NFP_QMSTAT_NON_STO 0 +#define NFP_QMSTAT_STO 8 +#define NFP_QMSTAT_DROP 16 +#define NFP_QMSTAT_ECN 24 + +static unsigned 
long long +nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) +{ + return alink->abm->q_lvls->addr + + (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; +} + +static int +nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, unsigned int i, + bool is_u64, u64 *res) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 val32, mur; + u64 val, addr; + int err; + + mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); + + addr = sym->addr + (alink->queue_base + i) * stride + offset; + if (is_u64) + err = nfp_cpp_readq(cpp, mur, addr, &val); + else + err = nfp_cpp_readl(cpp, mur, addr, &val32); + if (err) { + nfp_err(cpp, + "RED offload reading stat failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + *res = is_u64 ? val : val32; + return 0; +} + +static int +nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, bool is_u64, + u64 *res) +{ + u64 val, sum = 0; + unsigned int i; + int err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i, + is_u64, &val); + if (err) + return err; + sum += val; + } + + *res = sum; + return 0; +} + +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 muw; + int err; + + muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, + alink->abm->q_lvls->domain); + + err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + if (err) { + nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + return 0; +} + +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) +{ + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_set_q_lvl(alink, i, val); + if (err) + return err; + } + + return 0; +} + +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_NON_STO, i, true, &val)) + return 0; + return val; +} + +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_STO, i, true, &val)) + return 0; + return val; +} + +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats) +{ + int err; + + stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + i, false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + i, false, &stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &stats->overlimits); +} + +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats) +{ + u64 pkts = 0, bytes = 0; + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + bytes += nn_readq(alink->vnic, 
NFP_NET_CFG_RXR_STATS(i) + 8); + } + stats->tx_pkts = pkts; + stats->tx_bytes = bytes; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + false, &stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &stats->overlimits); +} + +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &xstats->ecn_marked); +} + +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &xstats->ecn_marked); +} + +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE, + NULL, 0, NULL, 0); +} + +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE, + NULL, 0, NULL, 0); +} + void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) { alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ); alink->queue_base /= alink->vnic->stride_rx; } +static const struct nfp_rtsym * +nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) +{ + const struct nfp_rtsym *sym; + + sym = nfp_rtsym_lookup(pf->rtbl, name); + if (!sym) { + nfp_err(pf->cpp, "Symbol '%s' not found\n", name); + return ERR_PTR(-ENOENT); + } + if (sym->size != size) { + nfp_err(pf->cpp, + "Symbol '%s' wrong size: expected %u got %llu\n", + name, size, sym->size); + return ERR_PTR(-EINVAL); + } + + return sym; +} + +static const struct nfp_rtsym * +nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name, + unsigned int size) +{ + return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS); +} + int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) { struct nfp_pf *pf = abm->app->pf; + const struct nfp_rtsym *sym; unsigned int pf_id; + char pf_symbol[64]; pf_id = nfp_cppcore_pcie_unit(pf->cpp); abm->pf_id = pf_id; + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->q_lvls = sym; + + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->qm_stats = sym; + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5a12bb20bced..1561c2724c26 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -38,6 +38,9 @@ #include <linux/netdevice.h> #include 
<linux/rcupdate.h> #include <linux/slab.h> +#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/red.h> #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -55,6 +58,290 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) FIELD_PREP(NFP_ABM_PORTID_ID, id); } +static int +__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs, u32 init_val) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + int ret; + + ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); + memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); + + alink->parent = handle; + alink->num_qdiscs = qs; + port->tc_offload_cnt = qs; + + return ret; +} + +static void +nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs) +{ + __nfp_abm_reset_root(netdev, alink, handle, qs, ~0); +} + +static int +nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + unsigned int i = TC_H_MIN(opt->parent) - 1; + + if (opt->parent == TC_H_ROOT) + i = 0; + else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) + i = TC_H_MIN(opt->parent) - 1; + else + return -EOPNOTSUPP; + + if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) + return -EOPNOTSUPP; + + return i; +} + +static void +nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle) +{ + unsigned int i; + + for (i = 0; i < alink->num_qdiscs; i++) + if (handle == alink->qdiscs[i].handle) + break; + if (i == alink->num_qdiscs) + return; + + if (alink->parent == TC_H_ROOT) { + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + } else { + nfp_abm_ctrl_set_q_lvl(alink, i, ~0); + memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); + } +} + +static int +nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + bool existing; + int i, err; + + i = nfp_abm_red_find(alink, opt); + existing = i >= 0; + + if (opt->set.min != opt->set.max || !opt->set.is_ecn) { + nfp_warn(alink->abm->app->cpp, + "RED offload failed - unsupported parameters\n"); + err = -EINVAL; + goto err_destroy; + } + + if (existing) { + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); + else + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + if (err) + goto err_destroy; + return 0; + } + + if (opt->parent == TC_H_ROOT) { + i = 0; + err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, + opt->set.min); + } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { + i = TC_H_MIN(opt->parent) - 1; + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + } else { + return -EINVAL; + } + /* Set the handle to try full clean up, in case IO failed */ + alink->qdiscs[i].handle = opt->handle; + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, + &alink->qdiscs[i].stats); + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, + &alink->qdiscs[i].xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, + &alink->qdiscs[i].xstats); + if (err) + goto err_destroy; + + alink->qdiscs[i].stats.backlog_pkts = 0; + alink->qdiscs[i].stats.backlog_bytes = 0; + + return 0; +err_destroy: + /* If the qdisc keeps on living, but we can't offload undo changes */ + if (existing) { + opt->set.qstats->qlen -= 
alink->qdiscs[i].stats.backlog_pkts; + opt->set.qstats->backlog -= + alink->qdiscs[i].stats.backlog_bytes; + } + nfp_abm_red_destroy(netdev, alink, opt->handle); + + return err; +} + +static void +nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, + struct tc_qopt_offload_stats *stats) +{ + _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, + new->tx_pkts - old->tx_pkts); + stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; + stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; + stats->qstats->overlimits += new->overlimits - old->overlimits; + stats->qstats->drops += new->drops - old->drops; +} + +static int +nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_stats *prev_stats; + struct nfp_alink_stats stats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_stats = &alink->qdiscs[i].stats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, prev_stats, &opt->stats); + + *prev_stats = stats; + + return 0; +} + +static int +nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_xstats *prev_xstats; + struct nfp_alink_xstats xstats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_xstats = &alink->qdiscs[i].xstats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, &xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); + if (err) + return err; + + opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; + opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; + + *prev_xstats = xstats; + + return 0; +} + +static int +nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + switch (opt->command) { + case TC_RED_REPLACE: + return nfp_abm_red_replace(netdev, alink, opt); + case TC_RED_DESTROY: + nfp_abm_red_destroy(netdev, alink, opt->handle); + return 0; + case TC_RED_STATS: + return nfp_abm_red_stats(alink, opt); + case TC_RED_XSTATS: + return nfp_abm_red_xstats(alink, opt); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) +{ + struct nfp_alink_stats stats; + unsigned int i; + int err; + + for (i = 0; i < alink->num_qdiscs; i++) { + if (alink->qdiscs[i].handle == TC_H_UNSPEC) + continue; + + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, + &opt->stats); + } + + return 0; +} + +static int +nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + switch (opt->command) { + case TC_MQ_CREATE: + nfp_abm_reset_root(netdev, alink, opt->handle, + alink->total_queues); + return 0; + case TC_MQ_DESTROY: + if (opt->handle == alink->parent) + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + return 0; + case TC_MQ_STATS: + return nfp_abm_mq_stats(alink, opt); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, void *type_data) +{ + struct nfp_repr *repr = netdev_priv(netdev); + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!port || port->type != NFP_PORT_PF_PORT) + return 
-EOPNOTSUPP; + + switch (type) { + case TC_SETUP_QDISC_MQ: + return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data); + case TC_SETUP_QDISC_RED: + return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id) { enum nfp_repr_type rtype; @@ -83,14 +370,18 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, struct nfp_reprs *reprs; struct nfp_repr *repr; struct nfp_port *port; + unsigned int txqs; int err; - if (ptype == NFP_PORT_PHYS_PORT) + if (ptype == NFP_PORT_PHYS_PORT) { rtype = NFP_REPR_TYPE_PHYS_PORT; - else + txqs = 1; + } else { rtype = NFP_REPR_TYPE_PF; + txqs = alink->vnic->max_rx_rings; + } - netdev = nfp_repr_alloc(app); + netdev = nfp_repr_alloc_mqs(app, txqs, 1); if (!netdev) return -ENOMEM; repr = netdev_priv(netdev); @@ -182,6 +473,7 @@ static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app) static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm) { nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; return 0; @@ -200,6 +492,10 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) struct nfp_net *nn; int err; + err = nfp_abm_ctrl_qm_enable(abm); + if (err) + return err; + list_for_each_entry(nn, &pf->vnics, vnic_list) { struct nfp_abm_link *alink = nn->app_priv; @@ -217,6 +513,7 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) err_kill_all_reprs: nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); return err; } @@ -291,13 +588,21 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) alink->abm = abm; alink->vnic = nn; alink->id = id; + alink->parent = TC_H_ROOT; + alink->total_queues = alink->vnic->max_rx_rings; + alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues, + GFP_KERNEL); + if (!alink->qdiscs) { + err = -ENOMEM; + goto err_free_alink; + } /* This is a multi-host app, make sure MAC/PHY is up, but don't * make the MAC/PHY state follow the state of any of the ports. 
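/*
 * A sketch of the queue lookup the RED/MQ handlers above rely on; the
 * example_red_index() helper is hypothetical, only the TC_H_* macros are
 * the standard ones.  A RED qdisc grafted under the offloaded MQ with
 * parent ffff:4 maps to slot 3 of alink->qdiscs[], while a root RED uses
 * slot 0.
 */
static int example_red_index(u32 alink_parent, u32 opt_parent,
			     unsigned int num_qdiscs)
{
	unsigned int i;

	if (opt_parent == TC_H_ROOT)
		i = 0;				/* single RED at the root */
	else if (TC_H_MAJ(alink_parent) == TC_H_MAJ(opt_parent))
		i = TC_H_MIN(opt_parent) - 1;	/* MQ child: minors are 1-based */
	else
		return -EOPNOTSUPP;

	return i < num_qdiscs ? (int)i : -EOPNOTSUPP;
}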
*/ err = nfp_eth_set_configured(app->cpp, eth_port->index, true); if (err < 0) - goto err_free_alink; + goto err_free_qdiscs; netif_keep_dst(nn->dp.netdev); @@ -306,6 +611,8 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) return 0; +err_free_qdiscs: + kvfree(alink->qdiscs); err_free_alink: kfree(alink); return err; @@ -316,9 +623,57 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) struct nfp_abm_link *alink = nn->app_priv; nfp_abm_kill_reprs(alink->abm, alink); + kvfree(alink->qdiscs); kfree(alink); } +static u64 * +nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + *data++ = nfp_abm_ctrl_stat_non_sto(alink, i); + *data++ = nfp_abm_ctrl_stat_sto(alink, i); + } + return data; +} + +static int +nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + + if (port->type != NFP_PORT_PF_PORT) + return 0; + alink = repr->app_priv; + return alink->vnic->dp.num_r_vecs * 2; +} + +static u8 * +nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, + u8 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + data = nfp_pr_et(data, "q%u_no_wait", i); + data = nfp_pr_et(data, "q%u_delayed", i); + } + return data; +} + static int nfp_abm_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; @@ -350,6 +705,11 @@ static int nfp_abm_init(struct nfp_app *app) if (err) goto err_free_abm; + /* We start in legacy mode, make sure advanced queuing is disabled */ + err = nfp_abm_ctrl_qm_disable(abm); + if (err) + goto err_free_abm; + err = -ENOMEM; reprs = nfp_reprs_alloc(pf->max_data_vnics); if (!reprs) @@ -392,6 +752,12 @@ const struct nfp_app_type app_abm = { .vnic_alloc = nfp_abm_vnic_alloc, .vnic_free = nfp_abm_vnic_free, + .port_get_stats = nfp_abm_port_get_stats, + .port_get_stats_count = nfp_abm_port_get_stats_count, + .port_get_stats_strings = nfp_abm_port_get_stats_strings, + + .setup_tc = nfp_abm_setup_tc, + .eswitch_mode_get = nfp_abm_eswitch_mode_get, .eswitch_mode_set = nfp_abm_eswitch_mode_set, diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index 5938b69b8a84..934a70835473 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -49,11 +49,55 @@ struct nfp_net; * @pf_id: ID of our PF link * @eswitch_mode: devlink eswitch mode, advanced functions only visible * in switchdev mode + * @q_lvls: queue level control area + * @qm_stats: queue statistics symbol */ struct nfp_abm { struct nfp_app *app; unsigned int pf_id; enum devlink_eswitch_mode eswitch_mode; + const struct nfp_rtsym *q_lvls; + const struct nfp_rtsym *qm_stats; +}; + +/** + * struct nfp_alink_stats - ABM NIC statistics + * @tx_pkts: number of TXed packets + * @tx_bytes: number of TXed bytes + * @backlog_pkts: momentary backlog length (packets) + * @backlog_bytes: momentary backlog length (bytes) + * @overlimits: number of ECN marked TXed packets (accumulative) + * @drops: number of tail-dropped 
packets (accumulative) + */ +struct nfp_alink_stats { + u64 tx_pkts; + u64 tx_bytes; + u64 backlog_pkts; + u64 backlog_bytes; + u64 overlimits; + u64 drops; +}; + +/** + * struct nfp_alink_xstats - extended ABM NIC statistics + * @ecn_marked: number of ECN marked TXed packets + * @pdrop: number of hard drops due to queue limit + */ +struct nfp_alink_xstats { + u64 ecn_marked; + u64 pdrop; +}; + +/** + * struct nfp_red_qdisc - representation of single RED Qdisc + * @handle: handle of currently offloaded RED Qdisc + * @stats: statistics from last refresh + * @xstats: base of extended statistics + */ +struct nfp_red_qdisc { + u32 handle; + struct nfp_alink_stats stats; + struct nfp_alink_xstats xstats; }; /** @@ -62,14 +106,37 @@ struct nfp_abm { * @vnic: data vNIC * @id: id of the data vNIC * @queue_base: id of base to host queue within PCIe (not QC idx) + * @total_queues: number of PF queues + * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT + * @num_qdiscs: number of currently used qdiscs + * @qdiscs: array of qdiscs */ struct nfp_abm_link { struct nfp_abm *abm; struct nfp_net *vnic; unsigned int id; unsigned int queue_base; + unsigned int total_queues; + u32 parent; + unsigned int num_qdiscs; + struct nfp_red_qdisc *qdiscs; }; void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, + u32 val); +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats); +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats); +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i); +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i); +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index 7ffa6e6a9d1c..8b56c27931bf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -59,12 +59,26 @@ * @NFP_MBOX_POOL_SET: set shared buffer pool info/config * Input - struct nfp_shared_buf_pool_info_set * Output - None + * + * @NFP_MBOX_PCIE_ABM_ENABLE: enable PCIe-side advanced buffer management + * Enable advanced buffer management of the PCIe block. If ABM is disabled + * PCIe block maintains a very short queue of buffers and does tail drop. + * ABM allows more advanced buffering and priority control. 
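/*
 * A sketch of how the two mailbox commands introduced below are meant to be
 * used by the eswitch-mode code earlier in the patch; example_enter_switchdev()
 * and example_spawn_reprs() are hypothetical placeholders.  ABM is enabled
 * before representors come up and disabled again on failure or when the app
 * returns to legacy (tail-drop) mode.
 */
static int example_enter_switchdev(struct nfp_abm *abm)
{
	int err;

	err = nfp_abm_ctrl_qm_enable(abm);	/* NFP_MBOX_PCIE_ABM_ENABLE */
	if (err)
		return err;

	err = example_spawn_reprs(abm);		/* placeholder for repr setup */
	if (err) {
		nfp_abm_ctrl_qm_disable(abm);	/* NFP_MBOX_PCIE_ABM_DISABLE */
		return err;
	}

	abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
	return 0;
}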
+ * Input - None + * Output - None + * + * @NFP_MBOX_PCIE_ABM_DISABLE: disable PCIe-side advanced buffer management + * Input - None + * Output - None */ enum nfp_mbox_cmd { NFP_MBOX_NO_CMD = 0x00, NFP_MBOX_POOL_GET = 0x01, NFP_MBOX_POOL_SET = 0x02, + + NFP_MBOX_PCIE_ABM_ENABLE = 0x03, + NFP_MBOX_PCIE_ABM_DISABLE = 0x04, }; #define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index c9d8a7ab311e..f28b244f4ee7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -43,6 +43,7 @@ #include "nfp_main.h" #include "nfp_net.h" #include "nfp_net_repr.h" +#include "nfp_port.h" static const struct nfp_app_type *apps[] = { [NFP_APP_CORE_NIC] = &app_nic, @@ -85,6 +86,27 @@ const char *nfp_app_mip_name(struct nfp_app *app) return nfp_mip_name(app->pf->mip); } +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats) + return data; + return port->app->type->port_get_stats(port->app, port, data); +} + +int nfp_app_port_get_stats_count(struct nfp_port *port) +{ + if (!port || !port->app || !port->app->type->port_get_stats_count) + return 0; + return port->app->type->port_get_stats_count(port->app, port); +} + +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats_strings) + return data; + return port->app->type->port_get_stats_strings(port->app, port, data); +} + struct sk_buff * nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 23b99a4e05c2..ee74caacb015 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -90,6 +90,9 @@ extern const struct nfp_app_type app_abm; * @repr_stop: representor netdev stop callback * @check_mtu: MTU change request on a netdev (verify it is valid) * @repr_change_mtu: MTU change request on repr (make and verify change) + * @port_get_stats: get extra ethtool statistics for a port + * @port_get_stats_count: get count of extra statistics for a port + * @port_get_stats_strings: get strings for extra statistics * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler @@ -132,6 +135,12 @@ struct nfp_app_type { int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev, int new_mtu); + u64 *(*port_get_stats)(struct nfp_app *app, + struct nfp_port *port, u64 *data); + int (*port_get_stats_count)(struct nfp_app *app, struct nfp_port *port); + u8 *(*port_get_stats_strings)(struct nfp_app *app, + struct nfp_port *port, u8 *data); + int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); @@ -404,6 +413,10 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data); +int nfp_app_port_get_stats_count(struct nfp_port *port); +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data); + struct nfp_reprs * nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type); struct nfp_reprs * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 71c2edd83031..db463e20a876 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -92,7 +92,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes) static int nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; @@ -123,7 +123,8 @@ out: } static int -nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index) +nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index eea11e881bf5..75110c8d6a90 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3286,11 +3286,12 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) if (nn->port) return nfp_port_get_phys_port_name(netdev, name, len); - if (!nn->dp.is_vf) { - n = snprintf(name, len, "%d", nn->id); - if (n >= len) - return -EINVAL; - } + if (nn->dp.is_vf) + return -EOPNOTSUPP; + + n = snprintf(name, len, "n%d", nn->id); + if (n >= len) + return -EINVAL; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index c9016419bfa0..26d1cc4e2906 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -437,7 +437,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } -static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) 
{ va_list args; @@ -637,6 +637,7 @@ static void nfp_net_get_strings(struct net_device *netdev, nn->dp.num_tx_rings, false); data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(nn->port, data); break; } } @@ -651,6 +652,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->dp.num_rx_rings, nn->dp.num_tx_rings); data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(nn->port, data); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -662,7 +664,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) return nfp_vnic_get_sw_stats_count(netdev) + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, nn->dp.num_tx_rings) + - nfp_mac_get_stats_count(netdev); + nfp_mac_get_stats_count(netdev) + + nfp_app_port_get_stats_count(nn->port); default: return -EOPNOTSUPP; } @@ -679,6 +682,7 @@ static void nfp_port_get_strings(struct net_device *netdev, data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(port, data); break; } } @@ -693,6 +697,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); else data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(port, data); } static int nfp_port_get_sset_count(struct net_device *netdev, int sset) @@ -706,6 +711,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) count = nfp_vnic_get_hw_stats_count(0, 0); else count = nfp_mac_get_stats_count(netdev); + count += nfp_app_port_get_stats_count(port); return count; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 117eca6819de..d7b712f6362f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -360,12 +360,13 @@ void nfp_repr_free(struct net_device *netdev) __nfp_repr_free(netdev_priv(netdev)); } -struct net_device *nfp_repr_alloc(struct nfp_app *app) +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs) { struct net_device *netdev; struct nfp_repr *repr; - netdev = alloc_etherdev(sizeof(*repr)); + netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs); if (!netdev) return NULL; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 8366e4f3c623..1bf2b18109ab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -126,7 +126,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); void nfp_repr_free(struct net_device *netdev); -struct net_device *nfp_repr_alloc(struct nfp_app *app); +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs); void nfp_repr_clean_and_free(struct nfp_repr *repr); void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, @@ -134,4 +135,8 @@ void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs); int nfp_reprs_resync_phys_ports(struct nfp_app *app); +static inline struct net_device 
*nfp_repr_alloc(struct nfp_app *app) +{ + return nfp_repr_alloc_mqs(app, 1, 1); +} #endif /* NFP_NET_REPR_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 18666750456e..51f10ae2d53e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -122,6 +122,8 @@ struct nfp_port { extern const struct ethtool_ops nfp_port_ethtool_ops; extern const struct switchdev_ops nfp_port_switchdev_ops; +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); + int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 4e19add1c539..b0da3d436850 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -87,6 +87,11 @@ struct resource; #define NFP_CPP_TARGET_ID_MASK 0x1f +#define NFP_CPP_ATOMIC_RD(target, island) \ + NFP_CPP_ISLAND_ID((target), 3, 0, (island)) +#define NFP_CPP_ATOMIC_WR(target, island) \ + NFP_CPP_ISLAND_ID((target), 4, 0, (island)) + /** * NFP_CPP_ID() - pack target, token, and action into a CPP ID. * @target: NFP CPP target id diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 6cec2a6a3dcc..7503aa222392 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter) if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { memcpy(adapter->mdump.md_template, addr, size); } else { - dev_err(&adapter->pdev->dev, "Failed to get minidump template, " - "err_code : %d, requested_size : %d, actual_size : %d\n ", + dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n", cmd.rsp.cmd, size, cmd.rsp.arg2); } pci_free_consistent(adapter->pdev, size, addr, md_template_addr); @@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter) if ((err == NX_RCODE_CMD_INVALID) || (err == NX_RCODE_CMD_NOT_IMPL)) { dev_info(&adapter->pdev->dev, - "Flashed firmware version does not support minidump, " - "minimum version required is [ %u.%u.%u ].\n ", + "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n", NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, NX_MD_SUPPORT_SUBVERSION); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 820b226d6ff8..b5b5ff725426 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -47,6 +47,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) p_mgr->srq_count = num_srqs; } -static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; @@ -936,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_mngr->t2[i].p_phys, GFP_KERNEL); + *p_virt = 
dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + size, &p_mngr->t2[i].p_phys, + GFP_KERNEL); if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; } - memset(*p_virt, 0, size); p_mngr->t2[i].size = size; total_size -= size; } @@ -2071,7 +2071,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, u32 num_cons, num_qps, num_srqs; enum protocol_type proto; - num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); + num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_NOTICE(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index a4e95869889f..758a8b4c0de8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type); u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type); +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 39124b594a36..b9ec460dd996 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -183,16 +183,9 @@ enum platform_ids { MAX_PLATFORM_IDS }; -struct chip_platform_defs { - u8 num_ports; - u8 num_pfs; - u8 num_vfs; -}; - /* Chip constant definitions */ struct chip_defs { const char *name; - struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; }; /* Platform constant definitions */ @@ -317,6 +310,11 @@ struct phy_defs { u32 tbus_data_hi_addr; }; +/* Split type definitions */ +struct split_type_defs { + const char *name; +}; + /******************************** Constants **********************************/ #define MAX_LCIDS 320 @@ -469,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "bb", - {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "ah", - {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "reserved", - {{0, 0, 0}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } } + {"bb"}, + {"ah"}, + {"reserved"}, }; /* Storm constant definitions array */ @@ -1588,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = { {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_DUMP_NIG */ {{1, 1, 1}, 0, 1, false, false, 0, 1}, @@ -1745,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = { PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; +static struct split_type_defs s_split_type_defs[] = { + /* SPLIT_TYPE_NONE */ + {"eng"}, + + /* SPLIT_TYPE_PORT */ + {"port"}, + + /* SPLIT_TYPE_PF */ + {"pf"}, + + /* SPLIT_TYPE_PORT_PF */ + {"port"}, + + /* SPLIT_TYPE_VF */ + {"vf"} +}; + /**************************** Private Functions ******************************/ /* Reads and returns a single dword from the specified unaligned buffer */ @@ -1781,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 num_pfs = 0, max_pfs_per_port = 0; if (dev_data->initialized) return 
DBG_STATUS_OK; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; + dev_data->num_vfs = MAX_NUM_VFS_K2; + num_pfs = MAX_NUM_PFS_K2; + max_pfs_per_port = MAX_NUM_PFS_K2 / 2; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; + dev_data->num_vfs = MAX_NUM_VFS_BB; + num_pfs = MAX_NUM_PFS_BB; + max_pfs_per_port = MAX_NUM_PFS_BB; } else { return DBG_STATUS_UNKNOWN_CHIP; } + /* Set platform */ dev_data->platform_id = PLATFORM_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; + /* Set port mode */ + switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { + case 0: + dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; + break; + case 1: + dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; + break; + case 2: + dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; + break; + } + + /* Set 100G mode */ + if (dev_data->chip_id == CHIP_BB && + qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2) + dev_data->mode_enable[MODE_100G] = 1; + + /* Set number of ports */ + if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || + dev_data->mode_enable[MODE_100G]) + dev_data->num_ports = 1; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) + dev_data->num_ports = 2; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) + dev_data->num_ports = 4; + + /* Set number of PFs per port */ + dev_data->num_pfs_per_port = min_t(u32, + num_pfs / dev_data->num_ports, + max_pfs_per_port); + /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); dev_data->use_dmae = true; - dev_data->num_regs_read = 0; dev_data->initialized = 1; return DBG_STATUS_OK; @@ -1821,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer.
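/*
 * A worked sketch of the port/PF bookkeeping set up in qed_dbg_dev_init()
 * above, with assumed example values (say 8 PFs total on a 2-port BB device):
 * 8 / 2 = 4 PFs per port, so a later PORT_PF split dump covers 2 * 4 = 8
 * register blocks.  example_pfs_per_port() is illustrative only.
 */
static unsigned int example_pfs_per_port(unsigned int num_pfs,
					 unsigned int num_ports,
					 unsigned int max_pfs_per_port)
{
	unsigned int per_port = num_pfs / num_ports;

	return per_port < max_pfs_per_port ? per_port : max_pfs_per_port;
}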
*/ -static void qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 storm_id, struct fw_info *fw_info) +static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 storm_id, struct fw_info *fw_info) { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; @@ -1945,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { - /* Read FW image/version from PRAM in a non-reset SEMI */ - bool found = false; - u8 storm_id; - - for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; - storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; - - /* Read FW version/image */ - if (dev_data->block_in_reset[storm->block_id]) - continue; - - /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - if (snprintf(fw_ver_str, sizeof(fw_ver_str), - "%d_%d_%d_%d", fw_info.ver.num.major, - fw_info.ver.num.minor, fw_info.ver.num.rev, - fw_info.ver.num.eng) < 0) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } - - found = true; + /* Read FW info from chip */ + qed_read_fw_info(p_hwfn, p_ptt, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } } @@ -2412,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - count: no. of dumped entries - * - split: split type - * - id: split ID (dumped only if split_id >= 0) + * - count: no. of dumped entries + * - split_type: split type + * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) * - param_name: user parameter value (dumped only if param_name != NULL * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, - const char *split_type, - int split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { - u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0); + u8 num_params = 2 + + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 
1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, @@ -2433,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, offset += qed_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries); offset += qed_dump_str_param(dump_buf + offset, - dump, "split", split_type); - if (split_id >= 0) + dump, "split", + s_split_type_defs[split_type].name); + if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); if (param_name && param_val) @@ -2463,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, + u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; if (!dump) return len; @@ -2481,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, dev_data->num_regs_read = 0; } + switch (split_type) { + case SPLIT_TYPE_PORT: + port_id = split_id; + break; + case SPLIT_TYPE_PF: + pf_id = split_id; + break; + case SPLIT_TYPE_PORT_PF: + port_id = split_id / dev_data->num_pfs_per_port; + pf_id = port_id + dev_data->num_ports * + (split_id % dev_data->num_pfs_per_port); + break; + case SPLIT_TYPE_VF: + vf_id = split_id; + break; + default: + break; + } + /* Try reading using DMAE */ - if (dev_data->use_dmae && + if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || wide_bus)) { if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), @@ -2494,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, "Failed reading from chip using DMAE, using GRC instead\n"); } - /* Read registers */ + /* If not read using DMAE, read using GRC */ + + /* Set pretend */ + if (split_type != dev_data->pretend.split_type || split_id != + dev_data->pretend.split_id) { + switch (split_type) { + case SPLIT_TYPE_PORT: + qed_port_pretend(p_hwfn, p_ptt, port_id); + break; + case SPLIT_TYPE_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + case SPLIT_TYPE_PORT_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); + break; + case SPLIT_TYPE_VF: + fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | + (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + default: + break; + } + + dev_data->pretend.split_type = (u8)split_type; + dev_data->pretend.split_id = split_id; + } + + /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); return len; @@ -2518,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, u8 split_id) { u32 offset = 0; @@ -2526,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + split_type, split_id); return offset; } @@ -2559,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, 
curr_len, false); + dump, addr, curr_len, false, + SPLIT_TYPE_NONE, 0); reg_offset += curr_len; addr += curr_len; @@ -2581,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct dbg_array input_regs_arr, u32 *dump_buf, bool dump, + enum init_split_types split_type, + u8 split_id, bool block_enable[MAX_BLOCK_ID], u32 *num_dumped_reg_entries) { @@ -2628,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, dump, addr, len, - wide_bus); + wide_bus, + split_type, split_id); (*num_dumped_reg_entries)++; } } @@ -2643,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], - const char *split_type_name, - u32 split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum init_split_types hdr_split_type = split_type; u32 num_dumped_reg_entries, offset; + u8 hdr_split_id = split_id; + + /* In PORT_PF split type, print a port split header */ + if (split_type == SPLIT_TYPE_PORT_PF) { + hdr_split_type = SPLIT_TYPE_PORT; + hdr_split_id = split_id / dev_data->num_pfs_per_port; + } /* Calculate register dump header size (and skip it for now) */ offset = qed_grc_dump_regs_hdr(dump_buf, false, 0, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, @@ -2663,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, input_regs_arr, dump_buf + offset, dump, + split_type, + split_id, block_enable, &num_dumped_reg_entries); @@ -2671,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); return num_dumped_reg_entries > 0 ? 
offset : 0; } @@ -2688,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *chip; - u8 port_id, pf_id, vf_id; u16 fid; - - chip = &s_chip_defs[dev_data->chip_id]; - chip_platform = &chip->per_platform[dev_data->platform_id]; - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_regs_arr; + enum init_split_types split_type; + u16 split_count = 0; u32 split_data_size; - u8 split_type_id; + u8 split_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -2717,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; curr_input_regs_arr.size_in_dwords = split_data_size; - switch (split_type_id) { + switch (split_type) { case SPLIT_TYPE_NONE: - offset += qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "eng", - (u32)(-1), - param_name, - param_val); + split_count = 1; break; - case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < chip_platform->num_ports; - port_id++) { - if (dump) - qed_port_pretend(p_hwfn, p_ptt, - port_id); - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "port", port_id, - param_name, - param_val); - } + split_count = dev_data->num_ports; break; - case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < chip_platform->num_pfs; - pf_id++) { - u8 pfid_shift = - PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; - - if (dump) { - fid = pf_id << pfid_shift; - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "pf", - pf_id, - param_name, - param_val); - } + split_count = dev_data->num_ports * + dev_data->num_pfs_per_port; break; - case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < chip_platform->num_vfs; - vf_id++) { - u8 vfvalid_shift = - PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; - u8 vfid_shift = - PXP_PRETEND_CONCRETE_FID_VFID_SHIFT; - - if (dump) { - fid = BIT(vfvalid_shift) | - (vf_id << vfid_shift); - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "vf", vf_id, - param_name, - param_val); - } + split_count = dev_data->num_vfs; break; - default: - break; + return 0; } + for (split_id = 0; split_id < split_count; split_id++) + offset += qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + split_type, + split_id, + param_name, + param_val); + input_offset += split_data_size; } - /* Pretend to original PF */ + /* Cancel pretends (pretend to original PF) */ if (dump) { fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; qed_fid_pretend(p_hwfn, p_ptt, fid); + dev_data->pretend.split_type = SPLIT_TYPE_NONE; + dev_data->pretend.split_id = 0; } return offset; @@ -2825,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, 
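/*
 * A sketch of the PORT_PF split_id decoding that qed_grc_dump_addr_range()
 * performs above before pretending (example_decode_port_pf() is hypothetical):
 * with 2 ports and 4 PFs per port, split_id 5 pretends to port 1, PF 3,
 * because PFs are interleaved across the ports.
 */
static void example_decode_port_pf(u8 split_id, u8 num_ports,
				   u8 num_pfs_per_port,
				   u8 *port_id, u8 *pf_id)
{
	*port_id = split_id / num_pfs_per_port;
	*pf_id = *port_id + num_ports * (split_id % num_pfs_per_port);
}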
"eng", -1, NULL, NULL); + false, 0, + SPLIT_TYPE_NONE, 0, NULL, NULL); /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { @@ -2838,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, dump, BYTES_TO_DWORDS (s_reset_regs_defs[i].addr), 1, - false); + false, SPLIT_TYPE_NONE, 0); num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, - true, num_regs, "eng", -1, NULL, NULL); + true, num_regs, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2864,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, "eng", -1, NULL, NULL); + false, 0, SPLIT_TYPE_NONE, + 0, NULL, NULL); /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { @@ -2899,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); addr = GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS); offset += qed_grc_dump_reg_entry(p_hwfn, @@ -2907,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); num_reg_entries += 2; } } @@ -2929,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); num_reg_entries++; } @@ -2937,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, if (dump) qed_grc_dump_regs_hdr(dump_buf, true, - num_reg_entries, "eng", -1, NULL, NULL); + num_reg_entries, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2950,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, u32 offset = 0, addr; offset += qed_grc_dump_regs_hdr(dump_buf, - dump, 2, "eng", -1, NULL, NULL); + dump, 2, SPLIT_TYPE_NONE, 0, + NULL, NULL); /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be * skipped). 
@@ -3096,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + SPLIT_TYPE_NONE, 0); return offset; } @@ -3235,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_mems_arr; + enum init_split_types split_type; u32 split_data_size; - u8 split_type_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -3250,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; curr_input_mems_arr.size_in_dwords = split_data_size; - switch (split_type_id) { - case SPLIT_TYPE_NONE: + if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump); - break; - - default: + else DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); - break; - } input_offset += split_data_size; } @@ -3623,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, dump, addr, num_dwords_to_read, - false); + false, + SPLIT_TYPE_NONE, 0); total_dwords -= num_dwords_to_read; rss_addr++; } @@ -3682,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump, addr, len, - false); + false, SPLIT_TYPE_NONE, 0); } return offset; @@ -3731,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, - dump, 1, "eng", -1, "block", "MCP"); + dump, 1, SPLIT_TYPE_NONE, 0, + "block", "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, @@ -3739,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -3923,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, dump, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); } /* Disable block's client and debug output */ @@ -3949,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; bool parities_masked = false; - u8 i, port_mode = 0; u32 offset = 0; + u8 i; *num_dumped_dwords = 0; + dev_data->num_regs_read = 0; - if (dump) { - /* Find port mode */ - switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { - case 0: - port_mode = 1; - break; - case 1: - port_mode = 2; - break; - case 2: - port_mode = 4; - break; - } - - /* Update reset state */ + /* Update reset state */ + if (dump) qed_update_blocks_reset_state(p_hwfn, p_ptt); - } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3989,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS)); offset += qed_dump_num_param(dump_buf + offset, - dump, "num-ports", port_mode); + dump, "num-ports", dev_data->num_ports); /* Dump reset registers (dumped before taking blocks out of reset ) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) @@ -4093,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_phy(p_hwfn, p_ptt, 
dump_buf + offset, dump); - /* Dump static debug data */ + /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && - dev_data->bus.state == DBG_BUS_STATE_IDLE) + (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) offset += qed_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump); @@ -4250,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - reg->size, wide_bus); + reg->size, wide_bus, + SPLIT_TYPE_NONE, 0); } } @@ -4373,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, next_reg_offset, dump, addr, reg->entry_size, - wide_bus); + wide_bus, + SPLIT_TYPE_NONE, 0); } /* Call rule condition function. @@ -4723,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords, false); + trace_data_size_dwords, false, + SPLIT_TYPE_NONE, 0); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -4829,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4898,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4956,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, true, addr, override_window_dwords, - true); + true, SPLIT_TYPE_NONE, 0); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: @@ -4998,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); asserts = &fw_info.fw_asserts_section; @@ -5036,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, asserts->list_element_dword_size, - false); + false, SPLIT_TYPE_NONE, 0); } /* Dump last section */ @@ -5063,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Skip Storm if it's in reset */ + if (dev_data->block_in_reset[storm->block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); + + return true; + } + + return false; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 560528962658..b285edc8d6a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Sending final cleanup for PFVF[%d] [Command %08x\n]", + "Sending final cleanup for PFVF[%d] [Command %08x]\n", id, command); qed_wr(p_hwfn, 
p_ptt, XSDM_REG_OPERATION_GEN, command); @@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, { u32 port_mode; - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); + port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); if (port_mode < 3) { p_hwfn->cdev->num_ports_in_engine = 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8e1e6e1eb40e..bee10c1781fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1095,14 +1095,16 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; - u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; }; /* Ramrod data for PF start ramrod */ @@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config { u8 update_rx_def_non_ucast_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; }; /* Data for port update ramrod */ @@ -2535,7 +2540,14 @@ struct idle_chk_data { u16 reserved2; }; -/* Debug Tools data (per HW function) */ +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ struct dbg_tools_data { struct dbg_grc_data grc; struct dbg_bus_data bus; @@ -2544,8 +2556,13 @@ struct dbg_tools_data { u8 block_in_reset[88]; u8 chip_id; u8 platform_id; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; u8 initialized; u8 use_dmae; + u8 reserved; + struct pretend_params pretend; u32 num_regs_read; }; @@ -2975,6 +2992,24 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); /** + * @brief qed_read_fw_info - Reads FW info from the chip. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param fw_info - Out: a pointer to write the FW info into. + * + * @return true if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); + +/** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. * @@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); +#define NUM_STORMS 6 + +/** + * @brief qed_set_rdma_error_level - Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param assert_level - An array of assert levels for each storm. 
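/*
 * A sketch of how the per-storm assert-level helper documented above might be
 * called; the level value 2 is purely illustrative (the meaningful range is
 * firmware-defined) and example_set_rdma_assert() is not part of the patch.
 */
static void example_set_rdma_assert(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u8 levels[NUM_STORMS] = { 2, 2, 2, 2, 2, 2 };

	qed_set_rdma_error_level(p_hwfn, p_ptt, levels);
}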
+ * + */ +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) -static const struct iro iro_arr[51] = { +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + +static const struct iro iro_arr[59] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = { {0x10768, 0x20, 0x0, 0x0, 0x20}, {0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc748, 0x8, 0x0, 0x0, 0x1}, + {0xa128, 0x8, 0x0, 0x0, 0x1}, + {0x10f00, 0x8, 0x0, 
0x0, 0x1}, + {0xf030, 0x8, 0x0, 0x0, 0x1}, + {0x13028, 0x8, 0x0, 0x0, 0x1}, + {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xed90, 0x10, 0x0, 0x0, 0x10}, - {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0xed90, 0x28, 0x0, 0x0, 0x28}, + {0xa520, 0x18, 0x0, 0x0, 0x18}, + {0xa6a0, 0x8, 0x0, 0x0, 0x8}, {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; /* Runtime array offsets */ @@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = { #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 -#define 
PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 -#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 - -#define RUNTIME_ARRAY_SIZE 43023 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920 +#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982 +#define 
PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021 + +#define RUNTIME_ARRAY_SIZE 43022 + /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF -#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 }; /* Command for setting tpa parameters */ @@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx { #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx { #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data { struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; - struct regpair dif_runt_addr; - __le32 reserved4[2]; + 
__le32 reserved4[4]; }; /* rdma resize cq output params */ @@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data { enum rdma_tid_type { RDMA_TID_REGISTERED_MR, RDMA_TID_FMR, - RDMA_TID_MW_TYPE1, - RDMA_TID_MW_TYPE2A, + RDMA_TID_MW, MAX_RDMA_TID_TYPE }; @@ -7681,6 +7779,16 @@ struct e4_roce_conn_context { struct ustorm_roce_conn_st_ctx ustorm_st_context; }; +/* roce cqes statistics */ +struct roce_cqe_stats { + __le32 req_cqe_error; + __le32 req_remote_access_errors; + __le32 req_remote_invalid_request; + __le32 resp_cqe_error; + __le32 resp_local_length_error; + __le32 reserved; +}; + /* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; @@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats { /* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { - __le32 num_bound_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp requester ramrod data */ @@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data { /* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { - __le32 num_invalidated_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp responder ramrod data */ @@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce error statistics */ +struct roce_error_stats { + __le32 resp_remote_access_errors; + __le32 reserved; +}; + /* roce special events statistics */ struct roce_events_stats { - __le16 silent_drops; - __le16 rnr_naks_sent; + __le32 silent_drops; + __le32 rnr_naks_sent; __le32 retransmit_count; __le32 icrc_error_count; - __le32 reserved; + __le32 implied_nak_seq_err; + __le32 duplicate_request; + __le32 local_ack_timeout_err; + __le32 out_of_sequence; + __le32 packet_seq_err; + __le32 rnr_nak_retry_err; }; -/* ROCE slow path EQ cmd IDs */ +/* roce slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7845,6 +7964,9 @@ struct roce_init_func_params { u8 cnp_dscp; u8 reserved; __le32 cnp_send_timeout; + __le16 rl_offset; + u8 rl_count_log; + u8 reserved1[5]; }; /* roce func init ramrod data */ @@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le16 rq_prod; __le16 conn_dpi; __le16 irq_cons; - __le32 num_invlidated_mw; + __le32 reg9; __le32 reg10; }; @@ -9725,6 +9847,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, + IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, MAX_IWARP_EQE_ASYNC_OPCODE }; @@ -11996,6 +12120,7 @@ struct public_port { #define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + u32 reserved1; u32 oem_cfg_port; #define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 #define OEM_CFG_CHANNEL_TYPE_OFFSET 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index fca2dbd93ad9..70504dcf4087 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) *(u32 *)&p_ptt->pxp.pretend); } +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + 
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) { u32 concrete_fid = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 8db2839a8ec8..505e94db939d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -245,6 +245,18 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief qed_port_fid_pretend - pretend to another port and another function + * when accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + */ +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid); + +/** * @brief qed_vfid_to_concrete - build a concrete FID for a * given VF ID * diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1365da7c8900..d845badf9b90 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, ram_line_lo, ram_line_hi; + u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + /* Tunnel type */ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); @@ -1337,9 +1340,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; } qed_wr(p_hwfn, + p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, ram_line_lo); @@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 5: + return 
PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + + default: + return 0; + } +} + +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); + + qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 2a2b1018ed1d..90a2b53096e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, p_ramrod->sq_num_pages = qp->sq_num_pages; p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); @@ -1157,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt, struct qed_iwarp_info *iwarp_info; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; - u8 ts_hdr_size = 0; u32 cid; int rc; @@ -1216,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt, iparams->cm_info.private_data, iparams->cm_info.private_data_len); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - ep->mss = iparams->mss - ts_hdr_size; + ep->mss = iparams->mss; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); ep->event_cb = iparams->event_cb; @@ -2335,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) u8 local_mac_addr[ETH_ALEN]; struct qed_iwarp_ep *ep; int tcp_start_offset; - u8 ts_hdr_size = 0; u8 ll2_syn_handle; int payload_len; u32 hdr_size; @@ -2413,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + - ts_hdr_size; + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 
40 : 60); ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); @@ -3004,8 +2997,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; struct regpair *fw_handle = &data->rdma_data.async_handle; struct qed_iwarp_ep *ep = NULL; + u16 srq_offset; + u16 srq_id; u16 cid; ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, @@ -3067,6 +3063,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, qed_iwarp_cid_cleaned(p_hwfn, cid); break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_EMPTY, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_LIMIT, + &srq_id); + break; case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 1c0d0c217936..1f6ac848109d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & QED_ACCEPT_ANY_VNI)); + p_ramrod->rx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->rx_mode.state = 0x%x\n", state); @@ -1854,6 +1857,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_change_count)); } static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, @@ -1961,11 +1969,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. 
*/ - if (!cdev->reset_stats) + if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); - else + } else { _qed_get_vport_stats(cdev, cdev->reset_stats); + cdev->reset_stats->common.link_change_count = 0; + } } static enum gft_profile_type diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index c4030e949cce..806a8da257e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -183,6 +183,7 @@ struct qed_filter_accept_flags { #define QED_ACCEPT_MCAST_MATCHED 0x08 #define QED_ACCEPT_MCAST_UNMATCHED 0x10 #define QED_ACCEPT_BCAST 0x20 +#define QED_ACCEPT_ANY_VNI 0x40 }; struct qed_arfs_config_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 68c4399ffd50..b04d57ca5176 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -64,6 +64,7 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) +#define QED_RDMA_SRQS QED_ROCE_QPS static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, if (IS_ENABLED(CONFIG_QED_RDMA)) { params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2612e3e458d9..6f9927d1a501 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1514,9 +1514,10 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); - val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> + OEM_CFG_FUNC_TC_OFFSET; p_hwfn->ufp_info.tc = (u8)val; - val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index a411f9c702a1..101d677114f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, goto free_cid_map; } + /* Allocate bitmap for srqs */ + p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, + p_rdma_info->num_srqs, "SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_alloc(p_hwfn); if (rc) - goto free_cid_map; + goto free_srq_map; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; +free_srq_map: + kfree(p_rdma_info->srq_map.bitmap); +free_real_cid_map: + kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: kfree(p_rdma_info->cid_map.bitmap); free_tid_map: @@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); qed_rdma_bmap_free(p_hwfn, 
&p_hwfn->p_rdma_info->toggle_bits, 0); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, if (cdev->rdma_max_sge) dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; + if (p_hwfn->cdev->rdma_max_srq_sge) { + dev->max_srq_sge = min_t(u32, + p_hwfn->cdev->rdma_max_srq_sge, + dev->max_srq_sge); + } dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; dev->max_inline = (cdev->rdma_max_inline) ? @@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; dev->max_pkey = QED_RDMA_MAX_P_KEY; + dev->max_srq = p_hwfn->p_rdma_info->num_srqs; + dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / @@ -1484,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt, case QED_RDMA_TID_FMR: tid_type = RDMA_TID_FMR; break; - case QED_RDMA_TID_MW_TYPE1: - tid_type = RDMA_TID_MW_TYPE1; - break; - case QED_RDMA_TID_MW_TYPE2A: - tid_type = RDMA_TID_MW_TYPE2A; + case QED_RDMA_TID_MW: + tid_type = RDMA_TID_MW; break; default: rc = -EINVAL; @@ -1520,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt, RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); DMA_REGPAIR_LE(p_ramrod->dif_error_addr, params->dif_error_addr); - DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); @@ -1628,6 +1648,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) return QED_LEADING_HWFN(cdev); } +static int qed_rdma_modify_srq(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *in_params) +{ + struct rdma_srq_modify_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + u16 opaque_fid; + int rc; + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_MODIFY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_modify_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + opaque_fid = p_hwfn->hw_info.opaque_fid; + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_destroy_srq(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *in_params) +{ + struct rdma_srq_destroy_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + struct qed_bmap *bmap; + u16 opaque_fid; + int rc; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_destroy_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + 
p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_create_srq(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *in_params, + struct qed_rdma_create_srq_out_params *out_params) +{ + struct rdma_srq_create_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + enum qed_cxt_elem_type elem_type; + struct qed_spq_entry *p_ent; + u16 opaque_fid, srq_id; + struct qed_bmap *bmap; + u32 returned_id; + int rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "failed to allocate srq id\n"); + return rc; + } + + elem_type = QED_ELEM_SRQ; + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); + if (rc) + goto err; + /* returned id is no greater than u16 */ + srq_id = (u16)returned_id; + opaque_fid = p_hwfn->hw_info.opaque_fid; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_srq; + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); + p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); + p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->page_size = cpu_to_le16(in_params->page_size); + DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->srq_id = srq_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "SRQ created Id = %x\n", out_params->srq_id); + + return rc; + +err: + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + return rc; +} + bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; @@ -1773,6 +1942,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, + .rdma_create_srq = &qed_rdma_create_srq, + .rdma_modify_srq = &qed_rdma_modify_srq, + .rdma_destroy_srq = &qed_rdma_destroy_srq, .ll2_acquire_connection = &qed_ll2_acquire_connection, .ll2_establish_connection = &qed_ll2_establish_connection, .ll2_terminate_connection = &qed_ll2_terminate_connection, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 18ec9cbd84f5..6f722ee8ee94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -96,6 +96,8 @@ struct qed_rdma_info { u8 num_cnqs; u32 num_qps; u32 num_mrs; + u32 num_srqs; + u16 srq_id_offset; u16 queue_zone_base; u16 max_queue_zones; enum protocol_type proto; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h 
b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f7122059b6b5..d8ad2dcad8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -178,7 +178,7 @@ 0x008c80UL #define MCP_REG_SCRATCH \ 0xe20000UL -#define CNIG_REG_NW_PORT_MODE_BB_B0 \ +#define CNIG_REG_NW_PORT_MODE_BB \ 0x218200UL #define MISCS_REG_CHIP_NUM \ 0x00976cUL @@ -1621,6 +1621,7 @@ #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 #define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL #define PRS_REG_CM_HDR_GFT 0x1f11c8UL #define PRS_REG_GFT_CAM 0x1f1100UL #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 6acfd43c1a4f..b5ce1581645f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); @@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, */ qed_roce_free_real_icid(p_hwfn, icid); } else { - struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)data->rdma_data.async_handle.lo; + + events.affiliated_event(events.context, fw_event_code, + &srq_id); + } else { + union rdma_eqe_data rdata = data->rdma_data; - events->affiliated_event(p_hwfn->p_rdma_info->events.context, - fw_event_code, - (void *)&data->rdma_data.async_handle); + events.affiliated_event(events.context, fw_event_code, + (void *)&rdata.async_handle); + } } return 0; @@ -672,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, - u32 *num_invalidated_mw, u32 *cq_prod) { struct roce_destroy_qp_resp_output_params *p_ramrod_res; @@ -683,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - *num_invalidated_mw = 0; *cq_prod = qp->cq_prod; if (!qp->resp_offloaded) { @@ -733,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); qp->cq_prod = *cq_prod; @@ -755,8 +760,7 @@ err: } static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - u32 *num_bound_mw) + struct qed_rdma_qp *qp) { struct roce_destroy_qp_req_output_params *p_ramrod_res; struct roce_destroy_qp_req_ramrod_data *p_ramrod; @@ -798,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw); /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -959,8 +962,6 @@ err_resp: int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { - u32 num_invalidated_mw = 0; - u32 num_bound_mw = 0; u32 cq_prod; int rc; @@ -975,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) if (qp->cur_state 
!= QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) return rc; /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); if (rc) return rc; - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } } return 0; @@ -1001,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) { - u32 num_invalidated_mw = 0, num_bound_mw = 0; int rc = 0; /* Perform additional operations according to the current state and the @@ -1081,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, /* Send destroy responder ramrod */ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) @@ -1089,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, qp->cq_prod = cq_prod; - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 81c5c8dfa2ef..d7ed0d3dbf71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -88,6 +88,7 @@ struct qede_stats_common { u64 coalesced_aborts_num; u64 non_coalesced_pkts; u64 coalesced_bytes; + u64 link_change_count; /* port */ u64 rx_64_byte_packets; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6906e04b609e..f4a0f8ff8261 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -171,6 +171,8 @@ static const struct { QEDE_STAT(coalesced_aborts_num), QEDE_STAT(non_coalesced_pkts), QEDE_STAT(coalesced_bytes), + + QEDE_STAT(link_change_count), }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d118771e1a7b..6a796040a32c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -399,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->brb_truncates = stats.common.brb_truncates; p_common->brb_discards = stats.common.brb_discards; p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; + p_common->link_change_count = stats.common.link_change_count; if (QEDE_IS_BB(edev)) { struct qede_stats_bb *p_bb = &edev->stats.bb; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8293c2028002..70de062b72a1 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; @@ -2258,7 +2258,7 @@ static int 
ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = rx_ring->curr_entry; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 562420b834df..e78e5db39458 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -273,6 +273,14 @@ static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup) return 0; } +static struct sgmii_ops fsm9900_ops = { + .init = emac_sgmii_init_fsm9900, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; + static struct sgmii_ops qdf2432_ops = { .init = emac_sgmii_init_qdf2432, .open = emac_sgmii_common_open, @@ -281,6 +289,7 @@ static struct sgmii_ops qdf2432_ops = { .reset = emac_sgmii_common_reset, }; +#ifdef CONFIG_ACPI static struct sgmii_ops qdf2400_ops = { .init = emac_sgmii_init_qdf2400, .open = emac_sgmii_common_open, @@ -288,6 +297,7 @@ static struct sgmii_ops qdf2400_ops = { .link_change = emac_sgmii_common_link_change, .reset = emac_sgmii_common_reset, }; +#endif static int emac_sgmii_acpi_match(struct device *dev, void *data) { @@ -335,11 +345,11 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) static const struct of_device_id emac_sgmii_dt_match[] = { { .compatible = "qcom,fsm9900-emac-sgmii", - .data = emac_sgmii_init_fsm9900, + .data = &fsm9900_ops, }, { .compatible = "qcom,qdf2432-emac-sgmii", - .data = emac_sgmii_init_qdf2432, + .data = &qdf2432_ops, }, {} }; @@ -386,7 +396,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->sgmii_ops->init = match->data; + phy->sgmii_ops = (struct sgmii_ops *)match->data; } /* Base address is the first address */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 56a93df962e6..3ee8ae9b6838 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -67,7 +67,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; - int xmit_status; + struct net_device *dev = skb->dev; if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) skb_trim(skb, @@ -78,9 +78,9 @@ static void rmnet_map_send_ack(struct sk_buff *skb, cmd = RMNET_MAP_GET_CMD_START(skb); cmd->cmd_type = type & 0x03; - netif_tx_lock(skb->dev); - xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); - netif_tx_unlock(skb->dev); + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); } /* Process MAP command frame and send N/ACK message as appropriate. 
Message cmd diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index cb02e1a015c1..b9a7548ec6a0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -221,6 +221,10 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->needs_free_netdev = true; rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; + + /* This perm addr will be used as interface identifier by IPv6 */ + rmnet_dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(rmnet_dev->perm_addr); } /* Exposed API */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d9cadfb1bc4a..e9007b613f17 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -460,6 +460,17 @@ static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) return ioread32(mdp->tsu_addr + offset); } +static void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN + u32 *p = (u32 *)src; + u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif +} + static void sh_eth_select_mii(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 5dee19b61aee..726c55a82dd7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -560,18 +560,6 @@ struct sh_eth_private { unsigned wol_enabled:1; }; -static inline void sh_eth_soft_swap(char *src, int len) -{ -#ifdef __LITTLE_ENDIAN__ - u32 *p = (u32 *)src; - u32 *maxp; - maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); - - for (; p < maxp; p++) - *p = swab32(*p); -#endif -} - static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index e73e4febeedb..aeafdb9ac015 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1632,6 +1632,9 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_add) return -EOPNOTSUPP; @@ -1647,6 +1650,9 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_del) return -EOPNOTSUPP; return wops->port_obj_vlan_del(rocker_port, vlan); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index aa50331b7607..ce8071fc90c4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1681,8 +1681,8 @@ static int netsec_probe(struct platform_device *pdev) if (ret) goto unreg_napi; - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) - dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) + dev_warn(&pdev->dev, "Failed to set DMA mask\n"); ret = register_netdev(ndev); if (ret) { diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index a679cb729d1d..78fd0f8b8e81 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ 
b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -346,6 +346,8 @@ struct dma_features { /* TX and RX number of queues */ unsigned int number_rx_queues; unsigned int number_tx_queues; + /* PPS output */ + unsigned int pps_out_num; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; /* TX and RX FIFO sizes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 9e6db16af663..7e2e79dedebf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -16,49 +16,180 @@ #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/stmmac.h> #include "stmmac_platform.h" -#define MII_PHY_SEL_MASK BIT(23) +#define SYSCFG_MCU_ETH_MASK BIT(23) +#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16) + +#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16) +#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17) +#define SYSCFG_PMCR_ETH_SEL_MII BIT(20) +#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) +#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) +#define SYSCFG_PMCR_ETH_SEL_GMII 0 +#define SYSCFG_MCU_ETH_SEL_MII 0 +#define SYSCFG_MCU_ETH_SEL_RMII 1 struct stm32_dwmac { struct clk *clk_tx; struct clk *clk_rx; + struct clk *clk_eth_ck; + struct clk *clk_ethstp; + struct clk *syscfg_clk; + bool int_phyclk; /* Clock from RCC to drive PHY */ u32 mode_reg; /* MAC glue-logic mode register */ struct regmap *regmap; u32 speed; + const struct stm32_ops *ops; + struct device *dev; +}; + +struct stm32_ops { + int (*set_mode)(struct plat_stmmacenet_data *plat_dat); + int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare); + int (*suspend)(struct stm32_dwmac *dwmac); + void (*resume)(struct stm32_dwmac *dwmac); + int (*parse_data)(struct stm32_dwmac *dwmac, + struct device *dev); + u32 syscfg_eth_mask; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - u32 reg = dwmac->mode_reg; - u32 val; int ret; - val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 
0 : 1; - ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val); - if (ret) - return ret; + if (dwmac->ops->set_mode) { + ret = dwmac->ops->set_mode(plat_dat); + if (ret) + return ret; + } ret = clk_prepare_enable(dwmac->clk_tx); if (ret) return ret; - ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) - clk_disable_unprepare(dwmac->clk_tx); + if (!dwmac->dev->power.is_suspended) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) { + clk_disable_unprepare(dwmac->clk_tx); + return ret; + } + } + + if (dwmac->ops->clk_prepare) { + ret = dwmac->ops->clk_prepare(dwmac, true); + if (ret) { + clk_disable_unprepare(dwmac->clk_rx); + clk_disable_unprepare(dwmac->clk_tx); + } + } return ret; } +static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) +{ + int ret = 0; + + if (prepare) { + ret = clk_prepare_enable(dwmac->syscfg_clk); + if (ret) + return ret; + + if (dwmac->int_phyclk) { + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) { + clk_disable_unprepare(dwmac->syscfg_clk); + return ret; + } + } + } else { + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + } + return ret; +} + +static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_PMCR_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_GMII: + val = SYSCFG_PMCR_ETH_SEL_GMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_PMCR_ETH_SEL_RMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + case PHY_INTERFACE_MODE_RGMII: + val = SYSCFG_PMCR_ETH_SEL_RGMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + +static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_MCU_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_MCU_ETH_SEL_RMII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) { clk_disable_unprepare(dwmac->clk_tx); clk_disable_unprepare(dwmac->clk_rx); + + if (dwmac->ops->clk_prepare) + dwmac->ops->clk_prepare(dwmac, false); } static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, @@ -70,15 +201,22 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, /* Get TX/RX clocks */ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx"); if (IS_ERR(dwmac->clk_tx)) { - dev_err(dev, "No tx clock provided...\n"); + 
dev_err(dev, "No ETH Tx clock provided...\n"); return PTR_ERR(dwmac->clk_tx); } + dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx"); if (IS_ERR(dwmac->clk_rx)) { - dev_err(dev, "No rx clock provided...\n"); + dev_err(dev, "No ETH Rx clock provided...\n"); return PTR_ERR(dwmac->clk_rx); } + if (dwmac->ops->parse_data) { + err = dwmac->ops->parse_data(dwmac, dev); + if (err) + return err; + } + /* Get mode register */ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); if (IS_ERR(dwmac->regmap)) @@ -91,11 +229,46 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, return err; } +static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, + struct device *dev) +{ + struct device_node *np = dev->of_node; + + dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk"); + + /* Check if internal clk from RCC selected */ + if (dwmac->int_phyclk) { + /* Get ETH_CLK clocks */ + dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck"); + if (IS_ERR(dwmac->clk_eth_ck)) { + dev_err(dev, "No ETH CK clock provided...\n"); + return PTR_ERR(dwmac->clk_eth_ck); + } + } + + /* Clock used for low power mode */ + dwmac->clk_ethstp = devm_clk_get(dev, "ethstp"); + if (IS_ERR(dwmac->clk_ethstp)) { + dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n"); + return PTR_ERR(dwmac->clk_ethstp); + } + + /* Clock for sysconfig */ + dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk"); + if (IS_ERR(dwmac->syscfg_clk)) { + dev_err(dev, "No syscfg clock provided...\n"); + return PTR_ERR(dwmac->syscfg_clk); + } + + return 0; +} + static int stm32_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct stm32_dwmac *dwmac; + const struct stm32_ops *data; int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -112,6 +285,16 @@ static int stm32_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no of match data provided\n"); + ret = -EINVAL; + goto err_remove_config_dt; + } + + dwmac->ops = data; + dwmac->dev = &pdev->dev; + ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); @@ -149,15 +332,48 @@ static int stm32_dwmac_remove(struct platform_device *pdev) return ret; } +static int stm32mp1_suspend(struct stm32_dwmac *dwmac) +{ + int ret = 0; + + ret = clk_prepare_enable(dwmac->clk_ethstp); + if (ret) + return ret; + + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + + return ret; +} + +static void stm32mp1_resume(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_ethstp); +} + +static int stm32mcu_suspend(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_rx); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int stm32_dwmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; + int ret; ret = stmmac_suspend(dev); - stm32_dwmac_clk_disable(priv->plat->bsp_priv); + + if (dwmac->ops->suspend) + ret = dwmac->ops->suspend(dwmac); return ret; } @@ -166,8 +382,12 @@ static int stm32_dwmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; int ret; + 
if (dwmac->ops->resume) + dwmac->ops->resume(dwmac); + ret = stm32_dwmac_init(priv->plat); if (ret) return ret; @@ -181,8 +401,24 @@ static int stm32_dwmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume); +static struct stm32_ops stm32mcu_dwmac_data = { + .set_mode = stm32mcu_set_mode, + .suspend = stm32mcu_suspend, + .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK +}; + +static struct stm32_ops stm32mp1_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .clk_prepare = stm32mp1_clk_prepare, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK +}; + static const struct of_device_id stm32_dwmac_match[] = { - { .compatible = "st,stm32-dwmac"}, + { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data}, + { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data}, { } }; MODULE_DEVICE_TABLE(of, stm32_dwmac_match); @@ -199,5 +435,6 @@ static struct platform_driver stm32_dwmac_driver = { module_platform_driver(stm32_dwmac_driver); MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>"); -MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer"); +MODULE_AUTHOR("Christophe Roullier <christophe.roullier@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 DWMAC Specific Glue layer"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 6330a55953df..eb013d54025a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -187,6 +187,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) #define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a7121a7d9391..7e5d5db0d516 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -796,6 +796,7 @@ const struct stmmac_ops dwmac510_ops = { .safety_feat_irq_status = dwmac5_safety_feat_irq_status, .safety_feat_dump = dwmac5_safety_feat_dump, .rxp_config = dwmac5_rxp_config, + .flex_pps_config = dwmac5_flex_pps_config, }; int dwmac4_setup(struct stmmac_priv *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index bf8e5a16f11c..d37f17ca62fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -373,6 +373,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; dma_cap->number_tx_queues = ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + /* PPS output */ + dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index b2becb80a697..3f4f3132e16b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -8,6 +8,7 @@ #include "dwmac4.h" #include "dwmac5.h" #include "stmmac.h" +#include "stmmac_ptp.h" struct dwmac5_error_desc { bool valid; @@ -494,3 +495,57 @@ re_enable: writel(old_val, ioaddr + GMAC_CONFIG); return ret; } + +int 
dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + MAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~PPSx_MASK(index); + + if (!enable) { + val |= PPSCMDx(index, 0x5); + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; + } + + val |= PPSCMDx(index, 0x2); + val |= TRGTMODSELx(index, 0x2); + val |= PPSEN0; + + writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index cc810aff7100..775db776b3cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -11,6 +11,25 @@ #define PRTYEN BIT(1) #define TMOUTEN BIT(0) +#define MAC_PPS_CONTROL 0x00000b70 +#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define PPS_MINIDX(x) ((x) * 8) +#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x)) +#define MCGRENx(x) BIT(PPS_MAXIDX(x)) +#define TRGTMODSELx(x, val) \ + GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \ + ((val) << (PPS_MAXIDX(x) - 2)) +#define PPSCMDx(x, val) \ + GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \ + ((val) << PPS_MINIDX(x)) +#define PPSEN0 BIT(4) +#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10)) +#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10)) +#define TRGTBUSY0 BIT(31) +#define TTSL0 GENMASK(30, 0) +#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) +#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) + #define MTL_RXP_CONTROL_STATUS 0x00000ca0 #define RXPI BIT(31) #define NPE GENMASK(23, 16) @@ -61,5 +80,8 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, int index, unsigned long *count, const char **desc); int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, unsigned int count); +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index f499a7fad6f0..e44e7b26ce82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -241,6 +241,7 @@ struct net_device; struct rgmii_adv; struct stmmac_safety_stats; struct stmmac_tc_entry; +struct stmmac_pps_cfg; /* Helpers to program the MAC core */ struct stmmac_ops { @@ -313,6 +314,10 @@ struct stmmac_ops { /* Flexible RX Parser */ int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, unsigned int count); + /* Flexible PPS */ + int (*flex_pps_config)(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, 
bool enable, + u32 sub_second_inc, u32 systime_flags); }; #define stmmac_core_init(__priv, __args...) \ @@ -379,6 +384,8 @@ struct stmmac_ops { stmmac_do_callback(__priv, mac, safety_feat_dump, __args) #define stmmac_rxp_config(__priv, __args...) \ stmmac_do_callback(__priv, mac, rxp_config, __args) +#define stmmac_flex_pps_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, flex_pps_config, __args) /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 4d425b1a0c59..025efbf6145c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -100,6 +100,13 @@ struct stmmac_tc_entry { } __packed val; }; +#define STMMAC_PPS_MAX 4 +struct stmmac_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_count_frames; @@ -122,7 +129,7 @@ struct stmmac_priv { struct net_device *dev; struct device *device; struct mac_device_info *hw; - spinlock_t lock; + struct mutex lock; /* RX Queue */ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; @@ -160,6 +167,8 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; + u32 sub_second_inc; + u32 systime_flags; u32 adv_ts; int use_riwt; int irq_wake; @@ -181,6 +190,9 @@ struct stmmac_priv { unsigned int tc_entries_max; unsigned int tc_off_max; struct stmmac_tc_entry *tc_entries; + + /* Pulse Per Second output */ + struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; }; enum stmmac_state { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 6d82b3ef5c3b..5710864fa809 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -390,9 +390,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); - spin_lock(&priv->lock); + mutex_lock(&priv->lock); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); - spin_unlock(&priv->lock); + mutex_unlock(&priv->lock); return 0; } @@ -632,12 +632,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; wol->wolopts = priv->wolopts; } - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -666,9 +666,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) disable_irq_wake(priv->wol_irq); } - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); priv->wolopts = wol->wolopts; - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c32de53a00d3..11fb7c777d89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -381,7 +381,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; int interface = priv->plat->interface; - unsigned long flags; bool ret = false; if ((interface != PHY_INTERFACE_MODE_MII) && @@ -408,7 
+407,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disable own timers. */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (priv->eee_active) { netdev_dbg(priv->dev, "disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); @@ -416,11 +415,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) tx_lpi_timer); } priv->eee_active = 0; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); goto out; } /* Activate the EEE and start timers */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (!priv->eee_active) { priv->eee_active = 1; timer_setup(&priv->eee_ctrl_timer, @@ -435,7 +434,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); ret = true; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); } @@ -722,6 +721,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) priv->plat->has_gmac4, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + priv->sub_second_inc = sec_inc; + priv->systime_flags = value; + /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; @@ -811,13 +814,12 @@ static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - unsigned long flags; bool new_state = false; if (!phydev) return; - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (phydev->link) { u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); @@ -876,7 +878,7 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (phydev->is_pseudo_fixed_link) /* Stop PHY layer to call the hook to adjust the link in case @@ -4275,7 +4277,7 @@ int stmmac_dvr_probe(struct device *device, (8 * priv->plat->rx_queues_to_use)); } - spin_lock_init(&priv->lock); + mutex_init(&priv->lock); /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be @@ -4359,6 +4361,7 @@ int stmmac_dvr_remove(struct device *dev) priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); + mutex_destroy(&priv->lock); free_netdev(ndev); return 0; @@ -4376,7 +4379,6 @@ int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!ndev || !netif_running(ndev)) return 0; @@ -4384,7 +4386,7 @@ int stmmac_suspend(struct device *dev) if (ndev->phydev) phy_stop(ndev->phydev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); netif_device_detach(ndev); stmmac_stop_all_queues(priv); @@ -4405,7 +4407,7 @@ int stmmac_suspend(struct device *dev) clk_disable(priv->plat->pclk); clk_disable(priv->plat->stmmac_clk); } - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); priv->oldlink = false; priv->speed = SPEED_UNKNOWN; @@ -4450,7 +4452,6 @@ int stmmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!netif_running(ndev)) return 0; @@ -4462,9 +4463,9 @@ int stmmac_resume(struct device *dev) * from another devices (e.g. serial console). 
*/ if (device_may_wakeup(priv->device)) { - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); @@ -4478,7 +4479,7 @@ int stmmac_resume(struct device *dev) netif_device_attach(ndev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -4492,7 +4493,7 @@ int stmmac_resume(struct device *dev) stmmac_start_all_queues(priv); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (ndev->phydev) phy_start(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ebd3e5ffa73c..6d141f3931eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -472,7 +472,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } if (of_device_is_compatible(np, "snps,dwmac-4.00") || - of_device_is_compatible(np, "snps,dwmac-4.10a")) { + of_device_is_compatible(np, "snps,dwmac-4.10a") || + of_device_is_compatible(np, "snps,dwmac-4.20a")) { plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 7d3a5c7f5db6..0cb0e39a2be9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -140,17 +140,43 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, static int stmmac_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { - return -EOPNOTSUPP; + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + cfg = &priv->pps[rq->perout.index]; + + cfg->start.tv_sec = rq->perout.start.sec; + cfg->start.tv_nsec = rq->perout.start.nsec; + cfg->period.tv_sec = rq->perout.period.sec; + cfg->period.tv_nsec = rq->perout.period.nsec; + + spin_lock_irqsave(&priv->ptp_lock, flags); + ret = stmmac_flex_pps_config(priv, priv->ioaddr, + rq->perout.index, cfg, on, + priv->sub_second_inc, + priv->systime_flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + break; + default: + break; + } + + return ret; } /* structure describing a PTP hardware clock */ -static const struct ptp_clock_info stmmac_ptp_clock_ops = { +static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, .n_alarm = 0, .n_ext_ts = 0, - .n_per_out = 0, + .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, .adjfreq = stmmac_adjust_freq, @@ -168,6 +194,16 @@ static const struct ptp_clock_info stmmac_ptp_clock_ops = { */ void stmmac_ptp_register(struct stmmac_priv *priv) { + int i; + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= STMMAC_PPS_MAX) + break; + priv->pps[i].available = true; + } + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + spin_lock_init(&priv->ptp_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 643cd2d9dfb6..534596ce00d3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -29,7 +29,7 @@ #include 
<linux/workqueue.h> #include <linux/delay.h> #include <linux/pm_runtime.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index be0fec17d95d..06d7c9e4dcda 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (IS_ERR(priv->txchan)) { dev_err(&pdev->dev, "error initializing tx dma channel\n"); rc = PTR_ERR(priv->txchan); - goto no_cpdma_chan; + goto err_free_dma; } priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, @@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev) if (IS_ERR(priv->rxchan)) { dev_err(&pdev->dev, "error initializing rx dma channel\n"); rc = PTR_ERR(priv->rxchan); - goto no_cpdma_chan; + goto err_free_txchan; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "error getting irq res\n"); rc = -ENOENT; - goto no_cpdma_chan; + goto err_free_rxchan; } ndev->irq = res->start; @@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", __func__, rc); - goto no_cpdma_chan; + goto err_napi_del; } /* register the network device */ @@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev) dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; pm_runtime_put(&pdev->dev); - goto no_cpdma_chan; + goto err_napi_del; } @@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev) return 0; -no_cpdma_chan: - if (priv->txchan) - cpdma_chan_destroy(priv->txchan); - if (priv->rxchan) - cpdma_chan_destroy(priv->rxchan); +err_napi_del: + netif_napi_del(&priv->napi); +err_free_rxchan: + cpdma_chan_destroy(priv->rxchan); +err_free_txchan: + cpdma_chan_destroy(priv->txchan); +err_free_dma: cpdma_ctlr_destroy(priv->dma); no_pdata: if (of_phy_is_fixed_link(np)) diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 8ac72831af05..a98aedae1b41 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -321,7 +321,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return ret; } -#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -339,7 +338,6 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return 0; } -#endif #if IS_ENABLED(CONFIG_OF) static const struct davinci_mdio_of_param of_cpsw_mdio_data = { @@ -374,7 +372,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - if (dev->of_node) { + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { const struct of_device_id *of_id; ret = davinci_mdio_probe_dt(&data->pdata, pdev); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 6a728d35e776..6e455a27a8de 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3206,7 +3206,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, if (!slave->phy) { dev_err(dev, "phy not found for slave %d\n", slave->slave_num); - slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); diff --git 
a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index e74e1e897864..f24f48f33802 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -900,7 +900,6 @@ static void axienet_dma_err_handler(unsigned long data); * @ndev: Pointer to net_device structure * * Return: 0, on success. - * -ENODEV, if PHY cannot be connected to * non-zero error value on failure * * This is the driver open routine. It calls phy_start to start the PHY device.
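
The emac-sgmii and dwmac-stm32 hunks above both stop stashing a single init callback in the OF match entry and instead point .data at a full ops structure, which the probe path fetches (via of_match_device() or of_device_get_match_data()) and stores for later use. A minimal sketch of that pattern follows; the foo_* names and the "vendor,foo-v1" compatible are hypothetical and only illustrate the shape, not either driver's actual code.

/* Sketch: select a per-SoC ops structure from the OF match data.
 * All foo_* identifiers are hypothetical.
 */
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_ops {
	int (*set_mode)(struct platform_device *pdev);
};

static int foo_v1_set_mode(struct platform_device *pdev)
{
	return 0;	/* SoC-specific glue would go here */
}

static const struct foo_ops foo_v1_ops = {
	.set_mode = foo_v1_set_mode,
};

static const struct of_device_id foo_dt_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_dt_match);

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_ops *ops = of_device_get_match_data(&pdev->dev);

	if (!ops)			/* no match data: refuse to probe */
		return -EINVAL;

	return ops->set_mode ? ops->set_mode(pdev) : 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.of_match_table = foo_dt_match,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL v2");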
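
The sh_eth change moves sh_eth_soft_swap() out of the header, keys it on the kernel's __LITTLE_ENDIAN byte-order macro, and rewrites the word count with DIV_ROUND_UP(): the buffer is byte-swapped one 32-bit word at a time, with the length rounded up to a whole number of words. The standalone sketch below is plain userspace C with a local stand-in for the kernel's swab32(); it is illustrative only, not the driver's code.

/* Sketch of the soft-swap loop: reverse the byte order of each 32-bit
 * word of a buffer, rounding the length up to whole words (the
 * open-coded rounding mirrors the kernel's DIV_ROUND_UP()).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swab32(uint32_t x)	/* local stand-in for the kernel helper */
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

static void soft_swap(char *src, int len)
{
	uint32_t *p = (uint32_t *)src;
	uint32_t *maxp = p + (len + sizeof(uint32_t) - 1) / sizeof(uint32_t);

	for (; p < maxp; p++)
		*p = swab32(*p);
}

int main(void)
{
	uint32_t words[2];
	unsigned char bytes[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned int i;

	memcpy(words, bytes, sizeof(words));
	soft_swap((char *)words, sizeof(words));
	memcpy(bytes, words, sizeof(bytes));

	for (i = 0; i < sizeof(bytes); i++)
		printf("%u ", bytes[i]);
	printf("\n");			/* prints: 4 3 2 1 8 7 6 5 */
	return 0;
}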
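
The new dwmac5_flex_pps_config() programs the PPS interval and width registers from the requested period converted into units of the PTP sub-second increment, minus one because the registers are zero-based, with the width set to half the interval (a 50% duty cycle). The userspace sketch below reproduces only that arithmetic; the 20 ns sub_second_inc (a 50 MHz PTP reference) is an assumed example value, not something taken from the driver.

/* Sketch of the PPS interval/width arithmetic: convert the requested
 * period from nanoseconds to sub-second-increment ticks and derive the
 * zero-based register values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_ns = 1000000000ULL;	/* request a 1 Hz pulse train */
	uint32_t sub_second_inc = 20;		/* assumed 20 ns PTP increment */

	uint64_t interval = period_ns / sub_second_inc;
	uint64_t width = interval >> 1;		/* 50% duty cycle */

	if (interval <= 1 || width <= 1) {
		fprintf(stderr, "period too small for this clock\n");
		return 1;
	}

	printf("MAC_PPSx_INTERVAL = %llu\n", (unsigned long long)(interval - 1));
	printf("MAC_PPSx_WIDTH    = %llu\n", (unsigned long long)(width - 1));
	return 0;
}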
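
The davinci_emac probe rework above replaces a single catch-all cleanup label with a reverse-order unwind ladder, so each failure path releases only what has already been acquired. The self-contained sketch below illustrates that pattern with hypothetical resources; it is not the driver's code.

/* Sketch of an error-unwind ladder: on failure, jump to the label that
 * releases resources in reverse order of acquisition.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int probe(int fail_at)
{
	int ret;

	ret = acquire("tx channel", fail_at == 1);
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = acquire("rx channel", fail_at == 2);
	if (ret)
		goto err_free_tx;

	ret = acquire("irq", fail_at == 3);
	if (ret)
		goto err_free_rx;

	return 0;

err_free_rx:
	release("rx channel");
err_free_tx:
	release("tx channel");
	return ret;
}

int main(void)
{
	return probe(3) ? EXIT_FAILURE : EXIT_SUCCESS;
}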