Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 22
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 86
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 68
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 214
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 29
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 331
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 51
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 152
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 97
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 48
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 78
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 47
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 446
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 40
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 58
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 129
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 20
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 145
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 59
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 258
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 74
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 436
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 359
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 151
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 627
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 2668
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 173
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 327
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 50
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 801
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 221
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 16
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 79
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 61
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_hsio.h | 785
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 93
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 45
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 26
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 26
123 files changed, 7732 insertions(+), 2742 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b2522e84f482..13eb6a4d98d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2184,25 +2184,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* Dont schedule NAPI if the driver is in the middle of reset
- * or netdev is down.
- */
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
- return;
-
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
@@ -2368,9 +2349,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static int ena_device_validate_params(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 29ebbf582010..9f23703dd509 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ const char *desc;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
*/
switch (type) {
case ASIC_LANCE:
- printk("%s: IOASIC onboard LANCE", name);
+ desc = "IOASIC onboard LANCE";
break;
case PMAD_LANCE:
- printk("%s: PMAD-AA", name);
+ desc = "PMAD-AA";
break;
case PMAX_LANCE:
- printk("%s: PMAX onboard LANCE", name);
+ desc = "PMAX onboard LANCE";
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+ printk("%s: %s, addr = %pM, irq = %d\n",
+ name, desc, dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index faba55fd656a..4122553e224b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1070,9 +1070,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
/* Disable RXCHK, active filters and Broadcom tag matching */
reg = rxchk_readl(priv, RXCHK_CONTROL);
reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1082,6 +1079,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1106,7 +1114,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
- u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1132,16 +1139,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD)
- netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
- if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
- reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
- RXCHK_BRCM_TAG_MATCH_MASK;
- netdev_info(priv->netdev,
- "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
- }
-
if (!priv->is_lite)
goto out;
@@ -2645,9 +2642,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61957b0bbd8c..e2d92548226a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget)
break;
}
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
- bp->hwrm_cmd_resp_dma_addr);
-
- bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_cmd_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
- hw_resc->max_irqs);
+ hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_resources(bp);
bnxt_cleanup_pci(bp);
init_err_free:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index ddc98c359488..a85d2be986af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
- u8 qidx;
+ u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
- QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+ qidx);
memset(&cos2bw, 0, sizeof(cos2bw));
- qidx = bp->tc_to_qidx[i];
cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 790c684f08ab..140dbd62106d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+enum bnxt_dl_param_id {
+ BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+};
+
static const struct bnxt_dl_nvm_param nvm_params[] = {
{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
+ BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+ {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
+ BNXT_NVM_SHARED_CFG, 1},
};
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
- if (nvm_param.num_bits == 1)
- buf = &val->vbool;
+ switch (bytesize) {
+ case 1:
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+ else
+ buf = &val->vu8;
+ break;
+ case 2:
+ buf = &val->vu16;
+ break;
+ case 4:
+ buf = &val->vu32;
+ break;
+ default:
+ return -EFAULT;
+ }
data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
&data_dma_addr, GFP_KERNEL);
@@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc)
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+ return -EACCES;
+ } else if (rc) {
return -EIO;
+ }
return 0;
}
@@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
{
struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
- return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ if (!rc)
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
+ return rc;
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
@@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
}
+static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int max_val = -1;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
+ max_val = BNXT_MSIX_VEC_MAX;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
+ max_val = BNXT_MSIX_VEC_MIN_MAX;
+
+ if (val.vu32 > max_val) {
+ NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
+ DEVLINK_PARAM_GENERIC(IGNORE_ARI,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+ "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
};
int bnxt_dl_register(struct bnxt *bp)
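A brief usage sketch of the flow once these parameters are registered; dl, extack and ctx stand for the handles the devlink core passes in, and the devlink command and value shown are illustrative, not taken from this patch.

/* Userspace drives the permanent NVM config through the devlink tool,
 * e.g. "devlink dev param set <dev> name msix_vec_per_pf_max
 *       value 1024 cmode permanent" (illustrative).
 * The devlink core then calls the driver callbacks in order: */
union devlink_param_value val = { .vu32 = 1024 };
int err;

err = bnxt_dl_msix_validate(dl, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
			    val, extack);	/* 1024 <= BNXT_MSIX_VEC_MAX: accepted */
if (!err)
	err = bnxt_dl_nvm_param_set(dl, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
				    ctx);	/* writes NVM_OFF_MSIX_VEC_PER_PF_MAX */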
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f68dc048390..5b6b2c7d97cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
+#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
+#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MIN_MAX 128
+
enum bnxt_nvm_dir_type {
BNXT_NVM_SHARED_CFG = 40,
BNXT_NVM_PORT_CFG,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index b574fe8e974e..9a25c05aa571 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return 0;
}
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
int rc = 0;
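A short illustrative sketch (hypothetical handler, not part of this patch) of what the new extack argument enables in eswitch mode-set callbacks:

static int example_eswitch_mode_set(struct devlink *devlink, u16 mode,
				    struct netlink_ext_ack *extack)
{
	/* Reject unsupported modes with a human-readable message that
	 * netlink carries back to userspace alongside the errno. */
	if (mode != DEVLINK_ESWITCH_MODE_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack, "Only legacy eswitch mode is supported");
		return -EOPNOTSUPP;
	}
	return 0;
}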
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 38b9a75ad724..d7287651422f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
#else
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index eb96b0613cf6..825a28e5b544 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1732,7 +1732,7 @@ int liquidio_set_fec(struct lio *lio, int on_off)
if (oct->props[lio->ifidx].fec !=
oct->props[lio->ifidx].fec_boot) {
dev_dbg(&oct->pci_dev->dev,
- "Reloade driver to chang fec to %s\n",
+ "Reload driver to change fec to %s\n",
oct->props[lio->ifidx].fec ? "on" : "off");
}
@@ -1796,7 +1796,7 @@ int liquidio_get_fec(struct lio *lio)
if (oct->props[lio->ifidx].fec !=
oct->props[lio->ifidx].fec_boot) {
dev_dbg(&oct->pci_dev->dev,
- "Reloade driver to chang fec to %s\n",
+ "Reload driver to change fec to %s\n",
oct->props[lio->ifidx].fec ? "on" : "off");
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 9d70e5c6157f..3d24133e5e49 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct lio_devlink_priv *priv;
struct octeon_device *oct;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 8b0a253a18d8..1e82b9efe447 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2158,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2257,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
/* Display qsets for all ports when offload enabled */
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
q1 = 0;
@@ -2302,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
@@ -2342,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
/* Check t.len sanity ? */
fw_data = memdup_user(useraddr + sizeof(t), t.len);
if (IS_ERR(fw_data))
@@ -2365,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2406,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
if (!is_power_of_2(m.rx_pg_sz) ||
!is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
@@ -2439,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
@@ -2491,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
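All of the checks added above follow one pattern: cxgb_extension_ioctl() dispatches on a command byte copied from user space, then copies the full per-command structure, which repeats that byte. A condensed sketch of the hardened shape, using the names from the cxgb3 code above:

	u32 cmd;
	struct ch_qset_params t;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;
	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* The second copy re-reads the command byte from user
		 * memory, so reject a mismatch rather than trusting
		 * the first read. */
		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
			return -EINVAL;
		/* ... per-command validation continues as above ... */
		break;
	}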
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6ba3104ff7eb..9bd5f755a0e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -300,8 +300,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
enum cxgb4_dcb_state_input input =
((pcmd->u.dcb.control.all_syncd_pkd &
FW_PORT_CMD_ALL_SYNCD_F)
- ? CXGB4_DCB_STATE_FW_ALLSYNCED
- : CXGB4_DCB_STATE_FW_INCOMPLETE);
+ ? CXGB4_DCB_INPUT_FW_ALLSYNCED
+ : CXGB4_DCB_INPUT_FW_INCOMPLETE);
if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 02040b99c78a..484ee8290090 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -67,7 +67,7 @@
do { \
if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
cxgb4_dcb_state_fsm((__dev), \
- CXGB4_DCB_STATE_FW_ALLSYNCED); \
+ CXGB4_DCB_INPUT_FW_ALLSYNCED); \
} while (0)
/* States we can be in for a port's Data Center Bridging.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 7fc656680299..52edb688942b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -38,7 +38,6 @@
#include "cxgb4.h"
#include "sched.h"
-/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
struct ch_sched_params *p,
enum sched_fw_ops op)
@@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
return err;
}
-/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
enum sched_bind_type type, bool bind)
{
@@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
if (e && index >= 0) {
int i = 0;
- spin_lock(&e->lock);
list_for_each_entry(qe, &e->queue_list, list) {
if (i == index)
break;
@@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
}
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
- if (err) {
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ return err;
list_del(&qe->list);
kvfree(qe);
@@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
- spin_unlock(&e->lock);
}
-out:
return err;
}
@@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err) {
- kvfree(qe);
- goto out;
- }
+ if (err)
+ goto out_err;
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
@@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
memcpy(&qe->param, p, sizeof(qe->param));
e = &s->tab[qe->param.class];
- spin_lock(&e->lock);
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
- if (err) {
- kvfree(qe);
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ goto out_err;
list_add_tail(&qe->list, &e->queue_list);
atomic_inc(&e->refcnt);
- spin_unlock(&e->lock);
-out:
+ return err;
+
+out_err:
+ kvfree(qe);
return err;
}
@@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
if (class_id == SCHED_CLS_NONE)
return -ENOTSUPP;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
- write_unlock(&s->rw_lock);
+ return t4_sched_class_bind_unbind_op(pi, arg, type, true);
- return err;
}
/**
@@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
if (!valid_class_id(dev, class_id))
return -EINVAL;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
- write_unlock(&s->rw_lock);
-
- return err;
+ return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
/* If @p is NULL, fetch any available unused class */
@@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
u8 class_id;
int err;
@@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- write_lock(&s->rw_lock);
/* See if there's an exisiting class with same
* requested sched params
*/
@@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
/* Fetch any available unused class */
e = t4_sched_class_lookup(pi, NULL);
if (!e)
- goto out;
+ return NULL;
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
-
- spin_lock(&e->lock);
/* New class */
err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
- if (err) {
- spin_unlock(&e->lock);
- e = NULL;
- goto out;
- }
+ if (err)
+ return NULL;
memcpy(&e->info, &np, sizeof(e->info));
atomic_set(&e->refcnt, 0);
e->state = SCHED_STATE_ACTIVE;
- spin_unlock(&e->lock);
}
-out:
- write_unlock(&s->rw_lock);
return e;
}
@@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
return NULL;
s->sched_size = sched_size;
- rwlock_init(&s->rw_lock);
for (i = 0; i < s->sched_size; i++) {
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].queue_list);
- spin_lock_init(&s->tab[i].lock);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap)
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
- write_lock(&s->rw_lock);
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
t4_sched_class_free(pi, e);
- write_unlock(&s->rw_lock);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 3a49e00a38a1..168fb4ce3759 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -69,13 +69,11 @@ struct sched_class {
u8 idx;
struct ch_sched_params info;
struct list_head queue_list;
- spinlock_t lock; /* Per class lock */
atomic_t refcnt;
};
struct sched_table { /* per port scheduling table */
u8 sched_size;
- rwlock_t rw_lock; /* Table lock */
struct sched_class tab[0];
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f85eab57e9e1..cb523949c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
*/
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
+ unsigned int fw_caps = adap->params.fw_caps_support;
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
@@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_PORT_CMD_PORTID_V(port));
c.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+ ? FW_PORT_ACTION_L1_CFG
+ : FW_PORT_ACTION_L1_CFG32) |
FW_LEN16(c));
- c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
+ if (fw_caps == FW_CAPS16)
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
+ else
+ c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
};
struct cpl_abort_req_rss6 {
- WR_HDR;
union opcode_tid ot;
__be32 srqidx_status;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 74d122616e76..534787291b44 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
adapter->vxlan_port = 0;
netdev->hw_enc_features = 0;
- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 84843de25c7b..6e0f47f2c8a3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2731,8 +2731,6 @@ out_error:
return err;
}
-static const struct of_device_id dpaa_match[];
-
static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
u16 headroom;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c282d5ca06d6..108c137ea593 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1897,6 +1897,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
+ priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
+ if (!priv->cls_rules)
+ goto close;
+
return 0;
close:
@@ -2004,7 +2009,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
}
/* Supported header fields for Rx hash distribution key */
-static const struct dpaa2_eth_hash_fields hash_fields[] = {
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
/* L2 header */
.rxnfc_field = RXH_L2DA,
@@ -2012,6 +2017,18 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
.cls_field = NH_FLD_ETH_DA,
.size = 6,
}, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
+ * depending on frame format, it can be the MAC ethertype
+ * or the VLAN etype.
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
+ .size = 2,
+ }, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
.cls_prot = NET_PROT_VLAN,
@@ -2049,33 +2066,122 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
},
};
-/* Set RX hash options
+/* Configure the Rx hash key using the legacy API */
+static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+
+ return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+
+ return err;
+}
+
+/* Configure the Rx flow classification key */
+static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+
+ return err;
+}
+
+/* Size of the Rx flow classification key */
+int dpaa2_eth_cls_key_size(void)
+{
+ int i, size = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ size += dist_fields[i].size;
+
+ return size;
+}
+
+/* Offset of header field in Rx classification key */
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+ int i, off = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (dist_fields[i].cls_prot == prot &&
+ dist_fields[i].cls_field == field)
+ return off;
+ off += dist_fields[i].size;
+ }
+
+ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+ return 0;
+}
+
+/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpkg_profile_cfg cls_cfg;
- struct dpni_rx_tc_dist_cfg dist_cfg;
u32 rx_hash_fields = 0;
+ dma_addr_t key_iova;
u8 *dma_mem;
int i;
int err = 0;
- if (!dpaa2_eth_hash_enabled(priv)) {
- dev_dbg(dev, "Hashing support is not enabled\n");
- return -EOPNOTSUPP;
- }
-
memset(&cls_cfg, 0, sizeof(cls_cfg));
- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- if (!(flags & hash_fields[i].rxnfc_field))
- continue;
+ /* For Rx hashing key we set only the selected fields.
+ * For Rx flow classification key we set all supported fields
+ */
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (!(flags & dist_fields[i].rxnfc_field))
+ continue;
+ rx_hash_fields |= dist_fields[i].rxnfc_field;
+ }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2083,12 +2189,10 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
}
key->type = DPKG_EXTRACT_FROM_HDR;
- key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
key->extract.from_hdr.type = DPKG_FULL_FIELD;
- key->extract.from_hdr.field = hash_fields[i].cls_field;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
-
- rx_hash_fields |= hash_fields[i].rxnfc_field;
}
dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
@@ -2098,38 +2202,73 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
if (err) {
dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
- goto err_prep_key;
+ goto free_key;
}
- memset(&dist_cfg, 0, sizeof(dist_cfg));
-
/* Prepare for setting the rx dist */
- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
- DPAA2_CLASSIFIER_DMA_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
dev_err(dev, "DMA mapping failed\n");
err = -ENOMEM;
- goto err_dma_map;
+ goto free_key;
}
- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (dpaa2_eth_has_legacy_dist(priv))
+ err = config_legacy_hash_key(priv, key_iova);
+ else
+ err = config_hash_key(priv, key_iova);
+ } else {
+ err = config_cls_key(priv, key_iova);
+ }
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
- if (err)
- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
- else
+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
priv->rx_hash_fields = rx_hash_fields;
-err_dma_map:
-err_prep_key:
+free_key:
kfree(dma_mem);
return err;
}
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!dpaa2_eth_hash_enabled(priv))
+ return -EOPNOTSUPP;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+}
+
+static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
+ !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->rx_cls_enabled = 1;
+
+ return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+}
+
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
@@ -2159,6 +2298,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure hashing\n");
+ /* Configure the flow classification key; it includes all
+ * supported header fields and cannot be modified at runtime
+ */
+ err = dpaa2_eth_set_cls(priv);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure Rx classification key\n");
+
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
err_cfg.set_frame_annotation = 1;
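As a worked example of the offset helper added above: with the dist_fields[] ordering (ETH_DA 6 bytes, ETH_SA 6, ETH_TYPE 2, then VLAN and the L3/L4 fields), a caller locating the EtherType in the extracted key would see:

	/* A minimal sketch; offsets follow the dist_fields[] ordering. */
	int off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);	/* 6 + 6 = 12 */

	*(__be16 *)(key + off) = htons(ETH_P_IP);	/* match IPv4 frames */
	*(__be16 *)(mask + off) = htons(0xFFFF);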
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 93bc41265e5e..7a7a3e7bcde2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -290,13 +290,18 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
};
-struct dpaa2_eth_hash_fields {
+struct dpaa2_eth_dist_fields {
u64 rxnfc_field;
enum net_prot cls_prot;
int cls_field;
int size;
};
+struct dpaa2_eth_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ u8 in_use;
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -340,6 +345,8 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
+ struct dpaa2_eth_cls_rule *cls_rules;
+ u8 rx_cls_enabled;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -367,6 +374,24 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
return priv->dpni_ver_major - ver_major;
}
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR 7
+#define DPNI_RX_DIST_KEY_VER_MINOR 5
+
+#define dpaa2_eth_has_legacy_dist(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
+ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_count(priv) \
+ ((priv)->dpni_attrs.fs_entries)
+
+enum dpaa2_eth_rx_dist {
+ DPAA2_ETH_RX_DIST_HASH,
+ DPAA2_ETH_RX_DIST_CLS
+};
+
/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
* the buffer also needs space for its shared info struct, and we need
* to allocate enough to accommodate hardware alignment restrictions
@@ -410,5 +435,7 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_cls_fld_off(int prot, int field);
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index ce0d94d8a7d8..26bd5a2bd8ed 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -224,10 +224,310 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = cdan;
}
+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (eth_mask->h_proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = eth_value->h_proto;
+ *(__be16 *)(mask + off) = eth_mask->h_proto;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+ ether_addr_copy(key + off, eth_value->h_source);
+ ether_addr_copy(mask + off, eth_mask->h_source);
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, eth_value->h_dest);
+ ether_addr_copy(mask + off, eth_mask->h_dest);
+ }
+
+ return 0;
+}
+
+static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask)
+{
+ int off;
+ u32 tmp_value, tmp_mask;
+
+ if (uip_mask->tos || uip_mask->ip_ver)
+ return -EOPNOTSUPP;
+
+ if (uip_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = uip_value->ip4src;
+ *(__be32 *)(mask + off) = uip_mask->ip4src;
+ }
+
+ if (uip_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = uip_value->ip4dst;
+ *(__be32 *)(mask + off) = uip_mask->ip4dst;
+ }
+
+ if (uip_mask->proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = uip_value->proto;
+ *(u8 *)(mask + off) = uip_mask->proto;
+ }
+
+ if (uip_mask->l4_4_bytes) {
+ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = htons(tmp_value >> 16);
+ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ }
+
+ /* Only apply the rule for IPv4 frames */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+
+ return 0;
+}
+
+static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto)
+{
+ int off;
+
+ if (l4_mask->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = l4_value->ip4src;
+ *(__be32 *)(mask + off) = l4_mask->ip4src;
+ }
+
+ if (l4_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = l4_value->ip4dst;
+ *(__be32 *)(mask + off) = l4_mask->ip4dst;
+ }
+
+ if (l4_mask->psrc) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = l4_value->psrc;
+ *(__be16 *)(mask + off) = l4_mask->psrc;
+ }
+
+ if (l4_mask->pdst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = l4_value->pdst;
+ *(__be16 *)(mask + off) = l4_mask->pdst;
+ }
+
+ /* Only apply the rule for IPv4 frames with the specified L4 proto */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = l4_proto;
+ *(u8 *)(mask + off) = 0xFF;
+
+ return 0;
+}
+
+static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (ext_mask->vlan_etype)
+ return -EOPNOTSUPP;
+
+ if (ext_mask->vlan_tci) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ *(__be16 *)(key + off) = ext_value->vlan_tci;
+ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ }
+
+ return 0;
+}
+
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, ext_value->h_dest);
+ ether_addr_copy(mask + off, ext_mask->h_dest);
+ }
+
+ return 0;
+}
+
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+{
+ int err;
+
+ switch (fs->flow_type & 0xFF) {
+ case ETHER_FLOW:
+ err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask);
+ break;
+ case IP_USER_FLOW:
+ err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask);
+ break;
+ case TCP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+
+ if (fs->flow_type & FLOW_EXT) {
+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ if (err)
+ return err;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_rule_cfg rule_cfg = { 0 };
+ struct dpni_fs_action_cfg fs_act = { 0 };
+ dma_addr_t key_iova;
+ void *key_buf;
+ int err;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+ return -EINVAL;
+
+ rule_cfg.key_size = dpaa2_eth_cls_key_size();
+
+ /* allocate twice the key size, for the actual key and for mask */
+ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ /* Fill the key and mask memory areas */
+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+ if (err)
+ goto free_mem;
+
+ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ rule_cfg.key_iova = key_iova;
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+ if (add) {
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ fs_act.options |= DPNI_FS_OPT_DISCARD;
+ else
+ fs_act.flow_id = fs->ring_cookie;
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+ fs->location, &rule_cfg, &fs_act);
+ } else {
+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+ &rule_cfg);
+ }
+
+ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+ kfree(key_buf);
+
+ return err;
+}
+
+static int update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ int location)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_cls_rule *rule;
+ int err = -EINVAL;
+
+ if (!priv->rx_cls_enabled)
+ return -EOPNOTSUPP;
+
+ if (location >= dpaa2_eth_fs_count(priv))
+ return -EINVAL;
+
+ rule = &priv->cls_rules[location];
+
+ /* If a rule is present at the specified location, delete it. */
+ if (rule->in_use) {
+ err = do_cls_rule(net_dev, &rule->fs, false);
+ if (err)
+ return err;
+
+ rule->in_use = 0;
+ }
+
+ /* If no new entry to add, return here */
+ if (!new_fs)
+ return err;
+
+ err = do_cls_rule(net_dev, new_fs, true);
+ if (err)
+ return err;
+
+ rule->in_use = 1;
+ rule->fs = *new_fs;
+
+ return 0;
+}
+
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int max_rules = dpaa2_eth_fs_count(priv);
+ int i, j = 0;
switch (rxnfc->cmd) {
case ETHTOOL_GRXFH:
@@ -240,6 +540,31 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rxnfc->rule_cnt = 0;
+ for (i = 0; i < max_rules; i++)
+ if (priv->cls_rules[i].in_use)
+ rxnfc->rule_cnt++;
+ rxnfc->data = max_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= max_rules)
+ return -EINVAL;
+ if (!priv->cls_rules[rxnfc->fs.location].in_use)
+ return -EINVAL;
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < max_rules; i++) {
+ if (!priv->cls_rules[i].in_use)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ rxnfc->rule_cnt = j;
+ rxnfc->data = max_rules;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -258,6 +583,12 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return -EOPNOTSUPP;
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ break;
default:
err = -EOPNOTSUPP;
}
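For reference, a sketch of the rule that an ethtool request such as "ethtool -N <iface> flow-type udp4 dst-port 4000 action 2 loc 0" delivers to dpaa2_eth_set_rxnfc() above; the port, queue and slot values are illustrative:

	struct ethtool_rx_flow_spec fs = {
		.flow_type = UDP_V4_FLOW,
		.h_u.udp_ip4_spec.pdst = htons(4000),	/* match UDP dst port 4000 */
		.m_u.udp_ip4_spec.pdst = htons(0xFFFF),
		.ring_cookie = 2,	/* steer to queue 2; RX_CLS_FLOW_DISC drops */
		.location = 0,		/* slot in priv->cls_rules[] */
	};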
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 83698abce8b4..7b44d7d9b19a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -82,6 +82,9 @@
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -515,4 +518,52 @@ struct dpni_rsp_get_api_version {
__le16 minor;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le16 miss_flow_id;
+ __le16 pad;
+ __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le32 pad;
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ /* cmd word 0 */
+ __le16 options;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 flow_id;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+ /* cmd word 3 */
+ __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+ /* cmd word 0 */
+ __le16 pad0;
+ u8 tc_id;
+ u8 key_size;
+ __le32 pad1;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6ac26797cec..220dfc806a24 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1598,3 +1598,155 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If the FS is already enabled with a previous call, the classification
+ * key will be changed, but all the table rules are kept. If the
+ * existing rules do not match the key, the results will not be
+ * predictable. It is the user's responsibility to keep key integrity.
+ * If cfg.enable is set to 1, the command will create a flow steering table
+ * and will classify packets according to this table. Packets that
+ * miss all the table rules will be classified according to settings
+ * made in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command will clear the flow steering table.
+ * Packets will then be classified according to settings made in
+ * dpni_set_rx_hash_dist().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
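A usage sketch, under stated assumptions: enable flow steering on traffic class 0 across 8 queues and drop frames that miss every rule. prepared_key_iova is a placeholder for a 256-byte DMA-able area filled via dpni_prepare_key_cfg().

/* Illustrative only; prepared_key_iova is an assumed placeholder. */
struct dpni_rx_dist_cfg dist_cfg = {
	.dist_size	 = 8,			/* number of Rx queues */
	.key_cfg_iova	 = prepared_key_iova,	/* see lead-in */
	.enable		 = 1,
	.tc		 = 0,
	.fs_miss_flow_id = DPNI_FS_MISS_DROP,	/* drop unclassified */
};

int err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);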
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If cfg.enable is set to 1, packets will be classified using a hash
+ * function based on the key received in the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, packets will be sent to the default queue.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the FS table where to insert the entry.
+ * Only relevant if MASKING is enabled for FS
+ * classification on this DPNI; it is ignored for exact match.
+ * @cfg: Flow steering rule to add
+ * @action: Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+ cmd_params->options = cpu_to_le16(action->options);
+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
+ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
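And a matching sketch for the entry calls just added, assuming key_iova maps a key immediately followed by its mask (as in the ethtool path earlier in this patch); the tc, index and queue values are illustrative.

/* Illustrative only: steer hits to Rx queue 2 in an exact-match table. */
struct dpni_rule_cfg rule = {
	.key_iova  = key_iova,			/* assumed mapped key */
	.mask_iova = key_iova + key_size,	/* mask follows the key */
	.key_size  = key_size,
};
struct dpni_fs_action_cfg act = {
	.flow_id = 2,		/* Rx queue for matching traffic */
	.options = 0,		/* or DPNI_FS_OPT_DISCARD to drop */
};
int err;

err = dpni_add_fs_entry(mc_io, 0, token, 0 /* tc */, 0 /* index */,
			&rule, &act);
/* later, the same rule/mask pair removes the entry: */
err = dpni_remove_fs_entry(mc_io, 0, token, 0 /* tc */, &rule);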
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index b378a00c7c53..a521242e2353 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -629,6 +629,45 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_tc_dist_cfg *cfg);
/**
+ * When used for fs_miss_flow_id in function dpni_set_rx_fs_dist(),
+ * it will signal to the dpni to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size: distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
+ * with the extractions to be used for the distribution key, prepared
+ * by calling dpni_prepare_key_cfg(); relevant only when enable != 0,
+ * otherwise it can be '0'
+ * @enable: enable/disable the distribution
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: if a packet misses all rules in the flow steering table
+ * and hashing is disabled, it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop such frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ u16 dist_size;
+ u64 key_cfg_iova;
+ u8 enable;
+ u8 tc;
+ u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
* does not generate FQDAN notifications; user is expected to
@@ -816,6 +855,64 @@ struct dpni_rule_cfg {
u8 key_size;
};
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits of the FLC value are
+ * interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the
+ * Frame Descriptor section in the hardware documentation for
+ * more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ * values are in range 0 to num_queue-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ u64 flc;
+ u16 flow_id;
+ u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg);
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ce74b7a46d07..a17cc973d9a3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -1273,7 +1273,7 @@ skb_done:
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (netif_queue_stopped(ndev)) {
+ if (netif_tx_queue_stopped(nq)) {
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -2240,7 +2240,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c96..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
- fill_v2_desc(ring, priv,
- (k == frag_buf_num - 1) ?
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
- dma + BD_MAX_SEND_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- buf_num,
- (type == DESC_TYPE_SKB && !k) ?
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
- mtu);
+ mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
return phy_mii_ioctl(phy_dev, ifr, cmd);
}
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
- struct hns_nic_priv *priv = netdev_priv(ndev);
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < priv->ae_handle->q_num * 2; i++)
- napi_schedule(&priv->ring_data[i].napi);
- local_irq_restore(flags);
-}
-#endif
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hns_nic_poll_controller,
-#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
.ndo_select_queue = hns_nic_select_queue,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index be9dc08ccf67..038326cfda93 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
- HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 591ee2ee4bf6..1b49c5d3340b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -355,8 +355,6 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
- int (*update_mta_status)(struct hnae3_handle *handle);
-
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c2692563a4d9..e9d4564b8ce1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -21,6 +21,7 @@
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
@@ -475,9 +476,6 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_MULTICAST) {
if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
netdev_err(netdev, "sync mc address fail\n");
-
- if (h->ae_algo->ops->update_mta_status)
- h->ae_algo->ops->update_mta_status(h);
}
}
@@ -2202,18 +2200,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
-static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
- struct hns3_desc *desc, u32 l234info)
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
{
struct pci_dev *pdev = ring->tqp->handle->pdev;
- u16 vlan_tag;
if (pdev->revision == 0x20) {
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- if (!(vlan_tag & VLAN_VID_MASK))
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(*vlan_tag & VLAN_VID_MASK))
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- return vlan_tag;
+ return (*vlan_tag != 0);
}
#define HNS3_STRP_OUTER_VLAN 0x1
@@ -2222,17 +2220,14 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
case HNS3_STRP_INNER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
default:
- vlan_tag = 0;
- break;
+ return false;
}
-
- return vlan_tag;
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
@@ -2334,8 +2329,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
- vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
- if (vlan_tag & VLAN_VID_MASK)
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
@@ -3155,15 +3149,6 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
}
-static void hns3_uninit_mac_addr(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
-
- if (h->ae_algo->ops->rm_uc_addr)
- h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
-}
-
static int hns3_restore_fd_rules(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3296,6 +3281,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_remove_hw_addr(netdev);
+
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -3319,8 +3306,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
- hns3_uninit_mac_addr(netdev);
-
free_netdev(netdev);
}
@@ -3392,6 +3377,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
hns3_nic_mc_sync(ndev, ha->addr);
}
+static void hns3_remove_hw_addr(struct net_device *netdev)
+{
+ struct netdev_hw_addr_list *list;
+ struct netdev_hw_addr *ha, *tmp;
+
+ hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
+ /* go through and unsync uc_addr entries from the device */
+ list = &netdev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ hns3_nic_uc_unsync(netdev, ha->addr);
+
+ /* go through and unsync mc_addr entries from the device */
+ list = &netdev->mc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ if (ha->refcount > 1)
+ hns3_nic_mc_unsync(netdev, ha->addr);
+}
+
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -3637,14 +3641,14 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_uninit_mac_addr(netdev);
-
- /* it is cumbersome for hardware to pick-and-choose rules for deletion
- * from TCAM. Hence, for function reset software intervention is
- * required to delete the rules
+ /* It is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for a function reset, software intervention is
+ * required to delete the entries.
*/
- if (hns3_dev_ongoing_func_reset(ae_dev))
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(netdev);
hns3_del_all_fd_rules(netdev, false);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index e5e66b27e03e..1ccde67db770 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -175,15 +175,9 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
- HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
-
- /* Multicast linear table commands */
- HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
- HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
- HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
- HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
@@ -402,6 +396,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -591,13 +587,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0
-struct hclge_mac_vlan_mask_entry_cmd {
- u8 rsv0[2];
- u8 vlan_mask;
- u8 rsv1;
- u8 mac_mask[6];
- u8 rsv2[14];
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
};
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
@@ -622,30 +617,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0
-#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 7
-struct hclge_mta_filter_mode_cmd {
- u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
- u8 rsv[23];
-};
-
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
-struct hclge_cfg_func_mta_filter_cmd {
- u8 accept; /* Only used lowest 1 bit */
- u8 function_id;
- u8 rsv[22];
-};
-
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0
-#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
-struct hclge_cfg_func_mta_item_cmd {
- __le16 item_idx; /* Only used lowest 12 bit */
- u8 accept; /* Only used lowest 1 bit */
- u8 rsv[21];
-};
-
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 21ca4af3b37a..ca1a93664d0e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -25,12 +25,11 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc);
static struct hnae3_ae_algo ae_algo;
@@ -778,6 +777,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+ if (!cfg->umv_space)
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
@@ -856,6 +860,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
+ hdev->wanted_umv_size = cfg.umv_space;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1939,40 +1944,13 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
- bool mask_vlan,
- u8 *mac_mask)
-{
- struct hclge_mac_vlan_mask_entry_cmd *req;
- struct hclge_desc desc;
- int status;
-
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
- ether_addr_copy(req->mac_mask, mac_mask);
-
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
- status);
-
- return status;
-}
-
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- struct hclge_vport *vport;
int mtu;
int ret;
- int i;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
@@ -1985,39 +1963,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- /* Initialize the MTA table work mode */
- hdev->enable_mta = true;
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
-
- ret = hclge_set_mta_filter_mode(hdev,
- hdev->mta_mac_sel_type,
- hdev->enable_mta);
- if (ret) {
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
- ret);
- return ret;
- }
-
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->accept_mta_mc = false;
-
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
- }
- }
-
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set default mac_vlan_mask fail ret=%d\n", ret);
- return ret;
- }
-
if (netdev)
mtu = netdev->mtu;
else
@@ -4978,174 +4923,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
-{
- u16 high_val = addr[1] | (addr[0] << 8);
- struct hclge_dev *hdev = vport->back;
- u32 rsh = 4 - hdev->mta_mac_sel_type;
- u16 ret_val = (high_val >> rsh) & 0xfff;
-
- return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable)
-{
- struct hclge_mta_filter_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_mta_filter_mode_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
- hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config mat filter mode failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable)
-{
- struct hclge_cfg_func_mta_filter_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
- hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
- req->function_id = func_id;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config func_id enable failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
- u16 idx,
- bool enable)
-{
- struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item_cmd *req;
- struct hclge_desc desc;
- u16 item_idx = 0;
- int ret;
-
- req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
- hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(item_idx);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mta table item failed for cmd_send, ret =%d.\n",
- ret);
- return ret;
- }
-
- if (enable)
- set_bit(idx, vport->mta_shadow);
- else
- clear_bit(idx, vport->mta_shadow);
-
- return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct net_device *netdev = handle->kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update mta_status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclge_update_mta_status_common(vport, mta_status,
- 0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter)
-{
- struct hclge_dev *hdev = vport->back;
- u16 update_max = idx + count;
- u16 check_max;
- int ret = 0;
- bool used;
- u16 i;
-
- /* setup mta check range */
- if (update_filter) {
- i = 0;
- check_max = HCLGE_MTA_TBL_SIZE;
- } else {
- i = idx;
- check_max = update_max;
- }
-
- used = false;
- /* check and update all mta item */
- for (; i < check_max; i++) {
- /* ignore unused item */
- if (!test_bit(i, vport->mta_shadow))
- continue;
-
- /* if i in update range then update it */
- if (i >= idx && i < update_max)
- if (!test_bit(i - idx, status))
- hclge_set_mta_table_item(vport, i, false);
-
- if (!used && test_bit(i, vport->mta_shadow))
- used = true;
- }
-
- /* no longer use mta, disable it */
- if (vport->accept_mta_mc && update_filter && !used) {
- ret = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "disable func mta filter fail ret=%d\n",
- ret);
- else
- vport->accept_mta_mc = false;
- }
-
- return ret;
-}
-
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -5269,6 +5046,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "Alloc umv space failed, want %d, get %d\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ mutex_init(&hdev->umv_mutex);
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+
+ return 0;
+}
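The split computed above, with illustrative numbers: the allocated space is notionally cut into num_req_vfs + 2 slices, one per VF, one for the PF, and one slice (plus any remainder) kept as the shared pool.

/* Illustrative only: allocated_size = 3072, num_req_vfs = 5 => 7 slices.
 *
 *   priv_umv_size  = 3072 / 7       = 438  (quota for PF and each VF)
 *   share_umv_size = 438 + 3072 % 7 = 444  (pool slice + remainder)
 */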
+
+static int hclge_uninit_umv_space(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->max_umv_size > 0) {
+ ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
+ false);
+ if (ret)
+ return ret;
+ hdev->max_umv_size = 0;
+ }
+ mutex_destroy(&hdev->umv_mutex);
+
+ return 0;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "%s umv space failed for cmd_send, ret =%d\n",
+ is_alloc ? "allocate" : "free", ret);
+ return ret;
+ }
+
+ if (is_alloc && allocated_size)
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->umv_mutex);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+ mutex_unlock(&hdev->umv_mutex);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ mutex_lock(&hdev->umv_mutex);
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+ mutex_unlock(&hdev->umv_mutex);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->umv_mutex);
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+ mutex_unlock(&hdev->umv_mutex);
+}
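How the accounting above plays out for one vport, assuming priv_umv_size were 2:

/* Illustrative only:
 *
 *   op      used_umv_num   share_umv_size
 *   add #1  0 -> 1         unchanged       (within private quota)
 *   add #2  1 -> 2         unchanged
 *   add #3  2 -> 3         N -> N - 1      (spills into the shared pool)
 *   del #3  3 -> 2         N - 1 -> N      (shared slot returned first)
 */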
+
static int hclge_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
@@ -5314,8 +5203,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
* is not allowed in the mac vlan table.
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
- if (ret == -ENOENT)
- return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (ret == -ENOENT) {
+ if (!hclge_is_umv_space_full(vport)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ return ret;
+ }
+
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
/* check if we just hit the duplicate */
if (!ret)
@@ -5358,6 +5258,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret)
+ hclge_update_umv_space(vport, true);
return ret;
}
@@ -5376,7 +5278,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
- u16 tbl_idx;
int status;
/* mac addr check */
@@ -5406,25 +5307,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* If mc mac vlan table is full, use MTA table */
- if (status == -ENOSPC) {
- if (!vport->accept_mta_mc) {
- status = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- true);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n",
- status);
- return status;
- }
- vport->accept_mta_mc = true;
- }
-
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
- }
+ if (status == -ENOSPC)
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
}
@@ -5639,7 +5523,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
- bool filter_en)
+ u8 fe_type, bool filter_en)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
@@ -5649,7 +5533,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en;
+ req->vlan_fe = filter_en ? fe_type : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -5661,13 +5545,30 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
#define HCLGE_FILTER_TYPE_VF 0
#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+ if (hdev->pdev->revision >= 0x21) {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable);
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, enable);
+ } else {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+ }
}
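For reference, the vlan_fe bit patterns the helper above ends up writing, derived from the defines just added:

/* Illustrative only:
 *
 *   rev >= 0x21, VF type:   FE_NIC_EGRESS_B  | FE_ROCE_EGRESS_B  = 0x0a
 *   rev >= 0x21, PORT type: FE_NIC_INGRESS_B | FE_ROCE_INGRESS_B = 0x05
 *   rev == 0x20, VF type:   FE_EGRESS_V1_B                       = 0x01
 */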
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -5969,13 +5870,23 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int ret;
int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
- if (ret)
- return ret;
+ if (hdev->pdev->revision >= 0x21) {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, true);
+ if (ret)
+ return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
- if (ret)
- return ret;
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true);
+ if (ret)
+ return ret;
+ }
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -6746,6 +6657,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
}
+ ret = hclge_init_umv_space(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -6866,6 +6783,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_umv_space(hdev);
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -6919,6 +6838,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
+ hclge_uninit_umv_space(hdev);
+
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
@@ -7317,7 +7238,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
- .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 06adbdd27b95..e3dfd654eca9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -14,6 +14,8 @@
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
+#define HCLGE_MAX_PF_NUM 8
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -53,7 +55,9 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
-#define HCLGE_MTA_TBL_SIZE 4096
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_TQP_RESET_TRY_TIMES 10
@@ -162,13 +166,6 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
-enum hclge_mta_dmac_sel_type {
- HCLGE_MAC_ADDR_47_36,
- HCLGE_MAC_ADDR_46_35,
- HCLGE_MAC_ADDR_45_34,
- HCLGE_MAC_ADDR_44_33,
-};
-
struct hclge_mac {
u8 phy_addr;
u8 flag;
@@ -251,6 +248,7 @@ struct hclge_cfg {
u8 default_speed;
u32 numa_node_map;
u8 speed_ability;
+ u16 umv_space;
};
struct hclge_tm_info {
@@ -670,9 +668,6 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
- enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Multicast filter enable */
-
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
@@ -680,6 +675,15 @@ struct hclge_dev {
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
u16 hclge_fd_rule_num;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space, it's same for PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+ struct mutex umv_mutex; /* protect share_umv_size */
};
/* VPort level vlan tag configuration for TX direction */
@@ -732,13 +736,12 @@ struct hclge_vport {
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
+ u16 used_umv_num;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
-
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -753,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable);
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter);
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f34851c91eb3..04462a347a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
- u8 *msg, u8 idx, bool is_end)
-{
-#define HCLGE_MTA_STATUS_MSG_SIZE 13
-#define HCLGE_MTA_STATUS_MSG_BITS \
- (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGE_MTA_STATUS_MSG_END_BITS \
- (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
- unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_ofs;
- u8 msg_bit;
-
- tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
- HCLGE_MTA_STATUS_MSG_BITS;
-
- /* set msg field */
- msg_ofs = 0;
- msg_bit = 0;
- memset(status, 0, sizeof(status));
- for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
- if (msg[msg_ofs] & BIT(msg_bit))
- set_bit(tbl_idx, status);
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- return hclge_update_mta_status_common(vport,
- status, idx * HCLGE_MTA_STATUS_MSG_BITS,
- tbl_cnt, is_end);
-}
-
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
status = hclge_add_mc_addr_common(vport, mac_addr);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
- u8 func_id = vport->vport_id;
- bool enable = mbx_req->msg[2];
-
- status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
- resp_data = hdev->mta_mac_sel_type;
- resp_len = sizeof(u8);
- gen_resp = true;
- status = 0;
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
- /* mta status update msg format
- * msg[2.6 : 2.0] msg index
- * msg[2.7] msg is end
- * msg[15 : 3] mta status bits[103 : 0]
- */
- bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
-
- status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
- mbx_req->msg[2] & 0x7F,
- is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8f858cb2a67b..ca4a9f790917 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -746,126 +746,6 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
-{
- u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
- int ret;
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
- NULL, 0, true, &resp_msg, sizeof(u8));
-
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Read mta type fail, ret=%d.\n", ret);
- return ret;
- }
-
- if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
- dev_err(&hdev->pdev->dev,
- "Read mta type invalid, resp=%d.\n", resp_msg);
- return -EINVAL;
- }
-
- hdev->mta_mac_sel_type = resp_msg;
-
- return 0;
-}
-
-static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
- const u8 *addr)
-{
- u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
- u16 high_val = addr[1] | (addr[0] << 8);
-
- return (high_val >> rsh) & 0xfff;
-}
-
-static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
- unsigned long *status)
-{
-#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
-#define HCLGEVF_MTA_STATUS_MSG_BITS \
- (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
- (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_cnt;
- u8 msg_idx;
- int ret;
-
- msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
- HCLGEVF_MTA_STATUS_MSG_BITS);
- tbl_idx = 0;
- msg_idx = 0;
- while (msg_cnt--) {
- u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
- u8 *p = &msg[1];
- u8 msg_ofs;
- u8 msg_bit;
-
- memset(msg, 0, sizeof(msg));
-
- /* set index field */
- msg[0] = 0x7F & msg_idx;
-
- /* set end flag field */
- if (msg_cnt == 0) {
- msg[0] |= 0x80;
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
- } else {
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
- }
-
- /* set status field */
- msg_ofs = 0;
- msg_bit = 0;
- while (tbl_cnt--) {
- if (test_bit(tbl_idx, status))
- p[msg_ofs] |= BIT(msg_bit);
-
- tbl_idx++;
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
- msg, sizeof(msg), false, NULL, 0);
- if (ret)
- break;
-
- msg_idx++;
- }
-
- return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct net_device *netdev = hdev->nic.kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- /* clear status */
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclgevf_do_update_mta_status(hdev, mta_status);
-}
-
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1871,14 +1751,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* Initialize mta type for this VF */
- ret = hclgevf_cfg_func_mta_type(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize MTA type\n", ret);
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2038,7 +1910,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
- .update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 2af01f107c63..cf5fbf793c5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -47,9 +47,6 @@
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
-#define HCLGEVF_MTA_TBL_SIZE 4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX 4
-
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
@@ -157,8 +154,6 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..4a8f82938ed5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->tx_errors = nic_tx_stats->tx_dropped;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int i, num_qps;
-
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- for (i = 0; i < num_qps; i++) {
- struct hinic_txq *txq = &nic_dev->txqs[i];
- struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
- napi_schedule(&txq->napi);
- napi_schedule(&rxq->napi);
- }
-}
-#endif
-
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hinic_netpoll,
-#endif
};
static void netdev_features_init(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index aa0b89777e74..3baabdc89726 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -920,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
return rx;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
- struct ehea_port *port = netdev_priv(dev);
- int i;
-
- for (i = 0; i < port->num_def_qps; i++)
- napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2952,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_open = ehea_open,
.ndo_stop = ehea_stop,
.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ehea_netpoll,
-#endif
.ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a8369addfe68..7893beffcc71 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2207,19 +2207,6 @@ restart_poll:
return frames_processed;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int i;
-
- replenish_pools(netdev_priv(dev));
- for (i = 0; i < adapter->req_rx_queues; i++)
- ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
- adapter->rx_scrq[i]);
-}
-#endif
-
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
};
@@ -2364,8 +2348,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ } else {
+ ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ }
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2367,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
- if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
- ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
- netdev_err(netdev, "Invalid request.\n");
- netdev_err(netdev, "Max tx buffers = %llu\n",
- adapter->max_rx_add_entries_per_subcrq);
- netdev_err(netdev, "Max rx buffers = %llu\n",
- adapter->max_tx_entries_per_subcrq);
- return -EINVAL;
- }
-
+ ret = 0;
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+ adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+ netdev_info(netdev,
+ "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ ring->rx_pending, ring->tx_pending,
+ adapter->req_rx_add_entries_per_subcrq,
+ adapter->req_tx_entries_per_subcrq);
+ return ret;
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2391,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
+ } else {
+ channels->max_rx = IBMVNIC_MAX_QUEUES;
+ channels->max_tx = IBMVNIC_MAX_QUEUES;
+ }
+
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2411,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
+ ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_queues != channels->rx_count ||
+ adapter->req_tx_queues != channels->tx_count))
+ netdev_info(netdev,
+ "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ channels->rx_count, channels->tx_count,
+ adapter->req_rx_queues, adapter->req_tx_queues);
+ return ret;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2435,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+ i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ ibmvnic_priv_flags[i]);
+ break;
+ default:
+ return;
}
}
@@ -2464,6 +2484,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -2514,6 +2536,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+ if (which_maxes)
+ adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+ else
+ adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+ return 0;
+}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2527,6 +2568,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
+ .get_priv_flags = ibmvnic_get_priv_flags,
+ .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
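
The new private flag selects which maxima ethtool reports: by default the driver now advertises its own ceilings (IBMVNIC_MAX_QUEUES, IBMVNIC_MAX_QUEUE_SZ, defined in the header hunk below), and with use-server-maxes set it reverts to the server-provided limits. Ethtool private flags pair bit i of the flags word with string i of the ETH_SS_PRIV_FLAGS string set, so from userspace the flag is toggled with "ethtool --set-priv-flags <dev> use-server-maxes on" and read back with "ethtool --show-priv-flags <dev>". A minimal sketch of that bit/string contract, with hypothetical names:

    /* Bit i of the u32 exchanged via .get_priv_flags/.set_priv_flags
     * corresponds to string i reported for ETH_SS_PRIV_FLAGS.
     */
    static const char foo_priv_flags[][ETH_GSTRING_LEN] = {
    #define FOO_USE_SERVER_MAXES    BIT(0)
            "use-server-maxes",
    };

    static u32 foo_get_priv_flags(struct net_device *netdev)
    {
            struct foo_priv *priv = netdev_priv(netdev);

            return priv->flags;
    }
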
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f06eec145ca6..18103b811d4d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,8 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_QUEUES 10
+#define IBMVNIC_MAX_QUEUES 16
+#define IBMVNIC_MAX_QUEUE_SZ 4096
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -48,6 +49,11 @@
#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN 500
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+ "use-server-maxes"
+};
+
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 56b911a5dd8b..a20d1cf058ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- dev_info(&pf->pdev->dev, " active_vlans is %s\n",
- vsi->active_vlans ? "<valid>" : "<null>");
dev_info(&pf->pdev->dev,
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f4bb2779f03a..81b0e1f8d14b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf->link_forced = true;
vf->link_up = true;
pfe.event_data.link_event.link_status = true;
- pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index a512f7521841..272d76b733aa 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -342,7 +342,7 @@ struct iavf_adapter {
struct iavf_channel_config ch_config;
u8 num_tc;
struct list_head cloud_filter_list;
- /* lock to protest access to the cloud filter list */
+ /* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
};
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 45125bd074d9..e5d6f684437e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -16,3 +16,4 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx.o \
ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9cce4cb91401..4c4b5717a627 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
@@ -35,6 +36,8 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -46,6 +49,7 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
+#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
@@ -63,6 +67,14 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -133,9 +145,21 @@ enum ice_state {
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
+ /* When checking for the PF to be in a nominal operating state, only
+ * the bits grouped at the beginning of the list, i.e. those occurring
+ * before __ICE_STATE_NOMINAL_CHECK_BITS, are checked. If a new bit
+ * must be considered for nominal operating state, add it before
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * without appropriate consideration.
+ */
+ __ICE_STATE_NOMINAL_CHECK_BITS,
__ICE_ADMINQ_EVENT_PENDING,
+ __ICE_MAILBOXQ_EVENT_PENDING,
__ICE_MDD_EVENT_PENDING,
+ __ICE_VFLR_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
+ __ICE_VF_DIS,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
@@ -172,7 +196,8 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
- int base_vector;
+ int sw_base_vector; /* Irq base for OS reserved vectors */
+ int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -180,6 +205,8 @@ struct ice_vsi {
/* Interrupt thresholds */
u16 work_lmt;
+ s16 vf_id; /* VF ID for SR-IOV VSIs */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -229,21 +256,39 @@ struct ice_q_vector {
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
+ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
+ * value to the device
+ */
+ u8 intrl;
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
+ ICE_FLAG_SRIOV_ENA,
+ ICE_FLAG_SRIOV_CAPABLE,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
+
+ /* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *irq_tracker;
+ struct ice_res_tracker *sw_irq_tracker;
+
+ /* HW reserved Interrupts for this PF */
+ struct ice_res_tracker *hw_irq_tracker;
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ /* Virtchnl/SR-IOV config info */
+ struct ice_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_vfs_supported; /* num VFs supported for this PF */
+ u16 num_vf_qps; /* num queue pairs per VF */
+ u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
@@ -256,9 +301,11 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause vector index */
+ u32 sw_oicr_idx; /* Other interrupt cause SW vector index */
+ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
+ u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
@@ -293,8 +340,8 @@ struct ice_netdev_priv {
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
- ((struct ice_pf *)hw->back)->oicr_idx;
+ u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ ((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
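
The per-VF interrupt defines above encode a simple invariant: a VF needs one MSI-X vector per data queue plus one miscellaneous (mailbox/other-cause) vector, hence the "+ 1" in the MIN/DFLT values. A sketch of how that relationship could be asserted at build time (placed inside any function, e.g. on the probe path):

    /* Per-VF vector budget = data-queue vectors + 1 misc vector */
    BUILD_BUG_ON(ICE_MIN_INTR_PER_VF != ICE_MIN_QS_PER_VF + 1);
    BUILD_BUG_ON(ICE_DFLT_INTR_PER_VF != ICE_DFLT_QS_PER_VF + 1);
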
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index c100b4bda195..6653555f55dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
@@ -1075,6 +1077,19 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/**
+ * Send to PF command (indirect 0x0801); 'id' is only used by the PF
+ *
+ * Send to VF command (indirect 0x0802); 'id' is only used by the PF
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1332,6 +1347,7 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1429,6 +1445,10 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
ice_aqc_opc_set_rss_lut = 0x0B03,
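
The new 0x0801/0x0802 opcodes carry virtchnl messages over the mailbox queue, with the virtchnl opcode and return value riding in the descriptor cookies and the payload in the indirect buffer; 'id' selects the target VF when the PF sends. The real helpers live in the new SR-IOV files, so the following PF-side send is only an illustrative sketch:

    /* Illustrative PF-to-VF mailbox send (names assumed) */
    static enum ice_status
    ice_send_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
                   u8 *msg, u16 msglen, struct ice_sq_cd *cd)
    {
            struct ice_aq_desc desc;

            ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
            desc.params.virt.id = cpu_to_le32(vfid);
            desc.cookie_high = cpu_to_le32(v_opcode);
            desc.cookie_low = cpu_to_le32(v_retval);
            if (msglen)
                    desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

            return ice_aq_send_cmd(hw, &desc, msg, msglen, cd);
    }
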
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index ef9229fa5510..c52f450f2c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -422,7 +422,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
-
+ ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
devm_kfree(ice_hw_to_dev(hw), sw);
}
@@ -598,6 +598,39 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
}
/**
+ * ice_get_itr_intrl_gran - determine itr/intrl granularity
+ * @hw: pointer to the hw struct
+ *
+ * Determines the itr/intrl granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+ switch (max_agg_bw) {
+ case ICE_MAX_AGG_BW_200G:
+ case ICE_MAX_AGG_BW_100G:
+ case ICE_MAX_AGG_BW_50G:
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+ break;
+ case ICE_MAX_AGG_BW_25G:
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to determine itr/intrl granularity\n");
+ return ICE_ERR_CFG;
+ }
+
+ return 0;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -621,11 +654,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- /* set these values to minimum allowed */
- hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
- hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
- hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
- hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+ status = ice_get_itr_intrl_gran(hw);
+ if (status)
+ return status;
status = ice_init_all_ctrlq(hw);
if (status)
@@ -1375,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs exposed = %d\n",
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs allocated = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VF base_id = %d\n",
+ func_p->vf_base_id);
+ }
+ break;
case ICE_AQC_CAPS_VSI:
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
@@ -1740,8 +1793,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-static enum ice_status
-ice_update_link_info(struct ice_port_info *pi)
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy_info;
@@ -2055,7 +2107,7 @@ ice_aq_get_set_rss_lut_exit:
/**
* ice_aq_get_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -2063,17 +2115,20 @@ ice_aq_get_set_rss_lut_exit:
* get the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, false);
}
/**
* ice_aq_set_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -2081,11 +2136,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
* set the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, true);
}
/**
@@ -2126,31 +2184,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
* get the RSS key per VSI
*/
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ key, false);
}
/**
* ice_aq_set_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
* set the RSS key per VSI
*/
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ keys, true);
}
/**
@@ -2221,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* Disable LAN Tx queue (0x0C31)
@@ -2228,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txqs *cmd;
@@ -2237,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
- if (!qg_list)
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
return ICE_ERR_PARAM;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
cmd->num_entries = num_qgrps;
+ cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be absolute VF id */
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
for (i = 0; i < num_qgrps; ++i) {
/* Calculate the size taken up by the queue IDs in this group */
sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -2260,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sz)
return ICE_ERR_PARAM;
+do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
@@ -2489,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_ena_vsi_txq
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc: tc number
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
@@ -2499,7 +2600,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* This function adds one LAN queue
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -2516,15 +2617,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
hw = pi->hw;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
/* find a parent node */
- parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
status = ICE_ERR_PARAM;
goto ena_txq_exit;
}
+
buf->parent_teid = parent->info.node_teid;
node.parent_teid = parent->info.node_teid;
/* Mark that the values in the "generic" section as valid. The default
@@ -2562,13 +2667,16 @@ ena_txq_exit:
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cd)
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
@@ -2577,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ /* if the queues are already disabled but the disable-queues command
+ * still has to be sent to complete the VF reset, call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+ if (!num_queues && rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+ NULL);
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -2589,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), cd);
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
if (status)
break;
@@ -2602,7 +2720,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
/**
* ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
* @owner: lan or rdma
@@ -2610,7 +2728,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
* This function adds/updates the VSI queues per TC.
*/
static enum ice_status
-ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
enum ice_status status = 0;
@@ -2619,6 +2737,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
@@ -2626,7 +2747,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!ice_sched_get_tc_node(pi, i))
continue;
- status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
ice_is_tc_ena(tc_bitmap, i));
if (status)
break;
@@ -2639,21 +2760,84 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
/**
* ice_cfg_vsi_lan - configure VSI lan queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @max_lanqs: max lan queues array per TC
*
* This function adds/updates the VSI lan queues per TC.
*/
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
- return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
ICE_SCHED_NODE_OWNER_LAN);
}
/**
+ * ice_replay_pre_init - replay pre-initialization
+ * @hw: pointer to the hw struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ /* Delete old entries from the replay filter list head, if any */
+ ice_rm_all_sw_replay_rule_info(hw);
+ /* At the start of replay, move entries into the replay_rules list;
+ * this allows rule entries to be added back to the filt_rules list,
+ * which is the operational list.
+ */
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ list_replace_init(&sw->recp_list[i].filt_rules,
+ &sw->recp_list[i].filt_replay_rules);
+
+ return 0;
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the hw struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after reset. It is required to call this
+ * function with main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Replay pre-initialization if there is any */
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+ status = ice_replay_pre_init(hw);
+ if (status)
+ return status;
+ }
+
+ /* Replay per VSI all filters */
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the hw struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+ /* Delete old entries from replay filter list head */
+ ice_rm_all_sw_replay_rule_info(hw);
+}
+
+/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
* @hireg: high 32 bit HW register to read from
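
The new rst_src/vmvf_num arguments let a VF or VM reset flush Tx queue state even when no queue list is available. A hedged usage sketch (the call site is illustrative):

    /* During a VF reset the queues may already be disabled, but the
     * disable-queues command must still reach firmware to complete the
     * reset: pass no queue list and identify the VF instead.
     */
    status = ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL,
                             ICE_VF_RESET, vf_id, NULL);
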
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 80d288a07731..1900681289a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
@@ -21,6 +22,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
@@ -37,17 +39,18 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
+
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -87,14 +90,17 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cmd_details);
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cmd_details);
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index b25ce4f587f5..84c967294eaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
}
/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ /* set head and tail registers in our local struct */
+ cq->sq.head = PF_MBX_ATQH;
+ cq->sq.tail = PF_MBX_ATQT;
+ cq->sq.len = PF_MBX_ATQLEN;
+ cq->sq.bah = PF_MBX_ATQBAH;
+ cq->sq.bal = PF_MBX_ATQBAL;
+ cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+ cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+ cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+ cq->rq.head = PF_MBX_ARQH;
+ cq->rq.tail = PF_MBX_ARQT;
+ cq->rq.len = PF_MBX_ARQLEN;
+ cq->rq.bah = PF_MBX_ARQBAH;
+ cq->rq.bal = PF_MBX_ARQBAL;
+ cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+ cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+ cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
* @cq: pointer to the specific Control queue
@@ -639,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
default:
return ICE_ERR_PARAM;
}
@@ -696,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (ret_code)
return ret_code;
- return ice_init_check_adminq(hw);
+ ret_code = ice_init_check_adminq(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -714,6 +753,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
default:
return;
}
@@ -736,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
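
The mailbox reuses the generic control-queue machinery; a caller only has to size it before ice_init_all_ctrlq() brings it up alongside the admin queue. A sketch of the assumed probe-time setup:

    /* Assumed probe-time sizing, using the lengths added in this
     * series (ICE_MBXQ_LEN, ICE_MBXQ_MAX_BUF_LEN).
     */
    hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
    hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
    hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
    hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
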
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ea02b89243e2..437f832fd7c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -8,6 +8,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -28,6 +29,7 @@
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 88f11498804b..a6679a9bfd3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -29,6 +29,22 @@
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
+#define PF_MBX_ARQBAH 0x0022E400
+#define PF_MBX_ARQBAL 0x0022E380
+#define PF_MBX_ARQH 0x0022E500
+#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN 0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_MBX_ARQT 0x0022E580
+#define PF_MBX_ATQBAH 0x0022E180
+#define PF_MBX_ATQBAL 0x0022E100
+#define PF_MBX_ATQH 0x0022E280
+#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN 0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_MBX_ATQT 0x0022E300
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -74,10 +90,16 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
#define PFGEN_STATE 0x00088000
#define PRTGEN_STATUS 0x000B8100
+#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M BIT(0)
+#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
@@ -88,11 +110,25 @@
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
+#define GLINT_RATE_INTRL_ENA_M BIT(6)
+#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S 0
+#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S 12
+#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S 16
+#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_MBX_CTL 0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S 11
+#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
@@ -100,6 +136,7 @@
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_OICR_CTL_ITR_INDX_S 11
@@ -114,6 +151,12 @@
#define QINT_TQCTL_MSIX_INDX_S 0
#define QINT_TQCTL_ITR_INDX_S 11
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S 0
+#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S 12
+#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M BIT(31)
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
#define QRX_CTRL_MAX_INDEX 2047
@@ -126,6 +169,20 @@
#define QRX_TAIL_MAX_INDEX 2047
#define QRX_TAIL_TAIL_S 0
#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S 16
+#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
+#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S 16
+#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -162,6 +219,14 @@
#define PF_MDET_TX_PQM_VALID_M BIT(0)
#define PF_MDET_TX_TCLAN 0x000FC000
#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M BIT(0)
+#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M BIT(0)
+#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M BIT(0)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -173,6 +238,12 @@
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
+#define PF_PCI_CIAA 0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S 12
+#define PF_PCI_CIAD 0x0009E500
+#define GL_PWR_MODE_CTL 0x000B820C
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
@@ -250,5 +321,8 @@
#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define VSIQF_HKEY_MAX_INDEX 12
+#define VSIQF_HLUT_MAX_INDEX 15
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#endif /* _ICE_HW_AUTOGEN_H_ */
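
Most of the new registers are per-VF resource mappings built from shift/mask pairs. As an example of how they compose, a hedged sketch of mapping a VF's interrupt vector range via VPINT_ALLOC (first, last, and vf_id are assumed locals):

    /* Map HW vectors [first, last] to a VF and mark the mapping valid */
    u32 reg = ((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
              ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
              VPINT_ALLOC_VALID_M;

    wr32(hw, VPINT_ALLOC(vf_id), reg);
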
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 94504023d86e..7d2a66739e3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
@@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
+
+#define ICE_LINK_SPEED_UNKNOWN 0
+#define ICE_LINK_SPEED_10MBPS 10
+#define ICE_LINK_SPEED_100MBPS 100
+#define ICE_LINK_SPEED_1000MBPS 1000
+#define ICE_LINK_SPEED_2500MBPS 2500
+#define ICE_LINK_SPEED_5000MBPS 5000
+#define ICE_LINK_SPEED_10000MBPS 10000
+#define ICE_LINK_SPEED_20000MBPS 20000
+#define ICE_LINK_SPEED_25000MBPS 25000
+#define ICE_LINK_SPEED_40000MBPS 40000
+
#endif /* _ICE_LAN_TX_RX_H_ */
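
The ICE_LINK_SPEED_* values are plain decimal Mbps, which keeps conversion to the bitmask-style virtchnl speeds a simple switch. A hypothetical converter (helper name assumed; only a few speeds shown):

    /* Hypothetical helper: decimal Mbps -> enum virtchnl_link_speed */
    static enum virtchnl_link_speed ice_mbps_to_virtchnl(u16 link_speed)
    {
            switch (link_speed) {
            case ICE_LINK_SPEED_10000MBPS:
                    return VIRTCHNL_LINK_SPEED_10GB;
            case ICE_LINK_SPEED_25000MBPS:
                    return VIRTCHNL_LINK_SPEED_25GB;
            case ICE_LINK_SPEED_40000MBPS:
                    return VIRTCHNL_LINK_SPEED_40GB;
            default:
                    return VIRTCHNL_LINK_SPEED_UNKNOWN;
            }
    }
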
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 95588fe0e22f..49f1940772ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -68,18 +68,20 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile id;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increase context priority to pick up the profile id;
+ * the default is 0x01; setting it to 0x03 ensures the
+ * profile is programmed even if the previous context has
+ * the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -90,6 +92,9 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
return -EIO;
}
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -132,12 +137,17 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF id */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
default:
return;
}
/* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = vsi->vsi_num;
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
@@ -285,6 +295,16 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
+ case ICE_VSI_VF:
+ vsi->alloc_txq = pf->num_vf_qps;
+ vsi->alloc_rxq = pf->num_vf_qps;
+ /* pf->num_vf_msix includes (VF miscellaneous vector +
+ * data queue interrupts). Since vsi->num_q_vectors is the
+ * number of queue vectors, subtract 1 from the original
+ * vector count
+ */
+ vsi->num_q_vectors = pf->num_vf_msix - 1;
+ break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -331,6 +351,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
struct ice_vsi_ctx ctxt;
enum ice_status status;
+ if (vsi->type == ICE_VSI_VF)
+ ctxt.vf_num = vsi->vf_id;
ctxt.vsi_num = vsi->vsi_num;
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
@@ -466,6 +488,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
+ case ICE_VSI_VF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
@@ -685,6 +711,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
+ case ICE_VSI_VF:
+ /* A VF VSI gets a small RSS table.
+ * For VSI_LUT, the LUT size should be set to 64 bytes
+ */
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -773,17 +808,17 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
+ qcount = numq_tc;
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_SMALL_RSS_QS;
-
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
- } else {
- qcount = numq_tc;
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_PF)
+ max_rss = ICE_MAX_LG_RSS_QS;
+ else
+ max_rss = ICE_MAX_SMALL_RSS_QS;
+ qcount = min_t(int, numq_tc, max_rss);
+ qcount = min_t(int, qcount, vsi->rss_size);
+ }
}
/* find the (rounded up) power-of-2 of qcount */
@@ -813,6 +848,14 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+ * in the above for loop, make num_txq equal to num_rxq.
+ */
+ vsi->num_txq = vsi->num_rxq;
+ }
+
/* Rx queue mapping */
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
/* q_mapping buffer holds the info for the first queue allocated for
@@ -838,6 +881,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
+ case ICE_VSI_VF:
+ /* A VF VSI gets a small RSS table, which is a VSI LUT type */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -868,6 +916,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
case ICE_VSI_PF:
ctxt.flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_VF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+ /* VF number here is the absolute VF number (0-255) */
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ break;
default:
return -ENODEV;
}
@@ -961,6 +1014,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -973,6 +1028,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
NAPI_POLL_WEIGHT);
+out:
/* tie q_vector and VSI together */
vsi->q_vectors[v_idx] = q_vector;
@@ -1039,9 +1095,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int num_q_vectors = 0;
- if (vsi->base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
- vsi->vsi_num, vsi->base_vector);
+ if (vsi->sw_base_vector || vsi->hw_base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
+ vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
return -EEXIST;
}
@@ -1051,6 +1107,28 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
num_q_vectors = vsi->num_q_vectors;
+ /* reserve slots from OS requested IRQs */
+ vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ if (vsi->sw_base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num,
+ vsi->sw_base_vector);
+ return -ENOENT;
+ }
+ pf->num_avail_sw_msix -= num_q_vectors;
+
+ /* reserve slots from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
+ case ICE_VSI_VF:
+ /* take VF misc vector and data vectors into account */
+ num_q_vectors = pf->num_vf_msix;
+ /* For VF VSI, reserve slots only from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
break;
default:
dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
@@ -1058,17 +1136,20 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
break;
}
- if (num_q_vectors)
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
- num_q_vectors, vsi->idx);
-
- if (vsi->base_vector < 0) {
+ if (vsi->hw_base_vector < 0) {
dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
+ "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
+ if (vsi->type != ICE_VSI_VF) {
+ ice_free_res(vsi->back->sw_irq_tracker,
+ vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += num_q_vectors;
+ }
return -ENOENT;
}
+ pf->num_avail_hw_msix -= num_q_vectors;
+
return 0;
}
@@ -1178,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
@@ -1193,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
@@ -1207,6 +1290,38 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_manage_rss_lut - disable/enable RSS
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ *
+ * On a disable request, this function zeroes out the RSS LUT; on an
+ * enable request, it reconfigures the RSS LUT.
+ */
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+{
+ int err = 0;
+ u8 *lut;
+
+ lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
+ GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (ena) {
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size,
+ vsi->rss_size);
+ }
+
+ err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+ devm_kfree(&vsi->back->pdev->dev, lut);
+ return err;
+}
+
+/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
@@ -1230,8 +1345,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
else
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
- status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
- lut, vsi->rss_table_size);
+ status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
+ vsi->rss_table_size);
if (status) {
dev_err(&vsi->back->pdev->dev,
@@ -1255,7 +1370,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
memcpy(&key->standard_rss_key, seed,
ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
+ status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
@@ -1290,10 +1405,10 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
return -ENOMEM;
tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ tmp->fltr_info.vsi_handle = vsi->idx;
ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
INIT_LIST_HEAD(&tmp->list_entry);
@@ -1394,8 +1509,8 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+ tmp->fltr_info.vsi_handle = vsi->idx;
tmp->fltr_info.l_data.vlan.vlan_id = vid;
INIT_LIST_HEAD(&tmp->list_entry);
@@ -1431,11 +1546,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
return -ENOMEM;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ list->fltr_info.vsi_handle = vsi->idx;
list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
list->fltr_info.l_data.vlan.vlan_id = vid;
list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src = vsi->vsi_num;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
@@ -1462,6 +1577,9 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
int err = 0;
u16 i;
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
vsi->max_frame = vsi->netdev->mtu +
ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
@@ -1469,6 +1587,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
vsi->max_frame = ICE_RXBUF_2048;
vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq && !err; i++)
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
@@ -1524,7 +1643,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
* comm scheduler queue doorbell.
*/
vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev,
@@ -1548,38 +1667,72 @@ err_cfg_txqs:
}
/**
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
+ *
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
+ */
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+{
+ u32 val = intrl / gran;
+
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
+}
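A quick illustrative aside (not part of the patch): assuming a hypothetical
granularity of 4 usecs -- the real value comes from hw->intrl_gran -- the
conversion rounds down and sets the enable bit:

	u32 reg;

	reg = ice_intrl_usec_to_reg(10, 4); /* 10 / 4 = 2, returns 2 | GLINT_RATE_INTRL_ENA_M (8 usec limit) */
	reg = ice_intrl_usec_to_reg(0, 4);  /* returns 0, rate limiting stays disabled */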
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+ u8 itr_gran = hw->itr_gran;
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*/
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
+ u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
- int i, q, itr;
- u8 itr_gran;
+ int i, q;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- itr_gran = hw->itr_gran_200;
+ ice_cfg_itr(hw, q_vector, vector);
- if (q_vector->num_ring_rx) {
- q_vector->rx.itr =
- ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
- itr_gran);
- q_vector->rx.latency_range = ICE_LOW_LATENCY;
- }
-
- if (q_vector->num_ring_tx) {
- q_vector->tx.itr =
- ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
- itr_gran);
- q_vector->tx.latency_range = ICE_LOW_LATENCY;
- }
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
+ wr32(hw, GLINT_RATE(vector),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
/* Both Transmit Queue Interrupt Cause Control register
* and Receive Queue Interrupt Cause control register
@@ -1593,23 +1746,33 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
+ int itr_idx = q_vector->tx.itr_idx;
u32 val;
- itr = ICE_ITR_NONE;
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ else
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
+ int itr_idx = q_vector->rx.itr_idx;
u32 val;
- itr = ICE_ITR_NONE;
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ else
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
@@ -1636,9 +1799,8 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
@@ -1677,9 +1839,8 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
@@ -1715,8 +1876,11 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: relative ID of the VF/VM
*/
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1764,11 +1928,11 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
* the queue to schedule NAPI handler
*/
v_idx = vsi->tx_rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
+ wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- NULL);
+ rst_src, rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an active reset
* flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
* the reset operation disables queues at the hardware level anyway.
@@ -1829,11 +1993,11 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
- ctxt->vsi_num = vsi->vsi_num;
- status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
+
+ status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
- ena ? "Ena" : "Dis", vsi->vsi_num, status,
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
+ ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
vsi->back->hw.adminq.sq_last_status);
goto err_out;
}
@@ -1865,7 +2029,7 @@ err_out:
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 __always_unused vf_id)
+ enum ice_vsi_type type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
@@ -1880,6 +2044,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
@@ -1918,6 +2084,34 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break;
+ case ICE_VSI_VF:
+ /* The VF driver takes care of creating the netdev for this VSI type
+ * and maps queues to vectors through Virtchnl; the PF driver only
+ * creates the VSI and the corresponding structures for bookkeeping
+ * purposes.
+ */
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+ /* Set up the vector base only during the VF init phase or when the
+ * VF requests more vectors than currently assigned. In all other
+ * cases, assign hw_base_vector the value given earlier.
+ */
+ if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ } else {
+ vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+ }
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
+ break;
default:
/* if VSI type is not recognized, clean up the resources and
* exit
@@ -1931,8 +2125,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
if (ret) {
dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
goto unroll_vector_base;
@@ -1941,7 +2135,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
return vsi;
unroll_vector_base:
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupt back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
@@ -1962,7 +2161,7 @@ unroll_get_qs:
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
+ u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0;
u32 rxq = 0;
@@ -1971,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++;
@@ -1994,7 +2193,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
+ int base = vsi->sw_base_vector;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
@@ -2002,6 +2201,10 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
if (!vsi->q_vectors || !vsi->irqs_ready)
return;
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
@@ -2024,7 +2227,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
devm_free_irq(&pf->pdev->dev, irq_num,
vsi->q_vectors[i]);
}
- ice_vsi_release_msix(vsi);
}
}
@@ -2112,6 +2314,9 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
+ if ((start + needed) > res->num_entries)
+ return -ENOMEM;
+
id |= ICE_RES_VALID_BIT;
do {
@@ -2185,9 +2390,9 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
*/
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
+ int base = vsi->sw_base_vector;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int base = vsi->base_vector;
u32 val;
int i;
@@ -2220,8 +2425,8 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
/* disable each interrupt */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->base_vector;
- i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ for (i = vsi->hw_base_vector;
+ i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
wr32(hw, GLINT_DYN_CTL(i), 0);
ice_flush(hw);
@@ -2239,10 +2444,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
+ struct ice_vf *vf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
+ vf = &pf->vf[vsi->vf_id];
/* do not unregister and free netdevs while driver is in the reset
* recovery pending state. Since reset/rebuild happens through PF
* service task workqueue, it's not a good idea to unregister netdev
@@ -2264,10 +2471,25 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_vsi_close(vsi);
/* reclaim interrupt vectors back to PF */
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_msix += vsi->num_q_vectors;
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+ vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+ /* Reclaim VF resources only when freeing all VFs or when vector
+ * reassignment is requested
+ */
+ ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
- ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2301,8 +2523,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- vsi->base_vector = 0;
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ vsi->sw_base_vector = 0;
+ vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_vsi_set_num_qs(vsi);
@@ -2332,6 +2556,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_map_rings_to_vectors(vsi);
break;
+ case ICE_VSI_VF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->back->q_left_tx -= vsi->alloc_txq;
+ vsi->back->q_left_rx -= vsi->alloc_rxq;
+ break;
default:
break;
}
@@ -2342,8 +2582,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"Failed VSI lan queue config\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 4265464ee3d3..677db40338f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
@@ -70,5 +71,7 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+
irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
#endif /* !_ICE_LIB_H_ */
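An illustrative aside on the new ice_vsi_stop_tx_rings() signature (not part
of the patch): callers pick a reset source; ICE_NO_RESET appears in ice_down()
below, while ICE_VF_RESET is assumed here from the ice_disq_rst_src enum this
series introduces.

	/* normal interface down, no reset in progress: */
	ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);

	/* during a VF reset, tell firmware which VF's queues are stopping: */
	ice_vsi_stop_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);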
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a3513acd272b..8f61b375e768 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,7 +8,7 @@
#include "ice.h"
#include "ice_lib.h"
-#define DRV_VERSION "0.7.1-k"
+#define DRV_VERSION "0.7.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -95,7 +95,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
/* Trigger sw interrupt to revive the queue */
v_idx = tx_ring->q_vector->v_idx;
wr32(&vsi->back->hw,
- GLINT_DYN_CTL(vsi->base_vector + v_idx),
+ GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
(itr << GLINT_DYN_CTL_ITR_INDX_S) |
GLINT_DYN_CTL_SWINT_TRIG_M |
GLINT_DYN_CTL_INTENA_MSK_M);
@@ -253,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i tx rule\n",
@@ -263,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
/* Apply RX filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i rx rule\n",
@@ -274,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
@@ -283,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
err = -EIO;
goto out_promisc;
}
- /* Clear filter RX to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ /* Clear RX filter to remove traffic from wire */
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
+
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
@@ -661,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
+ ice_vc_notify_link_state(pf);
+
return 0;
}
@@ -711,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ qtype = "Mailbox";
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
@@ -792,6 +802,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_err(&pf->pdev->dev,
"Could not handle link event\n");
break;
+ case ice_mbx_opc_send_msg_to_pf:
+ ice_vc_process_vf_msg(pf, &event);
+ break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
@@ -851,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_mailboxq_subtask - clean the MailboxQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
+ return;
+
+ clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->mailboxq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
+
+ ice_flush(hw);
+}
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -916,6 +951,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg;
+ int i;
if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
@@ -1005,6 +1041,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
}
+ /* see if one of the VFs needs to be reset */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ reg = rd32(hw, VP_MDET_TX_PQM(i));
+ if (reg & VP_MDET_TX_PQM_VALID_M) {
+ wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ if (reg & VP_MDET_TX_TDPU_VALID_M) {
+ wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_RX(i));
+ if (reg & VP_MDET_RX_VALID_M) {
+ wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ }
+
+ if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ }
+ }
+
/* re-enable MDD interrupt cause */
clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, PFINT_OICR_ENA);
@@ -1038,8 +1119,10 @@ static void ice_service_task(struct work_struct *work)
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
+ ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
+ ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1050,6 +1133,8 @@ static void ice_service_task(struct work_struct *work)
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -1064,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
/**
@@ -1122,7 +1211,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
+ int base = vsi->sw_base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
@@ -1197,13 +1286,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1220,6 +1310,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+ set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -1228,6 +1319,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
+ if (oicr & PFINT_OICR_VFLR_M) {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
@@ -1241,8 +1336,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
pf->corer_count++;
else if (reset == ICE_RESET_GLOBR)
pf->globr_count++;
- else
+ else if (reset == ICE_RESET_EMPR)
pf->empr_count++;
+ else
+ dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
+ reset);
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
@@ -1321,12 +1419,15 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_flush(&pf->hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector, pf);
+ pf->msix_entries[pf->sw_oicr_idx].vector, pf);
}
- ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
@@ -1356,39 +1457,58 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
if (ice_is_reset_in_progress(pf->state))
goto skip_req_irq;
- /* reserve one vector in irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ /* reserve one vector in sw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
- pf->oicr_idx = oicr_idx;
+ pf->num_avail_sw_msix -= 1;
+ pf->sw_oicr_idx = oicr_idx;
+
+ /* reserve one vector in hw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ if (oicr_idx < 0) {
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ return oicr_idx;
+ }
+ pf->num_avail_hw_msix -= 1;
+ pf->hw_oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector,
+ pf->msix_entries[pf->sw_oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
- val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
- itr_gran = hw->itr_gran_200;
+ /* This enables Mailbox queue Interrupt causes */
+ val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_MBX_CTL, val);
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ itr_gran = hw->itr_gran;
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran));
ice_flush(hw);
@@ -1755,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
+ struct ice_hw *hw = &pf->hw;
+
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+ pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+ ICE_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
@@ -1797,6 +1926,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* reserve vectors for LAN traffic */
pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
v_budget += pf->num_lan_msix;
+ v_left -= pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -1824,10 +1954,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
"not enough vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
if (v_actual >= (pf->num_lan_msix + 1)) {
- pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
+ pf->num_avail_sw_msix = v_actual -
+ (pf->num_lan_msix + 1);
} else if (v_actual >= 2) {
pf->num_lan_msix = 1;
- pf->num_avail_msix = v_actual - 2;
+ pf->num_avail_sw_msix = v_actual - 2;
} else {
pci_disable_msix(pf->pdev);
err = -ERANGE;
@@ -1860,12 +1991,32 @@ static void ice_dis_msix(struct ice_pf *pf)
}
/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_dis_msix(pf);
+
+ if (pf->sw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
+ pf->sw_irq_tracker = NULL;
+ }
+
+ if (pf->hw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
+ pf->hw_irq_tracker = NULL;
+ }
+}
+
+/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
- int vectors = 0;
+ int vectors = 0, hw_vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
@@ -1879,30 +2030,31 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
- pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
- if (!pf->irq_tracker) {
+ pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->sw_irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
- pf->irq_tracker->num_entries = vectors;
+ /* populate the SW interrupt pool with the number of OS-granted IRQs */
+ pf->num_avail_sw_msix = vectors;
+ pf->sw_irq_tracker->num_entries = vectors;
- return 0;
-}
+ /* set up HW vector assignment tracking */
+ hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
-/**
- * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
- * @pf: board private structure
- */
-static void ice_clear_interrupt_scheme(struct ice_pf *pf)
-{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
-
- if (pf->irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
- pf->irq_tracker = NULL;
+ pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->hw_irq_tracker) {
+ ice_clear_interrupt_scheme(pf);
+ return -ENOMEM;
}
+
+ /* populate the HW interrupt pool with the number of HW-supported IRQs */
+ pf->num_avail_hw_msix = hw_vectors;
+ pf->hw_irq_tracker->num_entries = hw_vectors;
+
+ return 0;
}
/**
@@ -2087,6 +2239,7 @@ err_exit_unroll:
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
+ int i;
if (!pf)
return;
@@ -2094,8 +2247,15 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ ice_free_vfs(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, i) {
+ if (!pf->vsi[i])
+ continue;
+ ice_vsi_free_q_vectors(pf->vsi[i]);
+ }
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
@@ -2124,6 +2284,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+ .sriov_configure = ice_sriov_configure,
};
/**
@@ -2372,6 +2533,12 @@ static int ice_set_features(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ ret = ice_vsi_manage_rss_lut(vsi, true);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ ret = ice_vsi_manage_rss_lut(vsi, false);
+
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, true);
@@ -2853,7 +3020,7 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- tx_err = ice_vsi_stop_tx_rings(vsi);
+ tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
@@ -3047,13 +3214,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev) &&
- vsi->type == ICE_VSI_PF) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
- } else {
- ice_vsi_close(vsi);
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
}
}
@@ -3065,12 +3233,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
{
int err = 0;
- if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
- if (vsi->netdev && netif_running(vsi->netdev)) {
+ if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+ vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
rtnl_lock();
err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
rtnl_unlock();
+ } else {
+ err = ice_vsi_open(vsi);
}
+ }
return err;
}
@@ -3119,6 +3291,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ /* VF VSI rebuild isn't supported yet */
+ if (pf->vsi[i]->type == ICE_VSI_VF)
+ continue;
+
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3136,6 +3312,44 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
}
/**
+ * ice_vsi_replay_all - replay the configuration of all VSIs in the PF
+ * @pf: the PF
+ */
+static int ice_vsi_replay_all(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status ret;
+ int i;
+
+ /* loop through pf->vsi array and replay the VSI if found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+
+ ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d replay failed %d\n",
+ pf->vsi[i]->idx, ret);
+ return -EIO;
+ }
+
+ /* Re-map the HW VSI number using the VSI handle that was
+ * previously validated in the ice_replay_vsi() call above
+ */
+ pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d filter replayed successfully - vsi_num %i\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ /* Clean up replay filter after successful re-configuration */
+ ice_replay_post(hw);
+ return 0;
+}
+
+/**
* ice_rebuild - rebuild after reset
* @pf: pf to rebuild
*/
@@ -3175,16 +3389,26 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
+ /* reset the search_hint of the irq_trackers to 0, since interrupts
+ * have been reclaimed and may be allocated from the beginning during
+ * VSI rebuild
+ */
+ pf->sw_irq_tracker->search_hint = 0;
+ pf->hw_irq_tracker->search_hint = 0;
+
err = ice_vsi_rebuild_all(pf);
if (err) {
dev_err(dev, "ice_vsi_rebuild_all failed\n");
goto err_vsi_rebuild;
}
- ret = ice_replay_all_fltr(&pf->hw);
- if (ret) {
+ err = ice_update_link_info(hw->port_info);
+ if (err)
+ dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+
+ /* Replay all VSI configuration, including filters, after reset */
+ if (ice_vsi_replay_all(pf)) {
dev_err(&pf->pdev->dev,
- "error replaying switch filter rules\n");
+ "error replaying VSI configurations with switch filter rules\n");
goto err_vsi_rebuild;
}
@@ -3207,6 +3431,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
+ ice_reset_all_vfs(pf, true);
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
@@ -3310,7 +3535,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
@@ -3321,8 +3546,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %d\n",
@@ -3353,7 +3578,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS key, err %d aq_err %d\n",
@@ -3363,8 +3588,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS lut, err %d aq_err %d\n",
@@ -3426,9 +3651,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
else
/* change from VEB to VEPA mode */
ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
- ctxt.vsi_num = vsi->vsi_num;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
if (status) {
dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
@@ -3568,7 +3793,7 @@ static void ice_tx_timeout(struct net_device *netdev)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->base_vector - 1));
+ tx_ring->vsi->hw_base_vector));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -3715,6 +3940,12 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
+ .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
+ .ndo_set_vf_mac = ice_set_vf_mac,
+ .ndo_get_vf_config = ice_get_vf_cfg,
+ .ndo_set_vf_trust = ice_set_vf_trust,
+ .ndo_set_vf_vlan = ice_set_vf_port_vlan,
+ .ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 9c4f408f222d..7cc8aa18a22b 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -599,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
struct ice_sched_agg_info *agg_info;
- struct ice_sched_vsi_info *vsi_elem;
struct ice_sched_agg_info *atmp;
- struct ice_sched_vsi_info *tmp;
struct ice_hw *hw;
if (!pi)
@@ -620,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
}
}
- /* remove the vsi list */
- list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
- list_entry) {
- list_del(&vsi_elem->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_elem);
- }
-
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -677,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_create_vsi_info_entry - create an empty new VSI entry
- * @pi: port information structure
- * @vsi_id: VSI Id
- *
- * This function creates a new VSI entry and adds it to list
- */
-static struct ice_sched_vsi_info *
-ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *vsi_elem;
-
- if (!pi)
- return NULL;
-
- vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
- GFP_KERNEL);
- if (!vsi_elem)
- return NULL;
-
- list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
- vsi_elem->vsi_id = vsi_id;
- return vsi_elem;
-}
-
-/**
* ice_sched_add_elems - add nodes to hw and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
@@ -1072,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
INIT_LIST_HEAD(&pi->agg_list);
- INIT_LIST_HEAD(&pi->vsi_info_list);
err_init_port:
if (status && pi->root) {
@@ -1142,27 +1107,6 @@ sched_query_out:
}
/**
- * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
- * @pi: port information structure
- * @vsi_id: vsi id
- *
- * This function retrieves the vsi list for the given vsi id
- */
-static struct ice_sched_vsi_info *
-ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *list_elem;
-
- if (!pi)
- return NULL;
-
- list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
- if (list_elem->vsi_id == vsi_id)
- return list_elem;
- return NULL;
-}
-
-/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the hw struct
* @base: pointer to the base node
@@ -1198,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
/**
* ice_sched_get_free_qparent - Get a free lan or rdma q group node
* @pi: port information structure
- * @vsi_id: vsi id
+ * @vsi_handle: software VSI handle
* @tc: branch number
* @owner: lan or rdma
*
* This function retrieves a free lan or rdma q group node
*/
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
struct ice_sched_node *vsi_node, *qgrp_node = NULL;
- struct ice_sched_vsi_info *list_elem;
+ struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
- list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!list_elem)
- goto lan_q_exit;
-
- vsi_node = list_elem->vsi_node[tc];
-
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return NULL;
+ vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate that the VSI node exists */
if (!vsi_node)
goto lan_q_exit;
@@ -1245,14 +1187,14 @@ lan_q_exit:
* ice_sched_get_vsi_node - Get a VSI node based on VSI id
* @hw: pointer to the hw struct
* @tc_node: pointer to the TC node
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
*
* This function retrieves a VSI node for a given VSI handle from a given
* TC branch
*/
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
- u16 vsi_id)
+ u16 vsi_handle)
{
struct ice_sched_node *node;
u8 vsi_layer;
@@ -1262,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/* Check whether it already exists */
while (node) {
- if (node->vsi_id == vsi_id)
+ if (node->vsi_handle == vsi_handle)
return node;
node = node->sibling;
}
@@ -1301,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/**
* ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
* @owner: node owner (lan or rdma)
@@ -1310,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* lan and rdma separately.
*/
static enum ice_status
-ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
@@ -1323,7 +1265,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
@@ -1436,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/**
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1444,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
-ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
@@ -1478,7 +1420,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
return ICE_ERR_CFG;
if (i == vsil)
- parent->vsi_id = vsi_id;
+ parent->vsi_handle = vsi_handle;
}
return 0;
@@ -1487,13 +1429,13 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
/**
* ice_sched_add_vsi_to_topo - add a new VSI into tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
*
* This function adds a new VSI into scheduler tree
*/
static enum ice_status
-ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *tc_node;
@@ -1507,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
/* add vsi supported nodes to tc subtree */
- return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
+ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ num_nodes);
}
/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @new_numqs: new number of max queues
* @owner: owner of this subtree
@@ -1521,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
* This function updates the VSI child nodes based on the number of queues
*/
static enum ice_status
-ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
- u16 new_numqs, u8 owner)
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ u8 tc, u16 new_numqs, u8 owner)
{
u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
@@ -1538,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
if (!tc_node)
return ICE_ERR_CFG;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_CFG;
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi->max_lanq[tc];
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
else
return ICE_ERR_PARAM;
@@ -1572,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
new_num_nodes[i] -= prev_num_nodes[i];
- status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
return status;
}
- vsi->max_lanq[tc] = new_numqs;
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
return status;
}
@@ -1586,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
/**
* ice_sched_cfg_vsi - configure the new/exisiting VSI
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
* @owner: lan or rdma
@@ -1597,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
* disabled then suspend the VSI if it is not already.
*/
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
-
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_NO_MEMORY;
-
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
/* suspend the VSI if tc is not enabled */
if (!enable) {
@@ -1632,20 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
/* TC is enabled, if it is a new VSI then add it to the tree */
if (!vsi_node) {
- status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
+ status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
if (status)
return status;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi->vsi_node[tc] = vsi_node;
+ vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
+ /* invalidate the max queues whenever the VSI is added to the
+ * scheduler tree for the first time (boot or after reset); the
+ * child nodes must be recreated in these cases
+ */
+ vsi_ctx->sched.max_lanq[tc] = 0;
}
/* update the VSI child nodes */
- status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
+ status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
+ owner);
if (status)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index badadcc120d3..5dc9cfa04c58 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,7 +12,6 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
- u16 vsi_id;
};
struct ice_sched_agg_info {
@@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644
index 000000000000..027eba4e13f8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox queue;
+ * the message is sent asynchronously via ice_sq_send_cmd()
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
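An illustrative usage sketch (not part of the patch): the virtchnl opcode and
event struct below come from the generic virtchnl interface, not from this
file, and the zero return value stands in for "success".

	struct virtchnl_pf_event pfe = { 0 };

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	/* cookie_high carries the opcode, cookie_low the return value */
	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
			      (u8 *)&pfe, sizeof(pfe), NULL);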
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert a link speed supported by HW to a link speed supported by virtchnl.
+ * If adv_link_support is true, return the link speed in Mbps. Otherwise,
+ * return the link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that
+ * the caller must cast back to an enum virtchnl_link_speed when
+ * adv_link_support is false, but can expect the speed in Mbps when it is
+ * true.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported by
+ * the hardware. To maintain compatibility with older AVF drivers,
+ * new speed values are resolved to the closest known virtchnl
+ * speed when reporting
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
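Worked examples for the conversion above (not part of the patch):

	/* adv_link_support = true: the speed is reported directly in Mbps */
	ice_conv_link_speed_to_virtchnl(true, ICE_AQ_LINK_SPEED_25GB);
		/* -> ICE_LINK_SPEED_25000MBPS, i.e. 25000 */

	/* adv_link_support = false: 5GB has no virtchnl value and resolves
	 * down to the closest known speed
	 */
	ice_conv_link_speed_to_virtchnl(false, ICE_AQ_LINK_SPEED_5GB);
		/* -> (u32)VIRTCHNL_LINK_SPEED_1GB */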
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644
index 000000000000..3d78a0795138
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+#ifdef CONFIG_PCI_IOV
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#else /* CONFIG_PCI_IOV */
+static inline enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_SRIOV_H_ */
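A brief note on the stub pattern above, with a hedged caller sketch: when
CONFIG_PCI_IOV is disabled the inline stubs simply return 0, so callers can
invoke the helpers unconditionally (hw, vf_id and the message locals here are
assumed for illustration).

	if (ice_aq_send_msg_to_vf(hw, vf_id, v_opcode, v_retval, msg,
				  msglen, NULL))
		dev_err(dev, "failed to send mailbox message to VF %u\n",
			vf_id);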
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index d2dae913d81e..f49f299ddf2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -6,6 +6,9 @@
/* Error Codes */
enum ice_status {
+ ICE_SUCCESS = 0,
+
+ /* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 65b4e1cca6be..33403f39f1b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -106,6 +106,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
+ INIT_LIST_HEAD(&recps[i].filt_replay_rules);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -186,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
if (!vsi_ctx->alloc_from_pool)
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
ICE_AQ_VSI_IS_VALID);
+ cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
@@ -247,7 +249,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-enum ice_status
+static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
@@ -277,72 +279,13 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
- * ice_update_fltr_vsi_map - update given filter VSI map
- * @list_head: list for which filters needs to be updated
- * @list_lock: filter lock which needs to be updated
- * @old_vsi_num: old VSI HW id
- * @new_vsi_num: new VSI HW id
- *
- * update the VSI map for a given filter list
- */
-static void
-ice_update_fltr_vsi_map(struct list_head *list_head,
- struct mutex *list_lock, u16 old_vsi_num,
- u16 new_vsi_num)
-{
- struct ice_fltr_mgmt_list_entry *itr;
-
- mutex_lock(list_lock);
- if (list_empty(list_head))
- goto exit_update_map;
-
- list_for_each_entry(itr, list_head, list_entry) {
- if (itr->vsi_list_info &&
- test_bit(old_vsi_num, itr->vsi_list_info->vsi_map)) {
- clear_bit(old_vsi_num, itr->vsi_list_info->vsi_map);
- set_bit(new_vsi_num, itr->vsi_list_info->vsi_map);
- } else if (itr->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- itr->fltr_info.fwd_id.vsi_id == old_vsi_num) {
- itr->fltr_info.fwd_id.vsi_id = new_vsi_num;
- itr->fltr_info.src = new_vsi_num;
- }
- }
-exit_update_map:
- mutex_unlock(list_lock);
-}
-
-/**
- * ice_update_all_fltr_vsi_map - update all filters VSI map
- * @hw: pointer to the hardware structure
- * @old_vsi_num: old VSI HW id
- * @new_vsi_num: new VSI HW id
- *
- * update all filters VSI map
- */
-static void
-ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num)
-{
- struct ice_switch_info *sw = hw->switch_info;
- u8 i;
-
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct list_head *head = &sw->recp_list[i].filt_rules;
- struct mutex *lock; /* Lock to protect filter rule list */
-
- lock = &sw->recp_list[i].filt_rule_lock;
- ice_update_fltr_vsi_map(head, lock, old_vsi_num,
- new_vsi_num);
- }
-}
-
-/**
* ice_is_vsi_valid - check whether the VSI is valid or not
* @hw: pointer to the hw struct
* @vsi_handle: VSI handle
*
* check whether the VSI is valid or not
*/
-static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}
@@ -355,7 +298,7 @@ static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
* return the hw VSI number
* Caution: call this function only if VSI is valid (ice_is_vsi_valid)
*/
-static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
return hw->vsi_ctx[vsi_handle]->vsi_num;
}
@@ -367,7 +310,7 @@ static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
*
* return the VSI context entry for a given VSI handle
*/
-static struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}
@@ -440,12 +383,8 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
} else {
/* update with new HW VSI num */
- if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) {
- /* update all filter lists with new HW VSI num */
- ice_update_all_fltr_vsi_map(hw, tmp_vsi_ctx->vsi_num,
- vsi_ctx->vsi_num);
+ if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
- }
}
return status;
@@ -477,6 +416,25 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_update_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware
+ */
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ return ice_aq_update_vsi(hw, vsi_ctx, cd);
+}
+
+/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the hw struct
* @vsi_list_id: VSI list id returned or used for lookup
@@ -698,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 *eth_hdr;
u32 act = 0;
__be16 *off;
+ u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
@@ -716,7 +675,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+ act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
ICE_SINGLE_ACT_VSI_ID_M;
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
@@ -736,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
case ICE_FWD_TO_QGRP:
+ q_rgn = f_info->qgrp_size > 0 ?
+ (u8)ilog2(f_info->qgrp_size) : 0;
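+ /* e.g., a qgrp_size of 8 queues gives q_rgn = ilog2(8) = 3 */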
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
ICE_SINGLE_ACT_Q_REGION_M;
break;
- case ICE_DROP_PACKET:
- act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
- break;
default:
return;
}
@@ -832,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
enum ice_status status;
u16 lg_act_size;
u16 rules_size;
- u16 vsi_info;
u32 act;
+ u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
@@ -859,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
*/
- vsi_info = (m_ent->vsi_count > 1) ?
- m_ent->fltr_info.fwd_id.vsi_list_id :
- m_ent->fltr_info.fwd_id.vsi_id;
+ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
@@ -917,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/**
* ice_create_vsi_list_map
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
*
* Helper function to create a new entry of VSI list id to VSI mapping
* using the given VSI list id
*/
static struct ice_vsi_list_map_info *
-ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -937,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return NULL;
v_map->vsi_list_id = vsi_list_id;
-
+ v_map->ref_cnt = 1;
for (i = 0; i < num_vsi; i++)
- set_bit(vsi_array[i], v_map->vsi_map);
+ set_bit(vsi_handle_arr[i], v_map->vsi_map);
list_add(&v_map->list_entry, &sw->vsi_list_map_head);
return v_map;
@@ -948,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_update_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
@@ -959,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
* using the given VSI list id
*/
static enum ice_status
-ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
@@ -990,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
-
- for (i = 0; i < num_vsi; i++)
- s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
+ for (i = 0; i < num_vsi; i++) {
+ if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
+ status = ICE_ERR_PARAM;
+ goto exit;
+ }
+ /* AQ call requires hw_vsi_id(s) */
+ s_rule->pdata.vsi_list.vsi[i] =
+ cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
+ }
s_rule->type = cpu_to_le16(type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
@@ -1000,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
+exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -1007,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
* @hw: pointer to the hw struct
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: number of VSIs in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
static enum ice_status
-ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
enum ice_status status;
- int i;
-
- for (i = 0; i < num_vsi; i++)
- if (vsi_array[i] >= ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1029,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return status;
/* Update the newly created VSI list to include the specified VSIs */
- return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
- false, ice_aqc_opc_add_sw_rules,
- lkup_type);
+ return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
+ *vsi_list_id, false,
+ ice_aqc_opc_add_sw_rules, lkup_type);
}
/**
@@ -1217,15 +1182,15 @@ ice_add_update_vsi_list(struct ice_hw *hw,
* new VSIs.
*/
struct ice_fltr_info tmp_fltr;
- u16 vsi_id_arr[2];
+ u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
+ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
return ICE_ERR_ALREADY_EXISTS;
- vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
- vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
+ vsi_handle_arr[0] = cur_fltr->vsi_handle;
+ vsi_handle_arr[1] = new_fltr->vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
&vsi_list_id,
new_fltr->lkup_type);
if (status)
@@ -1245,7 +1210,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
m_entry->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
/* If this entry was large action then the large action needs
@@ -1257,11 +1222,11 @@ ice_add_update_vsi_list(struct ice_hw *hw,
m_entry->sw_marker_id,
m_entry->lg_act_idx);
} else {
- u16 vsi_id = new_fltr->fwd_id.vsi_id;
+ u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
/* A rule already exists with the new VSI being added */
- if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
/* Update the previously created VSI list set with
@@ -1270,12 +1235,12 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- false, opcode,
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false, opcode,
new_fltr->lkup_type);
/* update VSI list mapping info with new VSI id */
if (!status)
- set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
if (!status)
m_entry->vsi_count++;
@@ -1311,6 +1276,39 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
}
/**
+ * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which VSI lists need to be searched
+ * @vsi_handle: VSI handle to be found in VSI list
+ * @vsi_list_id: VSI list id found containing vsi_handle
+ *
+ * Helper function to search for a VSI list with a single entry containing the
+ * given VSI handle. This can be extended further to search VSI lists with a
+ * vsi_count greater than one. Returns a pointer to the VSI list entry if
+ * found.
+ */
+static struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+ u16 *vsi_list_id)
+{
+ struct ice_vsi_list_map_info *map_info = NULL;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (test_bit(vsi_handle, map_info->vsi_map)) {
+ *vsi_list_id = map_info->vsi_list_id;
+ return map_info;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
* ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
* @recp_id: lookup type (recipe id) for which rule has to be added
@@ -1328,6 +1326,11 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
mutex_lock(rule_lock);
@@ -1335,7 +1338,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
if (new_fltr->flag & ICE_FLTR_RX)
new_fltr->src = hw->port_info->lport;
else if (new_fltr->flag & ICE_FLTR_TX)
- new_fltr->src = f_entry->fltr_info.fwd_id.vsi_id;
+ new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
if (!m_entry) {
@@ -1388,12 +1391,12 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
/**
* ice_rem_update_vsi_list
* @hw: pointer to the hardware structure
- * @vsi_id: ID of the VSI to remove
+ * @vsi_handle: VSI handle of the VSI to remove
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
static enum ice_status
-ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
@@ -1405,47 +1408,67 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id,
return ICE_ERR_PARAM;
/* A rule with the VSI being removed does not exist */
- if (!test_bit(vsi_id, fm_list->vsi_list_info->vsi_map))
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
return ICE_ERR_DOES_NOT_EXIST;
lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
-
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, true,
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
ice_aqc_opc_update_sw_rules,
lkup_type);
if (status)
return status;
fm_list->vsi_count--;
- clear_bit(vsi_id, fm_list->vsi_list_info->vsi_map);
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
- if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
- (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+ struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
struct ice_vsi_list_map_info *vsi_list_info =
fm_list->vsi_list_info;
- u16 rem_vsi_id;
+ u16 rem_vsi_handle;
- rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
- ICE_MAX_VSI);
- if (rem_vsi_id == ICE_MAX_VSI)
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
return ICE_ERR_OUT_OF_RANGE;
- status = ice_update_vsi_list_rule(hw, &rem_vsi_id, 1,
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
vsi_list_id, true,
ice_aqc_opc_update_sw_rules,
lkup_type);
if (status)
return status;
+ tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ tmp_fltr_info.vsi_handle = rem_vsi_handle;
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr_info.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+
+ fm_list->fltr_info = tmp_fltr_info;
+ }
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
- if (status)
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
return status;
-
- /* Change the list entry action from VSI_LIST to VSI */
- fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- fm_list->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+ }
list_del(&vsi_list_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
@@ -1470,7 +1493,12 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
bool remove_rule = false;
- u16 vsi_id;
+ u16 vsi_handle;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
mutex_lock(rule_lock);
@@ -1482,9 +1510,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true;
+ } else if (!list_elem->vsi_list_info) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
} else {
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- status = ice_rem_update_vsi_list(hw, vsi_id, list_elem);
+ if (list_elem->vsi_list_info->ref_cnt > 1)
+ list_elem->vsi_list_info->ref_cnt--;
+ vsi_handle = f_entry->fltr_info.vsi_handle;
+ status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
/* if vsi count goes to zero after updating the vsi list */
@@ -1556,8 +1589,19 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
+ u16 hw_vsi_id;
m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ vsi_handle = m_list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ /* update the src in case it is vsi num */
+ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+ m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
return ICE_ERR_PARAM;
@@ -1676,57 +1720,145 @@ static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *v_list_itr;
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ enum ice_sw_lkup_type lkup_type;
+ u16 vsi_list_id = 0, vsi_handle;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
+
/* VLAN id should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
+ if (new_fltr->src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+
+ new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
+ lkup_type = new_fltr->lkup_type;
+ vsi_handle = new_fltr->vsi_handle;
rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
mutex_lock(rule_lock);
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
if (!v_list_itr) {
- u16 vsi_id = ICE_VSI_INVAL_ID;
- u16 vsi_list_id = 0;
+ struct ice_vsi_list_map_info *map_info = NULL;
if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
- enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
-
- /* All VLAN pruning rules use a VSI list.
- * Convert the action to forwarding to a VSI list.
+ /* All VLAN pruning rules use a VSI list. Check if
+ * there is already a VSI list containing the VSI that
+ * we want to add. If found, reuse its vsi_list_id for
+ * this new VLAN rule; otherwise create a new list.
*/
- vsi_id = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
- &vsi_list_id,
- lkup_type);
- if (status)
- goto exit;
+ map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
+ vsi_handle,
+ &vsi_list_id);
+ if (!map_info) {
+ status = ice_create_vsi_list_rule(hw,
+ &vsi_handle,
+ 1,
+ &vsi_list_id,
+ lkup_type);
+ if (status)
+ goto exit;
+ }
+ /* Convert the action to forwarding to a VSI list. */
new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
new_fltr->fwd_id.vsi_list_id = vsi_list_id;
}
status = ice_create_pkt_fwd_rule(hw, f_entry);
- if (!status && vsi_id != ICE_VSI_INVAL_ID) {
+ if (!status) {
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr);
if (!v_list_itr) {
status = ICE_ERR_DOES_NOT_EXIST;
goto exit;
}
- v_list_itr->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id, 1,
- vsi_list_id);
+ /* reuse VSI list for new rule and increment ref_cnt */
+ if (map_info) {
+ v_list_itr->vsi_list_info = map_info;
+ map_info->ref_cnt++;
+ } else {
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle,
+ 1, vsi_list_id);
+ }
}
+ } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
+ /* Update the existing VSI list to add the new VSI id only if
+ * it is used by a single VLAN rule.
+ */
+ cur_fltr = &v_list_itr->fltr_info;
+ status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
+ new_fltr);
+ } else {
+ /* The VLAN rule exists and the VSI list used by this rule is
+ * referenced by more than one VLAN rule. Create a new VSI list
+ * that appends the new VSI to the previous one, and update the
+ * existing VLAN rule to point to the new VSI list id.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+ u16 cur_handle;
- goto exit;
- }
+ /* The current implementation only supports reusing a VSI list
+ * with a single VSI; the condition below should never be hit.
+ */
+ if (v_list_itr->vsi_count > 1 &&
+ v_list_itr->vsi_list_info->ref_cnt > 1) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ status = ICE_ERR_CFG;
+ goto exit;
+ }
+
+ cur_handle =
+ find_first_bit(v_list_itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_handle == vsi_handle) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto exit;
+ }
+
+ vsi_handle_arr[0] = cur_handle;
+ vsi_handle_arr[1] = vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id, lkup_type);
+ if (status)
+ goto exit;
+
+ tmp_fltr = v_list_itr->fltr_info;
+ tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ /* Update the previous switch rule to the new VSI list, which
+ * includes the currently requested VSI.
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ goto exit;
+
+ /* Before overriding the VSI list map info, decrement the
+ * ref_cnt of the previous VSI list.
+ */
+ v_list_itr->vsi_list_info->ref_cnt--;
- cur_fltr = &v_list_itr->fltr_info;
- status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr);
+ /* now update to newly created list */
+ v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ v_list_itr->vsi_count++;
+ }
exit:
mutex_unlock(rule_lock);
@@ -1779,7 +1911,7 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
- * @vsi_id: number of VSI to set as default
+ * @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
*
@@ -1787,13 +1919,18 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* (represented by swid)
*/
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
enum ice_status status;
u16 s_rule_size;
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
@@ -1806,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
f_info.lkup_type = ICE_SW_LKUP_DFLT;
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
- f_info.fwd_id.vsi_id = vsi_id;
+ f_info.fwd_id.hw_vsi_id = hw_vsi_id;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
+ f_info.src_id = ICE_SRC_ID_LPORT;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
- f_info.src = vsi_id;
+ f_info.src_id = ICE_SRC_ID_VSI;
+ f_info.src = hw_vsi_id;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_tx_vsi_rule_id;
@@ -1834,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = vsi_id;
+ hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
hw->port_info->dflt_tx_vsi_rule_id = index;
} else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = vsi_id;
+ hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
hw->port_info->dflt_rx_vsi_rule_id = index;
}
} else {
@@ -1871,12 +2010,12 @@ out:
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_fltr_list_entry *list_itr;
+ struct ice_fltr_list_entry *list_itr, *tmp;
if (!m_list)
return ICE_ERR_PARAM;
- list_for_each_entry(list_itr, m_list, list_entry) {
+ list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_MAC)
@@ -1898,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
- struct ice_fltr_list_entry *v_list_itr;
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
return ICE_ERR_PARAM;
- list_for_each_entry(v_list_itr, v_list, list_entry) {
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
@@ -1920,21 +2059,21 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
/**
* ice_vsi_uses_fltr - Determine if given VSI uses specified filter
* @fm_entry: filter entry to inspect
- * @vsi_id: ID of VSI to compare with filter info
+ * @vsi_handle: VSI handle to compare with filter info
*/
static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.fwd_id.vsi_id == vsi_id) ||
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map))));
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}
/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @vsi_list_head: pointer to the list to add entry to
* @fi: pointer to fltr_info of filter entry to copy & add
*
@@ -1945,7 +2084,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id)
* extract which VSI to remove the fltr from, and pass on that information.
*/
static enum ice_status
-ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *vsi_list_head,
struct ice_fltr_info *fi)
{
@@ -1966,7 +2105,8 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
* values.
*/
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi_id;
+ tmp->fltr_info.vsi_handle = vsi_handle;
+ tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
list_add(&tmp->list_entry, vsi_list_head);
@@ -1976,9 +2116,9 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
/**
* ice_add_to_vsi_fltr_list - Add VSI filters to the list
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup_list_head: pointer to the list that has certain lookup type filters
- * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
*
* Locates all filters in lkup_list_head that are used by the given VSI,
* and adds COPIES of those entries to vsi_list_head (intended to be used
@@ -1987,7 +2127,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
* deallocated by the caller when done with list.
*/
static enum ice_status
-ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
@@ -1995,17 +2135,17 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
enum ice_status status = 0;
/* check to make sure VSI id is valid and within boundary */
- if (vsi_id >= ICE_MAX_VSI)
+ if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
- if (!ice_vsi_uses_fltr(fm_entry, vsi_id))
+ if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
- status = ice_add_entry_to_vsi_fltr_list(hw, vsi_id,
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
vsi_list_head, fi);
if (status)
return status;
@@ -2016,11 +2156,11 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup: switch rule filter lookup type
*/
static void
-ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
enum ice_sw_lkup_type lkup)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -2035,7 +2175,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
rule_head = &sw->recp_list[lkup].filt_rules;
mutex_lock(rule_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id, rule_head,
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
&remove_list_head);
mutex_unlock(rule_lock);
if (status)
@@ -2069,102 +2209,121 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
/**
* ice_remove_vsi_fltr - Remove all filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
*/
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}
/**
- * ice_replay_fltr - Replay all the filters stored by a specific list head
+ * ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
- * @list_head: list for which filters needs to be replayed
+ * @vsi_handle: driver VSI handle
* @recp_id: Recipe id for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replays the filters of recipe recp_id for a VSI represented via vsi_handle.
+ * A valid VSI handle must be passed.
*/
static enum ice_status
-ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head)
+ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ struct list_head *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- struct list_head l_head;
enum ice_status status = 0;
+ u16 hw_vsi_id;
if (list_empty(list_head))
return status;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- /* Move entries from the given list_head to a temporary l_head so that
- * they can be replayed. Otherwise when trying to re-add the same
- * filter, the function will return already exists
- */
- list_replace_init(list_head, &l_head);
-
- /* Mark the given list_head empty by reinitializing it so filters
- * could be added again by *handler
- */
- list_for_each_entry(itr, &l_head, list_entry) {
+ list_for_each_entry(itr, list_head, list_entry) {
struct ice_fltr_list_entry f_entry;
f_entry.fltr_info = itr->fltr_info;
- if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
+ itr->fltr_info.vsi_handle == vsi_handle) {
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_id, &f_entry);
if (status)
goto end;
continue;
}
-
- /* Add a filter per vsi separately */
- while (1) {
- u16 vsi;
-
- vsi = find_first_bit(itr->vsi_list_info->vsi_map,
- ICE_MAX_VSI);
- if (vsi == ICE_MAX_VSI)
- break;
-
- clear_bit(vsi, itr->vsi_list_info->vsi_map);
- f_entry.fltr_info.fwd_id.vsi_id = vsi;
- f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
- if (recp_id == ICE_SW_LKUP_VLAN)
- status = ice_add_vlan_internal(hw, &f_entry);
- else
- status = ice_add_rule_internal(hw, recp_id,
- &f_entry);
- if (status)
- goto end;
- }
+ if (!itr->vsi_list_info ||
+ !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+ continue;
+ /* Clearing it so that the logic can add it back */
+ clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
}
end:
- /* Clear the filter management list */
- ice_rem_sw_rule_info(hw, &l_head);
return status;
}
/**
- * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
+ * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
*
- * NOTE: This function does not clean up partially added filters on error.
- * It is up to caller of the function to issue a reset or fail early.
+ * Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
enum ice_status status = 0;
u8 i;
for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct list_head *head = &sw->recp_list[i].filt_rules;
+ struct list_head *head;
- status = ice_replay_fltr(hw, i, head);
+ head = &sw->recp_list[i].filt_replay_rules;
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
if (status)
return status;
}
return status;
}
+
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the hw struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ if (!sw)
+ return;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
+ struct list_head *l_head;
+
+ l_head = &sw->recp_list[i].filt_replay_rules;
+ ice_rem_sw_rule_info(hw, l_head);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index e12940e70000..b88d96a1ef69 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,9 @@ struct ice_vsi_ctx {
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
+ struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
+ u8 vf_num;
};
enum ice_sw_fwd_act_type {
@@ -42,6 +44,14 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_LAST
};
+/* type of filter src id */
+enum ice_src_id {
+ ICE_SRC_ID_UNKNOWN = 0,
+ ICE_SRC_ID_VSI,
+ ICE_SRC_ID_QUEUE,
+ ICE_SRC_ID_LPORT,
+};
+
struct ice_fltr_info {
/* Look up information: how to look up packet */
enum ice_sw_lkup_type lkup_type;
@@ -56,6 +66,7 @@ struct ice_fltr_info {
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
+ enum ice_src_id src_id;
union {
struct {
@@ -77,7 +88,10 @@ struct ice_fltr_info {
u16 ethertype;
u8 mac_addr[ETH_ALEN]; /* optional */
} ethertype_mac;
- } l_data;
+ } l_data; /* Make sure to zero out the memory of l_data before using
+ * it, or set only the data associated with the lookup match;
+ * everything else should be zero.
+ */
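+ /* e.g., a caller would typically zero its ice_fltr_info with
+ * memset() and then fill only the fields for its lookup type
+ */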
/* Depending on filter action */
union {
@@ -85,12 +99,16 @@ struct ice_fltr_info {
* queue id in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
- u16 vsi_id:10;
+ u16 hw_vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
+ /* Sw VSI handle */
+ u16 vsi_handle;
+
/* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
- * determines the range of queues the packet needs to be forwarded to
+ * determines the range of queues the packet needs to be forwarded to.
+ * Note that qgrp_size must be set to a power of 2.
*/
u8 qgrp_size;
@@ -109,6 +127,7 @@ struct ice_sw_recipe {
/* List of type ice_fltr_mgmt_list_entry */
struct list_head filt_rules;
+ struct list_head filt_replay_rules;
/* linked list of type recipe_list_entry */
struct list_head rg_list;
@@ -129,6 +148,8 @@ struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
u16 vsi_list_id;
+ /* counter to track how many rules are reusing this VSI list */
+ u16 ref_cnt;
};
struct ice_fltr_list_entry {
@@ -159,28 +180,33 @@ struct ice_fltr_mgmt_list_entry {
/* VSI related commands */
enum ice_status
-ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
-enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
-
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 839fd9ff6043..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -104,10 +104,17 @@ enum ice_rx_dtype {
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 0x003E
+#define ICE_ITR_8K 125
+#define ICE_ITR_20K 50
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
+/* Apply the ITR granularity translation to program the register. itr_gran is
+ * either 2 or 4 usecs, so divide it by 2 to get the shift amount.
+ */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
+ ((itr_gran) / 2))
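+/* e.g., with itr_gran = 2 the shift is 1, so an ITR of 50 usecs programs
+ * 50 >> 1 = 25 register units; with itr_gran = 4 the shift is 2 and the
+ * same 50 usecs programs 50 >> 2 = 12 units
+ */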
-/* apply ITR HW granularity translation to program the HW registers */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+#define ICE_DFLT_INTRL 0
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -129,14 +136,6 @@ struct ice_ring {
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware supports 2us/1us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
-
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,6 +172,7 @@ struct ice_ring_container {
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
+ int itr_idx; /* index in the interrupt vector */
u16 itr;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index e681804be4d4..12f9432abf11 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
return test_bit(tc, (unsigned long *)&bitmap);
}
+/* Driver always calls main vsi_handle first */
+#define ICE_MAIN_VSI_HANDLE 0
+
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
@@ -81,6 +84,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
+ ICE_VSI_VF,
};
struct ice_link_status {
@@ -100,6 +104,15 @@ struct ice_link_status {
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the TX scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+ ICE_NO_RESET = 0,
+ ICE_VM_RESET,
+ ICE_VF_RESET,
+};
+
/* PHY info such as phy_type, etc... */
struct ice_phy_info {
struct ice_link_status link_info;
@@ -124,6 +137,9 @@ struct ice_hw_common_caps {
/* Max MTU for function or device */
u16 max_mtu;
+ /* Virtualization support */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -132,12 +148,15 @@ struct ice_hw_common_caps {
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
u32 guaranteed_num_vsi;
};
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
};
@@ -147,12 +166,18 @@ struct ice_mac_info {
u8 perm_addr[ETH_ALEN];
};
-/* Various RESET request, These are not tied with HW reset types */
+/* Reset types used to determine which kind of reset was requested. These
+ * defines match the RESET_TYPE field of the GLGEN_RSTAT register.
+ * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT
+ * register because its reset source differs from the other types listed.
+ */
enum ice_reset_req {
+ ICE_RESET_POR = 0,
ICE_RESET_INVAL = 0,
- ICE_RESET_PFR = 1,
- ICE_RESET_CORER = 2,
- ICE_RESET_GLOBR = 3,
+ ICE_RESET_CORER = 1,
+ ICE_RESET_GLOBR = 2,
+ ICE_RESET_EMPR = 3,
+ ICE_RESET_PFR = 4,
};
/* Bus parameters */
@@ -186,7 +211,7 @@ struct ice_sched_node {
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
- u16 vsi_id;
+ u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
@@ -245,8 +270,6 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- struct ice_sched_tx_policy sched_policy;
- struct list_head vsi_info_list;
struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
@@ -314,6 +337,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
@@ -326,16 +350,26 @@ struct ice_hw {
u32 fw_build; /* firmware build number */
struct ice_fw_log_cfg fw_log;
- /* minimum allowed value for different speeds */
-#define ICE_ITR_GRAN_MIN_200 1
-#define ICE_ITR_GRAN_MIN_100 1
-#define ICE_ITR_GRAN_MIN_50 2
-#define ICE_ITR_GRAN_MIN_25 4
+
+/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
+ * register. Used for determining the itr/intrl granularity during
+ * initialization.
+ */
+#define ICE_MAX_AGG_BW_200G 0x0
+#define ICE_MAX_AGG_BW_100G 0x1
+#define ICE_MAX_AGG_BW_50G 0x2
+#define ICE_MAX_AGG_BW_25G 0x3
+ /* ITR granularity for different speeds */
+#define ICE_ITR_GRAN_ABOVE_25 2
+#define ICE_ITR_GRAN_MAX_25 4
/* ITR granularity in 1 us */
- u8 itr_gran_200;
- u8 itr_gran_100;
- u8 itr_gran_50;
- u8 itr_gran_25;
+ u8 itr_gran;
+ /* INTRL granularity for different speeds */
+#define ICE_INTRL_GRAN_ABOVE_25 4
+#define ICE_INTRL_GRAN_MAX_25 8
+ /* INTRL granularity in 1 us */
+ u8 intrl_gran;
+
u8 ucast_shared; /* true if VSIs can share unicast addr */
};
@@ -409,4 +443,7 @@ struct ice_hw_port_stats {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..c25e486706f3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,2668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ /* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
+/**
+ * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ bool link_up)
+{
+ u16 link_speed;
+
+ if (link_up)
+ link_speed = ICE_AQ_LINK_SPEED_40GB;
+ else
+ link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, pfe, link_speed, link_up);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ ls = &hw->port_info->phy.link_info;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
+ ICE_AQ_LINK_UP);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_get_vf_vector - get VF interrupt vector register offset
+ * @vf_msix: number of MSIx vectors per VF on a PF
+ * @vf_id: VF identifier
+ * @i: index of MSIx vector
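+ *
+ * e.g., with vf_msix = 5 and vf_id = 2, i = 0 maps to VFINT_DYN_CTLN(2),
+ * while i = 3 maps to VFINT_DYN_CTLN((5 - 1) * 2 + 2) = VFINT_DYN_CTLN(10)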
+ */
+static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
+{
+ return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
+ VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
+}
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, pf_vf_msix;
+
+ /* First, disable the VF's configuration API to prevent the OS
+ * from accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx) {
+ ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
+ vf->lan_vsi_num = 0;
+ vf->num_mac = 0;
+ }
+
+ pf_vf_msix = pf->num_vf_msix;
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = 0; i < pf_vf_msix; i++) {
+ u32 reg_idx;
+
+ reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
+ wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/***********************enable_vf routines*****************************/
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->num_vf_msix - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int tmp, i;
+
+ if (!pf->vf)
+ return;
+
+ while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Avoid wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
+ continue;
+
+ /* stop rings without wait time */
+ ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+ }
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ tmp = pf->num_alloc_vfs;
+ pf->num_vf_qps = 0;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ /* disable VF qp mappings */
+ ice_dis_vf_mappings(&pf->vf[i]);
+
+ /* Set this state so that assigned VF vectors can be
+ * reclaimed by the PF for reuse in ice_vsi_release(). No
+ * need to clear this bit since the pf->vf array is being
+ * freed anyway after this for loop.
+ */
+ set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
+ ice_free_vf_res(&pf->vf[i]);
+ }
+ }
+
+ devm_kfree(&pf->pdev->dev, pf->vf);
+ pf->vf = NULL;
+
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!pci_vfs_assigned(pf->pdev)) {
+ int vf_id;
+
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+ }
+ clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
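+
+/* Index math example for the VFLR acknowledgment above (assumed values,
+ * for illustration only): with hw->func_caps.vf_base_id = 64 and vf_id = 5,
+ * the absolute VF id is 69, giving reg_idx = 69 / 32 = 2 and
+ * bit_idx = 69 % 32 = 5, i.e. writing BIT(5) to GLGEN_VFLRSTAT(2) clears
+ * that VF's VFLR indication. The same arithmetic is used when triggering
+ * and processing VF resets below.
+ */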
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ struct ice_hw *hw;
+ int vf_abs_id, i;
+
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable the VF's configuration API during reset. The flag is re-enabled
+ * in ice_alloc_vf_res(), when it's safe again to access the VF's VSI.
+ * It's normally disabled in ice_free_vf_res(), but it's safer to do it
+ * earlier to give any VF config functions that may still be running at
+ * this point some time to finish.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!is_vflr) {
+ /* reset VF using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(1);
+ }
+}
+
+/**
+ * ice_vsi_set_pvid - Set port VLAN id for the VSI
+ * @vsi: the VSI being changed
+ * @vid: the VLAN id to set as a PVID
+ */
+static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ ctxt.info.pvid = cpu_to_le16(vid);
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.pvid = ctxt.info.pvid;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_vsi_manage_vlan_stripping(vsi, false)) {
+ dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
+ vsi->vsi_num);
+ return -ENODEV;
+ }
+
+ vsi->info.pvid = 0;
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vf_id: defines VF id to which this VSI connects.
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+}
+
+/**
+ * ice_alloc_vsi_res - Setup VF VSI and its resources
+ * @vf: pointer to the VF structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_alloc_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status = 0;
+
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ return -ENOMEM;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ /* first vector index is the VFs OICR index */
+ vf->first_vector_idx = vsi->hw_base_vector;
+ /* Since hw_base_vector holds the vector where data queue interrupts
+ * start, increment by 1 since the VF's allocated vectors include the
+ * OICR interrupt as well.
+ */
+ vsi->hw_base_vector += 1;
+
+ /* Check if a port VLAN existed before and restore it accordingly */
+ if (vf->port_vlan_id)
+ ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+
+ eth_broadcast_addr(broadcast);
+
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+
+ if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vf->dflt_lan_addr.addr);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+ }
+
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ dev_err(&pf->pdev->dev, "could not add mac filters\n");
+
+ /* Clear this bit after VF initialization since we shouldn't reclaim
+ * and reassign interrupts for synchronous or asynchronous VFR events.
+ * We don't want to reconfigure interrupts since AVF driver doesn't
+ * expect vector assignment to be changed unless there is a request for
+ * more vectors.
+ */
+ clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
+ice_alloc_vsi_res_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_alloc_vf_res - Allocate VF resources
+ * @vf: pointer to the VF structure
+ */
+static int ice_alloc_vf_res(struct ice_vf *vf)
+{
+ int status;
+
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* VF is now completely initialized */
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ return status;
+
+ice_alloc_vf_res_exit:
+ ice_free_vf_res(vf);
+ return status;
+}
+
+/**
+ * ice_ena_vf_mappings
+ * @vf: pointer to the VF structure
+ *
+ * Enable VF vectors and queues allocation by writing the details into
+ * respective registers.
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+ int abs_vf_id;
+ u32 reg;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ first = vf->first_vector_idx;
+ last = (first + pf->num_vf_msix) - 1;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* VF Vector allocation */
+ reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = first; v <= last; v++) {
+ reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
+ VPLAN_TXQ_MAPENA_TX_ENA_M);
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
+ VPLAN_RXQ_MAPENA_RX_ENA_M);
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
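+
+/* Encoding example for the contiguous mapping above (assumed values, for
+ * illustration only): a VF whose first Tx queue in the PF queue space is 16
+ * and which owns 4 queues would program VFFIRSTQ = 16 and VFNUMQ = 4 - 1 = 3,
+ * since the VFNUMQ field counts queues minus one (0 means 1 queue, 255 means
+ * 256 queues). The same scheme applies to the Rx side via VPLAN_RX_QBASE,
+ * and VPINT_ALLOC likewise encodes the first and last vector indexes.
+ */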
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available, or
+ * zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+ bool checked_min_res = false;
+ int res;
+
+ /* Start by checking if the PF can assign the max number of resources for
+ * all num_alloc_vfs.
+ * If yes, return the number per VF.
+ * If no, divide by 2 and round up, then check again.
+ * Repeat the loop until we reach a point where even the minimum
+ * resources are not available; in that case return 0.
+ */
+ res = max_res;
+ while ((res >= min_res) && !checked_min_res) {
+ int num_all_res;
+
+ num_all_res = pf->num_alloc_vfs * res;
+ if (num_all_res <= avail_res)
+ return res;
+
+ if (res == min_res)
+ checked_min_res = true;
+
+ res = DIV_ROUND_UP(res, 2);
+ }
+ return 0;
+}
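+
+/* Worked example for the halving search above (illustrative values only,
+ * not taken from any particular configuration): with num_alloc_vfs = 8,
+ * avail_res = 200, max_res = 65 and min_res = 5, the loop first checks
+ * 8 * 65 = 520 > 200, halves to DIV_ROUND_UP(65, 2) = 33 (8 * 33 = 264,
+ * still too big), then to 17 (8 * 17 = 136 <= 200) and returns 17
+ * resources per VF. If even 8 * min_res exceeded avail_res, the loop
+ * would fall through and return 0.
+ */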
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where we calculate the actual number of resources for
+ * VF VSIs; we don't reserve them ahead of time during probe. Returns success
+ * if vector and queue resources are available, otherwise returns an error
+ * code.
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+ u16 num_msix, num_txq, num_rxq;
+
+ if (!pf->num_alloc_vfs)
+ return -EINVAL;
+
+ /* Grab from the HW interrupts common pool.
+ * Note: by the time the user decides it needs more vectors in a VF,
+ * it's already too late since one must decide this prior to creating
+ * the VF interface. So the best we can do is take a guess as to what
+ * the user might want.
+ *
+ * We have two policies for vector allocation:
+ * 1. if num_alloc_vfs is from 1 to 16, then we consider this a small
+ * number of NFV VFs used for NFV appliances. Since this is a special
+ * case, we try to assign the maximum vectors per VF (65) as much as
+ * possible, based on the determine_resources algorithm.
+ * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of
+ * regular VFs which are not used for any special purpose. Hence try to
+ * grab the default number of interrupt vectors (5, as supported by the
+ * AVF driver).
+ */
+ if (pf->num_alloc_vfs <= 16) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_MAX_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_DFLT_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Number of VFs %d exceeds max VF count %d\n",
+ pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ return -EIO;
+ }
+
+ if (!num_msix)
+ return -EIO;
+
+ /* Grab from the common pool.
+ * Start by requesting the default number of queues (4, as supported by
+ * the AVF driver). Note that the main difference between queues and
+ * vectors is that the latter can only be reserved at init time, while
+ * queues can be requested by the VF at runtime through virtchnl; that
+ * is the reason we start by reserving few queues.
+ */
+ num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq)
+ return -EIO;
+
+ /* Since the AVF driver works only with queue pairs, it expects an equal
+ * number of Rx and Tx queues, so take the minimum of the available Tx
+ * and Rx queues.
+ */
+ pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+ pf->num_vf_msix = num_msix;
+
+ return 0;
+}
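+
+/* Policy example (illustrative numbers only): a PF creating 8 VFs with 200
+ * MSI-X vectors left falls under the small-VF policy, so ice_determine_res()
+ * starts from ICE_MAX_INTR_PER_VF (65) and, per the worked example above,
+ * converges on 17 vectors per VF; a PF creating 64 VFs instead starts from
+ * ICE_DFLT_INTR_PER_VF (5), since 64 * 65 vectors could never be satisfied
+ * from the common pool.
+ */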
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Clean up a VF after the hardware reset is finished. Expects the caller to
+ * have verified whether the reset finished properly, and to have ensured the
+ * minimum amount of wait time has passed. Reallocates VF resources to make
+ * the VF state active again.
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+
+ /* PF software completes the flow by notifying the VF that the reset flow
+ * is completed. This is done by clearing the reset bit in the
+ * VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT register to
+ * VFR completed (done at the end of this function).
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in ice_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into the guest OS kernel log, so it's
+ * best to avoid it.
+ */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!ice_alloc_vf_res(vf)) {
+ ice_ena_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise they would
+ * have to perform these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ */
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v, i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return false;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+ /* Call the Disable LAN Tx queue AQ command with the VFR bit set and 0
+ * queues to inform firmware about the VF reset.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+ ICE_VF_RESET, v, NULL);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once a VF has
+ * finished resetting.
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence */
+ while (v < pf->num_alloc_vfs) {
+ struct ice_vf *vf = &pf->vf[v];
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
+ }
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_free_vf_res(&pf->vf[v]);
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_cleanup_and_realloc_vf(&pf->vf[v]);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Returns true if the VF is reset, false otherwise.
+ */
+static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If the VFs have been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue.
+ */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ ice_trigger_vf_reset(vf, is_vflr);
+
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
+ vf->vf_id);
+ ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ } else {
+ /* Call the Disable LAN Tx queue AQ command even when queues are not
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+ NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M) {
+ rsd = true;
+ break;
+ }
+ }
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation.
+ */
+ if (!rsd)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_free_vf_res(vf);
+
+ ice_cleanup_and_realloc_vf(vf);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!pf->num_alloc_vfs)
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify the VF is in either the init or active state before proceeding */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VFs resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vfs;
+ int i, ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ pf->num_alloc_vfs = 0;
+ goto err_unroll_intr;
+ }
+ /* allocate memory */
+ vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_unroll_sriov;
+ }
+ pf->vf = vfs;
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].vf_sw_id = pf->first_sw;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
+
+ /* Set this state so that PF driver does VF vector assignment */
+ set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
+ }
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+ if (!ice_reset_all_vfs(pf, false)) {
+ ret = -EIO;
+ goto err_unroll_sriov;
+ }
+
+ goto err_unroll_intr;
+
+err_unroll_sriov:
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ return ret;
+}
+
+/**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to the PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state.
+ * Returns false otherwise
+ */
+static bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -ENODEV;
+ }
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return num_vfs;
+
+ if (num_vfs > pf->num_vfs_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->num_vfs_supported);
+ return -ENOTSUPP;
+ }
+
+ dev_info(dev, "Allocating %d VFs\n", num_vfs);
+ err = ice_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
+
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return num_vfs;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * This function is called when the user updates the number of VFs in sysfs.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return ice_pci_sriov_ena(pf, num_vfs);
+
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int vf_id;
+ u32 reg;
+
+ if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !pf->num_alloc_vfs)
+ return;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+
+ clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read the GLGEN_VFLRSTAT register to find out which VFs triggered VFLR */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, true);
+ }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+ ice_vc_notify_vf_reset(vf);
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ enum ice_status aq_ret;
+ struct ice_pf *pf;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_inval_msgs++;
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
+ if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ return -EIO;
+ }
+ } else {
+ vf->num_valid_msgs++;
+ /* reset the invalid counter, if a valid message is received. */
+ vf->num_inval_msgs = 0;
+ }
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
+ (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_resource *vfres = NULL;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = ICE_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi->info.pvid)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue are equal for VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = pf->num_vf_msix;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->dflt_lan_addr.addr);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ (u8 *)vfres, len);
+
+ devm_kfree(&pf->pdev->dev, vfres);
+ return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself,
+ * unlike other virtchnl messages, PF driver
+ * doesn't send the response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_find_vsi_from_id
+ * @pf: the PF structure to search for the VSI
+ * @id: id of the VSI it is searching for
+ *
+ * searches for the VSI with the given id
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ */
+static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @qid: VSI relative queue id
+ *
+ * check for the valid queue id
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+ struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, vrk->key, NULL, 0);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_eth_stats stats;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ memset(&stats, 0, sizeof(struct ice_eth_stats));
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ if (ice_vsi_start_rx_rings(vsi))
+ aq_ret = ICE_ERR_PARAM;
+
+ /* Set flag to indicate that queues are enabled */
+ if (!aq_ret)
+ set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop tx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ if (ice_vsi_stop_rx_rings(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop rx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ /* Clear enabled queues flag */
+ if (!aq_ret)
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ u16 vsi_id, vsi_q_id, vector_id;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi = NULL;
+ struct ice_pf *pf = vf->pf;
+ enum ice_status aq_ret = 0;
+ unsigned long qmap;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (vector_id >= pf->hw.func_caps.common_cap.num_msix_vectors ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for invalid queue indexes */
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+ }
+
+ if (vsi)
+ ice_vsi_cfg_msix(vsi);
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ /* copy Tx queue info from VF into VSI */
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ /* copy Rx queue info from VF into vsi */
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ }
+
+ /* The VF may request fewer queues than allocated by default, so
+ * update the VSI with the new number.
+ */
+ vsi->num_txq = qci->num_queue_pairs;
+ vsi->num_rxq = qci->num_queue_pairs;
+
+ if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ aq_ret = 0;
+ else
+ aq_ret = ICE_ERR_PARAM;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+static bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if MAC filters are being set, false otherwise
+ *
+ * add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ enum ice_status ret;
+ LIST_HEAD(mac_list);
+ struct ice_vsi *vsi;
+ int mac_count = 0;
+ int i;
+
+ if (set)
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ else
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *maddr = al->list[i].addr;
+
+ if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
+ is_broadcast_ether_addr(maddr)) {
+ if (set) {
+ /* VF is trying to add filters that the PF
+ * already added. Just continue.
+ */
+ dev_info(&pf->pdev->dev,
+ "mac %pM already set for VF %d\n",
+ maddr, vf->vf_id);
+ continue;
+ } else {
+ /* VF can't remove dflt_lan_addr/bcast mac */
+ dev_err(&pf->pdev->dev,
+ "can't remove mac %pM for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+ }
+
+ /* check for the invalid cases and bail if necessary */
+ if (is_zero_ether_addr(maddr)) {
+ dev_err(&pf->pdev->dev,
+ "invalid mac %pM provided for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (is_unicast_ether_addr(maddr) &&
+ !ice_can_vf_change_mac(vf)) {
+ dev_err(&pf->pdev->dev,
+ "can't change unicast mac for untrusted VF %d\n",
+ vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* get here if maddr is multicast or if VF can change mac */
+ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto handle_mac_exit;
+ }
+ mac_count++;
+ }
+
+ /* program the updated filter list */
+ if (set)
+ ret = ice_add_mac(&pf->hw, &mac_list);
+ else
+ ret = ice_remove_mac(&pf->hw, &mac_list);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "can't update mac filters for VF %d, error %d\n",
+ vf->vf_id, ret);
+ } else {
+ if (set)
+ vf->num_mac += mac_count;
+ else
+ vf->num_mac -= mac_count;
+ }
+
+handle_mac_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &mac_list);
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will send a virtchnl message
+ * response informing the VF of the number of available queue pairs.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ int req_queues = vfres->num_queue_pairs;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
+ int cur_queues;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = pf->num_vf_qps;
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ if (req_queues <= 0) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request %d queues. Ignoring.\n",
+ vf->vf_id, req_queues);
+ } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ } else {
+ /* request is successful, so reset the VF */
+ vf->num_req_qs = req_queues;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev,
+ "VF %d granted request of %d queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ aq_ret, (u8 *)vfres, sizeof(*vfres));
+}
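+
+/* Negotiation example for the bounds checks above (assumed values, for
+ * illustration only): a VF currently holding cur_queues = 4 pairs that
+ * requests 16 when only tx_rx_queue_left = 8 pairs remain exceeds the pool
+ * by 12 > 8, so the PF answers with num_queue_pairs = 8 + 4 = 12; the VF
+ * may then retry with a request of at most 12.
+ */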
+
+/**
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN id being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * program VF Port VLAN id and/or qos
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
+{
+ u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (vlan_id > ICE_MAX_VLANID || qos > 7) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ /* duplicate request, so just return success */
+ dev_info(&pf->pdev->dev,
+ "Duplicate pvid %d request\n", vlanprio);
+ return ret;
+ }
+
+ /* If pvid, then remove all filters on the old VLAN */
+ if (vsi->info.pvid)
+ ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+
+ if (vlan_id || qos) {
+ ret = ice_vsi_set_pvid(vsi, vlanprio);
+ if (ret)
+ goto error_set_pvid;
+ } else {
+ ice_vsi_kill_pvid(vsi);
+ }
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter for each MAC */
+ ret = ice_vsi_add_vlan(vsi, vlan_id);
+ if (ret)
+ goto error_set_pvid;
+ }
+
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+error_set_pvid:
+ return ret;
+}
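+
+/* Packing example for the vlanprio value above (assumed values): vlan_id =
+ * 100 (0x064) with qos = 3 yields 100 | (3 << ICE_VLAN_PRIORITY_S) = 0x6064
+ * when the priority shift is 13, matching the 802.1Q TCI layout (PCP in
+ * bits 15:13, VID in bits 11:0) that the VSI PVID field expects.
+ */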
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN id
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ aq_ret = ICE_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->info.pvid) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
+ dev_err(&pf->pdev->dev,
+ "%sable VLAN stripping failed for VSI %i\n",
+ add_v ? "en" : "dis", vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ if (!ice_vsi_add_vlan(vsi, vid)) {
+ vf->num_vlan++;
+ set_bit(vid, vsi->active_vlans);
+
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid))
+ if (ice_cfg_vlan_pruning(vsi, true))
+ aq_ret = ICE_ERR_PARAM;
+ } else {
+ aq_ret = ICE_ERR_PARAM;
+ }
+ }
+ } else {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ /* Make sure ice_vsi_kill_vlan is successful before
+ * updating VLAN information
+ */
+ if (!ice_vsi_kill_vlan(vsi, vid)) {
+ vf->num_vlan--;
+ clear_bit(vid, vsi->active_vlans);
+
+ /* Disable VLAN pruning when removing VLAN 0 */
+ if (unlikely(!vid))
+ ice_cfg_vlan_pruning(vsi, false);
+ }
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN id
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN id
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, true))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, false))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * called from the common asq/arq handler to
+ * process request from VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ int err = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ /* Perform additional checks specific to RSS and Virtchnl */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
+ err = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ice_vc_get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ice_vc_get_vf_res_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ice_vc_reset_vf_msg(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ice_vc_add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ice_vc_del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ice_vc_cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ice_vc_ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ice_vc_dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ice_vc_request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ice_vc_cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ice_vc_config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ice_vc_config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ice_vc_get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ice_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ice_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ice_vc_ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ice_vc_dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* Errors from the opcode handlers are logged here but not
+ * propagated; the PF keeps servicing other pending VF messages.
+ */
+ dev_info(&pf->pdev->dev,
+ "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+}
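
A note on the dispatcher above: every message first passes the generic virtchnl_vc_validate_vf_msg() check and then, for the RSS opcodes, an extra size constraint, before any handler runs. A minimal standalone sketch of that two-phase shape — all names and sizes below are hypothetical; only the structure mirrors the driver:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

enum example_op { EXAMPLE_OP_CFG_RSS_KEY = 1 };

struct example_rss_key {
	uint16_t key_len;
	uint8_t key[];
};

#define EXAMPLE_HKEY_SIZE 52 /* hypothetical stand-in for the HW key size */

/* Phase 1: generic sanity checks; phase 2: opcode-specific limits. */
static int example_validate(uint32_t op, const void *msg, size_t len)
{
	if (!msg || !len)
		return -EINVAL;

	if (op == EXAMPLE_OP_CFG_RSS_KEY) {
		const struct example_rss_key *k = msg;

		if (len < sizeof(*k) || k->key_len != EXAMPLE_HKEY_SIZE)
			return -EINVAL;
	}
	return 0;
}

Keeping the opcode-specific constraints next to the generic check means a malformed buffer is rejected once, before the switch, rather than inside each handler.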
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+ ICE_VLAN_PRIORITY_S;
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int status;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (ena) {
+ ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+ }
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+ if (status) {
+ dev_dbg(&pf->pdev->dev,
+ "Error %d, failed to update VSI* parameters\n", status);
+ return -EIO;
+ }
+
+ vf->spoofchk = ena;
+ vsi->info.sec_flags = ctx.info.sec_flags;
+ vsi->info.sw_flags2 = ctx.info.sw_flags2;
+
+ return status;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * Program the VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ * flow will use the updated dflt_lan_addr and add a MAC filter
+ * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
+ * set the MAC address for this VF.
+ */
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ vf->pf_set_mac = true;
+ netdev_info(netdev,
+ "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ vf_id, mac);
+
+ ice_vc_dis_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ /* Check if already trusted */
+ if (trusted == vf->trusted)
+ return 0;
+
+ vf->trusted = trusted;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
+
+ return 0;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_vf *vf;
+ struct ice_hw *hw;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ hw = &pf->hw;
+ ls = &pf->hw.port_info->phy.link_info;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+
+ /* Notify the VF of its new link state */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+
+ return 0;
+}
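
Each ndo callback above (ice_get_vf_cfg, ice_set_vf_spoofchk, ice_set_vf_mac, ice_set_vf_trust, ice_set_vf_link_state) opens with the same two guards: a bounds check on vf_id and a test for ICE_VF_STATE_INIT. A hypothetical helper capturing that idiom — a refactoring sketch only, not part of this patch, assuming the usual linux/err.h helpers:

/* Hypothetical sketch: centralize the vf_id bounds check and the
 * ICE_VF_STATE_INIT guard that every ndo callback above repeats.
 */
static struct ice_vf *ice_example_get_vf(struct ice_pf *pf, int vf_id)
{
	struct ice_vf *vf;

	if (vf_id >= pf->num_alloc_vfs)
		return ERR_PTR(-EINVAL);

	vf = &pf->vf[vf_id];
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		return ERR_PTR(-EBUSY);

	return vf;
}

Callers would then use IS_ERR()/PTR_ERR() on the result instead of repeating both checks.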
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..10131e0180f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_PF_H_
+#define _ICE_VIRTCHNL_PF_H_
+#include "ice.h"
+
+#define ICE_MAX_VLANID 4095
+#define ICE_VLAN_PRIORITY_S 12
+#define ICE_VLAN_M 0xFFF
+#define ICE_PRIORITY_M 0x7000
+
+/* Restrict the number of MAC addresses and VLANs a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_MAX_MACADDR_PER_VF 12
+
+/* Malicious Driver Detection */
+#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
+#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0,
+ ICE_VF_STATE_ACTIVE,
+ ICE_VF_STATE_ENA,
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ /* state to indicate if PF needs to do vector assignment for VF.
+ * This needs to be set during first-time VF initialization or later
+ * when the VF asks for more vectors through a virtchnl OP.
+ */
+ ICE_VF_STATE_CFG_INTR,
+ ICE_VF_STATES_NBITS
+};
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_L2 = 0,
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct ice_pf *pf;
+
+ s16 vf_id; /* VF id in the PF space */
+ u32 driver_caps; /* reported by VF driver */
+ int first_vector_idx; /* first vector index of this VF */
+ struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ struct virtchnl_ether_addr dflt_lan_addr;
+ u16 port_vlan_id;
+ u8 pf_set_mac; /* VF MAC address set by VMM admin */
+ u8 trusted;
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_inval_msgs; /* number of consecutive invalid msgs */
+ u64 num_valid_msgs; /* number of valid msgs detected */
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ u8 link_forced;
+ u8 link_up; /* only valid if VF link is forced */
+ u8 spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+
+int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+
+int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+#else /* CONFIG_PCI_IOV */
+#define ice_process_vflr_event(pf) do {} while (0)
+#define ice_free_vfs(pf) do {} while (0)
+#define ice_vc_process_vf_msg(pf, event) do {} while (0)
+#define ice_vc_notify_link_state(pf) do {} while (0)
+#define ice_vc_notify_reset(pf) do {} while (0)
+
+static inline bool
+ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+ bool __always_unused is_vflr)
+{
+ return true;
+}
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_PF_H_ */
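
The #else branch above follows the usual kernel stub convention: with CONFIG_PCI_IOV disabled, the same symbols exist as inline stubs returning -EOPNOTSUPP (or as no-op macros), so callers never need an #ifdef of their own. A sketch, assuming a caller elsewhere in the ice driver:

/* Sketch only: compiles unchanged with CONFIG_PCI_IOV=y or =n; with
 * =n this resolves to the inline stub and returns -EOPNOTSUPP.
 */
static int ice_example_trust_vf0(struct net_device *netdev)
{
	return ice_set_vf_trust(netdev, 0, true);
}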
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5414685189ce..ca6b0c458e4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5c6fd42e90ed..7a7679e7be84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ixgbe_queue_stats {
@@ -271,6 +275,7 @@ enum ixgbe_ring_state_t {
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_TX_XDP_RING,
+ __IXGBE_TX_DISABLED,
};
#define ring_uses_build_skb(ring) \
@@ -347,6 +352,10 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca; /* ZC allocator anchor */
+ u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
+ u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -764,6 +773,11 @@ struct ixgbe_adapter {
#ifdef CONFIG_XFRM_OFFLOAD
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_XFRM_OFFLOAD */
+
+ /* AF_XDP zero-copy */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 970f71d5da04..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3485,17 +3485,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
}
/**
- * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
- * @hw: pointer to hardware structure
- */
-bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
-{
- if (hw->mac.ops.fw_recovery_mode)
- return hw->mac.ops.fw_recovery_mode(hw);
- return false;
-}
-
-/**
* ixgbe_get_device_caps_generic - Get additional device capabilities
* @hw: pointer to hardware structure
* @device_caps: the EEPROM word with the extra device capabilities
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index d361f570ca37..62e6499e4146 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
int txr_remaining = adapter->num_tx_queues;
int xdp_remaining = adapter->num_xdp_queues;
int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
- int err;
+ int err, i;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
xdp_idx += xqpv;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->rx_ring[i])
+ adapter->rx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i])
+ adapter->tx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ if (adapter->xdp_ring[i])
+ adapter->xdp_ring[i]->ring_idx = i;
+ }
+
return 0;
err_out:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 140e87a10ff5..51268772a999 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -34,12 +34,14 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
+#include <net/xdp_sock.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
}
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
{
u32 mask;
@@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
u32 flags = rx_ring->q_vector->adapter->flags;
@@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
}
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *netdev = rx_ring->netdev;
@@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED BIT(0)
-#define IXGBE_XDP_TX BIT(1)
-#define IXGBE_XDP_REDIR BIT(2)
-
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf);
-
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
@@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ bool wd = ring->xsk_umem ?
+ ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
+ ixgbe_clean_tx_irq(q_vector, ring, budget);
+
+ if (!wd)
clean_complete = false;
}
@@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+ int cleaned = ring->xsk_umem ?
+ ixgbe_clean_rx_irq_zc(q_vector, ring,
+ per_ring_budget) :
+ ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
@@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter,
+ BIT_ULL(q_vector->v_idx));
+ }
return min(work_done, budget - 1);
}
@@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
+ ring->xsk_umem = NULL;
+ if (ring_is_xdp(ring))
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
IXGBE_WRITE_FLUSH(hw);
@@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
else
mtqc |= IXGBE_MTQC_64VF;
} else {
- if (tcs > 4)
+ if (tcs > 4) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- else if (tcs > 1)
+ } else if (tcs > 1) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else
- mtqc = IXGBE_MTQC_64Q_1PB;
+ } else {
+ u8 max_txq = adapter->num_tx_queues +
+ adapter->num_xdp_queues;
+ if (max_txq > 63)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
@@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ if (rx_ring->xsk_umem) {
+ u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ /* If the MAC supports setting RXDCTL.RLPML,
+ * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
+ * RXDCTL.RLPML is set to the actual UMEM buffer
+ * size. If not, the buffer size is programmed with
+ * 1k granularity, and frames larger than the UMEM
+ * buffer size rounded down to that granularity
+ * will be dropped.
+ */
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
+ } else {
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ if (ring->xsk_umem) {
+ ring->zca.free = ixgbe_zca_free;
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca));
+
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+ }
+
/* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
+ if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+ rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
+
+ ring->rx_buf_len = xsk_buf_len;
+ }
+
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
sizeof(struct ixgbe_rx_buffer) * ring->count);
@@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+ if (ring->xsk_umem)
+ ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
+ else
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
+ u64 action;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
+ action = filter->action;
+ if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
+ action =
+ (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
filter->sw_idx,
- (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ (action == IXGBE_FDIR_DROP_QUEUE) ?
IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[filter->action]->reg_idx);
+ adapter->rx_ring[action]->reg_idx);
}
spin_unlock(&adapter->fdir_perfect_lock);
@@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_ring->xsk_umem) {
+ ixgbe_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
@@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
}
}
+skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (tx_ring->xsk_umem) {
+ ixgbe_xsk_clean_tx_ring(tx_ring);
+ goto out;
+ }
+
while (i != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
@@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
if (!ring_is_xdp(tx_ring))
netdev_tx_reset_queue(txring_txq(tx_ring));
+out:
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = -1;
- int size, err;
+ int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index) < 0)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- goto err;
- }
-
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
@@ -8102,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
- IXGBE_TXD_CMD_RS)
-
static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
@@ -8457,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf)
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf)
{
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
@@ -8680,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+ return NETDEV_TX_BUSY;
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
@@ -10191,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
+ case XDP_QUERY_XSK_UMEM:
+ return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ case XDP_SETUP_XSK_UMEM:
+ return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+
default:
return -EINVAL;
}
}
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -10226,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(!ring))
return -ENXIO;
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+ return -ENXIO;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -10287,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
+ .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
};
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = tx_ring->reg_idx;
+ int wait_loop;
+ u32 txdctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+
+ /* delay mechanism from ixgbe_disable_tx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ ixgbe_disable_txr_hw(adapter, tx_ring);
+}
+
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = rx_ring->reg_idx;
+ int wait_loop;
+ u32 rxdctl;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* delay mechanism from ixgbe_disable_rx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
+{
+ memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+{
+ memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+}
+
+/**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function disables the Rx, Tx, and XDP Tx rings at the given
+ * index. It assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ ixgbe_disable_txr(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_disable_txr(adapter, xdp_ring);
+ ixgbe_disable_rxr_hw(adapter, rx_ring);
+
+ if (xdp_ring)
+ synchronize_sched();
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ ixgbe_clean_tx_ring(tx_ring);
+ if (xdp_ring)
+ ixgbe_clean_tx_ring(xdp_ring);
+ ixgbe_clean_rx_ring(rx_ring);
+
+ ixgbe_reset_txr_stats(tx_ring);
+ if (xdp_ring)
+ ixgbe_reset_txr_stats(xdp_ring);
+ ixgbe_reset_rxr_stats(rx_ring);
+}
+
+/**
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function enables the Rx, Tx, and XDP Tx rings at the given
+ * index. It assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+
+ ixgbe_configure_tx_ring(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_configure_tx_ring(adapter, xdp_ring);
+ ixgbe_configure_rx_ring(adapter, rx_ring);
+
+ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+}
+
/**
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
new file mode 100644
index 000000000000..53d4089f5644
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _IXGBE_TXRX_COMMON_H_
+#define _IXGBE_TXRX_COMMON_H_
+
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf);
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
+
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid);
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid);
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget);
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget);
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+
+#endif /* #define _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
new file mode 100644
index 000000000000..65c3e2c979d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "ixgbe.h"
+#include "ixgbe_txrx_common.h"
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ bool xdp_on = READ_ONCE(adapter->xdp_prog);
+ int qid = ring->ring_idx;
+
+ if (!xdp_on || !adapter->xsk_umems ||
+ qid >= adapter->num_xsk_umems || !adapter->xsk_umems[qid])
+ return NULL;
+
+ return adapter->xsk_umems[qid];
+}
+
+static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
+{
+ if (adapter->xsk_umems)
+ return 0;
+
+ adapter->num_xsk_umems_used = 0;
+ adapter->num_xsk_umems = adapter->num_rx_queues;
+ adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
+ sizeof(*adapter->xsk_umems),
+ GFP_KERNEL);
+ if (!adapter->xsk_umems) {
+ adapter->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ int err;
+
+ err = ixgbe_alloc_xsk_umems(adapter);
+ if (err)
+ return err;
+
+ adapter->xsk_umems[qid] = umem;
+ adapter->num_xsk_umems_used++;
+
+ return 0;
+}
+
+static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
+{
+ adapter->xsk_umems[qid] = NULL;
+ adapter->num_xsk_umems_used--;
+
+ if (adapter->num_xsk_umems_used == 0) {
+ kfree(adapter->xsk_umems);
+ adapter->xsk_umems = NULL;
+ adapter->num_xsk_umems = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i, j;
+ dma_addr_t dma;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma))
+ goto out_unmap;
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ for (j = 0; j < i; j++) {
+ dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ umem->pages[j].dma = 0;
+ }
+
+ return -1;
+}
+
+static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ if (adapter->xsk_umems[qid])
+ return -EBUSY;
+ }
+
+ reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ixgbe_xsk_umem_dma_map(adapter, umem);
+ if (err)
+ return err;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ err = ixgbe_add_xsk_umem(adapter, umem, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return err;
+}
+
+static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+{
+ bool if_running;
+
+ if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
+ !adapter->xsk_umems[qid])
+ return -EINVAL;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
+ ixgbe_remove_xsk_umem(adapter, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return 0;
+}
+
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid)
+{
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ *umem = adapter->xsk_umems[qid];
+ return 0;
+ }
+
+ *umem = NULL;
+ return 0;
+}
+
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid)
+{
+ return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
+ ixgbe_xsk_umem_disable(adapter, qid);
+}
+
+static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IXGBE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf)) {
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+static struct ixgbe_rx_buffer *
+ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, unsigned int size)
+{
+ struct ixgbe_rx_buffer *bi;
+
+ bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ bi->dma, 0,
+ size,
+ DMA_BIDIRECTIONAL);
+
+ return bi;
+}
+
+static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *obi)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ixgbe_rx_buffer *nbi;
+
+ nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ nbi->dma = obi->dma & mask;
+ nbi->dma += hr;
+
+ nbi->addr = (void *)((unsigned long)obi->addr & mask);
+ nbi->addr += hr;
+
+ nbi->handle = obi->handle & mask;
+ nbi->handle += rx_ring->xsk_umem->headroom;
+
+ obi->addr = NULL;
+ obi->skb = NULL;
+}
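
The recycling above relies on the UMEM chunk mask: AND-ing any of the three cursors (dma, addr, handle) with the mask snaps it back to the start of its chunk, after which the fixed headroom is re-applied. A standalone illustration with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE 2048ULL		/* power of two, as UMEM requires */
#define CHUNK_MASK (~(CHUNK_SIZE - 1))
#define HEADROOM   256ULL		/* umem headroom + XDP_PACKET_HEADROOM */

int main(void)
{
	uint64_t handle = 3 * CHUNK_SIZE + 700;	/* somewhere inside chunk 3 */
	uint64_t base = handle & CHUNK_MASK;	/* snapped back to chunk start */

	printf("chunk base %llu, data %llu\n",
	       (unsigned long long)base,
	       (unsigned long long)(base + HEADROOM));
	return 0;
}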
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+ struct ixgbe_rx_buffer *bi;
+ struct ixgbe_ring *rx_ring;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(alloc, struct ixgbe_ring, zca);
+ hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ mask = rx_ring->xsk_umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ bi = rx_ring->rx_buffer_info;
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+ bi->addr += hr;
+
+ bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
+
+static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = bi->addr;
+ u64 handle, hr;
+
+ if (addr)
+ return true;
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, hr;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ handle &= rx_ring->xsk_umem->chunk_mask;
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+static __always_inline bool
+__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
+ bool alloc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi))
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
+ bool ok = true;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return true;
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ if (!alloc(rx_ring, bi)) {
+ ok = false;
+ break;
+ }
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBE_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+
+ return ok;
+}
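
The loop above uses a negative-index ring walk: i is biased by -count up front, so the wrap test becomes a cheap `if (!i)` instead of a compare against count on every iteration (the same trick appears in ixgbe_clean_xdp_tx_irq later in this file). A standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 4

int main(void)
{
	uint16_t i = 2;			/* next_to_use */
	int steps = 6;

	i -= RING_COUNT;		/* bias: i is "negative" in u16 terms */
	while (steps--) {
		uint16_t idx = i + RING_COUNT;	/* real slot index */

		printf("slot %u\n", (unsigned)idx);
		i++;
		if (!i)			/* wrapped past the end */
			i -= RING_COUNT;
	}
	return 0;			/* prints slots 2 3 0 1 2 3 */
}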
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
+{
+ __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_slow_zc);
+}
+
+static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
+ u16 count)
+{
+ return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_zc);
+}
+
+static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ return skb;
+}
+
+static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(IXGBE_RX_DESC(rx_ring, ntc));
+}
+
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < budget)) {
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ unsigned int size;
+
+ /* return some buffers to hardware; refilling one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ failure = failure ||
+ !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+
+ if (unlikely(!ixgbe_test_staterr(rx_desc,
+ IXGBE_RXD_STAT_EOP))) {
+ struct ixgbe_rx_buffer *next_bi;
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ next_bi =
+ &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ next_bi->skb = ERR_PTR(-EINVAL);
+ continue;
+ }
+
+ if (unlikely(bi->skb)) {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ xdp.data = bi->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = bi->handle;
+
+ xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+
+ if (xdp_res) {
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ bi->addr = NULL;
+ bi->skb = NULL;
+ } else {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+ ixgbe_rx_skb(q_vector, skb);
+ }
+
+ if (xdp_xmit & IXGBE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IXGBE_XDP_TX) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return failure ? budget : (int)total_rx_packets;
+}
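
ixgbe_clean_rx_irq_zc() defers the expensive completion work: per-packet XDP verdicts are OR-ed into xdp_xmit, and the redirect flush plus the Tx tail write happen once per poll rather than per packet. A standalone sketch of that pattern, with the driver calls stubbed out:

#include <stdint.h>
#include <stdio.h>

#define XDP_FLAG_TX    (1u << 1)	/* mirrors IXGBE_XDP_TX */
#define XDP_FLAG_REDIR (1u << 2)	/* mirrors IXGBE_XDP_REDIR */

static void flush_redirects(void) { puts("xdp_do_flush_map()"); }
static void ring_doorbell(void)   { puts("writel(next_to_use, tail)"); }

int main(void)
{
	uint32_t verdicts[] = { XDP_FLAG_TX, XDP_FLAG_REDIR, XDP_FLAG_TX };
	uint32_t xdp_xmit = 0;

	for (unsigned i = 0; i < 3; i++)
		xdp_xmit |= verdicts[i];	/* accumulate per packet */

	if (xdp_xmit & XDP_FLAG_REDIR)
		flush_redirects();		/* once per napi poll */
	if (xdp_xmit & XDP_FLAG_TX)
		ring_doorbell();		/* single tail bump */
	return 0;
}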
+
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+
+ while (i != rx_ring->next_to_alloc) {
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+ i++;
+ bi++;
+ if (i == rx_ring->count) {
+ i = 0;
+ bi = rx_ring->rx_buffer_info;
+ }
+ }
+}
+
+static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool work_done = true;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+
+ while (budget-- > 0) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ break;
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
+ tx_bi->bytecount = len;
+ tx_bi->xdpf = NULL;
+
+ tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ixgbe_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return !!budget && work_done;
+}
+
+static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_bi)
+{
+ xdp_return_frame(tx_bi->xdpf);
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+}
+
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget)
+{
+ unsigned int total_packets = 0, total_bytes = 0;
+ u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool xmit_done;
+
+ tx_bi = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ total_bytes += tx_bi->bytecount;
+ total_packets += tx_bi->gso_segs;
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ tx_bi++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_bi = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+
+ xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ return budget > 0 && xmit_done;
+}
+
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ring *ring;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(adapter->xdp_prog))
+ return -ENXIO;
+
+ if (qid >= adapter->num_xdp_queues)
+ return -ENXIO;
+
+ if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+ return -ENXIO;
+
+ ring = adapter->xdp_ring[qid];
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ u64 eics = BIT_ULL(ring->q_vector->v_idx);
+
+ ixgbe_irq_rearm_queues(adapter, eics);
+ }
+
+ return 0;
+}
+
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct ixgbe_tx_buffer *tx_bi;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ tx_bi = &tx_ring->tx_buffer_info[ntc];
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ ntc++;
+ if (ntc == tx_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+}
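
For context on ixgbe_xsk_async_xmit() above: it is the driver half of the AF_XDP wakeup path; the user-space half is a plain sendto() on the bound AF_XDP socket, as in the kernel's xdpsock sample. An abbreviated sketch — UMEM registration and ring mmap setup are omitted:

#include <sys/socket.h>

/* Kick the kernel so the driver runs its zero-copy Tx path; this is
 * what ultimately invokes .ndo_xsk_async_xmit on the bound queue.
 */
static int xsk_kick_tx(int xsk_fd)
{
	return sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}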
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 997cea675a37..e8a3231be0bf 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
struct sa_mbx_msg *sam;
- u16 msglen;
int ret;
/* send the important bits to the PF */
@@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
msgbuf[0] = IXGBE_VF_IPSEC_ADD;
- msglen = sizeof(*sam) + sizeof(msgbuf[0]);
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
+ ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- msglen = sizeof(msgbuf[0]) * 2;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
+ ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
if (ret)
goto out;
@@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
+ err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
+ err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
if (err)
goto out;
@@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
}
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
- if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+ if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, sa_idx, xs->xso.offload_handle);
return 0;
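
Reviewer note: the three length fixes above follow from one convention. If I read ixgbevf's mailbox ops right, write_posted()/read_posted() take their size argument in 32-bit words, while the removed msglen arithmetic produced byte counts that could exceed the mailbox size. A fragment illustrating the distinction (hw is the adapter's ixgbe_hw, as in the hunks above):

	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	int ret;

	/* wrong: sizeof(msgbuf) is a byte count (4x the word count) */
	/* right: two u32 words, the opcode plus one response word   */
	ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);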
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2373cd41a625..14f9679c957c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1755,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -2645,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -2664,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
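
Worth spelling out why this fix matters: for VLAN-tagged frames skb->protocol carries the outer ETH_P_8021Q/ETH_P_8021AD ethertype, so the old code matched neither branch and silently disabled checksum offload. vlan_get_protocol() yields the encapsulated L3 protocol instead. A simplified model of what it resolves (assumption: one in-packet tag with the header in the linear area; the real helper in <linux/if_vlan.h> also handles accelerated tags and QinQ stacks):

	static __be16 l3_proto_of(const struct sk_buff *skb)
	{
		__be16 proto = skb->protocol;

		if (proto == htons(ETH_P_8021Q) || proto == htons(ETH_P_8021AD)) {
			const struct vlan_hdr *vh =
				(const struct vlan_hdr *)skb->data;

			proto = vh->h_vlan_encapsulated_proto;
		}
		return proto;
	}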
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ef7a44eb9adb..1d743bd5d212 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "en_stats.h"
#include "en/fs.h"
+extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde694d20..8657e0f26995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
- __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ mlx5e_dbg(HW, priv,
+ "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq,
+ tuple->ip_proto, err);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 35aca9a8e3d6..bc034958c846 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4318,7 +4318,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 90c7607b1f44..b7d4896c7c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
+ s->rx_arfs_err += rq_stats->arfs_err;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -1161,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};
static const struct counter_desc sq_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a5fb3dc27f50..77f74ce11280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -106,6 +106,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_busy;
u64 rx_cache_waive;
u64 rx_congst_umr;
+ u64 rx_arfs_err;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -202,6 +203,7 @@ struct mlx5e_rq_stats {
u64 cache_busy;
u64 cache_waive;
u64 congst_umr;
+ u64 arfs_err;
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 82723a0e509a..6de21d9f4fad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -532,7 +532,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec, u8 *match_prio)
+ struct mlx5_flow_spec *spec, u8 *match_prio,
+ struct netlink_ext_ack *extack)
{
void *headers_c, *headers_v;
u8 prio_val, prio_mask = 0;
@@ -540,8 +541,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
- netdev_warn(priv->netdev,
- "only PCP trust state supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "only PCP trust state supported for hairpin");
return -EOPNOTSUPP;
}
#endif
@@ -557,8 +558,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
if (!vlan_present || !prio_mask) {
prio_val = UNKNOWN_MATCH_PRIO;
} else if (prio_mask != 0x7) {
- netdev_warn(priv->netdev,
- "masked priority match not supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "masked priority match not supported for hairpin");
return -EOPNOTSUPP;
}
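
Reviewer note: most of the churn in this file is one mechanical conversion: netdev_warn() strings become extack messages, so the failure reason reaches the netlink caller (tc prints it directly) instead of only landing in dmesg. The shape of the pattern, sketched with a hypothetical validation helper:

	/* example_check() is hypothetical; NL_SET_ERR_MSG_MOD() prefixes
	 * the string with KBUILD_MODNAME and stores it in the ack that
	 * is returned to user space.
	 */
	static int example_check(struct mlx5e_priv *priv,
				 struct netlink_ext_ack *extack)
	{
		if (!(priv->netdev->features & NETIF_F_HW_TC)) {
			NL_SET_ERR_MSG_MOD(extack, "hw-tc-offload is off");
			return -EOPNOTSUPP;
		}
		return 0;
	}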
@@ -568,7 +569,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
int peer_ifindex = parse_attr->mirred_ifindex;
struct mlx5_hairpin_params params;
@@ -583,12 +585,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
- netdev_warn(priv->netdev, "hairpin is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
}
peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+ extack);
if (err)
return err;
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -677,7 +680,8 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
@@ -694,7 +698,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
int err, dest_ix = 0;
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err) {
rule = ERR_PTR(err);
goto err_add_hairpin_flow;
@@ -753,6 +757,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
rule = ERR_CAST(priv->fs.tc.t);
@@ -819,12 +825,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -838,7 +846,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex);
err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
- out_dev, &encap_dev, flow);
+ out_dev, &encap_dev, flow, extack);
if (err) {
rule = ERR_PTR(err);
if (err != -EAGAIN)
@@ -1105,6 +1113,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1142,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
@@ -1149,6 +1160,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
vxlan_match_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport");
netdev_warn(priv->netdev,
"IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
return -EOPNOTSUPP;
@@ -1225,6 +1238,16 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+ if (mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB
+ (priv->mdev,
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ return -EOPNOTSUPP;
+ }
}
/* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1270,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u8 *match_level)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1301,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1368,6 +1393,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1550,8 +1578,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- ft_field_support.outer_ipv4_ttl))
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
return -EOPNOTSUPP;
+ }
if (mask->tos || mask->ttl)
*match_level = MLX5_MATCH_L3;
@@ -1593,6 +1624,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
udp_dport, ntohs(key->dst));
break;
default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only UDP and TCP transports are supported for L4 matching");
netdev_err(priv->netdev,
"Only UDP and TCP transport are supported\n");
return -EINVAL;
@@ -1629,6 +1662,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1643,6 +1677,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
match_level, esw->offloads.inline_mode);
@@ -1744,7 +1780,8 @@ static struct mlx5_fields fields[] = {
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1783,11 +1820,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
continue;
if (s_mask && a_mask) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't set and add to the same HW field");
printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
return -EOPNOTSUPP;
}
if (nactions == max_actions) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
return -EOPNOTSUPP;
}
@@ -1820,6 +1861,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
if (first < next_z && next_z < last) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rewrite of few sub-fields isn't supported");
printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
@@ -1878,7 +1921,8 @@ static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct tc_action *a, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
int nkeys, i, err = -EOPNOTSUPP;
@@ -1896,12 +1940,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "legacy pedit isn't offloaded");
goto out_err;
}
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+ NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
goto out_err;
}
@@ -1918,13 +1963,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
if (err)
goto out_err;
- err = offload_pedit_fields(masks, vals, parse_attr);
+ err = offload_pedit_fields(masks, vals, parse_attr, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &masks[cmd];
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1941,19 +1988,26 @@ out_err:
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
@@ -1964,7 +2018,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
bool modify_ip_header;
@@ -2002,6 +2057,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of non TCP/UDP");
pr_info("can't offload re-write of ip proto %d\n", ip_proto);
return false;
}
@@ -2013,7 +2070,8 @@ out_ok:
static bool actions_match_supported(struct mlx5e_priv *priv,
struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
u32 actions;
@@ -2027,7 +2085,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(&parse_attr->spec, exts);
+ return modify_header_match_supported(&parse_attr->spec, exts,
+ extack);
return true;
}
@@ -2048,7 +2107,8 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
@@ -2072,7 +2132,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2083,7 +2143,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2099,6 +2160,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "device is not on same HW, can't offload");
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
return -EINVAL;
@@ -2110,8 +2173,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 mark = tcf_skbedit_mark(a);
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
- mark);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad flow mark - only 16 bit is supported");
return -EINVAL;
}
@@ -2124,7 +2187,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -2526,7 +2589,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2544,6 +2608,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
/* setting udp src port isn't supported */
if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "must set udp dst port and not set udp src port");
netdev_warn(priv->netdev,
"must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
@@ -2553,6 +2619,8 @@ vxlan_encap_offload_err:
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
@@ -2657,7 +2725,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -2683,7 +2752,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2694,7 +2763,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2707,6 +2777,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
out_dev = tcf_mirred_dev(a);
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
pr_err("can't support more than %d output ports, can't offload forwarding\n",
attr->out_count);
return -EOPNOTSUPP;
@@ -2730,6 +2802,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EINVAL;
@@ -2766,10 +2840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
return -EOPNOTSUPP;
}
@@ -2811,6 +2887,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
int mlx5e_configure_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct rhashtable *tc_ht = get_tc_ht(priv);
@@ -2822,6 +2899,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
if (flow) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flow cookie already exists, ignoring");
netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
return 0;
}
@@ -2850,15 +2929,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
goto err_free;
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow,
+ extack);
if (err < 0)
goto err_free;
- flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+ flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow,
+ extack);
} else {
- err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow,
+ extack);
if (err < 0)
goto err_free;
- flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+ flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow,
+ extack);
}
if (IS_ERR(flow->rule[0])) {
@@ -2946,14 +3029,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ u16 peer_vhca_id;
+ int bkt;
+
+ if (!same_hw_devs(priv, peer_priv))
+ return;
+
+ peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+ if (hpe->peer_vhca_id == peer_vhca_id)
+ hpe->hp->pair->peer_gone = true;
+ }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_priv *peer_priv;
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_priv *priv;
+
+ if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+ event != NETDEV_UNREGISTER ||
+ ndev->reg_state == NETREG_REGISTERED)
+ return NOTIFY_DONE;
+
+ tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+ fs = container_of(tc, struct mlx5e_flow_steering, tc);
+ priv = container_of(fs, struct mlx5e_priv, fs);
+ peer_priv = netdev_priv(ndev);
+ if (priv == peer_priv ||
+ !(priv->netdev->features & NETIF_F_HW_TC))
+ return NOTIFY_DONE;
+
+ mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+ return NOTIFY_DONE;
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ int err;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- return rhashtable_init(&tc->ht, &tc_ht_params);
+ err = rhashtable_init(&tc->ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+ if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ tc->netdevice_nb.notifier_call = NULL;
+ mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ }
+
+ return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3109,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ if (tc->netdevice_nb.notifier_call)
+ unregister_netdevice_notifier(&tc->netdevice_nb);
+
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..ea7dedc2d5ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 0b05bf2b91f6..dfc642de4e6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -269,12 +269,15 @@ struct mlx5_esw_flow_attr {
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 21e957083f65..a35a2310f871 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -810,29 +810,35 @@ out:
return flow_rule;
}
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
- esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch to offloads");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to legacy");
+ }
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
- esw_warn(esw->dev, "Inline mode is different between vports\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Inline mode is different between vports");
}
}
return err;
@@ -973,17 +979,20 @@ create_ft_err:
return err;
}
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to offloads");
+ }
}
/* enable back PF RoCE */
@@ -1092,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
return 0;
}
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1111,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
return 0;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- return esw_offloads_start(dev->priv.eswitch);
+ return esw_offloads_start(dev->priv.eswitch, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- return esw_offloads_stop(dev->priv.eswitch);
+ return esw_offloads_stop(dev->priv.eswitch, extack);
else
return -EINVAL;
}
@@ -1130,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1147,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
return 0;
/* fall through */
case MLX5_CAP_INLINE_MODE_L2:
- esw_warn(dev, "Inline mode can't be set\n");
+ NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
return -EOPNOTSUPP;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set inline mode when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
}
@@ -1165,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
for (vport = 1; vport < esw->enabled_vports; vport++) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
- esw_warn(dev, "Failed to set min inline on vport %d\n",
- vport);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
goto revert_inline_mode;
}
}
@@ -1232,7 +1244,8 @@ out:
return 0;
}
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1259,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return 0;
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
}
@@ -1268,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
esw->offloads.encap = encap;
err = esw_create_offloads_fast_fdb_table(esw);
if (err) {
- esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed re-creating fast FDB table");
esw->offloads.encap = !encap;
(void)esw_create_offloads_fast_fdb_table(esw);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 41ad24f0de2c..1ab6f7e3bec6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
if (ret)
return ret;
- force_state = MLX5_GET(teardown_hca_out, out, force_state);
+ force_state = MLX5_GET(teardown_hca_out, out, state);
if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
+#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+ unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ int state;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+ mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ MLX5_SET(teardown_hca_in, in, profile,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ state = MLX5_GET(teardown_hca_out, out, state);
+ if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+ mlx5_core_warn(dev, "teardown with fast mode failed\n");
+ return -EIO;
+ }
+
+ mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+ /* Loop until device state turns to disable */
+ end = jiffies + msecs_to_jiffies(delay_ms);
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
+
+ cond_resched();
+ } while (!time_after(jiffies, end));
+
+ if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+ mlx5_get_nic_state(dev), delay_ms);
+ return -EIO;
+ }
+
+ return 0;
+}
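
Reviewer note: the wait above is the standard jiffies-bounded poll: spin with cond_resched() until the state flips or the deadline passes, then sample once more so a flip that races the timeout still counts as success. The generic form of that shape, as a standalone sketch (jiffies helpers from <linux/jiffies.h>, cond_resched() from <linux/sched.h>):

	static int poll_until(bool (*done)(void *arg), void *arg,
			      unsigned long timeout_ms)
	{
		unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (done(arg))
				return 0;
			cond_resched();
		} while (!time_after(jiffies, end));

		return done(arg) ? 0 : -ETIMEDOUT;	/* closing re-check */
	}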
+
enum mlxsw_reg_mcc_instruction {
MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9f39aeca863f..43118de8ee99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -59,22 +59,25 @@ enum {
};
enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_INVALID = 3
-};
-
-enum {
MLX5_DROP_NEW_HEALTH_WORK,
MLX5_DROP_NEW_RECOVERY_WORK,
};
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+ u32 cur_cmdq_addr_l_sz;
+
+ cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+ iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+ state << MLX5_NIC_IFC_OFFSET,
+ &dev->iseg->cmdq_addr_l_sz);
+}
+
static void trigger_cmd_completions(struct mlx5_core_dev *dev)
{
unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_state(dev);
+ u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- nic_state = get_nic_state(dev);
+ nic_state = mlx5_get_nic_state(dev);
if (nic_state == MLX5_NIC_IFC_INVALID) {
dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b5e9f664fc66..28132c7dc05f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
- int ret;
+ bool fast_teardown = false, force_teardown = false;
+ int ret = 1;
+
+ fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+ force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+ mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+ mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
- if (!MLX5_CAP_GEN(dev, force_teardown)) {
- mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ if (!fast_teardown && !force_teardown)
return -EOPNOTSUPP;
- }
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
+ ret = mlx5_cmd_fast_teardown_hca(dev);
+ if (!ret)
+ goto succeed;
+
ret = mlx5_cmd_force_teardown_hca(dev);
- if (ret) {
- mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
- mlx5_start_health_poll(dev);
- return ret;
- }
+ if (!ret)
+ goto succeed;
+
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
+ return ret;
+succeed:
mlx5_enter_error_state(dev, true);
/* Some platforms require freeing the IRQs in the shutdown
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b4134fa0bba3..cc298527baf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index d2f76070ea7c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++) {
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
- mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ if (!hp->peer_gone)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
}
}
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQs */
+ if (hp->peer_gone)
+ return;
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d271fb3de3d..5890fdfd62c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
- u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
- switch (event_type) {
- case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ /* Command interface completion events are always received on
+ * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+ * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+ */
+ switch (q->num) {
+ case MLXSW_PCI_EQ_ASYNC_NUM:
mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
q->u.eq.ev_cmd_count++;
break;
- case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ case MLXSW_PCI_EQ_COMP_NUM:
cqn = mlxsw_pci_eqe_cqn_get(eqe);
set_bit(cqn, active_cqns);
cq_handle = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88c33a8474eb..2b14fd0dcc42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4855,6 +4855,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
upper_dev = info->upper_dev;
if (info->linking)
break;
+ if (is_vlan_dev(upper_dev))
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
if (netif_is_macvlan(upper_dev))
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
break;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 36c84625d54e..bcec0587cf61 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH
config MSCC_OCELOT_SWITCH_OCELOT
tristate "Ocelot switch driver on Ocelot"
depends on MSCC_OCELOT_SWITCH
+ depends on GENERIC_PHY
+ depends on OF_NET
help
This driver supports the Ocelot network switch device as present on
the Ocelot SoCs.
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..8f11fdba8d0e 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ enum phy_mode phy_mode;
int err;
/* Enable receiving frames on the port, and activate auto-learning of
@@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev)
ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
ANA_PORT_PORT_CFG, port->chip_port);
+ if (port->serdes) {
+ if (port->phy_mode == PHY_INTERFACE_MODE_SGMII)
+ phy_mode = PHY_MODE_SGMII;
+ else
+ phy_mode = PHY_MODE_QSGMII;
+
+ err = phy_set_mode(port->serdes, phy_mode);
+ if (err) {
+ netdev_err(dev, "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- PHY_INTERFACE_MODE_NA);
+ port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 616bec30dfa3..62c7c8eb00d9 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,12 +11,13 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
-#include "ocelot_hsio.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_sys.h"
@@ -333,79 +334,6 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
- HSIO_PLL5G_CFG1,
- HSIO_PLL5G_CFG2,
- HSIO_PLL5G_CFG3,
- HSIO_PLL5G_CFG4,
- HSIO_PLL5G_CFG5,
- HSIO_PLL5G_CFG6,
- HSIO_PLL5G_STATUS0,
- HSIO_PLL5G_STATUS1,
- HSIO_PLL5G_BIST_CFG0,
- HSIO_PLL5G_BIST_CFG1,
- HSIO_PLL5G_BIST_CFG2,
- HSIO_PLL5G_BIST_STAT0,
- HSIO_PLL5G_BIST_STAT1,
- HSIO_RCOMP_CFG0,
- HSIO_RCOMP_STATUS,
- HSIO_SYNC_ETH_CFG,
- HSIO_SYNC_ETH_PLL_CFG,
- HSIO_S1G_DES_CFG,
- HSIO_S1G_IB_CFG,
- HSIO_S1G_OB_CFG,
- HSIO_S1G_SER_CFG,
- HSIO_S1G_COMMON_CFG,
- HSIO_S1G_PLL_CFG,
- HSIO_S1G_PLL_STATUS,
- HSIO_S1G_DFT_CFG0,
- HSIO_S1G_DFT_CFG1,
- HSIO_S1G_DFT_CFG2,
- HSIO_S1G_TP_CFG,
- HSIO_S1G_RC_PLL_BIST_CFG,
- HSIO_S1G_MISC_CFG,
- HSIO_S1G_DFT_STATUS,
- HSIO_S1G_MISC_STATUS,
- HSIO_MCB_S1G_ADDR_CFG,
- HSIO_S6G_DIG_CFG,
- HSIO_S6G_DFT_CFG0,
- HSIO_S6G_DFT_CFG1,
- HSIO_S6G_DFT_CFG2,
- HSIO_S6G_TP_CFG0,
- HSIO_S6G_TP_CFG1,
- HSIO_S6G_RC_PLL_BIST_CFG,
- HSIO_S6G_MISC_CFG,
- HSIO_S6G_OB_ANEG_CFG,
- HSIO_S6G_DFT_STATUS,
- HSIO_S6G_ERR_CNT,
- HSIO_S6G_MISC_STATUS,
- HSIO_S6G_DES_CFG,
- HSIO_S6G_IB_CFG,
- HSIO_S6G_IB_CFG1,
- HSIO_S6G_IB_CFG2,
- HSIO_S6G_IB_CFG3,
- HSIO_S6G_IB_CFG4,
- HSIO_S6G_IB_CFG5,
- HSIO_S6G_OB_CFG,
- HSIO_S6G_OB_CFG1,
- HSIO_S6G_SER_CFG,
- HSIO_S6G_COMMON_CFG,
- HSIO_S6G_PLL_CFG,
- HSIO_S6G_ACJTAG_CFG,
- HSIO_S6G_GP_CFG,
- HSIO_S6G_IB_STATUS0,
- HSIO_S6G_IB_STATUS1,
- HSIO_S6G_ACJTAG_STATUS,
- HSIO_S6G_PLL_STATUS,
- HSIO_S6G_REVID,
- HSIO_MCB_S6G_ADDR_CFG,
- HSIO_HW_CFG,
- HSIO_HW_QSGMII_CFG,
- HSIO_HW_QSGMII_STAT,
- HSIO_CLK_CFG,
- HSIO_TEMP_SENSOR_CTRL,
- HSIO_TEMP_SENSOR_CFG,
- HSIO_TEMP_SENSOR_STAT,
};
enum ocelot_regfield {
@@ -527,6 +455,9 @@ struct ocelot_port {
u8 vlan_aware;
u64 *stats;
+
+ phy_interface_t phy_mode;
+ struct phy *serdes;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 3cdf63e35b53..953b32677383 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -6,9 +6,11 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
#include "ocelot.h"
@@ -168,6 +170,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
struct ocelot *ocelot;
+ struct regmap *hsio;
u32 val;
struct {
@@ -179,7 +182,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
- { HSIO, "hsio" },
};
if (!np && !pdev->dev.platform_data)
@@ -202,6 +204,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[res[i].id] = target;
}
+ hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+ if (IS_ERR(hsio)) {
+ dev_err(&pdev->dev, "missing hsio syscon\n");
+ return PTR_ERR(hsio);
+ }
+
+ ocelot->targets[HSIO] = hsio;
+
err = ocelot_chip_init(ocelot);
if (err)
return err;
@@ -244,18 +254,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
- ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG);
-
for_each_available_child_of_node(ports, portnp) {
struct device_node *phy_node;
struct phy_device *phy;
struct resource *res;
+ struct phy *serdes;
+ enum phy_mode phy_mode;
void __iomem *regs;
char res_name[8];
u32 port;
@@ -280,10 +284,45 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
+ if (err)
+ return err;
+
+ err = of_get_phy_mode(portnp);
+ if (err < 0)
+ ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
+ else
+ ocelot->ports[port]->phy_mode = err;
+
+ switch (ocelot->ports[port]->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ phy_mode = PHY_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ phy_mode = PHY_MODE_QSGMII;
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
goto err_probe_ports;
}
+
+ ocelot->ports[port]->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h
deleted file mode 100644
index d93ddec3931b..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_hsio.h
+++ /dev/null
@@ -1,785 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_HSIO_H_
-#define _MSCC_OCELOT_HSIO_H_
-
-#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
-#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
-#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
-#define HSIO_PLL5G_CFG0_DIV4 BIT(28)
-#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27)
-#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23))
-#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23)
-#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18)
-#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16)
-#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15)
-#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14)
-#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13)
-#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6)
-#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0)
-
-#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18)
-#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17)
-#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16)
-#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15)
-#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6)
-#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5)
-#define HSIO_PLL5G_CFG1_PWD_TX BIT(4)
-#define HSIO_PLL5G_CFG1_PWD_RX BIT(3)
-#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2)
-#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1)
-#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0)
-
-#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30)
-#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29)
-#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28)
-#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27)
-#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26)
-#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24)
-#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15)
-#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14)
-#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13)
-#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12)
-#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10)
-#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5)
-#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4)
-#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3)
-#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2)
-#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1)
-#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0)
-
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22))
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22)
-#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19)
-#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18)
-#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17)
-#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16)
-#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15)
-#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14)
-#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13)
-#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12)
-#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11)
-#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10)
-#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9)
-#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8)
-#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0))
-#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0)
-
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16)
-#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20))
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20)
-#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16)
-#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8)
-#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7)
-#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6)
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0))
-#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0)
-
-#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12)
-#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10)
-#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1))
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1)
-#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0)
-
-#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21))
-#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21)
-#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16))
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16)
-#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4))
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4)
-#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4)
-#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1)
-#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0)
-
-#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0)
-
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1)
-#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0)
-
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0))
-#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0)
-
-#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13)
-#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12)
-#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10))
-#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10)
-#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10)
-#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8))
-#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8)
-#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4)
-#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0)
-
-#define HSIO_RCOMP_STATUS_BUSY BIT(12)
-#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7)
-#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0))
-#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0)
-
-#define HSIO_SYNC_ETH_CFG_RSZ 0x4
-
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1)
-#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0)
-
-#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0)
-
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
-#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11)
-#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8))
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8)
-#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5)
-#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1)
-#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0)
-
-#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24))
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24)
-#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19)
-#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14)
-#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13)
-#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12)
-#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11)
-#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10)
-#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6)
-#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4)
-#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17))
-#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17)
-#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13)
-#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10)
-#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9)
-#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4)
-#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9)
-#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8)
-#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7)
-#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6)
-#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
-#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3)
-#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2)
-#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1)
-#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0)
-
-#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31)
-#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21)
-#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18)
-#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17)
-#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13))
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13)
-#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13)
-#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12)
-#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11)
-#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10)
-#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9)
-#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8)
-#define HSIO_S1G_COMMON_CFG_HRATE BIT(7)
-#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0)
-
-#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22)
-#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6)
-#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5)
-#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3)
-
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12)
-#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11)
-#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10)
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
-#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
-
-#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31)
-#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
-#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
-#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
-#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2)
-#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0)
-
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
-#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3)
-#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
-#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
-#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
-
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
-#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
-#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
-#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
-#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
-#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
-#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0)
-
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
-#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
-#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3)
-#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2)
-#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
-#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0)
-
-#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
-
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30)
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0))
-#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0)
-
-#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16)
-#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7)
-#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3))
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3)
-#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0))
-#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0)
-
-#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31)
-#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
-#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
-#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
-
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
-#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
-#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
-#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
-#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
-#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
-#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
-#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
-
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
-#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
-#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
-#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
-#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
-
-#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
-
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
-
-#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
-#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
-#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
-#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
-#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
-#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
-#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
-#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
-#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
-#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
-
-#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
-#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
-#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
-#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
-#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
-#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
-#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
-#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
-#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
-#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
-#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
-#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
-#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
-#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
-
-#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
-#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
-#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
-#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
-#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
-#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
-#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
-#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
-#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
-#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
-#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
-#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
-#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
-
-#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
-#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
-#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
-#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
-#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
-#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
-#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
-#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
-#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
-#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
-#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
-#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
-#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
-#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
-#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
-#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
-#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
-#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
-#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
-#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
-
-#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
-#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
-#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
-#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
-#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
-#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
-#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
-#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
-#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
-#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
-#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
-#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
-#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
-#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
-
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
-#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
-
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
-#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
-#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
-
-#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
-#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
-#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
-#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
-
-#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
-#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
-#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
-#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
-#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
-
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
-
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
-#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
-
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
-#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
-
-#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
-#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
-#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
-#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
-#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
-#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
-#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
-#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
-#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
-#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
-#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
-#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
-
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
-
-#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
-#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
-#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
-#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
-#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
-#define HSIO_HW_CFG_PCIE_ENA BIT(1)
-#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
-
-#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
-#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
-#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
-#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
-
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
-
-#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
-#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
-
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
-#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
-
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
-
-#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
-#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
-
-#endif
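Every multi-bit field in the definitions removed above follows the same three-macro pattern: a setter that shifts the value into place and masks it, a bare `_M` mask, and an `_X` extractor that masks and shifts back down; single-bit fields just use BIT(). A minimal stand-alone sketch of the same trio, with GENMASK()/BIT() reimplemented for user space and a made-up field name:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* a field occupying bits 11..6, mirroring HSIO_PLL5G_CFG0_CPU_CLK_DIV */
#define EXAMPLE_FIELD(x)   (((x) << 6) & GENMASK(11, 6)) /* encode */
#define EXAMPLE_FIELD_M    GENMASK(11, 6)                /* field mask */
#define EXAMPLE_FIELD_X(x) (((x) & GENMASK(11, 6)) >> 6) /* extract */
#define EXAMPLE_FLAG       BIT(15)                       /* one-bit field */

int main(void)
{
	uint32_t reg = 0;

	/* read-modify-write of just this field, then set a flag bit */
	reg = (reg & ~EXAMPLE_FIELD_M) | EXAMPLE_FIELD(2u);
	reg |= EXAMPLE_FLAG;
	printf("reg=0x%08x field=%u\n", (unsigned)reg,
	       (unsigned)EXAMPLE_FIELD_X(reg));
	return 0;
}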
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e334b406c40c..9271af18b93b 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include "ocelot.h"
+#include <soc/mscc/ocelot_hsio.h>
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
@@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = {
REG(QS_INH_DBG, 0x000048),
};
-static const u32 ocelot_hsio_regmap[] = {
- REG(HSIO_PLL5G_CFG0, 0x000000),
- REG(HSIO_PLL5G_CFG1, 0x000004),
- REG(HSIO_PLL5G_CFG2, 0x000008),
- REG(HSIO_PLL5G_CFG3, 0x00000c),
- REG(HSIO_PLL5G_CFG4, 0x000010),
- REG(HSIO_PLL5G_CFG5, 0x000014),
- REG(HSIO_PLL5G_CFG6, 0x000018),
- REG(HSIO_PLL5G_STATUS0, 0x00001c),
- REG(HSIO_PLL5G_STATUS1, 0x000020),
- REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
- REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
- REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
- REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
- REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
- REG(HSIO_RCOMP_CFG0, 0x000038),
- REG(HSIO_RCOMP_STATUS, 0x00003c),
- REG(HSIO_SYNC_ETH_CFG, 0x000040),
- REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
- REG(HSIO_S1G_DES_CFG, 0x00004c),
- REG(HSIO_S1G_IB_CFG, 0x000050),
- REG(HSIO_S1G_OB_CFG, 0x000054),
- REG(HSIO_S1G_SER_CFG, 0x000058),
- REG(HSIO_S1G_COMMON_CFG, 0x00005c),
- REG(HSIO_S1G_PLL_CFG, 0x000060),
- REG(HSIO_S1G_PLL_STATUS, 0x000064),
- REG(HSIO_S1G_DFT_CFG0, 0x000068),
- REG(HSIO_S1G_DFT_CFG1, 0x00006c),
- REG(HSIO_S1G_DFT_CFG2, 0x000070),
- REG(HSIO_S1G_TP_CFG, 0x000074),
- REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
- REG(HSIO_S1G_MISC_CFG, 0x00007c),
- REG(HSIO_S1G_DFT_STATUS, 0x000080),
- REG(HSIO_S1G_MISC_STATUS, 0x000084),
- REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
- REG(HSIO_S6G_DIG_CFG, 0x00008c),
- REG(HSIO_S6G_DFT_CFG0, 0x000090),
- REG(HSIO_S6G_DFT_CFG1, 0x000094),
- REG(HSIO_S6G_DFT_CFG2, 0x000098),
- REG(HSIO_S6G_TP_CFG0, 0x00009c),
- REG(HSIO_S6G_TP_CFG1, 0x0000a0),
- REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
- REG(HSIO_S6G_MISC_CFG, 0x0000a8),
- REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
- REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
- REG(HSIO_S6G_ERR_CNT, 0x0000b4),
- REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
- REG(HSIO_S6G_DES_CFG, 0x0000bc),
- REG(HSIO_S6G_IB_CFG, 0x0000c0),
- REG(HSIO_S6G_IB_CFG1, 0x0000c4),
- REG(HSIO_S6G_IB_CFG2, 0x0000c8),
- REG(HSIO_S6G_IB_CFG3, 0x0000cc),
- REG(HSIO_S6G_IB_CFG4, 0x0000d0),
- REG(HSIO_S6G_IB_CFG5, 0x0000d4),
- REG(HSIO_S6G_OB_CFG, 0x0000d8),
- REG(HSIO_S6G_OB_CFG1, 0x0000dc),
- REG(HSIO_S6G_SER_CFG, 0x0000e0),
- REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
- REG(HSIO_S6G_PLL_CFG, 0x0000e8),
- REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
- REG(HSIO_S6G_GP_CFG, 0x0000f0),
- REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
- REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
- REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
- REG(HSIO_S6G_PLL_STATUS, 0x000100),
- REG(HSIO_S6G_REVID, 0x000104),
- REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
- REG(HSIO_HW_CFG, 0x00010c),
- REG(HSIO_HW_QSGMII_CFG, 0x000110),
- REG(HSIO_HW_QSGMII_STAT, 0x000114),
- REG(HSIO_CLK_CFG, 0x000118),
- REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
- REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
- REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
-};
-
static const u32 ocelot_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
@@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = {
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
- [HSIO] = ocelot_hsio_regmap,
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
@@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
/* Configure PLL5. This will need a proper CCF driver
* The values are coming from the VTSS API for Ocelot
*/
- ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
- HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
- ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+ HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
HSIO_PLL5G_CFG0_ENA_BIAS |
HSIO_PLL5G_CFG0_ENA_VCO_BUF |
@@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG0_SELBGV820(4) |
HSIO_PLL5G_CFG0_DIV4 |
HSIO_PLL5G_CFG0_ENA_CLKTREE |
- HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
- ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG0_ENA_LANE);
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
HSIO_PLL5G_CFG2_ENA_AMPCTRL |
HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
- HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
int ocelot_chip_init(struct ocelot *ocelot)
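With the conversion above, HSIO registers are reached through the ocelot->targets[HSIO] regmap rather than ocelot_write(), using the offsets and field macros now shared via soc/mscc/ocelot_hsio.h. A hedged sketch of a masked update of one PLL5G field through the same interface; the helper name is hypothetical, and regmap_update_bits() does the read-modify-write:

#include <linux/regmap.h>
#include <soc/mscc/ocelot_hsio.h>

/* hypothetical helper: update only the CPU clock divider field */
static int example_set_cpu_clk_div(struct regmap *hsio, unsigned int div)
{
	return regmap_update_bits(hsio, HSIO_PLL5G_CFG0,
				  HSIO_PLL5G_CFG0_CPU_CLK_DIV_M,
				  HSIO_PLL5G_CFG0_CPU_CLK_DIV(div));
}

A caller in this driver would pass ocelot->targets[HSIO] as the regmap, as the rewritten ocelot_pll5_init() does for its full-register writes.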
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index db463e20a876..4213fe42ac4d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -177,7 +177,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return nfp_app_eswitch_mode_get(pf->app, mode);
}
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
int ret;
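The new extack argument lets eswitch_mode_set() tell userspace why a request was rejected instead of returning a bare errno. A minimal sketch of the idiom, assuming a driver that only supports legacy mode:

#include <linux/errno.h>
#include <linux/netlink.h>
#include <net/devlink.h>

static int example_eswitch_mode_set(struct devlink *devlink, u16 mode,
				    struct netlink_ext_ack *extack)
{
	if (mode != DEVLINK_ESWITCH_MODE_LEGACY) {
		/* the message travels back in the netlink ACK */
		NL_SET_ERR_MSG(extack, "only legacy eswitch mode is supported");
		return -EOPNOTSUPP;
	}
	return 0;
}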
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d05e37fcc1b2..24c8f5bb1eb4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
return true;
}
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
struct nfp_net *nn = r_vec->nfp_net;
struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
continue;
+
+ return budget;
}
static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
__nfp_ctrl_tx_queued(r_vec);
spin_unlock(&r_vec->lock);
- nfp_ctrl_rx(r_vec);
-
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
}
/* Setup and Configuration
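The change above caps control-message processing at 512 frames per tasklet run: on budget exhaustion the tasklet reschedules itself instead of re-arming the interrupt, so one busy control ring cannot monopolize a CPU. A self-contained sketch of the same drain-with-budget shape; fetch_one() stands in for nfp_ctrl_rx_one(), and the return convention is simplified so zero always means "poll again":

#include <stdbool.h>
#include <stdio.h>

/* stand-in for nfp_ctrl_rx_one(): true while the ring has frames */
static bool fetch_one(int *remaining)
{
	if (*remaining <= 0)
		return false;
	(*remaining)--;
	return true;
}

/* drain at most `budget` frames; nonzero return means the ring was
 * emptied with budget to spare, zero means another pass is needed */
static unsigned int poll_ring(int *ring, unsigned int budget)
{
	while (budget && fetch_one(ring))
		budget--;
	return budget;
}

int main(void)
{
	int ring = 700;

	if (poll_ring(&ring, 512))
		puts("drained: re-arm the interrupt");
	else
		puts("budget spent: schedule another pass");
	return 0;
}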
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..59c70be22a84 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_tx_timeout = netxen_tx_timeout,
.ndo_fix_features = netxen_fix_features,
.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
};
static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
- int ring;
- struct nx_host_sds_ring *sds_ring;
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
- disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netxen_intr(adapter->irq, sds_ring);
- }
- enable_irq(adapter->irq);
-}
-#endif
-
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0fbeafeef7a0..7ceb2b97538d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2679,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
link->speed.forced_speed = 10000;
break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+ link->speed.forced_speed = 20000;
+ break;
case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
link->speed.forced_speed = 25000;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index d4d08383c753..bf431ab86864 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12102,6 +12102,7 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u64 reserved;
u32 data_ptr;
u32 data_size;
};
@@ -13154,6 +13155,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -13164,6 +13166,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
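Note the two tables use different encodings: the DRV_SPEED_CAPABILITY_MASK_* values are one-hot bits, so 20G takes the previously unused 0x4, while DRV_LINK_SPEED_* is a plain enumeration, so 20G slots in as 0x3. A small stand-alone sketch of walking such a capability mask; the constants are local stand-ins, not the NVM values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_1G  0x1u	/* one-hot capability bits, local stand-ins */
#define CAP_10G 0x2u
#define CAP_20G 0x4u
#define CAP_25G 0x8u

static const struct {
	uint32_t cap;
	unsigned int mbps;
} cap_tbl[] = {
	{ CAP_1G, 1000 }, { CAP_10G, 10000 },
	{ CAP_20G, 20000 }, { CAP_25G, 25000 },
};

int main(void)
{
	uint32_t advertised = CAP_10G | CAP_20G;
	size_t i;

	/* translate each advertised capability bit, as qed_fill_link()
	 * does for the QED_LM_* link-mode bits */
	for (i = 0; i < sizeof(cap_tbl) / sizeof(cap_tbl[0]); i++)
		if (advertised & cap_tbl[i].cap)
			printf("advertising %u Mb/s\n", cap_tbl[i].mbps);
	return 0;
}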
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index f99797a149a4..beb8e5d6401a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1709,7 +1709,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
- cm_info->ip_version = TCP_IPV4;
+ cm_info->ip_version = QED_TCP_IPV4;
ip_hlen = (iph->ihl) * sizeof(u32);
*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1729,7 +1729,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->remote_ip[i] =
ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
}
- cm_info->ip_version = TCP_IPV6;
+ cm_info->ip_version = QED_TCP_IPV6;
ip_hlen = sizeof(*ip6h);
*payload_len = ntohs(ip6h->payload_len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index da13117a604a..aa633381aa47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
tx_pkt.vlan = p_buffer->vlan;
tx_pkt.bd_flags = bd_flags;
tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
- tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+ switch (p_ll2_conn->tx_dest) {
+ case CORE_TX_DEST_NW:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ break;
+ case CORE_TX_DEST_LB:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ break;
+ case CORE_TX_DEST_DROP:
+ default:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ break;
+ }
tx_pkt.first_frag = first_frag;
tx_pkt.first_frag_len = p_buffer->packet_length;
tx_pkt.cookie = p_buffer;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2094d86a7a08..75d217aaf8ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1337,6 +1337,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
@@ -1503,6 +1506,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT;
+ if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
if (params.speed.advertised_speeds &
@@ -1523,6 +1529,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT;
+ if (link_caps.speed_capabilities &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
if (link_caps.speed_capabilities &
@@ -1559,6 +1568,8 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
+ if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
num_cons, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
+ "Failed to allocate toggle bits, rc = %d\n", rc);
goto free_cq_map;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
- enum roce_flavor flavor;
-
switch (roce_mode) {
case ROCE_V1:
- flavor = PLAIN_ROCE;
- break;
+ return PLAIN_ROCE;
case ROCE_V2_IPV4:
- flavor = RROCE_IPV4;
- break;
+ return RROCE_IPV4;
case ROCE_V2_IPV6:
- flavor = ROCE_V2_IPV6;
- break;
+ return RROCE_IPV6;
default:
- flavor = MAX_ROCE_MODE;
- break;
+ return MAX_ROCE_FLAVOR;
}
- return flavor;
}
static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
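Beyond dropping the local variable, the rewrite above corrects two wrong constants: the old ROCE_V2_IPV6 case handed back ROCE_V2_IPV6, a roce_mode value, where a roce_flavor was expected, and the default used MAX_ROCE_MODE instead of MAX_ROCE_FLAVOR. A compilable sketch of the corrected mapping; the enum values here are stand-ins, not the firmware HSI definitions:

enum roce_mode   { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6, MAX_ROCE_MODE };
enum roce_flavor { PLAIN_ROCE, RROCE_IPV4, RROCE_IPV6, MAX_ROCE_FLAVOR };

static enum roce_flavor mode_to_flavor(enum roce_mode mode)
{
	switch (mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;	/* old code used ROCE_V2_IPV6 here */
	default:
		return MAX_ROCE_FLAVOR;
	}
}

Returning directly from each case also removes the temporary that made the cross-enum assignment easy to miss in review.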
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
struct qed_tunnel_info *p_src)
{
- enum tunnel_clss type;
+ int type;
p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..be118d057b92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
if (!p_iov->b_pre_fp_hsi &&
- ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask, u8 *p_cls)
+ enum qed_tunn_mode mask, u8 *p_cls)
{
if (p_src->b_update_mode) {
p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask,
+ enum qed_tunn_mode mask,
u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
u8 *p_update_port, u16 *p_udp_port)
{
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 19652cd27ca7..7ff50b4488f6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -420,6 +420,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
{QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
{QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
{QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
{QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
@@ -535,6 +536,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
}
params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
break;
+ case SPEED_20000:
+ if (!(current_link.supported_caps &
+ QED_LM_20000baseKR2_Full_BIT)) {
+ DP_INFO(edev, "20G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
+ break;
case SPEED_25000:
if (!(current_link.supported_caps &
QED_LM_25000baseKR_Full_BIT)) {
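Each SPEED_* case above follows the same validate-then-advertise shape: refuse the forced speed unless its link mode is in supported_caps, then advertise exactly that one bit. A reduced sketch of the pattern under stand-in types:

#include <errno.h>
#include <stdint.h>

#define LM_20000baseKR2_Full (1u << 4)	/* stand-in link-mode bit */

/* reject the forced speed unless the port supports it, then advertise
 * only that link mode */
static int set_forced_speed(uint32_t supported_caps, uint32_t *adv_speeds,
			    uint32_t wanted_bit)
{
	if (!(supported_caps & wanted_bit))
		return -EINVAL;
	*adv_speeds = wanted_bit;
	return 0;
}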
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..a79d84f99102 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
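
Note: the qlcnic hunks above are one plumbing change -- the TX ring
chosen in qlcnic_xmit_frame() now travels down through
qlcnic_send_filter() and qlcnic_change_filter() to change_l2_filter(),
instead of the leaf hardcoding adapter->tx_ring (ring 0). With multiple
TX queues this keeps the L2-filter request descriptor on the same ring
whose producer index the transmit path is advancing. Standalone sketch
of the pattern (all names illustrative):

    #include <stdio.h>

    struct tx_ring { int id; };
    struct adapter { struct tx_ring rings[4]; };

    /* after the patch: the caller's ring is an explicit parameter */
    static void change_l2_filter(struct adapter *a, const char *mac,
                                 struct tx_ring *ring)
    {
        printf("filter for %s queued on ring %d\n", mac, ring->id);
    }

    static void xmit_frame(struct adapter *a, int queue)
    {
        struct tx_ring *ring = &a->rings[queue]; /* per-packet ring */
        change_l2_filter(a, "aa:bb:cc:dd:ee:ff", ring);
    }

    int main(void)
    {
        struct adapter a = { .rings = { {0}, {1}, {2}, {3} } };
        xmit_frame(&a, 2); /* filter lands on ring 2, not ring 0 */
        return 0;
    }
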
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..dbd48012224f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = qlcnic_poll_controller,
-#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return;
-
- recv_ctx = adapter->recv_ctx;
-
- for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_sds_intr(adapter, sds_ring);
- napi_schedule(&sds_ring->napi);
- }
-
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- /* Only Multi-Tx queue capable devices need to
- * schedule NAPI for TX rings
- */
- if ((qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
- (qlcnic_82xx_check(adapter) &&
- !qlcnic_check_multi_tx(adapter)))
- return;
-
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_intr(adapter, tx_ring);
- napi_schedule(&tx_ring->napi);
- }
- }
-}
-#endif
-
static void
qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
{
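
Note: this removal appears to follow the tree-wide cleanup in which the
core netpoll code polls NAPI instances itself, so a .ndo_poll_controller
that merely masks interrupts and calls napi_schedule() is redundant and
can monopolize rings under heavy netconsole traffic. Shape of the
now-unneeded hook (generic, not driver-specific; foo_* names invented):

    static void foo_poll_controller(struct net_device *dev)
    {
            struct foo_priv *p = netdev_priv(dev);
            int i;

            for (i = 0; i < p->num_rings; i++) {
                    foo_disable_ring_irq(p, i);      /* mask ring IRQ */
                    napi_schedule(&p->ring[i].napi); /* kick NAPI */
            }
    }
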
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
struct sk_buff *skbn;
if (skb->dev->type == ARPHRD_ETHER) {
- if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
}
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return RX_HANDLER_PASS;
+
dev = skb->dev;
port = rmnet_get_port(dev);
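
Note: both pskb_expand_head() calls in rmnet move to GFP_ATOMIC because
the rx handler and the MAP egress path can run in softirq context,
where a GFP_KERNEL allocation may sleep and is therefore invalid. The
added PACKET_LOOPBACK check likewise short-circuits looped-back frames
before de-muxing. The allocation rule, in isolation (kernel idiom, not
a complete function):

    /* atomic context: any skb head reallocation must not sleep */
    if (skb_headroom(skb) < required_headroom &&
        pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
            return -ENOMEM;
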
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed8ffd498c88..7d3f671e1bb3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4059,13 +4059,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
genphy_soft_reset(dev->phydev);
- /* It was reported that chip version 33 ends up with 10MBit/Half on a
+ /* It was reported that several chips end up with 10MBit/Half on a
* 1GBit link after resuming from S3. For whatever reason the PHY on
- * this chip doesn't properly start a renegotiation when soft-reset.
+ * these chips doesn't properly start a renegotiation when soft-reset.
* Explicitly requesting a renegotiation fixes this.
*/
- if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
- dev->phydev->autoneg == AUTONEG_ENABLE)
+ if (dev->phydev->autoneg == AUTONEG_ENABLE)
phy_restart_aneg(dev->phydev);
}
@@ -4523,9 +4522,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
- /* Set DMA burst size and Interframe Gap Time */
- RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ u32 val = TX_DMA_BURST << TxDMAShift |
+ InterFrameGap << TxInterFrameGapShift;
+
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39)
+ val |= TXCFG_AUTO_FIFO;
+
+ RTL_W32(tp, TxConfig, val);
}
static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5020,7 +5024,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */
@@ -5054,7 +5057,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5099,8 +5101,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5198,8 +5198,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5282,8 +5280,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5605,7 +5601,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6856,8 +6851,10 @@ static int rtl8169_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
+ clk_disable_unprepare(tp->clk);
return 0;
}
@@ -6885,6 +6882,9 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ clk_prepare_enable(tp->clk);
if (netif_running(dev))
__rtl8169_resume(dev);
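
Note: the r8169 suspend/resume hunks introduce balanced clock handling:
clk_disable_unprepare() gates the clock after the NIC is quiesced, and
clk_prepare_enable() ungates it before any register access on resume.
The two calls must stay paired; tp->clk comes from the hunk, the rest
of the shape below is illustrative (foo_* names invented):

    static int foo_suspend(struct device *dev)
    {
            foo_net_suspend(dev);             /* stop traffic first */
            clk_disable_unprepare(tp->clk);   /* then gate the clock */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            clk_prepare_enable(tp->clk);      /* ungate before MMIO */
            return foo_net_resume(dev);
    }
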
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..3d0dd39c289e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..03e2455c502e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
- struct ef4_nic *efx = netdev_priv(net_dev);
- struct ef4_channel *channel;
-
- ef4_for_each_channel(channel, efx)
- ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
.ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ef4_netpoll,
-#endif
.ndo_setup_tc = ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = ef4_filter_rfs,